Diffstat (limited to 'arch'): 196 files changed, 1769 insertions, 1130 deletions
diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h index 5ebab58..f05bdb4 100644 --- a/arch/alpha/include/asm/io.h +++ b/arch/alpha/include/asm/io.h @@ -500,10 +500,14 @@ extern inline void writeq(u64 b, volatile void __iomem *addr) #define outb_p outb #define outw_p outw #define outl_p outl -#define readb_relaxed(addr) __raw_readb(addr) -#define readw_relaxed(addr) __raw_readw(addr) -#define readl_relaxed(addr) __raw_readl(addr) -#define readq_relaxed(addr) __raw_readq(addr) +#define readb_relaxed(addr) __raw_readb(addr) +#define readw_relaxed(addr) __raw_readw(addr) +#define readl_relaxed(addr) __raw_readl(addr) +#define readq_relaxed(addr) __raw_readq(addr) +#define writeb_relaxed(b, addr) __raw_writeb(b, addr) +#define writew_relaxed(b, addr) __raw_writew(b, addr) +#define writel_relaxed(b, addr) __raw_writel(b, addr) +#define writeq_relaxed(b, addr) __raw_writeq(b, addr) #define mmiowb() diff --git a/arch/alpha/include/asm/unistd.h b/arch/alpha/include/asm/unistd.h index f2c9440..c509d30 100644 --- a/arch/alpha/include/asm/unistd.h +++ b/arch/alpha/include/asm/unistd.h @@ -3,7 +3,7 @@ #include <uapi/asm/unistd.h> -#define NR_SYSCALLS 508 +#define NR_SYSCALLS 511 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_STAT64 diff --git a/arch/alpha/include/uapi/asm/unistd.h b/arch/alpha/include/uapi/asm/unistd.h index 53ae7bb..d214a035 100644 --- a/arch/alpha/include/uapi/asm/unistd.h +++ b/arch/alpha/include/uapi/asm/unistd.h @@ -469,5 +469,8 @@ #define __NR_process_vm_writev 505 #define __NR_kcmp 506 #define __NR_finit_module 507 +#define __NR_sched_setattr 508 +#define __NR_sched_getattr 509 +#define __NR_renameat2 510 #endif /* _UAPI_ALPHA_UNISTD_H */ diff --git a/arch/alpha/kernel/systbls.S b/arch/alpha/kernel/systbls.S index dca9b3f..24789713 100644 --- a/arch/alpha/kernel/systbls.S +++ b/arch/alpha/kernel/systbls.S @@ -526,6 +526,9 @@ sys_call_table: .quad sys_process_vm_writev /* 505 */ .quad sys_kcmp .quad sys_finit_module + .quad sys_sched_setattr + .quad sys_sched_getattr + .quad sys_renameat2 /* 510 */ .size sys_call_table, . - sys_call_table .type sys_call_table, @object diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 4670afc..9e11427 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -427,7 +427,7 @@ struct ic_inv_args { static void __ic_line_inv_vaddr_helper(void *info) { - struct ic_inv *ic_inv_args = (struct ic_inv_args *) info; + struct ic_inv_args *ic_inv = info; __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz); } @@ -581,6 +581,7 @@ void flush_icache_range(unsigned long kstart, unsigned long kend) tot_sz -= sz; } } +EXPORT_SYMBOL(flush_icache_range); /* * General purpose helper to make I and D cache lines consistent. diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index c49a775..32cbbd5 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1983,8 +1983,6 @@ config XIP_PHYS_ADDR config KEXEC bool "Kexec system call (EXPERIMENTAL)" depends on (!SMP || PM_SLEEP_SMP) - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot diff --git a/arch/arm/boot/dts/am4372.dtsi b/arch/arm/boot/dts/am4372.dtsi index 9b3d2ba..8689949 100644 --- a/arch/arm/boot/dts/am4372.dtsi +++ b/arch/arm/boot/dts/am4372.dtsi @@ -804,7 +804,7 @@ usb1: usb@48390000 { compatible = "synopsys,dwc3"; - reg = <0x48390000 0x17000>; + reg = <0x48390000 0x10000>; interrupts = <GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>; phys = <&usb2_phy1>; phy-names = "usb2-phy"; @@ -826,7 +826,7 @@ usb2: usb@483d0000 { compatible = "synopsys,dwc3"; - reg = <0x483d0000 0x17000>; + reg = <0x483d0000 0x10000>; interrupts = <GIC_SPI 174 IRQ_TYPE_LEVEL_HIGH>; phys = <&usb2_phy2>; phy-names = "usb2-phy"; diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts index 646a6ea..e7ac47f 100644 --- a/arch/arm/boot/dts/am437x-gp-evm.dts +++ b/arch/arm/boot/dts/am437x-gp-evm.dts @@ -260,7 +260,7 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&i2c0_pins>; - clock-frequency = <400000>; + clock-frequency = <100000>; tps65218: tps65218@24 { reg = <0x24>; @@ -424,7 +424,7 @@ ranges = <0 0 0 0x01000000>; /* minimum GPMC partition = 16MB */ nand@0,0 { reg = <0 0 4>; /* device IO registers */ - ti,nand-ecc-opt = "bch8"; + ti,nand-ecc-opt = "bch16"; ti,elm-id = <&elm>; nand-bus-width = <8>; gpmc,device-width = <1>; @@ -443,8 +443,6 @@ gpmc,rd-cycle-ns = <40>; gpmc,wr-cycle-ns = <40>; gpmc,wait-pin = <0>; - gpmc,wait-on-read; - gpmc,wait-on-write; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,clk-activation-ns = <0>; diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts index ed7dd239..ac3e485 100644 --- a/arch/arm/boot/dts/am43x-epos-evm.dts +++ b/arch/arm/boot/dts/am43x-epos-evm.dts @@ -435,13 +435,13 @@ }; &gpmc { - status = "okay"; + status = "okay"; /* Disable QSPI when enabling GPMC (NAND) */ pinctrl-names = "default"; pinctrl-0 = <&nand_flash_x8>; ranges = <0 0 0x08000000 0x10000000>; /* CS0: NAND */ nand@0,0 { reg = <0 0 0>; /* CS0, offset 0 */ - ti,nand-ecc-opt = "bch8"; + ti,nand-ecc-opt = "bch16"; ti,elm-id = <&elm>; nand-bus-width = <8>; gpmc,device-width = <1>; @@ -459,8 +459,7 @@ gpmc,access-ns = <30>; /* tCEA + 4*/ gpmc,rd-cycle-ns = <40>; gpmc,wr-cycle-ns = <40>; - gpmc,wait-on-read = "true"; - gpmc,wait-on-write = "true"; + gpmc,wait-pin = <0>; gpmc,bus-turnaround-ns = <0>; gpmc,cycle2cycle-delay-ns = <0>; gpmc,clk-activation-ns = <0>; @@ -557,7 +556,7 @@ }; &qspi { - status = "okay"; + status = "disabled"; /* Disable GPMC (NAND) when enabling QSPI */ pinctrl-names = "default"; pinctrl-0 = <&qspi1_default>; diff --git a/arch/arm/boot/dts/at91rm9200.dtsi b/arch/arm/boot/dts/at91rm9200.dtsi index 65ccf56..6c97d4a 100644 --- a/arch/arm/boot/dts/at91rm9200.dtsi +++ b/arch/arm/boot/dts/at91rm9200.dtsi @@ -149,7 +149,7 @@ usb: usbck { compatible = "atmel,at91rm9200-clk-usb"; #clock-cells = <0>; - atmel,clk-divisors = <1 2>; + atmel,clk-divisors = <1 2 0 0>; clocks = <&pllb>; }; diff --git a/arch/arm/boot/dts/at91sam9g20.dtsi b/arch/arm/boot/dts/at91sam9g20.dtsi index 31f7652..4e0abbd 100644 --- a/arch/arm/boot/dts/at91sam9g20.dtsi +++ b/arch/arm/boot/dts/at91sam9g20.dtsi @@ -40,6 +40,7 @@ }; pllb: pllbck { + compatible = "atmel,at91sam9g20-clk-pllb"; atmel,clk-input-range = <2000000 32000000>; atmel,pll-clk-output-ranges = <30000000 100000000 0 0>; }; diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts index 50f8022..e03fbf3 100644 --- a/arch/arm/boot/dts/dra7-evm.dts +++ b/arch/arm/boot/dts/dra7-evm.dts @@ -8,6 +8,7 @@ /dts-v1/; #include 
"dra74x.dtsi" +#include <dt-bindings/gpio/gpio.h> / { model = "TI DRA742"; @@ -24,9 +25,29 @@ regulator-min-microvolt = <3300000>; regulator-max-microvolt = <3300000>; }; + + vtt_fixed: fixedregulator-vtt { + compatible = "regulator-fixed"; + regulator-name = "vtt_fixed"; + regulator-min-microvolt = <1350000>; + regulator-max-microvolt = <1350000>; + regulator-always-on; + regulator-boot-on; + enable-active-high; + gpio = <&gpio7 11 GPIO_ACTIVE_HIGH>; + }; }; &dra7_pmx_core { + pinctrl-names = "default"; + pinctrl-0 = <&vtt_pin>; + + vtt_pin: pinmux_vtt_pin { + pinctrl-single,pins = < + 0x3b4 (PIN_OUTPUT | MUX_MODE14) /* spi1_cs1.gpio7_11 */ + >; + }; + i2c1_pins: pinmux_i2c1_pins { pinctrl-single,pins = < 0x400 (PIN_INPUT | MUX_MODE0) /* i2c1_sda */ @@ -43,20 +64,19 @@ i2c3_pins: pinmux_i2c3_pins { pinctrl-single,pins = < - 0x410 (PIN_INPUT | MUX_MODE0) /* i2c3_sda */ - 0x414 (PIN_INPUT | MUX_MODE0) /* i2c3_scl */ + 0x288 (PIN_INPUT | MUX_MODE9) /* gpio6_14.i2c3_sda */ + 0x28c (PIN_INPUT | MUX_MODE9) /* gpio6_15.i2c3_scl */ >; }; mcspi1_pins: pinmux_mcspi1_pins { pinctrl-single,pins = < - 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi2_clk */ - 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi2_d1 */ - 0x3ac (PIN_INPUT | MUX_MODE0) /* spi2_d0 */ - 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs0 */ - 0x3b4 (PIN_INPUT_SLEW | MUX_MODE0) /* spi2_cs1 */ - 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs2 */ - 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi2_cs3 */ + 0x3a4 (PIN_INPUT | MUX_MODE0) /* spi1_sclk */ + 0x3a8 (PIN_INPUT | MUX_MODE0) /* spi1_d1 */ + 0x3ac (PIN_INPUT | MUX_MODE0) /* spi1_d0 */ + 0x3b0 (PIN_INPUT_SLEW | MUX_MODE0) /* spi1_cs0 */ + 0x3b8 (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs2.hdmi1_hpd */ + 0x3bc (PIN_INPUT_SLEW | MUX_MODE6) /* spi1_cs3.hdmi1_cec */ >; }; @@ -284,7 +304,7 @@ status = "okay"; pinctrl-names = "default"; pinctrl-0 = <&i2c3_pins>; - clock-frequency = <3400000>; + clock-frequency = <400000>; }; &mcspi1 { @@ -483,7 +503,7 @@ reg = <0x001c0000 0x00020000>; }; partition@7 { - label = "NAND.u-boot-env"; + label = "NAND.u-boot-env.backup1"; reg = <0x001e0000 0x00020000>; }; partition@8 { @@ -504,3 +524,8 @@ &usb2_phy2 { phy-supply = <&ldousb_reg>; }; + +&gpio7 { + ti,no-reset-on-init; + ti,no-idle-on-init; +}; diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 97f603c..d678152 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -245,7 +245,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio2: gpio@48055000 { @@ -256,7 +256,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio3: gpio@48057000 { @@ -267,7 +267,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio4: gpio@48059000 { @@ -278,7 +278,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio5: gpio@4805b000 { @@ -289,7 +289,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio6: gpio@4805d000 { @@ -300,7 +300,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio7: gpio@48051000 { @@ -311,7 +311,7 @@ gpio-controller; #gpio-cells = <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; gpio8: gpio@48053000 { @@ -322,7 +322,7 @@ gpio-controller; #gpio-cells 
= <2>; interrupt-controller; - #interrupt-cells = <1>; + #interrupt-cells = <2>; }; uart1: serial@4806a000 { diff --git a/arch/arm/boot/dts/imx53-qsrb.dts b/arch/arm/boot/dts/imx53-qsrb.dts index f1bbf9a..82d623d 100644 --- a/arch/arm/boot/dts/imx53-qsrb.dts +++ b/arch/arm/boot/dts/imx53-qsrb.dts @@ -28,6 +28,12 @@ MX53_PAD_CSI0_DAT9__I2C1_SCL 0x400001ec >; }; + + pinctrl_pmic: pmicgrp { + fsl,pins = < + MX53_PAD_CSI0_DAT5__GPIO5_23 0x1e4 /* IRQ */ + >; + }; }; }; @@ -38,6 +44,8 @@ pmic: mc34708@8 { compatible = "fsl,mc34708"; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_pmic>; reg = <0x08>; interrupt-parent = <&gpio5>; interrupts = <23 0x8>; diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts index c8e51dd..7159854 100644 --- a/arch/arm/boot/dts/imx6dl-hummingboard.dts +++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts @@ -58,7 +58,7 @@ sound-spdif { compatible = "fsl,imx-audio-spdif"; - model = "imx-spdif"; + model = "On-board SPDIF"; /* IMX6 doesn't implement this yet */ spdif-controller = <&spdif>; spdif-out; @@ -181,11 +181,13 @@ }; &usbh1 { + disable-over-current; vbus-supply = <®_usbh1_vbus>; status = "okay"; }; &usbotg { + disable-over-current; pinctrl-names = "default"; pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>; vbus-supply = <®_usbotg_vbus>; diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi index e8e7816..6a524ca 100644 --- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi +++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi @@ -61,7 +61,7 @@ sound-spdif { compatible = "fsl,imx-audio-spdif"; - model = "imx-spdif"; + model = "Integrated SPDIF"; /* IMX6 doesn't implement this yet */ spdif-controller = <&spdif>; spdif-out; @@ -130,16 +130,23 @@ fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; }; + pinctrl_cubox_i_usbh1: cubox-i-usbh1 { + fsl,pins = <MX6QDL_PAD_GPIO_3__USB_H1_OC 0x1b0b0>; + }; + pinctrl_cubox_i_usbh1_vbus: cubox-i-usbh1-vbus { fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>; }; - pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id { + pinctrl_cubox_i_usbotg: cubox-i-usbotg { /* - * The Cubox-i pulls this low, but as it's pointless + * The Cubox-i pulls ID low, but as it's pointless * leaving it as a pull-up, even if it is just 10uA. 
*/ - fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>; + fsl,pins = < + MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059 + MX6QDL_PAD_KEY_COL4__USB_OTG_OC 0x1b0b0 + >; }; pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus { @@ -173,13 +180,15 @@ }; &usbh1 { + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_cubox_i_usbh1>; vbus-supply = <®_usbh1_vbus>; status = "okay"; }; &usbotg { pinctrl-names = "default"; - pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>; + pinctrl-0 = <&pinctrl_cubox_i_usbotg>; vbus-supply = <®_usbotg_vbus>; status = "okay"; }; diff --git a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi index d160666..db9f45b 100644 --- a/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi +++ b/arch/arm/boot/dts/imx6qdl-microsom-ar8035.dtsi @@ -17,7 +17,7 @@ enet { pinctrl_microsom_enet_ar8035: microsom-enet-ar8035 { fsl,pins = < - MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b0b0 + MX6QDL_PAD_ENET_MDIO__ENET_MDIO 0x1b8b0 MX6QDL_PAD_ENET_MDC__ENET_MDC 0x1b0b0 /* AR8035 reset */ MX6QDL_PAD_KEY_ROW4__GPIO4_IO15 0x130b0 diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index 3c3e6da..a9aae88 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -292,6 +292,7 @@ &uart3 { pinctrl-names = "default"; pinctrl-0 = <&uart3_pins>; + interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>; }; &gpio1 { diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts index b15f1a7..4361777 100644 --- a/arch/arm/boot/dts/omap3-n900.dts +++ b/arch/arm/boot/dts/omap3-n900.dts @@ -93,7 +93,7 @@ }; tv: connector { - compatible = "composite-connector"; + compatible = "composite-video-connector"; label = "tv"; port { @@ -353,7 +353,7 @@ }; twl_power: power { - compatible = "ti,twl4030-power-n900"; + compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off"; ti,use_poweroff; }; }; diff --git a/arch/arm/boot/dts/omap3430-sdp.dts b/arch/arm/boot/dts/omap3430-sdp.dts index 02f69f4..9bad94e 100644 --- a/arch/arm/boot/dts/omap3430-sdp.dts +++ b/arch/arm/boot/dts/omap3430-sdp.dts @@ -107,7 +107,7 @@ #address-cells = <1>; #size-cells = <1>; reg = <1 0 0x08000000>; - ti,nand-ecc-opt = "ham1"; + ti,nand-ecc-opt = "sw"; nand-bus-width = <8>; gpmc,cs-on-ns = <0>; gpmc,cs-rd-off-ns = <36>; diff --git a/arch/arm/boot/dts/omap3xxx-clocks.dtsi b/arch/arm/boot/dts/omap3xxx-clocks.dtsi index e47ff69..5c37500 100644 --- a/arch/arm/boot/dts/omap3xxx-clocks.dtsi +++ b/arch/arm/boot/dts/omap3xxx-clocks.dtsi @@ -467,6 +467,7 @@ ti,bit-shift = <0x1e>; reg = <0x0d00>; ti,set-bit-to-disable; + ti,set-rate-parent; }; dpll4_m6_ck: dpll4_m6_ck { diff --git a/arch/arm/boot/dts/omap54xx-clocks.dtsi b/arch/arm/boot/dts/omap54xx-clocks.dtsi index e67a23b..58c2746 100644 --- a/arch/arm/boot/dts/omap54xx-clocks.dtsi +++ b/arch/arm/boot/dts/omap54xx-clocks.dtsi @@ -367,10 +367,12 @@ l3_iclk_div: l3_iclk_div { #clock-cells = <0>; - compatible = "fixed-factor-clock"; + compatible = "ti,divider-clock"; + ti,max-div = <2>; + ti,bit-shift = <4>; + reg = <0x100>; clocks = <&dpll_core_h12x2_ck>; - clock-mult = <1>; - clock-div = <1>; + ti,index-power-of-two; }; gpu_l3_iclk: gpu_l3_iclk { @@ -383,10 +385,12 @@ l4_root_clk_div: l4_root_clk_div { #clock-cells = <0>; - compatible = "fixed-factor-clock"; + compatible = "ti,divider-clock"; + ti,max-div = <2>; + ti,bit-shift = <8>; + reg = <0x100>; clocks = <&l3_iclk_div>; - clock-mult = <1>; - clock-div = <1>; + ti,index-power-of-two; }; slimbus1_slimbus_clk: 
slimbus1_slimbus_clk { diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index 4a2000c..3e97a66 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -116,7 +116,6 @@ msp2: msp@80117000 { pinctrl-names = "default"; pinctrl-0 = <&msp2_default_mode>; - status = "okay"; }; msp3: msp@80125000 { diff --git a/arch/arm/boot/dts/twl6030.dtsi b/arch/arm/boot/dts/twl6030.dtsi index 2e3bd31..55eb35f 100644 --- a/arch/arm/boot/dts/twl6030.dtsi +++ b/arch/arm/boot/dts/twl6030.dtsi @@ -83,10 +83,6 @@ regulator-always-on; }; - clk32kg: regulator-clk32kg { - compatible = "ti,twl6030-clk32kg"; - }; - twl_usb_comparator: usb-comparator { compatible = "ti,twl6030-usb"; interrupts = <4>, <10>; diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c index 8809917..d86771a 100644 --- a/arch/arm/common/edma.c +++ b/arch/arm/common/edma.c @@ -1443,14 +1443,14 @@ void edma_assign_channel_eventq(unsigned channel, enum dma_event_q eventq_no) EXPORT_SYMBOL(edma_assign_channel_eventq); static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, - struct edma *edma_cc) + struct edma *edma_cc, int cc_id) { int i; u32 value, cccfg; s8 (*queue_priority_map)[2]; /* Decode the eDMA3 configuration from CCCFG register */ - cccfg = edma_read(0, EDMA_CCCFG); + cccfg = edma_read(cc_id, EDMA_CCCFG); value = GET_NUM_REGN(cccfg); edma_cc->num_region = BIT(value); @@ -1464,7 +1464,8 @@ static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, value = GET_NUM_EVQUE(cccfg); edma_cc->num_tc = value + 1; - dev_dbg(dev, "eDMA3 HW configuration (cccfg: 0x%08x):\n", cccfg); + dev_dbg(dev, "eDMA3 CC%d HW configuration (cccfg: 0x%08x):\n", cc_id, + cccfg); dev_dbg(dev, "num_region: %u\n", edma_cc->num_region); dev_dbg(dev, "num_channel: %u\n", edma_cc->num_channels); dev_dbg(dev, "num_slot: %u\n", edma_cc->num_slots); @@ -1684,7 +1685,7 @@ static int edma_probe(struct platform_device *pdev) return -ENOMEM; /* Get eDMA3 configuration from IP */ - ret = edma_setup_from_hw(dev, info[j], edma_cc[j]); + ret = edma_setup_from_hw(dev, info[j], edma_cc[j], j); if (ret) return ret; diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h index fd43f7f..79ecb4f 100644 --- a/arch/arm/include/asm/cacheflush.h +++ b/arch/arm/include/asm/cacheflush.h @@ -472,7 +472,6 @@ static inline void __sync_cache_range_r(volatile void *p, size_t size) "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR \n\t" \ "isb \n\t" \ "bl v7_flush_dcache_"__stringify(level)" \n\t" \ - "clrex \n\t" \ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR \n\t" \ "bic r0, r0, #(1 << 6) @ disable local coherency \n\t" \ "mcr p15, 0, r0, c1, c0, 1 @ set ACTLR \n\t" \ diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h index 963a251..819777d 100644 --- a/arch/arm/include/asm/cputype.h +++ b/arch/arm/include/asm/cputype.h @@ -74,6 +74,7 @@ #define ARM_CPU_PART_CORTEX_A12 0x4100c0d0 #define ARM_CPU_PART_CORTEX_A17 0x4100c0e0 #define ARM_CPU_PART_CORTEX_A15 0x4100c0f0 +#define ARM_CPU_PART_MASK 0xff00fff0 #define ARM_CPU_XSCALE_ARCH_MASK 0xe000 #define ARM_CPU_XSCALE_ARCH_V1 0x2000 @@ -179,7 +180,7 @@ static inline unsigned int __attribute_const__ read_cpuid_implementor(void) */ static inline unsigned int __attribute_const__ read_cpuid_part(void) { - return read_cpuid_id() & 0xff00fff0; + return read_cpuid_id() & ARM_CPU_PART_MASK; } static inline unsigned int __attribute_const__ __deprecated read_cpuid_part_number(void) diff --git 
a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h index f4b46d3..afb9caf 100644 --- a/arch/arm/include/asm/elf.h +++ b/arch/arm/include/asm/elf.h @@ -50,6 +50,7 @@ typedef struct user_fp elf_fpregset_t; #define R_ARM_ABS32 2 #define R_ARM_CALL 28 #define R_ARM_JUMP24 29 +#define R_ARM_TARGET1 38 #define R_ARM_V4BX 40 #define R_ARM_PREL31 42 #define R_ARM_MOVW_ABS_NC 43 diff --git a/arch/arm/include/asm/smp_plat.h b/arch/arm/include/asm/smp_plat.h index a252c0b..0ad7d49 100644 --- a/arch/arm/include/asm/smp_plat.h +++ b/arch/arm/include/asm/smp_plat.h @@ -8,6 +8,7 @@ #include <linux/cpumask.h> #include <linux/err.h> +#include <asm/cpu.h> #include <asm/cputype.h> /* @@ -25,6 +26,20 @@ static inline bool is_smp(void) #endif } +/** + * smp_cpuid_part() - return part id for a given cpu + * @cpu: logical cpu id. + * + * Return: part id of logical cpu passed as argument. + */ +static inline unsigned int smp_cpuid_part(int cpu) +{ + struct cpuinfo_arm *cpu_info = &per_cpu(cpu_data, cpu); + + return is_smp() ? cpu_info->cpuid & ARM_CPU_PART_MASK : + read_cpuid_part(); +} + /* all SMP configurations have the extended CPUID registers */ #ifndef CONFIG_MMU #define tlb_ops_need_broadcast() 0 diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h index 1109017..e8275ea 100644 --- a/arch/arm/include/asm/xen/page-coherent.h +++ b/arch/arm/include/asm/xen/page-coherent.h @@ -26,25 +26,14 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page, __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); } -static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, +void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, size_t size, enum dma_data_direction dir, - struct dma_attrs *attrs) -{ - if (__generic_dma_ops(hwdev)->unmap_page) - __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs); -} + struct dma_attrs *attrs); -static inline void xen_dma_sync_single_for_cpu(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (__generic_dma_ops(hwdev)->sync_single_for_cpu) - __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir); -} +void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir); + +void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir); -static inline void xen_dma_sync_single_for_device(struct device *hwdev, - dma_addr_t handle, size_t size, enum dma_data_direction dir) -{ - if (__generic_dma_ops(hwdev)->sync_single_for_device) - __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir); -} #endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h index ded062f..135c24a 100644 --- a/arch/arm/include/asm/xen/page.h +++ b/arch/arm/include/asm/xen/page.h @@ -33,7 +33,6 @@ typedef struct xpaddr { #define INVALID_P2M_ENTRY (~0UL) unsigned long __pfn_to_mfn(unsigned long pfn); -unsigned long __mfn_to_pfn(unsigned long mfn); extern struct rb_root phys_to_mach; static inline unsigned long pfn_to_mfn(unsigned long pfn) @@ -51,14 +50,6 @@ static inline unsigned long pfn_to_mfn(unsigned long pfn) static inline unsigned long mfn_to_pfn(unsigned long mfn) { - unsigned long pfn; - - if (phys_to_mach.rb_node != NULL) { - pfn = __mfn_to_pfn(mfn); - if (pfn != INVALID_P2M_ENTRY) - return pfn; - } - return mfn; } diff --git 
a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S index 8db307d..2fdf867 100644 --- a/arch/arm/kernel/entry-header.S +++ b/arch/arm/kernel/entry-header.S @@ -208,26 +208,21 @@ #endif .endif msr spsr_cxsf, \rpsr -#if defined(CONFIG_CPU_V6) - ldr r0, [sp] - strex r1, r2, [sp] @ clear the exclusive monitor - ldmib sp, {r1 - pc}^ @ load r1 - pc, cpsr -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive monitor - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr -#else - ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 + sub r0, sp, #4 @ uninhabited address + strex r1, r2, [r0] @ clear the exclusive monitor #endif + ldmia sp, {r0 - pc}^ @ load r0 - pc, cpsr .endm .macro restore_user_regs, fast = 0, offset = 0 ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC]! @ get pc msr spsr_cxsf, r1 @ save in spsr_svc -#if defined(CONFIG_CPU_V6) +#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_32v6K) + @ We must avoid clrex due to Cortex-A15 erratum #830321 strex r1, r2, [sp] @ clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex @ clear the exclusive monitor #endif .if \fast ldmdb sp, {r1 - lr}^ @ get calling r1 - lr @@ -261,7 +256,10 @@ .endif ldr lr, [sp, #S_SP] @ top of the stack ldrd r0, r1, [sp, #S_LR] @ calling lr and pc - clrex @ clear the exclusive monitor + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r2, r1, [sp, #S_LR] @ clear the exclusive monitor + stmdb lr!, {r0, r1, \rpsr} @ calling lr and rfe context ldmia sp, {r0 - r12} mov sp, lr @@ -282,13 +280,16 @@ .endm #else /* ifdef CONFIG_CPU_V7M */ .macro restore_user_regs, fast = 0, offset = 0 - clrex @ clear the exclusive monitor mov r2, sp load_user_sp_lr r2, r3, \offset + S_SP @ calling sp, lr ldr r1, [sp, #\offset + S_PSR] @ get calling cpsr ldr lr, [sp, #\offset + S_PC] @ get pc add sp, sp, #\offset + S_SP msr spsr_cxsf, r1 @ save in spsr_svc + + @ We must avoid clrex due to Cortex-A15 erratum #830321 + strex r1, r2, [sp] @ clear the exclusive monitor + .if \fast ldmdb sp, {r1 - r12} @ get calling r1 - r12 .else diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c index 45e4781..6a4dffe 100644 --- a/arch/arm/kernel/module.c +++ b/arch/arm/kernel/module.c @@ -91,6 +91,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, break; case R_ARM_ABS32: + case R_ARM_TARGET1: *(u32 *)loc += sym->st_value; break; diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 4c979d4..a96a804 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -93,6 +93,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) else kvm_vcpu_block(vcpu); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return 1; } diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S index 991415d..3988e72 100644 --- a/arch/arm/kvm/init.S +++ b/arch/arm/kvm/init.S @@ -99,6 +99,10 @@ __do_hyp_init: mrc p15, 0, r0, c10, c2, 1 mcr p15, 4, r0, c10, c2, 1 + @ Invalidate the stale TLBs from Bootloader + mcr p15, 4, r0, c8, c7, 0 @ TLBIALLH + dsb ish + @ Set the HSCTLR to: @ - ARM/THUMB exceptions: Kernel config (Thumb-2 kernel) @ - Endianness: Kernel config diff --git a/arch/arm/mach-at91/board-dt-rm9200.c b/arch/arm/mach-at91/board-dt-rm9200.c index 3a185fa..f4b6e91 100644 --- a/arch/arm/mach-at91/board-dt-rm9200.c +++ b/arch/arm/mach-at91/board-dt-rm9200.c @@ -14,6 +14,7 @@ #include <linux/gpio.h> 
#include <linux/of.h> #include <linux/of_irq.h> +#include <linux/clk-provider.h> #include <asm/setup.h> #include <asm/irq.h> @@ -35,13 +36,21 @@ static void __init at91rm9200_dt_init_irq(void) of_irq_init(irq_of_match); } +static void __init at91rm9200_dt_timer_init(void) +{ +#if defined(CONFIG_COMMON_CLK) + of_clk_init(NULL); +#endif + at91rm9200_timer_init(); +} + static const char *at91rm9200_dt_board_compat[] __initdata = { "atmel,at91rm9200", NULL }; DT_MACHINE_START(at91rm9200_dt, "Atmel AT91RM9200 (Device Tree)") - .init_time = at91rm9200_timer_init, + .init_time = at91rm9200_dt_timer_init, .map_io = at91_map_io, .handle_irq = at91_aic_handle_irq, .init_early = at91rm9200_dt_initialize, diff --git a/arch/arm/mach-bcm/Makefile b/arch/arm/mach-bcm/Makefile index 67c492a..b19a396 100644 --- a/arch/arm/mach-bcm/Makefile +++ b/arch/arm/mach-bcm/Makefile @@ -36,5 +36,4 @@ obj-$(CONFIG_ARCH_BCM_5301X) += bcm_5301x.o ifeq ($(CONFIG_ARCH_BRCMSTB),y) obj-y += brcmstb.o -obj-$(CONFIG_SMP) += headsmp-brcmstb.o platsmp-brcmstb.o endif diff --git a/arch/arm/mach-bcm/brcmstb.h b/arch/arm/mach-bcm/brcmstb.h deleted file mode 100644 index ec0c3d1..0000000 --- a/arch/arm/mach-bcm/brcmstb.h +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright (C) 2013-2014 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#ifndef __BRCMSTB_H__ -#define __BRCMSTB_H__ - -void brcmstb_secondary_startup(void); - -#endif /* __BRCMSTB_H__ */ diff --git a/arch/arm/mach-bcm/headsmp-brcmstb.S b/arch/arm/mach-bcm/headsmp-brcmstb.S deleted file mode 100644 index 199c1ea..0000000 --- a/arch/arm/mach-bcm/headsmp-brcmstb.S +++ /dev/null @@ -1,33 +0,0 @@ -/* - * SMP boot code for secondary CPUs - * Based on arch/arm/mach-tegra/headsmp.S - * - * Copyright (C) 2010 NVIDIA, Inc. - * Copyright (C) 2013-2014 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <asm/assembler.h> -#include <linux/linkage.h> -#include <linux/init.h> - - .section ".text.head", "ax" - -ENTRY(brcmstb_secondary_startup) - /* - * Ensure CPU is in a sane state by disabling all IRQs and switching - * into SVC mode. 
- */ - setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r0 - - bl v7_invalidate_l1 - b secondary_startup -ENDPROC(brcmstb_secondary_startup) diff --git a/arch/arm/mach-bcm/platsmp-brcmstb.c b/arch/arm/mach-bcm/platsmp-brcmstb.c deleted file mode 100644 index af780e9..0000000 --- a/arch/arm/mach-bcm/platsmp-brcmstb.c +++ /dev/null @@ -1,363 +0,0 @@ -/* - * Broadcom STB CPU SMP and hotplug support for ARM - * - * Copyright (C) 2013-2014 Broadcom Corporation - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation version 2. - * - * This program is distributed "as is" WITHOUT ANY WARRANTY of any - * kind, whether express or implied; without even the implied warranty - * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - */ - -#include <linux/delay.h> -#include <linux/errno.h> -#include <linux/init.h> -#include <linux/io.h> -#include <linux/of_address.h> -#include <linux/of_platform.h> -#include <linux/printk.h> -#include <linux/regmap.h> -#include <linux/smp.h> -#include <linux/mfd/syscon.h> -#include <linux/spinlock.h> - -#include <asm/cacheflush.h> -#include <asm/cp15.h> -#include <asm/mach-types.h> -#include <asm/smp_plat.h> - -#include "brcmstb.h" - -enum { - ZONE_MAN_CLKEN_MASK = BIT(0), - ZONE_MAN_RESET_CNTL_MASK = BIT(1), - ZONE_MAN_MEM_PWR_MASK = BIT(4), - ZONE_RESERVED_1_MASK = BIT(5), - ZONE_MAN_ISO_CNTL_MASK = BIT(6), - ZONE_MANUAL_CONTROL_MASK = BIT(7), - ZONE_PWR_DN_REQ_MASK = BIT(9), - ZONE_PWR_UP_REQ_MASK = BIT(10), - ZONE_BLK_RST_ASSERT_MASK = BIT(12), - ZONE_PWR_OFF_STATE_MASK = BIT(25), - ZONE_PWR_ON_STATE_MASK = BIT(26), - ZONE_DPG_PWR_STATE_MASK = BIT(28), - ZONE_MEM_PWR_STATE_MASK = BIT(29), - ZONE_RESET_STATE_MASK = BIT(31), - CPU0_PWR_ZONE_CTRL_REG = 1, - CPU_RESET_CONFIG_REG = 2, -}; - -static void __iomem *cpubiuctrl_block; -static void __iomem *hif_cont_block; -static u32 cpu0_pwr_zone_ctrl_reg; -static u32 cpu_rst_cfg_reg; -static u32 hif_cont_reg; - -#ifdef CONFIG_HOTPLUG_CPU -static DEFINE_PER_CPU_ALIGNED(int, per_cpu_sw_state); - -static int per_cpu_sw_state_rd(u32 cpu) -{ - sync_cache_r(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); - return per_cpu(per_cpu_sw_state, cpu); -} - -static void per_cpu_sw_state_wr(u32 cpu, int val) -{ - per_cpu(per_cpu_sw_state, cpu) = val; - dmb(); - sync_cache_w(SHIFT_PERCPU_PTR(&per_cpu_sw_state, per_cpu_offset(cpu))); - dsb_sev(); -} -#else -static inline void per_cpu_sw_state_wr(u32 cpu, int val) { } -#endif - -static void __iomem *pwr_ctrl_get_base(u32 cpu) -{ - void __iomem *base = cpubiuctrl_block + cpu0_pwr_zone_ctrl_reg; - base += (cpu_logical_map(cpu) * 4); - return base; -} - -static u32 pwr_ctrl_rd(u32 cpu) -{ - void __iomem *base = pwr_ctrl_get_base(cpu); - return readl_relaxed(base); -} - -static void pwr_ctrl_wr(u32 cpu, u32 val) -{ - void __iomem *base = pwr_ctrl_get_base(cpu); - writel(val, base); -} - -static void cpu_rst_cfg_set(u32 cpu, int set) -{ - u32 val; - val = readl_relaxed(cpubiuctrl_block + cpu_rst_cfg_reg); - if (set) - val |= BIT(cpu_logical_map(cpu)); - else - val &= ~BIT(cpu_logical_map(cpu)); - writel_relaxed(val, cpubiuctrl_block + cpu_rst_cfg_reg); -} - -static void cpu_set_boot_addr(u32 cpu, unsigned long boot_addr) -{ - const int reg_ofs = cpu_logical_map(cpu) * 8; - writel_relaxed(0, hif_cont_block + hif_cont_reg + reg_ofs); - writel_relaxed(boot_addr, hif_cont_block + hif_cont_reg + 4 + reg_ofs); -} - 
-static void brcmstb_cpu_boot(u32 cpu) -{ - pr_info("SMP: Booting CPU%d...\n", cpu); - - /* - * set the reset vector to point to the secondary_startup - * routine - */ - cpu_set_boot_addr(cpu, virt_to_phys(brcmstb_secondary_startup)); - - /* unhalt the cpu */ - cpu_rst_cfg_set(cpu, 0); -} - -static void brcmstb_cpu_power_on(u32 cpu) -{ - /* - * The secondary cores power was cut, so we must go through - * power-on initialization. - */ - u32 tmp; - - pr_info("SMP: Powering up CPU%d...\n", cpu); - - /* Request zone power up */ - pwr_ctrl_wr(cpu, ZONE_PWR_UP_REQ_MASK); - - /* Wait for the power up FSM to complete */ - do { - tmp = pwr_ctrl_rd(cpu); - } while (!(tmp & ZONE_PWR_ON_STATE_MASK)); - - per_cpu_sw_state_wr(cpu, 1); -} - -static int brcmstb_cpu_get_power_state(u32 cpu) -{ - int tmp = pwr_ctrl_rd(cpu); - return (tmp & ZONE_RESET_STATE_MASK) ? 0 : 1; -} - -#ifdef CONFIG_HOTPLUG_CPU - -static void brcmstb_cpu_die(u32 cpu) -{ - v7_exit_coherency_flush(all); - - /* Prevent all interrupts from reaching this CPU. */ - arch_local_irq_disable(); - - /* - * Final full barrier to ensure everything before this instruction has - * quiesced. - */ - isb(); - dsb(); - - per_cpu_sw_state_wr(cpu, 0); - - /* Sit and wait to die */ - wfi(); - - /* We should never get here... */ - panic("Spurious interrupt on CPU %d received!\n", cpu); -} - -static int brcmstb_cpu_kill(u32 cpu) -{ - u32 tmp; - - pr_info("SMP: Powering down CPU%d...\n", cpu); - - while (per_cpu_sw_state_rd(cpu)) - ; - - /* Program zone reset */ - pwr_ctrl_wr(cpu, ZONE_RESET_STATE_MASK | ZONE_BLK_RST_ASSERT_MASK | - ZONE_PWR_DN_REQ_MASK); - - /* Verify zone reset */ - tmp = pwr_ctrl_rd(cpu); - if (!(tmp & ZONE_RESET_STATE_MASK)) - pr_err("%s: Zone reset bit for CPU %d not asserted!\n", - __func__, cpu); - - /* Wait for power down */ - do { - tmp = pwr_ctrl_rd(cpu); - } while (!(tmp & ZONE_PWR_OFF_STATE_MASK)); - - /* Settle-time from Broadcom-internal DVT reference code */ - udelay(7); - - /* Assert reset on the CPU */ - cpu_rst_cfg_set(cpu, 1); - - return 1; -} - -#endif /* CONFIG_HOTPLUG_CPU */ - -static int __init setup_hifcpubiuctrl_regs(struct device_node *np) -{ - int rc = 0; - char *name; - struct device_node *syscon_np = NULL; - - name = "syscon-cpu"; - - syscon_np = of_parse_phandle(np, name, 0); - if (!syscon_np) { - pr_err("can't find phandle %s\n", name); - rc = -EINVAL; - goto cleanup; - } - - cpubiuctrl_block = of_iomap(syscon_np, 0); - if (!cpubiuctrl_block) { - pr_err("iomap failed for cpubiuctrl_block\n"); - rc = -EINVAL; - goto cleanup; - } - - rc = of_property_read_u32_index(np, name, CPU0_PWR_ZONE_CTRL_REG, - &cpu0_pwr_zone_ctrl_reg); - if (rc) { - pr_err("failed to read 1st entry from %s property (%d)\n", name, - rc); - rc = -EINVAL; - goto cleanup; - } - - rc = of_property_read_u32_index(np, name, CPU_RESET_CONFIG_REG, - &cpu_rst_cfg_reg); - if (rc) { - pr_err("failed to read 2nd entry from %s property (%d)\n", name, - rc); - rc = -EINVAL; - goto cleanup; - } - -cleanup: - if (syscon_np) - of_node_put(syscon_np); - - return rc; -} - -static int __init setup_hifcont_regs(struct device_node *np) -{ - int rc = 0; - char *name; - struct device_node *syscon_np = NULL; - - name = "syscon-cont"; - - syscon_np = of_parse_phandle(np, name, 0); - if (!syscon_np) { - pr_err("can't find phandle %s\n", name); - rc = -EINVAL; - goto cleanup; - } - - hif_cont_block = of_iomap(syscon_np, 0); - if (!hif_cont_block) { - pr_err("iomap failed for hif_cont_block\n"); - rc = -EINVAL; - goto cleanup; - } - - /* offset is at top of 
hif_cont_block */ - hif_cont_reg = 0; - -cleanup: - if (syscon_np) - of_node_put(syscon_np); - - return rc; -} - -static void __init brcmstb_cpu_ctrl_setup(unsigned int max_cpus) -{ - int rc; - struct device_node *np; - char *name; - - name = "brcm,brcmstb-smpboot"; - np = of_find_compatible_node(NULL, NULL, name); - if (!np) { - pr_err("can't find compatible node %s\n", name); - return; - } - - rc = setup_hifcpubiuctrl_regs(np); - if (rc) - return; - - rc = setup_hifcont_regs(np); - if (rc) - return; -} - -static DEFINE_SPINLOCK(boot_lock); - -static void brcmstb_secondary_init(unsigned int cpu) -{ - /* - * Synchronise with the boot thread. - */ - spin_lock(&boot_lock); - spin_unlock(&boot_lock); -} - -static int brcmstb_boot_secondary(unsigned int cpu, struct task_struct *idle) -{ - /* - * set synchronisation state between this boot processor - * and the secondary one - */ - spin_lock(&boot_lock); - - /* Bring up power to the core if necessary */ - if (brcmstb_cpu_get_power_state(cpu) == 0) - brcmstb_cpu_power_on(cpu); - - brcmstb_cpu_boot(cpu); - - /* - * now the secondary core is starting up let it run its - * calibrations, then wait for it to finish - */ - spin_unlock(&boot_lock); - - return 0; -} - -static struct smp_operations brcmstb_smp_ops __initdata = { - .smp_prepare_cpus = brcmstb_cpu_ctrl_setup, - .smp_secondary_init = brcmstb_secondary_init, - .smp_boot_secondary = brcmstb_boot_secondary, -#ifdef CONFIG_HOTPLUG_CPU - .cpu_kill = brcmstb_cpu_kill, - .cpu_die = brcmstb_cpu_die, -#endif -}; - -CPU_METHOD_OF_DECLARE(brcmstb_smp, "brcm,brahma-b15", &brcmstb_smp_ops); diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c index b2f8b60..dc9a764 100644 --- a/arch/arm/mach-exynos/mcpm-exynos.c +++ b/arch/arm/mach-exynos/mcpm-exynos.c @@ -43,7 +43,6 @@ "mcr p15, 0, r0, c1, c0, 0 @ set SCTLR\n\t" \ "isb\n\t"\ "bl v7_flush_dcache_"__stringify(level)"\n\t" \ - "clrex\n\t"\ "mrc p15, 0, r0, c1, c0, 1 @ get ACTLR\n\t" \ "bic r0, r0, #(1 << 6) @ disable local coherency\n\t" \ /* Dummy Load of a device register to avoid Erratum 799270 */ \ diff --git a/arch/arm/mach-omap2/board-flash.c b/arch/arm/mach-omap2/board-flash.c index e87f2a8..2d245c2 100644 --- a/arch/arm/mach-omap2/board-flash.c +++ b/arch/arm/mach-omap2/board-flash.c @@ -142,7 +142,7 @@ __init board_nand_init(struct mtd_partition *nand_parts, u8 nr_parts, u8 cs, board_nand_data.nr_parts = nr_parts; board_nand_data.devsize = nand_type; - board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_HW; + board_nand_data.ecc_opt = OMAP_ECC_HAM1_CODE_SW; gpmc_nand_init(&board_nand_data, gpmc_t); } #endif /* CONFIG_MTD_NAND_OMAP2 || CONFIG_MTD_NAND_OMAP2_MODULE */ diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c index 8897ad7..cb776431 100644 --- a/arch/arm/mach-omap2/gpmc-nand.c +++ b/arch/arm/mach-omap2/gpmc-nand.c @@ -49,7 +49,8 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt) return 0; /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */ - if (ecc_opt == OMAP_ECC_HAM1_CODE_HW) + if (ecc_opt == OMAP_ECC_HAM1_CODE_HW || + ecc_opt == OMAP_ECC_HAM1_CODE_SW) return 1; else return 0; diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c index 8bc1338..2f97228 100644 --- a/arch/arm/mach-omap2/gpmc.c +++ b/arch/arm/mach-omap2/gpmc.c @@ -1207,8 +1207,7 @@ int gpmc_cs_program_settings(int cs, struct gpmc_settings *p) } } - if ((p->wait_on_read || p->wait_on_write) && - (p->wait_pin > gpmc_nr_waitpins)) { + if (p->wait_pin > gpmc_nr_waitpins) { pr_err("%s: 
invalid wait-pin (%d)\n", __func__, p->wait_pin); return -EINVAL; } @@ -1288,8 +1287,8 @@ void gpmc_read_settings_dt(struct device_node *np, struct gpmc_settings *p) p->wait_on_write = of_property_read_bool(np, "gpmc,wait-on-write"); if (!p->wait_on_read && !p->wait_on_write) - pr_warn("%s: read/write wait monitoring not enabled!\n", - __func__); + pr_debug("%s: rd/wr wait monitoring not enabled!\n", + __func__); } } @@ -1403,8 +1402,11 @@ static int gpmc_probe_nand_child(struct platform_device *pdev, pr_err("%s: ti,nand-ecc-opt not found\n", __func__); return -ENODEV; } - if (!strcmp(s, "ham1") || !strcmp(s, "sw") || - !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) + + if (!strcmp(s, "sw")) + gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_SW; + else if (!strcmp(s, "ham1") || + !strcmp(s, "hw") || !strcmp(s, "hw-romcode")) gpmc_nand_data->ecc_opt = OMAP_ECC_HAM1_CODE_HW; else if (!strcmp(s, "bch4")) diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c index d42022f..53841de 100644 --- a/arch/arm/mach-omap2/id.c +++ b/arch/arm/mach-omap2/id.c @@ -663,7 +663,7 @@ void __init dra7xxx_check_revision(void) default: /* Unknown default to latest silicon rev as default*/ - pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n", + pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n", __func__, idcode, hawkeye, rev); omap_revision = DRA752_REV_ES1_1; } diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c index 01ef59d..d22c30d 100644 --- a/arch/arm/mach-omap2/omap_device.c +++ b/arch/arm/mach-omap2/omap_device.c @@ -56,7 +56,7 @@ static void _add_clkdev(struct omap_device *od, const char *clk_alias, r = clk_get_sys(dev_name(&od->pdev->dev), clk_alias); if (!IS_ERR(r)) { - dev_warn(&od->pdev->dev, + dev_dbg(&od->pdev->dev, "alias %s already exists\n", clk_alias); clk_put(r); return; diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index 6c074f3..8fd87a3 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -2185,6 +2185,8 @@ static int _enable(struct omap_hwmod *oh) oh->mux->pads_dynamic))) { omap_hwmod_mux(oh->mux, _HWMOD_STATE_ENABLED); _reconfigure_io_chain(); + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { + _reconfigure_io_chain(); } _add_initiator_dep(oh, mpu_oh); @@ -2291,6 +2293,8 @@ static int _idle(struct omap_hwmod *oh) if (oh->mux && oh->mux->pads_dynamic) { omap_hwmod_mux(oh->mux, _HWMOD_STATE_IDLE); _reconfigure_io_chain(); + } else if (oh->flags & HWMOD_FORCE_MSTANDBY) { + _reconfigure_io_chain(); } oh->_state = _HWMOD_STATE_IDLE; @@ -3345,6 +3349,9 @@ int __init omap_hwmod_register_links(struct omap_hwmod_ocp_if **ois) if (!ois) return 0; + if (ois[0] == NULL) /* Empty list */ + return 0; + if (!linkspace) { if (_alloc_linkspace(ois)) { pr_err("omap_hwmod: could not allocate link space\n"); diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c index 2757abf..5684f11 100644 --- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c @@ -35,6 +35,7 @@ #include "i2c.h" #include "mmc.h" #include "wd_timer.h" +#include "soc.h" /* Base offset for all DRA7XX interrupts external to MPUSS */ #define DRA7XX_IRQ_GIC_START 32 @@ -3261,7 +3262,6 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { &dra7xx_l4_per3__usb_otg_ss1, &dra7xx_l4_per3__usb_otg_ss2, &dra7xx_l4_per3__usb_otg_ss3, - &dra7xx_l4_per3__usb_otg_ss4, &dra7xx_l3_main_1__vcp1, &dra7xx_l4_per2__vcp1, 
&dra7xx_l3_main_1__vcp2, @@ -3270,8 +3270,26 @@ static struct omap_hwmod_ocp_if *dra7xx_hwmod_ocp_ifs[] __initdata = { NULL, }; +static struct omap_hwmod_ocp_if *dra74x_hwmod_ocp_ifs[] __initdata = { + &dra7xx_l4_per3__usb_otg_ss4, + NULL, +}; + +static struct omap_hwmod_ocp_if *dra72x_hwmod_ocp_ifs[] __initdata = { + NULL, +}; + int __init dra7xx_hwmod_init(void) { + int ret; + omap_hwmod_init(); - return omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs); + ret = omap_hwmod_register_links(dra7xx_hwmod_ocp_ifs); + + if (!ret && soc_is_dra74x()) + return omap_hwmod_register_links(dra74x_hwmod_ocp_ifs); + else if (!ret && soc_is_dra72x()) + return omap_hwmod_register_links(dra72x_hwmod_ocp_ifs); + + return ret; } diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h index 01ca808..4376f59 100644 --- a/arch/arm/mach-omap2/soc.h +++ b/arch/arm/mach-omap2/soc.h @@ -245,6 +245,8 @@ IS_AM_SUBCLASS(437x, 0x437) #define soc_is_omap54xx() 0 #define soc_is_omap543x() 0 #define soc_is_dra7xx() 0 +#define soc_is_dra74x() 0 +#define soc_is_dra72x() 0 #if defined(MULTI_OMAP2) # if defined(CONFIG_ARCH_OMAP2) @@ -393,7 +395,11 @@ IS_OMAP_TYPE(3430, 0x3430) #if defined(CONFIG_SOC_DRA7XX) #undef soc_is_dra7xx +#undef soc_is_dra74x +#undef soc_is_dra72x #define soc_is_dra7xx() (of_machine_is_compatible("ti,dra7")) +#define soc_is_dra74x() (of_machine_is_compatible("ti,dra74")) +#define soc_is_dra72x() (of_machine_is_compatible("ti,dra72")) #endif /* Various silicon revisions for omap2 */ diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c index 17435c1..126ddaf 100644 --- a/arch/arm/mach-shmobile/clock-r8a7790.c +++ b/arch/arm/mach-shmobile/clock-r8a7790.c @@ -183,8 +183,8 @@ enum { static struct clk div4_clks[DIV4_NR] = { [DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT), - [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT), - [DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1de0, CLK_ENABLE_ON_INIT), + [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT), + [DIV4_SD1] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 0, 0x1df0, CLK_ENABLE_ON_INIT), }; /* DIV6 clocks */ diff --git a/arch/arm/mach-shmobile/clock-r8a7791.c b/arch/arm/mach-shmobile/clock-r8a7791.c index 10e193d..453b231 100644 --- a/arch/arm/mach-shmobile/clock-r8a7791.c +++ b/arch/arm/mach-shmobile/clock-r8a7791.c @@ -152,7 +152,7 @@ enum { static struct clk div4_clks[DIV4_NR] = { [DIV4_SDH] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 8, 0x0dff, CLK_ENABLE_ON_INIT), - [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1de0, CLK_ENABLE_ON_INIT), + [DIV4_SD0] = SH_CLK_DIV4(&pll1_clk, SDCKCR, 4, 0x1df0, CLK_ENABLE_ON_INIT), }; /* DIV6 clocks */ diff --git a/arch/arm/mach-shmobile/clock-sh73a0.c b/arch/arm/mach-shmobile/clock-sh73a0.c index d8c4048..02a6f45 100644 --- a/arch/arm/mach-shmobile/clock-sh73a0.c +++ b/arch/arm/mach-shmobile/clock-sh73a0.c @@ -644,7 +644,7 @@ static struct clk_lookup lookups[] = { CLKDEV_DEV_ID("sh-sci.5", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("e6cb0000.serial", &mstp_clks[MSTP207]), /* SCIFA5 */ CLKDEV_DEV_ID("sh-sci.8", &mstp_clks[MSTP206]), /* SCIFB */ - CLKDEV_DEV_ID("0xe6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */ + CLKDEV_DEV_ID("e6c3000.serial", &mstp_clks[MSTP206]), /* SCIFB */ CLKDEV_DEV_ID("sh-sci.0", &mstp_clks[MSTP204]), /* SCIFA0 */ CLKDEV_DEV_ID("e6c40000.serial", &mstp_clks[MSTP204]), /* SCIFA0 */ CLKDEV_DEV_ID("sh-sci.1", &mstp_clks[MSTP203]), /* SCIFA1 */ diff --git a/arch/arm/mach-vexpress/spc.c 
b/arch/arm/mach-vexpress/spc.c index 2c2754e..f61158c 100644 --- a/arch/arm/mach-vexpress/spc.c +++ b/arch/arm/mach-vexpress/spc.c @@ -426,9 +426,15 @@ static int ve_spc_populate_opps(uint32_t cluster) static int ve_init_opp_table(struct device *cpu_dev) { - int cluster = topology_physical_package_id(cpu_dev->id); - int idx, ret = 0, max_opp = info->num_opps[cluster]; - struct ve_spc_opp *opps = info->opps[cluster]; + int cluster; + int idx, ret = 0, max_opp; + struct ve_spc_opp *opps; + + cluster = topology_physical_package_id(cpu_dev->id); + cluster = cluster < 0 ? 0 : cluster; + + max_opp = info->num_opps[cluster]; + opps = info->opps[cluster]; for (idx = 0; idx < max_opp; idx++, opps++) { ret = dev_pm_opp_add(cpu_dev, opps->freq * 1000, opps->u_volt); @@ -537,6 +543,8 @@ static struct clk *ve_spc_clk_register(struct device *cpu_dev) spc->hw.init = &init; spc->cluster = topology_physical_package_id(cpu_dev->id); + spc->cluster = spc->cluster < 0 ? 0 : spc->cluster; + init.name = dev_name(cpu_dev); init.ops = &clk_spc_ops; init.flags = CLK_IS_ROOT | CLK_GET_RATE_NOCACHE; diff --git a/arch/arm/mm/abort-ev6.S b/arch/arm/mm/abort-ev6.S index 3815a82..8c48c5c 100644 --- a/arch/arm/mm/abort-ev6.S +++ b/arch/arm/mm/abort-ev6.S @@ -17,12 +17,6 @@ */ .align 5 ENTRY(v6_early_abort) -#ifdef CONFIG_CPU_V6 - sub r1, sp, #4 @ Get unused stack location - strex r0, r1, [r1] @ Clear the exclusive monitor -#elif defined(CONFIG_CPU_32v6K) - clrex -#endif mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR /* diff --git a/arch/arm/mm/abort-ev7.S b/arch/arm/mm/abort-ev7.S index 7033752..4812ad0 100644 --- a/arch/arm/mm/abort-ev7.S +++ b/arch/arm/mm/abort-ev7.S @@ -13,12 +13,6 @@ */ .align 5 ENTRY(v7_early_abort) - /* - * The effect of data aborts on on the exclusive access monitor are - * UNPREDICTABLE. 
Do a CLREX to clear the state - */ - clrex - mrc p15, 0, r1, c5, c0, 0 @ get FSR mrc p15, 0, r0, c6, c0, 0 @ get FAR diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile index 1296952..1f85bfe 100644 --- a/arch/arm/xen/Makefile +++ b/arch/arm/xen/Makefile @@ -1 +1 @@ -obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o +obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c index 98544c5..0e15f01 100644 --- a/arch/arm/xen/enlighten.c +++ b/arch/arm/xen/enlighten.c @@ -260,6 +260,12 @@ static int __init xen_guest_init(void) xen_domain_type = XEN_HVM_DOMAIN; xen_setup_features(); + + if (!xen_feature(XENFEAT_grant_map_identity)) { + pr_warn("Please upgrade your Xen.\n" + "If your platform has any non-coherent DMA devices, they won't work properly.\n"); + } + if (xen_feature(XENFEAT_dom0)) xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; else diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c new file mode 100644 index 0000000..3b99860 --- /dev/null +++ b/arch/arm/xen/mm32.c @@ -0,0 +1,202 @@ +#include <linux/cpu.h> +#include <linux/dma-mapping.h> +#include <linux/gfp.h> +#include <linux/highmem.h> + +#include <xen/features.h> + +static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt); +static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep); + +static int alloc_xen_mm32_scratch_page(int cpu) +{ + struct page *page; + unsigned long virt; + pmd_t *pmdp; + pte_t *ptep; + + if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL) + return 0; + + page = alloc_page(GFP_KERNEL); + if (page == NULL) { + pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu); + return -ENOMEM; + } + + virt = (unsigned long)__va(page_to_phys(page)); + pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt); + ptep = pte_offset_kernel(pmdp, virt); + + per_cpu(xen_mm32_scratch_virt, cpu) = virt; + per_cpu(xen_mm32_scratch_ptep, cpu) = ptep; + + return 0; +} + +static int xen_mm32_cpu_notify(struct notifier_block *self, + unsigned long action, void *hcpu) +{ + int cpu = (long)hcpu; + switch (action) { + case CPU_UP_PREPARE: + if (alloc_xen_mm32_scratch_page(cpu)) + return NOTIFY_BAD; + break; + default: + break; + } + return NOTIFY_OK; +} + +static struct notifier_block xen_mm32_cpu_notifier = { + .notifier_call = xen_mm32_cpu_notify, +}; + +static void* xen_mm32_remap_page(dma_addr_t handle) +{ + unsigned long virt = get_cpu_var(xen_mm32_scratch_virt); + pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep); + + *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL); + local_flush_tlb_kernel_page(virt); + + return (void*)virt; +} + +static void xen_mm32_unmap(void *vaddr) +{ + put_cpu_var(xen_mm32_scratch_virt); +} + + +/* functions called by SWIOTLB */ + +static void dma_cache_maint(dma_addr_t handle, unsigned long offset, + size_t size, enum dma_data_direction dir, + void (*op)(const void *, size_t, int)) +{ + unsigned long pfn; + size_t left = size; + + pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE; + offset %= PAGE_SIZE; + + do { + size_t len = left; + void *vaddr; + + if (!pfn_valid(pfn)) + { + /* Cannot map the page, we don't know its physical address. 
+ * Return and hope for the best */ + if (!xen_feature(XENFEAT_grant_map_identity)) + return; + vaddr = xen_mm32_remap_page(handle) + offset; + op(vaddr, len, dir); + xen_mm32_unmap(vaddr - offset); + } else { + struct page *page = pfn_to_page(pfn); + + if (PageHighMem(page)) { + if (len + offset > PAGE_SIZE) + len = PAGE_SIZE - offset; + + if (cache_is_vipt_nonaliasing()) { + vaddr = kmap_atomic(page); + op(vaddr + offset, len, dir); + kunmap_atomic(vaddr); + } else { + vaddr = kmap_high_get(page); + if (vaddr) { + op(vaddr + offset, len, dir); + kunmap_high(page); + } + } + } else { + vaddr = page_address(page) + offset; + op(vaddr, len, dir); + } + } + + offset = 0; + pfn++; + left -= len; + } while (left); +} + +static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + /* Cannot use __dma_page_dev_to_cpu because we don't have a + * struct page for handle */ + + if (dir != DMA_TO_DEVICE) + outer_inv_range(handle, handle + size); + + dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area); +} + +static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir) +{ + + dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area); + + if (dir == DMA_FROM_DEVICE) { + outer_inv_range(handle, handle + size); + } else { + outer_clean_range(handle, handle + size); + } +} + +void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs) + +{ + if (!__generic_dma_ops(hwdev)->unmap_page) + return; + if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs)) + return; + + __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); +} + +void xen_dma_sync_single_for_cpu(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (!__generic_dma_ops(hwdev)->sync_single_for_cpu) + return; + __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir); +} + +void xen_dma_sync_single_for_device(struct device *hwdev, + dma_addr_t handle, size_t size, enum dma_data_direction dir) +{ + if (!__generic_dma_ops(hwdev)->sync_single_for_device) + return; + __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir); +} + +int __init xen_mm32_init(void) +{ + int cpu; + + if (!xen_initial_domain()) + return 0; + + register_cpu_notifier(&xen_mm32_cpu_notifier); + get_online_cpus(); + for_each_online_cpu(cpu) { + if (alloc_xen_mm32_scratch_page(cpu)) { + put_online_cpus(); + unregister_cpu_notifier(&xen_mm32_cpu_notifier); + return -ENOMEM; + } + } + put_online_cpus(); + + return 0; +} +arch_initcall(xen_mm32_init); diff --git a/arch/arm/xen/p2m.c b/arch/arm/xen/p2m.c index 97baf44..0548577 100644 --- a/arch/arm/xen/p2m.c +++ b/arch/arm/xen/p2m.c @@ -21,14 +21,12 @@ struct xen_p2m_entry { unsigned long pfn; unsigned long mfn; unsigned long nr_pages; - struct rb_node rbnode_mach; struct rb_node rbnode_phys; }; static rwlock_t p2m_lock; struct rb_root phys_to_mach = RB_ROOT; EXPORT_SYMBOL_GPL(phys_to_mach); -static struct rb_root mach_to_phys = RB_ROOT; static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) { @@ -41,8 +39,6 @@ static int xen_add_phys_to_mach_entry(struct xen_p2m_entry *new) parent = *link; entry = rb_entry(parent, struct xen_p2m_entry, rbnode_phys); - if (new->mfn == entry->mfn) - goto err_out; if (new->pfn == entry->pfn) goto err_out; @@ -88,64 +84,6 @@ unsigned long __pfn_to_mfn(unsigned long pfn) } EXPORT_SYMBOL_GPL(__pfn_to_mfn); -static int 
xen_add_mach_to_phys_entry(struct xen_p2m_entry *new) -{ - struct rb_node **link = &mach_to_phys.rb_node; - struct rb_node *parent = NULL; - struct xen_p2m_entry *entry; - int rc = 0; - - while (*link) { - parent = *link; - entry = rb_entry(parent, struct xen_p2m_entry, rbnode_mach); - - if (new->mfn == entry->mfn) - goto err_out; - if (new->pfn == entry->pfn) - goto err_out; - - if (new->mfn < entry->mfn) - link = &(*link)->rb_left; - else - link = &(*link)->rb_right; - } - rb_link_node(&new->rbnode_mach, parent, link); - rb_insert_color(&new->rbnode_mach, &mach_to_phys); - goto out; - -err_out: - rc = -EINVAL; - pr_warn("%s: cannot add pfn=%pa -> mfn=%pa: pfn=%pa -> mfn=%pa already exists\n", - __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn); -out: - return rc; -} - -unsigned long __mfn_to_pfn(unsigned long mfn) -{ - struct rb_node *n = mach_to_phys.rb_node; - struct xen_p2m_entry *entry; - unsigned long irqflags; - - read_lock_irqsave(&p2m_lock, irqflags); - while (n) { - entry = rb_entry(n, struct xen_p2m_entry, rbnode_mach); - if (entry->mfn <= mfn && - entry->mfn + entry->nr_pages > mfn) { - read_unlock_irqrestore(&p2m_lock, irqflags); - return entry->pfn + (mfn - entry->mfn); - } - if (mfn < entry->mfn) - n = n->rb_left; - else - n = n->rb_right; - } - read_unlock_irqrestore(&p2m_lock, irqflags); - - return INVALID_P2M_ENTRY; -} -EXPORT_SYMBOL_GPL(__mfn_to_pfn); - int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, struct gnttab_map_grant_ref *kmap_ops, struct page **pages, unsigned int count) @@ -192,7 +130,6 @@ bool __set_phys_to_machine_multi(unsigned long pfn, p2m_entry = rb_entry(n, struct xen_p2m_entry, rbnode_phys); if (p2m_entry->pfn <= pfn && p2m_entry->pfn + p2m_entry->nr_pages > pfn) { - rb_erase(&p2m_entry->rbnode_mach, &mach_to_phys); rb_erase(&p2m_entry->rbnode_phys, &phys_to_mach); write_unlock_irqrestore(&p2m_lock, irqflags); kfree(p2m_entry); @@ -217,8 +154,7 @@ bool __set_phys_to_machine_multi(unsigned long pfn, p2m_entry->mfn = mfn; write_lock_irqsave(&p2m_lock, irqflags); - if ((rc = xen_add_phys_to_mach_entry(p2m_entry) < 0) || - (rc = xen_add_mach_to_phys_entry(p2m_entry) < 0)) { + if ((rc = xen_add_phys_to_mach_entry(p2m_entry)) < 0) { write_unlock_irqrestore(&p2m_lock, irqflags); return false; } diff --git a/arch/arm64/crypto/sha2-ce-glue.c b/arch/arm64/crypto/sha2-ce-glue.c index c294e67..ae67e88 100644 --- a/arch/arm64/crypto/sha2-ce-glue.c +++ b/arch/arm64/crypto/sha2-ce-glue.c @@ -150,7 +150,6 @@ static void sha2_finup(struct shash_desc *desc, const u8 *data, kernel_neon_begin_partial(28); sha2_ce_transform(blocks, data, sctx->state, NULL, len); kernel_neon_end(); - data += blocks * SHA256_BLOCK_SIZE; } static int sha224_finup(struct shash_desc *desc, const u8 *data, diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h index d064047..52b484b 100644 --- a/arch/arm64/include/asm/hw_breakpoint.h +++ b/arch/arm64/include/asm/hw_breakpoint.h @@ -79,7 +79,6 @@ static inline void decode_ctrl_reg(u32 reg, */ #define ARM_MAX_BRP 16 #define ARM_MAX_WRP 16 -#define ARM_MAX_HBP_SLOTS (ARM_MAX_BRP + ARM_MAX_WRP) /* Virtual debug register bases. 
*/ #define AARCH64_DBG_REG_BVR 0 diff --git a/arch/arm64/include/asm/processor.h b/arch/arm64/include/asm/processor.h index 3df21fe..286b1be 100644 --- a/arch/arm64/include/asm/processor.h +++ b/arch/arm64/include/asm/processor.h @@ -139,7 +139,7 @@ extern struct task_struct *cpu_switch_to(struct task_struct *prev, ((struct pt_regs *)(THREAD_START_SP + task_stack_page(p)) - 1) #define KSTK_EIP(tsk) ((unsigned long)task_pt_regs(tsk)->pc) -#define KSTK_ESP(tsk) ((unsigned long)task_pt_regs(tsk)->sp) +#define KSTK_ESP(tsk) user_stack_pointer(task_pt_regs(tsk)) /* * Prefetching support diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h index 501000f..41ed9e1 100644 --- a/arch/arm64/include/asm/ptrace.h +++ b/arch/arm64/include/asm/ptrace.h @@ -137,7 +137,7 @@ struct pt_regs { (!((regs)->pstate & PSR_F_BIT)) #define user_stack_pointer(regs) \ - (!compat_user_mode(regs)) ? ((regs)->sp) : ((regs)->compat_sp) + (!compat_user_mode(regs) ? (regs)->sp : (regs)->compat_sp) static inline unsigned long regs_return_value(struct pt_regs *regs) { diff --git a/arch/arm64/kernel/fpsimd.c b/arch/arm64/kernel/fpsimd.c index ad8aebb..3dca156 100644 --- a/arch/arm64/kernel/fpsimd.c +++ b/arch/arm64/kernel/fpsimd.c @@ -270,6 +270,7 @@ static int fpsimd_cpu_pm_notifier(struct notifier_block *self, case CPU_PM_ENTER: if (current->mm && !test_thread_flag(TIF_FOREIGN_FPSTATE)) fpsimd_save_state(&current->thread.fpsimd_state); + this_cpu_write(fpsimd_last_state, NULL); break; case CPU_PM_EXIT: if (current->mm) diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S index bed0283..8730690 100644 --- a/arch/arm64/kernel/head.S +++ b/arch/arm64/kernel/head.S @@ -373,10 +373,6 @@ ENTRY(__boot_cpu_mode) .long 0 .popsection - .align 3 -2: .quad . - .quad PAGE_OFFSET - #ifdef CONFIG_SMP .align 3 1: .quad . 
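The user_stack_pointer() hunk above is a plain operator-precedence fix: the old macro body left the conditional expression unparenthesised, so any arithmetic applied to the macro's result silently bound to the else branch only. A minimal standalone C sketch of the hazard (illustrative only; OLD_USP and NEW_USP are made-up names, not kernel macros):

    /*
     * Illustrative only. With the unparenthesised expansion, an expression
     * such as "user_stack_pointer(regs) - 8" turns into
     *     (!compat) ? (sp) : ((csp) - 8)
     * because binary '-' binds tighter than '?:', so the subtraction is
     * applied to the else branch alone.
     */
    #include <stdio.h>

    #define OLD_USP(compat, sp, csp) (!(compat)) ? (sp) : (csp)   /* like the old macro */
    #define NEW_USP(compat, sp, csp) (!(compat) ? (sp) : (csp))   /* like the fixed macro */

    int main(void)
    {
            long sp = 100, csp = 50;
            int compat = 0;                        /* native (non-compat) task */

            printf("old: %ld\n", OLD_USP(compat, sp, csp) - 8);   /* prints 100: the "- 8" is lost */
            printf("new: %ld\n", NEW_USP(compat, sp, csp) - 8);   /* prints 92 */
            return 0;
    }

The KSTK_ESP() change in the same patch relies on this: redefining it in terms of user_stack_pointer() is only safe once the whole conditional is wrapped in parentheses, since KSTK_ESP() is routinely expanded inside larger expressions.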
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c index 0f08dfd..dfa6e3e 100644 --- a/arch/arm64/kernel/irq.c +++ b/arch/arm64/kernel/irq.c @@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc) if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) return false; - if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) + if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { + affinity = cpu_online_mask; ret = true; + } - /* - * when using forced irq_set_affinity we must ensure that the cpu - * being offlined is not present in the affinity mask, it may be - * selected as the target CPU otherwise - */ - affinity = cpu_online_mask; c = irq_data_get_irq_chip(d); if (!c->irq_set_affinity) pr_debug("IRQ%u: unable to set affinity\n", d->irq); - else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret) + else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) cpumask_copy(d->affinity, affinity); return ret; diff --git a/arch/arm64/kernel/perf_regs.c b/arch/arm64/kernel/perf_regs.c index 422ebd6..6762ad7 100644 --- a/arch/arm64/kernel/perf_regs.c +++ b/arch/arm64/kernel/perf_regs.c @@ -24,6 +24,12 @@ u64 perf_reg_value(struct pt_regs *regs, int idx) return regs->compat_lr; } + if ((u32)idx == PERF_REG_ARM64_SP) + return regs->sp; + + if ((u32)idx == PERF_REG_ARM64_PC) + return regs->pc; + return regs->regs[idx]; } diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c index 1309d64..29d4869 100644 --- a/arch/arm64/kernel/process.c +++ b/arch/arm64/kernel/process.c @@ -230,9 +230,27 @@ void exit_thread(void) { } +static void tls_thread_flush(void) +{ + asm ("msr tpidr_el0, xzr"); + + if (is_compat_task()) { + current->thread.tp_value = 0; + + /* + * We need to ensure ordering between the shadow state and the + * hardware state, so that we don't corrupt the hardware state + * with a stale shadow state during context switch. 
+ */ + barrier(); + asm ("msr tpidrro_el0, xzr"); + } +} + void flush_thread(void) { fpsimd_flush_thread(); + tls_thread_flush(); flush_ptrace_hw_breakpoint(current); } diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c index 70526cf..fe63ac5 100644 --- a/arch/arm64/kernel/ptrace.c +++ b/arch/arm64/kernel/ptrace.c @@ -87,7 +87,8 @@ static void ptrace_hbptriggered(struct perf_event *bp, break; } } - for (i = ARM_MAX_BRP; i < ARM_MAX_HBP_SLOTS && !bp; ++i) { + + for (i = 0; i < ARM_MAX_WRP; ++i) { if (current->thread.debug.hbp_watch[i] == bp) { info.si_errno = -((i << 1) + 1); break; @@ -662,8 +663,10 @@ static int compat_gpr_get(struct task_struct *target, kbuf += sizeof(reg); } else { ret = copy_to_user(ubuf, &reg, sizeof(reg)); - if (ret) + if (ret) { + ret = -EFAULT; break; + } ubuf += sizeof(reg); } @@ -701,8 +704,10 @@ static int compat_gpr_set(struct task_struct *target, kbuf += sizeof(reg); } else { ret = copy_from_user(&reg, ubuf, sizeof(reg)); - if (ret) - return ret; + if (ret) { + ret = -EFAULT; + break; + } ubuf += sizeof(reg); } diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index f6f0ccf..edb146d 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -78,6 +78,7 @@ unsigned int compat_elf_hwcap2 __read_mostly; #endif static const char *cpu_name; +static const char *machine_name; phys_addr_t __fdt_pointer __initdata; /* @@ -309,6 +310,8 @@ static void __init setup_machine_fdt(phys_addr_t dt_phys) while (true) cpu_relax(); } + + machine_name = of_flat_dt_get_machine_name(); } /* @@ -447,21 +450,10 @@ static int c_show(struct seq_file *m, void *v) { int i; - /* - * Dump out the common processor features in a single line. Userspace - * should read the hwcaps with getauxval(AT_HWCAP) rather than - * attempting to parse this. 
- */ - seq_puts(m, "features\t:"); - for (i = 0; hwcap_str[i]; i++) - if (elf_hwcap & (1 << i)) - seq_printf(m, " %s", hwcap_str[i]); - seq_puts(m, "\n\n"); + seq_printf(m, "Processor\t: %s rev %d (%s)\n", + cpu_name, read_cpuid_id() & 15, ELF_PLATFORM); for_each_online_cpu(i) { - struct cpuinfo_arm64 *cpuinfo = &per_cpu(cpu_data, i); - u32 midr = cpuinfo->reg_midr; - /* * glibc reads /proc/cpuinfo to determine the number of * online processors, looking for lines beginning with @@ -470,13 +462,25 @@ static int c_show(struct seq_file *m, void *v) #ifdef CONFIG_SMP seq_printf(m, "processor\t: %d\n", i); #endif - seq_printf(m, "implementer\t: 0x%02x\n", - MIDR_IMPLEMENTOR(midr)); - seq_printf(m, "variant\t\t: 0x%x\n", MIDR_VARIANT(midr)); - seq_printf(m, "partnum\t\t: 0x%03x\n", MIDR_PARTNUM(midr)); - seq_printf(m, "revision\t: 0x%x\n\n", MIDR_REVISION(midr)); } + /* dump out the processor features */ + seq_puts(m, "Features\t: "); + + for (i = 0; hwcap_str[i]; i++) + if (elf_hwcap & (1 << i)) + seq_printf(m, "%s ", hwcap_str[i]); + + seq_printf(m, "\nCPU implementer\t: 0x%02x\n", read_cpuid_id() >> 24); + seq_printf(m, "CPU architecture: AArch64\n"); + seq_printf(m, "CPU variant\t: 0x%x\n", (read_cpuid_id() >> 20) & 15); + seq_printf(m, "CPU part\t: 0x%03x\n", (read_cpuid_id() >> 4) & 0xfff); + seq_printf(m, "CPU revision\t: %d\n", read_cpuid_id() & 15); + + seq_puts(m, "\n"); + + seq_printf(m, "Hardware\t: %s\n", machine_name); + return 0; } diff --git a/arch/arm64/kernel/sys_compat.c b/arch/arm64/kernel/sys_compat.c index de2b022..dc47e53 100644 --- a/arch/arm64/kernel/sys_compat.c +++ b/arch/arm64/kernel/sys_compat.c @@ -79,6 +79,12 @@ long compat_arm_syscall(struct pt_regs *regs) case __ARM_NR_compat_set_tls: current->thread.tp_value = regs->regs[0]; + + /* + * Protect against register corruption from context switch. + * See comment in tls_thread_flush. 
+ */ + barrier(); asm ("msr tpidrro_el0, %0" : : "r" (regs->regs[0])); return 0; diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index e28be51..34b8bd0 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -66,6 +66,8 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) else kvm_vcpu_block(vcpu); + kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); + return 1; } diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S index d968796..c319116 100644 --- a/arch/arm64/kvm/hyp-init.S +++ b/arch/arm64/kvm/hyp-init.S @@ -80,6 +80,10 @@ __do_hyp_init: msr mair_el2, x4 isb + /* Invalidate the stale TLBs from Bootloader */ + tlbi alle2 + dsb sy + mrs x4, sctlr_el2 and x4, x4, #SCTLR_EL2_EE // preserve endianness of EL2 ldr x5, =SCTLR_EL2_FLAGS diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index 5472c24..a83061f 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -149,8 +149,7 @@ void __init arm64_memblock_init(void) memblock_reserve(__virt_to_phys(initrd_start), initrd_end - initrd_start); #endif - if (!efi_enabled(EFI_MEMMAP)) - early_init_fdt_scan_reserved_mem(); + early_init_fdt_scan_reserved_mem(); /* 4GB maximum for 32-bit only capable devices */ if (IS_ENABLED(CONFIG_ZONE_DMA)) diff --git a/arch/hexagon/mm/cache.c b/arch/hexagon/mm/cache.c index fe14ccf..0c76c80 100644 --- a/arch/hexagon/mm/cache.c +++ b/arch/hexagon/mm/cache.c @@ -68,6 +68,7 @@ void flush_icache_range(unsigned long start, unsigned long end) ); local_irq_restore(flags); } +EXPORT_SYMBOL(flush_icache_range); void hexagon_clean_dcache_range(unsigned long start, unsigned long end) { diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 64aefb7..c84c88b 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -549,8 +549,6 @@ source "drivers/sn/Kconfig" config KEXEC bool "kexec system call" depends on !IA64_HP_SIM && (!SMP || HOTPLUG_CPU) - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/ia64/include/uapi/asm/unistd.h b/arch/ia64/include/uapi/asm/unistd.h index 6a65bb7..18026b2 100644 --- a/arch/ia64/include/uapi/asm/unistd.h +++ b/arch/ia64/include/uapi/asm/unistd.h @@ -329,6 +329,6 @@ #define __NR_sched_getattr 1337 #define __NR_renameat2 1338 #define __NR_getrandom 1339 -#define __NR_memfd_create 1339 +#define __NR_memfd_create 1340 #endif /* _UAPI_ASM_IA64_UNISTD_H */ diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index 3ff8c9a..87b7c75 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -91,8 +91,6 @@ config MMU_SUN3 config KEXEC bool "kexec system call" depends on M68KCLASSIC - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index 1fcdd34..4ef7a54 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include <uapi/asm/unistd.h> -#define NR_syscalls 352 +#define NR_syscalls 354 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 9cd82fb..b419c6b 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -357,5 +357,7 @@ #define __NR_sched_setattr 349 #define __NR_sched_getattr 350 #define __NR_renameat2 351 +#define __NR_getrandom 352 +#define __NR_memfd_create 353 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index 501e102..05b46c2 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -372,4 +372,6 @@ ENTRY(sys_call_table) .long sys_sched_setattr .long sys_sched_getattr /* 350 */ .long sys_renameat2 + .long sys_getrandom + .long sys_memfd_create diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 40e1c1d..6feded3 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -127,7 +127,7 @@ config SECCOMP endmenu -menu "Advanced setup" +menu "Kernel features" config ADVANCED_OPTIONS bool "Prompt for advanced kernel configuration options" @@ -248,10 +248,10 @@ config MICROBLAZE_64K_PAGES endchoice -endmenu - source "mm/Kconfig" +endmenu + menu "Executable file formats" source "fs/Kconfig.binfmt" diff --git a/arch/microblaze/include/asm/entry.h b/arch/microblaze/include/asm/entry.h index b4a4cb1..596e485 100644 --- a/arch/microblaze/include/asm/entry.h +++ b/arch/microblaze/include/asm/entry.h @@ -15,6 +15,7 @@ #include <asm/percpu.h> #include <asm/ptrace.h> +#include <linux/linkage.h> /* * These are per-cpu variables required in entry.S, among other diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 0aa0057..59a89a6 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -98,13 +98,13 @@ static inline int access_ok(int type, const void __user *addr, if ((get_fs().seg < ((unsigned long)addr)) || (get_fs().seg < ((unsigned long)addr + size - 1))) { - pr_debug("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + pr_devel("ACCESS fail: %s at 0x%08x (size 0x%x), seg 0x%08x\n", type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, (u32)get_fs().seg); return 0; } ok: - pr_debug("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", + pr_devel("ACCESS OK: %s at 0x%08x (size 0x%x), seg 0x%08x\n", type ? "WRITE" : "READ ", (__force u32)addr, (u32)size, (u32)get_fs().seg); return 1; diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h index fd56a8f..ea4b233 100644 --- a/arch/microblaze/include/asm/unistd.h +++ b/arch/microblaze/include/asm/unistd.h @@ -38,6 +38,6 @@ #endif /* __ASSEMBLY__ */ -#define __NR_syscalls 381 +#define __NR_syscalls 387 #endif /* _ASM_MICROBLAZE_UNISTD_H */ diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index df51e78..574c430 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -546,6 +546,7 @@ config SGI_IP28 # select SYS_HAS_EARLY_PRINTK select SYS_SUPPORTS_64BIT_KERNEL select SYS_SUPPORTS_BIG_ENDIAN + select MIPS_L1_CACHE_SHIFT_7 help This is the SGI Indigo2 with R10000 processor. To compile a Linux kernel that runs on these, say Y here. 
@@ -2029,7 +2030,9 @@ config MIPS_CMP bool "MIPS CMP framework support (DEPRECATED)" depends on SYS_SUPPORTS_MIPS_CMP select MIPS_GIC_IPI + select SMP select SYNC_R4K + select SYS_SUPPORTS_SMP select WEAK_ORDERING default n help @@ -2396,8 +2399,6 @@ source "kernel/Kconfig.preempt" config KEXEC bool "Kexec system call" - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/mips/Makefile b/arch/mips/Makefile index 9336509..bbac51e1 100644 --- a/arch/mips/Makefile +++ b/arch/mips/Makefile @@ -113,7 +113,16 @@ predef-le += -DMIPSEL -D_MIPSEL -D__MIPSEL -D__MIPSEL__ cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' && echo -EB $(undef-all) $(predef-be)) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += $(shell $(CC) -dumpmachine |grep -q 'mips.*el-.*' || echo -EL $(undef-all) $(predef-le)) -cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) +# For smartmips configurations, there are hundreds of warnings due to ISA overrides +# in assembly and header files. smartmips is only supported for MIPS32r1 onwards +# and there is no support for 64-bit. Various '.set mips2' or '.set mips3' or +# similar directives in the kernel will spam the build logs with the following warnings: +# Warning: the `smartmips' extension requires MIPS32 revision 1 or greater +# or +# Warning: the 64-bit MIPS architecture does not support the `smartmips' extension +# Pass -Wa,--no-warn to disable all assembler warnings until the kernel code has +# been fixed properly. +cflags-$(CONFIG_CPU_HAS_SMARTMIPS) += $(call cc-option,-msmartmips) -Wa,--no-warn cflags-$(CONFIG_CPU_MICROMIPS) += $(call cc-option,-mmicromips) cflags-$(CONFIG_SB1XXX_CORELIS) += $(call cc-option,-mno-sched-prolog) \ diff --git a/arch/mips/bcm63xx/irq.c b/arch/mips/bcm63xx/irq.c index 37eb2d1..b94bf44d 100644 --- a/arch/mips/bcm63xx/irq.c +++ b/arch/mips/bcm63xx/irq.c @@ -434,7 +434,7 @@ static void bcm63xx_init_irq(void) irq_stat_addr[0] += PERF_IRQSTAT_3368_REG; irq_mask_addr[0] += PERF_IRQMASK_3368_REG; irq_stat_addr[1] = 0; - irq_stat_addr[1] = 0; + irq_mask_addr[1] = 0; irq_bits = 32; ext_irq_count = 4; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_3368; @@ -443,7 +443,7 @@ static void bcm63xx_init_irq(void) irq_stat_addr[0] += PERF_IRQSTAT_6328_REG(0); irq_mask_addr[0] += PERF_IRQMASK_6328_REG(0); irq_stat_addr[1] += PERF_IRQSTAT_6328_REG(1); - irq_stat_addr[1] += PERF_IRQMASK_6328_REG(1); + irq_mask_addr[1] += PERF_IRQMASK_6328_REG(1); irq_bits = 64; ext_irq_count = 4; is_ext_irq_cascaded = 1; diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c index b49c7ad..31903cf 100644 --- a/arch/mips/boot/compressed/decompress.c +++ b/arch/mips/boot/compressed/decompress.c @@ -13,6 +13,7 @@ #include <linux/types.h> #include <linux/kernel.h> +#include <linux/string.h> #include <asm/addrspace.h> diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h index d035298..51f80bd 100644 --- a/arch/mips/include/asm/cop2.h +++ b/arch/mips/include/asm/cop2.h @@ -16,8 +16,8 @@ extern void octeon_cop2_save(struct octeon_cop2_state *); extern void octeon_cop2_restore(struct octeon_cop2_state *); -#define cop2_save(r) octeon_cop2_save(r) -#define cop2_restore(r) octeon_cop2_restore(r) +#define cop2_save(r) octeon_cop2_save(&(r)->thread.cp2) +#define cop2_restore(r) octeon_cop2_restore(&(r)->thread.cp2) #define cop2_present 1 #define cop2_lazy_restore 
1 @@ -26,26 +26,26 @@ extern void octeon_cop2_restore(struct octeon_cop2_state *); extern void nlm_cop2_save(struct nlm_cop2_state *); extern void nlm_cop2_restore(struct nlm_cop2_state *); -#define cop2_save(r) nlm_cop2_save(r) -#define cop2_restore(r) nlm_cop2_restore(r) + +#define cop2_save(r) nlm_cop2_save(&(r)->thread.cp2) +#define cop2_restore(r) nlm_cop2_restore(&(r)->thread.cp2) #define cop2_present 1 #define cop2_lazy_restore 0 #elif defined(CONFIG_CPU_LOONGSON3) -#define cop2_save(r) -#define cop2_restore(r) - #define cop2_present 1 #define cop2_lazy_restore 1 +#define cop2_save(r) do { (r); } while (0) +#define cop2_restore(r) do { (r); } while (0) #else #define cop2_present 0 #define cop2_lazy_restore 0 -#define cop2_save(r) -#define cop2_restore(r) +#define cop2_save(r) do { (r); } while (0) +#define cop2_restore(r) do { (r); } while (0) #endif enum cu2_ops { diff --git a/arch/mips/include/asm/mach-ip28/spaces.h b/arch/mips/include/asm/mach-ip28/spaces.h index 5d6a764..c4a9127 100644 --- a/arch/mips/include/asm/mach-ip28/spaces.h +++ b/arch/mips/include/asm/mach-ip28/spaces.h @@ -11,15 +11,8 @@ #ifndef _ASM_MACH_IP28_SPACES_H #define _ASM_MACH_IP28_SPACES_H -#define CAC_BASE _AC(0xa800000000000000, UL) - -#define HIGHMEM_START (~0UL) - #define PHYS_OFFSET _AC(0x20000000, UL) -#define UNCAC_BASE _AC(0xc0000000, UL) /* 0xa0000000 + PHYS_OFFSET */ -#define IO_BASE UNCAC_BASE - #include <asm/mach-generic/spaces.h> #endif /* _ASM_MACH_IP28_SPACES_H */ diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h index 5699ec3..3be8180 100644 --- a/arch/mips/include/asm/page.h +++ b/arch/mips/include/asm/page.h @@ -37,7 +37,7 @@ /* * This is used for calculating the real page sizes - * for FTLB or VTLB + FTLB confugrations. + * for FTLB or VTLB + FTLB configurations. 
*/ static inline unsigned int page_size_ftlb(unsigned int mmuextdef) { @@ -223,7 +223,8 @@ static inline int pfn_valid(unsigned long pfn) #endif -#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys(kaddr))) +#define virt_to_page(kaddr) pfn_to_page(PFN_DOWN(virt_to_phys((void *) \ + (kaddr)))) extern int __virt_addr_valid(const volatile void *kaddr); #define virt_addr_valid(kaddr) \ diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h index 1e0f20a..eacf865 100644 --- a/arch/mips/include/asm/smp.h +++ b/arch/mips/include/asm/smp.h @@ -37,11 +37,6 @@ extern int __cpu_logical_map[NR_CPUS]; #define NO_PROC_ID (-1) -#define topology_physical_package_id(cpu) (cpu_data[cpu].package) -#define topology_core_id(cpu) (cpu_data[cpu].core) -#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) -#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) - #define SMP_RESCHEDULE_YOURSELF 0x1 /* XXX braindead */ #define SMP_CALL_FUNCTION 0x2 /* Octeon - Tell another core to flush its icache */ diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h index 495c104..b928b6f 100644 --- a/arch/mips/include/asm/switch_to.h +++ b/arch/mips/include/asm/switch_to.h @@ -92,7 +92,7 @@ do { \ KSTK_STATUS(prev) &= ~ST0_CU2; \ __c0_stat = read_c0_status(); \ write_c0_status(__c0_stat | ST0_CU2); \ - cop2_save(&prev->thread.cp2); \ + cop2_save(prev); \ write_c0_status(__c0_stat & ~ST0_CU2); \ } \ __clear_software_ll_bit(); \ @@ -111,7 +111,7 @@ do { \ (KSTK_STATUS(current) & ST0_CU2)) { \ __c0_stat = read_c0_status(); \ write_c0_status(__c0_stat | ST0_CU2); \ - cop2_restore(&current->thread.cp2); \ + cop2_restore(current); \ write_c0_status(__c0_stat & ~ST0_CU2); \ } \ if (cpu_has_dsp) \ diff --git a/arch/mips/include/asm/topology.h b/arch/mips/include/asm/topology.h index 20ea485..3e307ec 100644 --- a/arch/mips/include/asm/topology.h +++ b/arch/mips/include/asm/topology.h @@ -9,5 +9,13 @@ #define __ASM_TOPOLOGY_H #include <topology.h> +#include <linux/smp.h> + +#ifdef CONFIG_SMP +#define topology_physical_package_id(cpu) (cpu_data[cpu].package) +#define topology_core_id(cpu) (cpu_data[cpu].core) +#define topology_core_cpumask(cpu) (&cpu_core_map[cpu]) +#define topology_thread_cpumask(cpu) (&cpu_sibling_map[cpu]) +#endif #endif /* __ASM_TOPOLOGY_H */ diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h index 9bc13ea..fdb4923 100644 --- a/arch/mips/include/uapi/asm/unistd.h +++ b/arch/mips/include/uapi/asm/unistd.h @@ -373,16 +373,18 @@ #define __NR_sched_getattr (__NR_Linux + 350) #define __NR_renameat2 (__NR_Linux + 351) #define __NR_seccomp (__NR_Linux + 352) +#define __NR_getrandom (__NR_Linux + 353) +#define __NR_memfd_create (__NR_Linux + 354) /* * Offset of the last Linux o32 flavoured syscall */ -#define __NR_Linux_syscalls 352 +#define __NR_Linux_syscalls 354 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ #define __NR_O32_Linux 4000 -#define __NR_O32_Linux_syscalls 352 +#define __NR_O32_Linux_syscalls 354 #if _MIPS_SIM == _MIPS_SIM_ABI64 @@ -703,16 +705,18 @@ #define __NR_sched_getattr (__NR_Linux + 310) #define __NR_renameat2 (__NR_Linux + 311) #define __NR_seccomp (__NR_Linux + 312) +#define __NR_getrandom (__NR_Linux + 313) +#define __NR_memfd_create (__NR_Linux + 314) /* * Offset of the last Linux 64-bit flavoured syscall */ -#define __NR_Linux_syscalls 312 +#define __NR_Linux_syscalls 314 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ #define __NR_64_Linux 5000 -#define __NR_64_Linux_syscalls 312 +#define __NR_64_Linux_syscalls 314 
#if _MIPS_SIM == _MIPS_SIM_NABI32 @@ -1037,15 +1041,17 @@ #define __NR_sched_getattr (__NR_Linux + 314) #define __NR_renameat2 (__NR_Linux + 315) #define __NR_seccomp (__NR_Linux + 316) +#define __NR_getrandom (__NR_Linux + 317) +#define __NR_memfd_create (__NR_Linux + 318) /* * Offset of the last N32 flavoured syscall */ -#define __NR_Linux_syscalls 316 +#define __NR_Linux_syscalls 318 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ #define __NR_N32_Linux 6000 -#define __NR_N32_Linux_syscalls 316 +#define __NR_N32_Linux_syscalls 318 #endif /* _UAPI_ASM_UNISTD_H */ diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c index 992e184..50980bf3 100644 --- a/arch/mips/kernel/machine_kexec.c +++ b/arch/mips/kernel/machine_kexec.c @@ -71,8 +71,12 @@ machine_kexec(struct kimage *image) kexec_start_address = (unsigned long) phys_to_virt(image->start); - kexec_indirection_page = - (unsigned long) phys_to_virt(image->head & PAGE_MASK); + if (image->type == KEXEC_TYPE_DEFAULT) { + kexec_indirection_page = + (unsigned long) phys_to_virt(image->head & PAGE_MASK); + } else { + kexec_indirection_page = (unsigned long)&image->head; + } memcpy((void*)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S index f93b4cb..744cd10 100644 --- a/arch/mips/kernel/scall32-o32.S +++ b/arch/mips/kernel/scall32-o32.S @@ -577,3 +577,5 @@ EXPORT(sys_call_table) PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S index 03ebd99..002b1bc 100644 --- a/arch/mips/kernel/scall64-64.S +++ b/arch/mips/kernel/scall64-64.S @@ -432,4 +432,6 @@ EXPORT(sys_call_table) PTR sys_sched_getattr /* 5310 */ PTR sys_renameat2 PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sys_call_table,.-sys_call_table diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S index ebc9228..ca6cbbe 100644 --- a/arch/mips/kernel/scall64-n32.S +++ b/arch/mips/kernel/scall64-n32.S @@ -425,4 +425,6 @@ EXPORT(sysn32_call_table) PTR sys_sched_getattr PTR sys_renameat2 /* 6315 */ PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sysn32_call_table,.-sysn32_call_table diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S index 25bb840..9e10d11 100644 --- a/arch/mips/kernel/scall64-o32.S +++ b/arch/mips/kernel/scall64-o32.S @@ -562,4 +562,6 @@ EXPORT(sys32_call_table) PTR sys_sched_getattr /* 4350 */ PTR sys_renameat2 PTR sys_seccomp + PTR sys_getrandom + PTR sys_memfd_create .size sys32_call_table,.-sys32_call_table diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c index 05a5661..9f7ecbd 100644 --- a/arch/mips/net/bpf_jit.c +++ b/arch/mips/net/bpf_jit.c @@ -793,6 +793,7 @@ static int build_body(struct jit_ctx *ctx) const struct sock_filter *inst; unsigned int i, off, load_order, condt; u32 k, b_off __maybe_unused; + int tmp; for (i = 0; i < prog->len; i++) { u16 code; @@ -1332,9 +1333,9 @@ jmp_cmp: case BPF_ANC | SKF_AD_PKTTYPE: ctx->flags |= SEEN_SKB; - off = pkt_type_offset(); + tmp = off = pkt_type_offset(); - if (off < 0) + if (tmp < 0) return -1; emit_load_byte(r_tmp, r_skb, off, ctx); /* Keep only the last 3 bits */ diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 6e75e20..1554a6f 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -321,6 +321,22 @@ source "fs/Kconfig" source "arch/parisc/Kconfig.debug" +config 
SECCOMP + def_bool y + prompt "Enable seccomp to safely compute untrusted bytecode" + ---help--- + This kernel feature is useful for number crunching applications + that may need to compute untrusted bytecode during their + execution. By using pipes or other transports made available to + the process as file descriptors supporting the read/write + syscalls, it's possible to isolate those applications in + their own address space using seccomp. Once seccomp is + enabled via prctl(PR_SET_SECCOMP), it cannot be disabled + and the task is only allowed to execute a few safe syscalls + defined by each seccomp mode. + + If unsure, say Y. Only embedded should say N here. + source "security/Kconfig" source "crypto/Kconfig" diff --git a/arch/parisc/hpux/sys_hpux.c b/arch/parisc/hpux/sys_hpux.c index d9dc6cd..e5c4da0 100644 --- a/arch/parisc/hpux/sys_hpux.c +++ b/arch/parisc/hpux/sys_hpux.c @@ -456,7 +456,7 @@ int hpux_sysfs(int opcode, unsigned long arg1, unsigned long arg2) } /* String could be altered by userspace after strlen_user() */ - fsname[len] = '\0'; + fsname[len - 1] = '\0'; printk(KERN_DEBUG "that is '%s' as (char *)\n", fsname); if ( !strcmp(fsname, "hfs") ) { diff --git a/arch/parisc/include/asm/seccomp.h b/arch/parisc/include/asm/seccomp.h new file mode 100644 index 0000000..015f788 --- /dev/null +++ b/arch/parisc/include/asm/seccomp.h @@ -0,0 +1,16 @@ +#ifndef _ASM_PARISC_SECCOMP_H +#define _ASM_PARISC_SECCOMP_H + +#include <linux/unistd.h> + +#define __NR_seccomp_read __NR_read +#define __NR_seccomp_write __NR_write +#define __NR_seccomp_exit __NR_exit +#define __NR_seccomp_sigreturn __NR_rt_sigreturn + +#define __NR_seccomp_read_32 __NR_read +#define __NR_seccomp_write_32 __NR_write +#define __NR_seccomp_exit_32 __NR_exit +#define __NR_seccomp_sigreturn_32 __NR_rt_sigreturn + +#endif /* _ASM_PARISC_SECCOMP_H */ diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h index 4b9b10c..a846118 100644 --- a/arch/parisc/include/asm/thread_info.h +++ b/arch/parisc/include/asm/thread_info.h @@ -60,6 +60,7 @@ struct thread_info { #define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ #define TIF_SINGLESTEP 9 /* single stepping? */ #define TIF_BLOCKSTEP 10 /* branch stepping? 
*/ +#define TIF_SECCOMP 11 /* secure computing */ #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) @@ -70,11 +71,13 @@ struct thread_info { #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SINGLESTEP (1 << TIF_SINGLESTEP) #define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ _TIF_NEED_RESCHED) #define _TIF_SYSCALL_TRACE_MASK (_TIF_SYSCALL_TRACE | _TIF_SINGLESTEP | \ - _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT) + _TIF_BLOCKSTEP | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP) #ifdef CONFIG_64BIT # ifdef CONFIG_COMPAT diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 47e0e21..8667f18 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -830,8 +830,11 @@ #define __NR_sched_getattr (__NR_Linux + 335) #define __NR_utimes (__NR_Linux + 336) #define __NR_renameat2 (__NR_Linux + 337) +#define __NR_seccomp (__NR_Linux + 338) +#define __NR_getrandom (__NR_Linux + 339) +#define __NR_memfd_create (__NR_Linux + 340) -#define __NR_Linux_syscalls (__NR_renameat2 + 1) +#define __NR_Linux_syscalls (__NR_memfd_create + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/ptrace.c b/arch/parisc/kernel/ptrace.c index e842ee2..3bab724 100644 --- a/arch/parisc/kernel/ptrace.c +++ b/arch/parisc/kernel/ptrace.c @@ -270,6 +270,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) { long ret = 0; + /* Do the secure computing check first. */ + if (secure_computing(regs->gr[20])) { + /* seccomp failures shouldn't expose any additional code. */ + return -1; + } + if (test_thread_flag(TIF_SYSCALL_TRACE) && tracehook_report_syscall_entry(regs)) ret = -1L; diff --git a/arch/parisc/kernel/syscall.S b/arch/parisc/kernel/syscall.S index 8387860..7ef22e3 100644 --- a/arch/parisc/kernel/syscall.S +++ b/arch/parisc/kernel/syscall.S @@ -74,7 +74,7 @@ ENTRY(linux_gateway_page) /* ADDRESS 0xb0 to 0xb8, lws uses two insns for entry */ /* Light-weight-syscall entry must always be located at 0xb0 */ /* WARNING: Keep this number updated with table size changes */ -#define __NR_lws_entries (2) +#define __NR_lws_entries (3) lws_entry: gate lws_start, %r0 /* increase privilege */ @@ -502,7 +502,7 @@ lws_exit: /*************************************************** - Implementing CAS as an atomic operation: + Implementing 32bit CAS as an atomic operation: %r26 - Address to examine %r25 - Old value to check (old) @@ -659,6 +659,230 @@ cas_action: ASM_EXCEPTIONTABLE_ENTRY(2b-linux_gateway_page, 3b-linux_gateway_page) + /*************************************************** + New CAS implementation which uses pointers and variable size + information. The value pointed by old and new MUST NOT change + while performing CAS. The lock only protect the value at %r26. + + %r26 - Address to examine + %r25 - Pointer to the value to check (old) + %r24 - Pointer to the value to set (new) + %r23 - Size of the variable (0/1/2/3 for 8/16/32/64 bit) + %r28 - Return non-zero on failure + %r21 - Kernel error code + + %r21 has the following meanings: + + EAGAIN - CAS is busy, ldcw failed, try again. + EFAULT - Read or write failed. 
+ + Scratch: r20, r22, r28, r29, r1, fr4 (32bit for 64bit CAS only) + + ****************************************************/ + + /* ELF32 Process entry path */ +lws_compare_and_swap_2: +#ifdef CONFIG_64BIT + /* Clip the input registers */ + depdi 0, 31, 32, %r26 + depdi 0, 31, 32, %r25 + depdi 0, 31, 32, %r24 + depdi 0, 31, 32, %r23 +#endif + + /* Check the validity of the size pointer */ + subi,>>= 4, %r23, %r0 + b,n lws_exit_nosys + + /* Jump to the functions which will load the old and new values into + registers depending on the their size */ + shlw %r23, 2, %r29 + blr %r29, %r0 + nop + + /* 8bit load */ +4: ldb 0(%sr3,%r25), %r25 + b cas2_lock_start +5: ldb 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 16bit load */ +6: ldh 0(%sr3,%r25), %r25 + b cas2_lock_start +7: ldh 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 32bit load */ +8: ldw 0(%sr3,%r25), %r25 + b cas2_lock_start +9: ldw 0(%sr3,%r24), %r24 + nop + nop + nop + nop + nop + + /* 64bit load */ +#ifdef CONFIG_64BIT +10: ldd 0(%sr3,%r25), %r25 +11: ldd 0(%sr3,%r24), %r24 +#else + /* Load new value into r22/r23 - high/low */ +10: ldw 0(%sr3,%r25), %r22 +11: ldw 4(%sr3,%r25), %r23 + /* Load new value into fr4 for atomic store later */ +12: flddx 0(%sr3,%r24), %fr4 +#endif + +cas2_lock_start: + /* Load start of lock table */ + ldil L%lws_lock_start, %r20 + ldo R%lws_lock_start(%r20), %r28 + + /* Extract four bits from r26 and hash lock (Bits 4-7) */ + extru %r26, 27, 4, %r20 + + /* Find lock to use, the hash is either one of 0 to + 15, multiplied by 16 (keep it 16-byte aligned) + and add to the lock table offset. */ + shlw %r20, 4, %r20 + add %r20, %r28, %r20 + + rsm PSW_SM_I, %r0 /* Disable interrupts */ + /* COW breaks can cause contention on UP systems */ + LDCW 0(%sr2,%r20), %r28 /* Try to acquire the lock */ + cmpb,<>,n %r0, %r28, cas2_action /* Did we get it? */ +cas2_wouldblock: + ldo 2(%r0), %r28 /* 2nd case */ + ssm PSW_SM_I, %r0 + b lws_exit /* Contended... 
*/ + ldo -EAGAIN(%r0), %r21 /* Spin in userspace */ + + /* + prev = *addr; + if ( prev == old ) + *addr = new; + return prev; + */ + + /* NOTES: + This all works becuse intr_do_signal + and schedule both check the return iasq + and see that we are on the kernel page + so this process is never scheduled off + or is ever sent any signal of any sort, + thus it is wholly atomic from usrspaces + perspective + */ +cas2_action: + /* Jump to the correct function */ + blr %r29, %r0 + /* Set %r28 as non-zero for now */ + ldo 1(%r0),%r28 + + /* 8bit CAS */ +13: ldb,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +14: stb,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 16bit CAS */ +15: ldh,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +16: sth,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 32bit CAS */ +17: ldw,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +18: stw,ma %r24, 0(%sr3,%r26) + b cas2_end + copy %r0, %r28 + nop + nop + + /* 64bit CAS */ +#ifdef CONFIG_64BIT +19: ldd,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r25, %r0 + b,n cas2_end +20: std,ma %r24, 0(%sr3,%r26) + copy %r0, %r28 +#else + /* Compare first word */ +19: ldw,ma 0(%sr3,%r26), %r29 + sub,= %r29, %r22, %r0 + b,n cas2_end + /* Compare second word */ +20: ldw,ma 4(%sr3,%r26), %r29 + sub,= %r29, %r23, %r0 + b,n cas2_end + /* Perform the store */ +21: fstdx %fr4, 0(%sr3,%r26) + copy %r0, %r28 +#endif + +cas2_end: + /* Free lock */ + stw,ma %r20, 0(%sr2,%r20) + /* Enable interrupts */ + ssm PSW_SM_I, %r0 + /* Return to userspace, set no error */ + b lws_exit + copy %r0, %r21 + +22: + /* Error occurred on load or store */ + /* Free lock */ + stw %r20, 0(%sr2,%r20) + ssm PSW_SM_I, %r0 + ldo 1(%r0),%r28 + b lws_exit + ldo -EFAULT(%r0),%r21 /* set errno */ + nop + nop + nop + + /* Exception table entries, for the load and store, return EFAULT. + Each of the entries must be relocated. */ + ASM_EXCEPTIONTABLE_ENTRY(4b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(5b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(6b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(7b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(8b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(9b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(10b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(11b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(13b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(14b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(15b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(16b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(17b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(18b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(19b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(20b-linux_gateway_page, 22b-linux_gateway_page) +#ifndef CONFIG_64BIT + ASM_EXCEPTIONTABLE_ENTRY(12b-linux_gateway_page, 22b-linux_gateway_page) + ASM_EXCEPTIONTABLE_ENTRY(21b-linux_gateway_page, 22b-linux_gateway_page) +#endif + /* Make sure nothing else is placed on this page */ .align PAGE_SIZE END(linux_gateway_page) @@ -675,8 +899,9 @@ ENTRY(end_linux_gateway_page) /* Light-weight-syscall table */ /* Start of lws table. 
*/ ENTRY(lws_table) - LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic compare and swap */ - LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic compare and swap */ + LWS_ENTRY(compare_and_swap32) /* 0 - ELF32 Atomic 32bit CAS */ + LWS_ENTRY(compare_and_swap64) /* 1 - ELF64 Atomic 32bit CAS */ + LWS_ENTRY(compare_and_swap_2) /* 2 - ELF32 Atomic 64bit CAS */ END(lws_table) /* End of lws table */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 84c5d3a..b563d9c 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -433,6 +433,9 @@ ENTRY_SAME(sched_getattr) /* 335 */ ENTRY_COMP(utimes) ENTRY_SAME(renameat2) + ENTRY_SAME(seccomp) + ENTRY_SAME(getrandom) + ENTRY_SAME(memfd_create) /* 340 */ /* Nothing yet */ diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index a577609f..4bc7b62 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -399,8 +399,6 @@ config PPC64_SUPPORTS_MEMORY_FAILURE config KEXEC bool "kexec system call" depends on (PPC_BOOK3S || FSL_BOOKE || (44x && !SMP)) - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig index 4bee1a6..45fd06c 100644 --- a/arch/powerpc/configs/cell_defconfig +++ b/arch/powerpc/configs/cell_defconfig @@ -5,6 +5,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_FHANDLE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 diff --git a/arch/powerpc/configs/celleb_defconfig b/arch/powerpc/configs/celleb_defconfig index 6d7b22f..77d7bf3 100644 --- a/arch/powerpc/configs/celleb_defconfig +++ b/arch/powerpc/configs/celleb_defconfig @@ -5,6 +5,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y +CONFIG_FHANDLE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=15 diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig index 4b07bad..269d6e4 100644 --- a/arch/powerpc/configs/corenet64_smp_defconfig +++ b/arch/powerpc/configs/corenet64_smp_defconfig @@ -4,6 +4,7 @@ CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=24 CONFIG_SYSVIPC=y +CONFIG_FHANDLE=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/powerpc/configs/g5_defconfig b/arch/powerpc/configs/g5_defconfig index 3c72fa6..7594c5a 100644 --- a/arch/powerpc/configs/g5_defconfig +++ b/arch/powerpc/configs/g5_defconfig @@ -5,6 +5,7 @@ CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_BLK_DEV_INITRD=y diff --git a/arch/powerpc/configs/maple_defconfig b/arch/powerpc/configs/maple_defconfig index 95e545d..c8b6a9d 100644 --- a/arch/powerpc/configs/maple_defconfig +++ b/arch/powerpc/configs/maple_defconfig @@ -4,6 +4,7 @@ CONFIG_NR_CPUS=4 CONFIG_EXPERIMENTAL=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y # CONFIG_COMPAT_BRK is not set diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index cec044a..e5e7838 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig @@ -3,6 +3,7 @@ CONFIG_ALTIVEC=y CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_SYSVIPC=y +CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y diff --git 
a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig index f26b267..f6c02f8 100644 --- a/arch/powerpc/configs/ppc64_defconfig +++ b/arch/powerpc/configs/ppc64_defconfig @@ -4,6 +4,7 @@ CONFIG_VSX=y CONFIG_SMP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_IRQ_DOMAIN_DEBUG=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 438e813..587f551 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig @@ -3,6 +3,7 @@ CONFIG_PPC_BOOK3E_64=y CONFIG_SMP=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_NO_HZ=y CONFIG_HIGH_RES_TIMERS=y CONFIG_TASKSTATS=y diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index fdee37f..2e637c8 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig @@ -5,6 +5,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=2 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_HIGH_RES_TIMERS=y CONFIG_BLK_DEV_INITRD=y CONFIG_RD_LZMA=y diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index a905063..50375f1 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig @@ -5,6 +5,7 @@ CONFIG_SMP=y CONFIG_NR_CPUS=2048 CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_IRQ_DOMAIN_DEBUG=y diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig index 58e3dbf..4428ee4 100644 --- a/arch/powerpc/configs/pseries_le_defconfig +++ b/arch/powerpc/configs/pseries_le_defconfig @@ -6,6 +6,7 @@ CONFIG_NR_CPUS=2048 CONFIG_CPU_LITTLE_ENDIAN=y CONFIG_SYSVIPC=y CONFIG_POSIX_MQUEUE=y +CONFIG_FHANDLE=y CONFIG_AUDIT=y CONFIG_AUDITSYSCALL=y CONFIG_IRQ_DOMAIN_DEBUG=y diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 279b80f..c0c61fa 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h @@ -47,6 +47,12 @@ STACK_FRAME_OVERHEAD + KERNEL_REDZONE_SIZE) #define STACK_FRAME_MARKER 12 +#if defined(_CALL_ELF) && _CALL_ELF == 2 +#define STACK_FRAME_MIN_SIZE 32 +#else +#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD +#endif + /* Size of dummy stack frame allocated when calling signal handler. */ #define __SIGNAL_FRAMESIZE 128 #define __SIGNAL_FRAMESIZE32 64 @@ -60,6 +66,7 @@ #define STACK_FRAME_REGS_MARKER ASM_CONST(0x72656773) #define STACK_INT_FRAME_SIZE (sizeof(struct pt_regs) + STACK_FRAME_OVERHEAD) #define STACK_FRAME_MARKER 2 +#define STACK_FRAME_MIN_SIZE STACK_FRAME_OVERHEAD /* Size of stack frame allocated when calling signal handler. 
*/ #define __SIGNAL_FRAMESIZE 64 diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 542bc0f..7d8a6006 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -362,3 +362,6 @@ SYSCALL(ni_syscall) /* sys_kcmp */ SYSCALL_SPU(sched_setattr) SYSCALL_SPU(sched_getattr) SYSCALL_SPU(renameat2) +SYSCALL_SPU(seccomp) +SYSCALL_SPU(getrandom) +SYSCALL_SPU(memfd_create) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 5ce5552..4e9af3f 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include <uapi/asm/unistd.h> -#define __NR_syscalls 358 +#define __NR_syscalls 361 #define __NR__exit __NR_exit #define NR_syscalls __NR_syscalls diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2d526f7..0688fc0 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -380,5 +380,8 @@ #define __NR_sched_setattr 355 #define __NR_sched_getattr 356 #define __NR_renameat2 357 +#define __NR_seccomp 358 +#define __NR_getrandom 359 +#define __NR_memfd_create 360 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index 72c20bb..79294c4 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -62,10 +62,10 @@ long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp) } kvm->arch.hpt_cma_alloc = 0; - page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT)); + page = kvm_alloc_hpt(1ul << (order - PAGE_SHIFT)); if (page) { hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); - memset((void *)hpt, 0, (1 << order)); + memset((void *)hpt, 0, (1ul << order)); kvm->arch.hpt_cma_alloc = 1; } diff --git a/arch/powerpc/perf/callchain.c b/arch/powerpc/perf/callchain.c index 74d1e78..2396dda 100644 --- a/arch/powerpc/perf/callchain.c +++ b/arch/powerpc/perf/callchain.c @@ -35,7 +35,7 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) return 0; /* must be 16-byte aligned */ if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) return 0; - if (sp >= prev_sp + STACK_FRAME_OVERHEAD) + if (sp >= prev_sp + STACK_FRAME_MIN_SIZE) return 1; /* * sp could decrease when we jump off an interrupt stack diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c index 97ac8dc..5e1ed15 100644 --- a/arch/powerpc/platforms/powernv/opal-hmi.c +++ b/arch/powerpc/platforms/powernv/opal-hmi.c @@ -28,6 +28,7 @@ #include <asm/opal.h> #include <asm/cputable.h> +#include <asm/machdep.h> static int opal_hmi_handler_nb_init; struct OpalHmiEvtNode { @@ -185,4 +186,4 @@ static int __init opal_hmi_handler_init(void) } return 0; } -subsys_initcall(opal_hmi_handler_init); +machine_subsys_initcall(powernv, opal_hmi_handler_init); diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index c904583..17ee193 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c @@ -113,7 +113,7 @@ out: static int pseries_remove_mem_node(struct device_node *np) { const char *type; - const unsigned int *regs; + const __be32 *regs; unsigned long base; unsigned int lmb_size; int ret = -EINVAL; @@ -132,8 +132,8 @@ static int pseries_remove_mem_node(struct device_node *np) if (!regs) return ret; - base = *(unsigned long *)regs; - lmb_size = regs[3]; + base = 
be64_to_cpu(*(unsigned long *)regs); + lmb_size = be32_to_cpu(regs[3]); pseries_remove_memblock(base, lmb_size); return 0; @@ -153,7 +153,7 @@ static inline int pseries_remove_mem_node(struct device_node *np) static int pseries_add_mem_node(struct device_node *np) { const char *type; - const unsigned int *regs; + const __be32 *regs; unsigned long base; unsigned int lmb_size; int ret = -EINVAL; @@ -172,8 +172,8 @@ static int pseries_add_mem_node(struct device_node *np) if (!regs) return ret; - base = *(unsigned long *)regs; - lmb_size = regs[3]; + base = be64_to_cpu(*(unsigned long *)regs); + lmb_size = be32_to_cpu(regs[3]); /* * Update memory region to represent the memory add @@ -187,14 +187,14 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) struct of_drconf_cell *new_drmem, *old_drmem; unsigned long memblock_size; u32 entries; - u32 *p; + __be32 *p; int i, rc = -EINVAL; memblock_size = pseries_memory_block_size(); if (!memblock_size) return -EINVAL; - p = (u32 *) pr->old_prop->value; + p = (__be32 *) pr->old_prop->value; if (!p) return -EINVAL; @@ -203,28 +203,30 @@ static int pseries_update_drconf_memory(struct of_prop_reconfig *pr) * entries. Get the niumber of entries and skip to the array of * of_drconf_cell's. */ - entries = *p++; + entries = be32_to_cpu(*p++); old_drmem = (struct of_drconf_cell *)p; - p = (u32 *)pr->prop->value; + p = (__be32 *)pr->prop->value; p++; new_drmem = (struct of_drconf_cell *)p; for (i = 0; i < entries; i++) { - if ((old_drmem[i].flags & DRCONF_MEM_ASSIGNED) && - (!(new_drmem[i].flags & DRCONF_MEM_ASSIGNED))) { - rc = pseries_remove_memblock(old_drmem[i].base_addr, + if ((be32_to_cpu(old_drmem[i].flags) & DRCONF_MEM_ASSIGNED) && + (!(be32_to_cpu(new_drmem[i].flags) & DRCONF_MEM_ASSIGNED))) { + rc = pseries_remove_memblock( + be64_to_cpu(old_drmem[i].base_addr), memblock_size); break; - } else if ((!(old_drmem[i].flags & DRCONF_MEM_ASSIGNED)) && - (new_drmem[i].flags & DRCONF_MEM_ASSIGNED)) { - rc = memblock_add(old_drmem[i].base_addr, + } else if ((!(be32_to_cpu(old_drmem[i].flags) & + DRCONF_MEM_ASSIGNED)) && + (be32_to_cpu(new_drmem[i].flags) & + DRCONF_MEM_ASSIGNED)) { + rc = memblock_add(be64_to_cpu(old_drmem[i].base_addr), memblock_size); rc = (rc < 0) ? 
-EINVAL : 0; break; } } - return rc; } diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ab39ceb8..05c78bb 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -48,8 +48,6 @@ config ARCH_SUPPORTS_DEBUG_PAGEALLOC config KEXEC def_bool y - select CRYPTO - select CRYPTO_SHA256 config AUDIT_ARCH def_bool y diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h index 2fcccc0..c81661e 100644 --- a/arch/s390/include/asm/ipl.h +++ b/arch/s390/include/asm/ipl.h @@ -17,12 +17,12 @@ #define IPL_PARM_BLK_FCP_LEN (sizeof(struct ipl_list_hdr) + \ sizeof(struct ipl_block_fcp)) -#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 8) +#define IPL_PARM_BLK0_FCP_LEN (sizeof(struct ipl_block_fcp) + 16) #define IPL_PARM_BLK_CCW_LEN (sizeof(struct ipl_list_hdr) + \ sizeof(struct ipl_block_ccw)) -#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 8) +#define IPL_PARM_BLK0_CCW_LEN (sizeof(struct ipl_block_ccw) + 16) #define IPL_MAX_SUPPORTED_VERSION (0) @@ -38,10 +38,11 @@ struct ipl_list_hdr { u8 pbt; u8 flags; u16 reserved2; + u8 loadparm[8]; } __attribute__((packed)); struct ipl_block_fcp { - u8 reserved1[313-1]; + u8 reserved1[305-1]; u8 opt; u8 reserved2[3]; u16 reserved3; @@ -62,7 +63,6 @@ struct ipl_block_fcp { offsetof(struct ipl_block_fcp, scp_data))) struct ipl_block_ccw { - u8 load_parm[8]; u8 reserved1[84]; u8 reserved2[2]; u16 devno; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index b76317c..5efb2fe 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1127,7 +1127,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { pgste_t pgste; - pte_t pte; + pte_t pte, oldpte; int young; if (mm_has_pgste(vma->vm_mm)) { @@ -1135,12 +1135,13 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste); } - pte = *ptep; + oldpte = pte = *ptep; ptep_flush_direct(vma->vm_mm, addr, ptep); young = pte_young(pte); pte = pte_mkold(pte); if (mm_has_pgste(vma->vm_mm)) { + pgste = pgste_update_all(&oldpte, pgste, vma->vm_mm); pgste = pgste_set_pte(ptep, pgste, pte); pgste_set_unlock(ptep, pgste); } else @@ -1330,6 +1331,7 @@ static inline int ptep_set_access_flags(struct vm_area_struct *vma, ptep_flush_direct(vma->vm_mm, address, ptep); if (mm_has_pgste(vma->vm_mm)) { + pgste_set_key(ptep, pgste, entry, vma->vm_mm); pgste = pgste_set_pte(ptep, pgste, entry); pgste_set_unlock(ptep, pgste); } else diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 3802d2d..940ac49 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -283,7 +283,10 @@ #define __NR_sched_setattr 345 #define __NR_sched_getattr 346 #define __NR_renameat2 347 -#define NR_syscalls 348 +#define __NR_seccomp 348 +#define __NR_getrandom 349 +#define __NR_memfd_create 350 +#define NR_syscalls 351 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index 45cdb37..faf6caa 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -214,3 +214,6 @@ COMPAT_SYSCALL_WRAP3(finit_module, int, fd, const char __user *, uargs, int, fla COMPAT_SYSCALL_WRAP3(sched_setattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, flags); COMPAT_SYSCALL_WRAP4(sched_getattr, pid_t, pid, struct sched_attr __user *, attr, unsigned int, 
size, unsigned int, flags); COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int, newdfd, const char __user *, newname, unsigned int, flags); +COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) +COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) +COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags) diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index 633ca75..39badb9 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -455,22 +455,6 @@ DEFINE_IPL_ATTR_RO(ipl_fcp, bootprog, "%lld\n", (unsigned long long) DEFINE_IPL_ATTR_RO(ipl_fcp, br_lba, "%lld\n", (unsigned long long) IPL_PARMBLOCK_START->ipl_info.fcp.br_lba); -static struct attribute *ipl_fcp_attrs[] = { - &sys_ipl_type_attr.attr, - &sys_ipl_device_attr.attr, - &sys_ipl_fcp_wwpn_attr.attr, - &sys_ipl_fcp_lun_attr.attr, - &sys_ipl_fcp_bootprog_attr.attr, - &sys_ipl_fcp_br_lba_attr.attr, - NULL, -}; - -static struct attribute_group ipl_fcp_attr_group = { - .attrs = ipl_fcp_attrs, -}; - -/* CCW ipl device attributes */ - static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) { @@ -487,6 +471,23 @@ static ssize_t ipl_ccw_loadparm_show(struct kobject *kobj, static struct kobj_attribute sys_ipl_ccw_loadparm_attr = __ATTR(loadparm, 0444, ipl_ccw_loadparm_show, NULL); +static struct attribute *ipl_fcp_attrs[] = { + &sys_ipl_type_attr.attr, + &sys_ipl_device_attr.attr, + &sys_ipl_fcp_wwpn_attr.attr, + &sys_ipl_fcp_lun_attr.attr, + &sys_ipl_fcp_bootprog_attr.attr, + &sys_ipl_fcp_br_lba_attr.attr, + &sys_ipl_ccw_loadparm_attr.attr, + NULL, +}; + +static struct attribute_group ipl_fcp_attr_group = { + .attrs = ipl_fcp_attrs, +}; + +/* CCW ipl device attributes */ + static struct attribute *ipl_ccw_attrs_vm[] = { &sys_ipl_type_attr.attr, &sys_ipl_device_attr.attr, @@ -765,28 +766,10 @@ DEFINE_IPL_ATTR_RW(reipl_fcp, br_lba, "%lld\n", "%lld\n", DEFINE_IPL_ATTR_RW(reipl_fcp, device, "0.0.%04llx\n", "0.0.%llx\n", reipl_block_fcp->ipl_info.fcp.devno); -static struct attribute *reipl_fcp_attrs[] = { - &sys_reipl_fcp_device_attr.attr, - &sys_reipl_fcp_wwpn_attr.attr, - &sys_reipl_fcp_lun_attr.attr, - &sys_reipl_fcp_bootprog_attr.attr, - &sys_reipl_fcp_br_lba_attr.attr, - NULL, -}; - -static struct attribute_group reipl_fcp_attr_group = { - .attrs = reipl_fcp_attrs, -}; - -/* CCW reipl device attributes */ - -DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", - reipl_block_ccw->ipl_info.ccw.devno); - static void reipl_get_ascii_loadparm(char *loadparm, struct ipl_parameter_block *ibp) { - memcpy(loadparm, ibp->ipl_info.ccw.load_parm, LOADPARM_LEN); + memcpy(loadparm, ibp->hdr.loadparm, LOADPARM_LEN); EBCASC(loadparm, LOADPARM_LEN); loadparm[LOADPARM_LEN] = 0; strim(loadparm); @@ -821,13 +804,50 @@ static ssize_t reipl_generic_loadparm_store(struct ipl_parameter_block *ipb, return -EINVAL; } /* initialize loadparm with blanks */ - memset(ipb->ipl_info.ccw.load_parm, ' ', LOADPARM_LEN); + memset(ipb->hdr.loadparm, ' ', LOADPARM_LEN); /* copy and convert to ebcdic */ - memcpy(ipb->ipl_info.ccw.load_parm, buf, lp_len); - ASCEBC(ipb->ipl_info.ccw.load_parm, LOADPARM_LEN); + memcpy(ipb->hdr.loadparm, buf, lp_len); + ASCEBC(ipb->hdr.loadparm, LOADPARM_LEN); return len; } +/* FCP wrapper */ +static ssize_t reipl_fcp_loadparm_show(struct kobject *kobj, + struct kobj_attribute *attr, char *page) +{ + return 
reipl_generic_loadparm_show(reipl_block_fcp, page); +} + +static ssize_t reipl_fcp_loadparm_store(struct kobject *kobj, + struct kobj_attribute *attr, + const char *buf, size_t len) +{ + return reipl_generic_loadparm_store(reipl_block_fcp, buf, len); +} + +static struct kobj_attribute sys_reipl_fcp_loadparm_attr = + __ATTR(loadparm, S_IRUGO | S_IWUSR, reipl_fcp_loadparm_show, + reipl_fcp_loadparm_store); + +static struct attribute *reipl_fcp_attrs[] = { + &sys_reipl_fcp_device_attr.attr, + &sys_reipl_fcp_wwpn_attr.attr, + &sys_reipl_fcp_lun_attr.attr, + &sys_reipl_fcp_bootprog_attr.attr, + &sys_reipl_fcp_br_lba_attr.attr, + &sys_reipl_fcp_loadparm_attr.attr, + NULL, +}; + +static struct attribute_group reipl_fcp_attr_group = { + .attrs = reipl_fcp_attrs, +}; + +/* CCW reipl device attributes */ + +DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n", + reipl_block_ccw->ipl_info.ccw.devno); + /* NSS wrapper */ static ssize_t reipl_nss_loadparm_show(struct kobject *kobj, struct kobj_attribute *attr, char *page) @@ -1125,11 +1145,10 @@ static void reipl_block_ccw_fill_parms(struct ipl_parameter_block *ipb) /* LOADPARM */ /* check if read scp info worked and set loadparm */ if (sclp_ipl_info.is_valid) - memcpy(ipb->ipl_info.ccw.load_parm, - &sclp_ipl_info.loadparm, LOADPARM_LEN); + memcpy(ipb->hdr.loadparm, &sclp_ipl_info.loadparm, LOADPARM_LEN); else /* read scp info failed: set empty loadparm (EBCDIC blanks) */ - memset(ipb->ipl_info.ccw.load_parm, 0x40, LOADPARM_LEN); + memset(ipb->hdr.loadparm, 0x40, LOADPARM_LEN); ipb->hdr.flags = DIAG308_FLAGS_LP_VALID; /* VM PARM */ @@ -1251,9 +1270,16 @@ static int __init reipl_fcp_init(void) return rc; } - if (ipl_info.type == IPL_TYPE_FCP) + if (ipl_info.type == IPL_TYPE_FCP) { memcpy(reipl_block_fcp, IPL_PARMBLOCK_START, PAGE_SIZE); - else { + /* + * Fix loadparm: There are systems where the (SCSI) LOADPARM + * is invalid in the SCSI IPL parameter block, so take it + * always from sclp_ipl_info. + */ + memcpy(reipl_block_fcp->hdr.loadparm, sclp_ipl_info.loadparm, + LOADPARM_LEN); + } else { reipl_block_fcp->hdr.len = IPL_PARM_BLK_FCP_LEN; reipl_block_fcp->hdr.version = IPL_PARM_BLOCK_VERSION; reipl_block_fcp->hdr.blk0_len = IPL_PARM_BLK0_FCP_LEN; @@ -1864,7 +1890,23 @@ static void __init shutdown_actions_init(void) static int __init s390_ipl_init(void) { + char str[8] = {0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40, 0x40}; + sclp_get_ipl_info(&sclp_ipl_info); + /* + * Fix loadparm: There are systems where the (SCSI) LOADPARM + * returned by read SCP info is invalid (contains EBCDIC blanks) + * when the system has been booted via diag308. In that case we use + * the value from diag308, if available. + * + * There are also systems where diag308 store does not work in + * case the system is booted from HMC. Fortunately in this case + * READ SCP info provides the correct value. 
+ */ + if (memcmp(sclp_ipl_info.loadparm, str, sizeof(str)) == 0 && + diag308_set_works) + memcpy(sclp_ipl_info.loadparm, ipl_block.hdr.loadparm, + LOADPARM_LEN); shutdown_actions_init(); shutdown_triggers_init(); return 0; @@ -2060,6 +2102,13 @@ void s390_reset_system(void (*func)(void *), void *data) S390_lowcore.program_new_psw.addr = PSW_ADDR_AMODE | (unsigned long) s390_base_pgm_handler; + /* + * Clear subchannel ID and number to signal new kernel that no CCW or + * SCSI IPL has been done (for kexec and kdump) + */ + S390_lowcore.subchannel_id = 0; + S390_lowcore.subchannel_nr = 0; + /* Store status at absolute zero */ store_status(); diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c index ae1d5be..82bc113 100644 --- a/arch/s390/kernel/setup.c +++ b/arch/s390/kernel/setup.c @@ -24,6 +24,7 @@ #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> +#include <linux/random.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/ioport.h> @@ -61,6 +62,7 @@ #include <asm/diag.h> #include <asm/os_info.h> #include <asm/sclp.h> +#include <asm/sysinfo.h> #include "entry.h" /* @@ -766,6 +768,7 @@ static void __init setup_hwcaps(void) #endif get_cpu_id(&cpu_id); + add_device_randomness(&cpu_id, sizeof(cpu_id)); switch (cpu_id.machine) { case 0x9672: #if !defined(CONFIG_64BIT) @@ -804,6 +807,19 @@ static void __init setup_hwcaps(void) } /* + * Add system information as device randomness + */ +static void __init setup_randomness(void) +{ + struct sysinfo_3_2_2 *vmms; + + vmms = (struct sysinfo_3_2_2 *) alloc_page(GFP_KERNEL); + if (vmms && stsi(vmms, 3, 2, 2) == 0 && vmms->count) + add_device_randomness(&vmms, vmms->count); + free_page((unsigned long) vmms); +} + +/* * Setup function called from init/main.c just after the banner * was printed. */ @@ -901,6 +917,9 @@ void __init setup_arch(char **cmdline_p) /* Setup zfcpdump support */ setup_zfcpdump(); + + /* Add system specific data to the random pool */ + setup_randomness(); } #ifdef CONFIG_32BIT diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index fe5cdf2..6fe886a 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -356,3 +356,6 @@ SYSCALL(sys_finit_module,sys_finit_module,compat_sys_finit_module) SYSCALL(sys_sched_setattr,sys_sched_setattr,compat_sys_sched_setattr) /* 345 */ SYSCALL(sys_sched_getattr,sys_sched_getattr,compat_sys_sched_getattr) SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2) +SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) +SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) +SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S index 65fc397..7cf18f8 100644 --- a/arch/s390/kernel/vdso32/clock_gettime.S +++ b/arch/s390/kernel/vdso32/clock_gettime.S @@ -22,13 +22,11 @@ __kernel_clock_gettime: basr %r5,0 0: al %r5,21f-0b(%r5) /* get &_vdso_data */ chi %r2,__CLOCK_REALTIME - je 10f + je 11f chi %r2,__CLOCK_MONOTONIC jne 19f /* CLOCK_MONOTONIC */ - ltr %r3,%r3 - jz 9f /* tp == NULL */ 1: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? 
loop */ jnz 1b @@ -67,12 +65,10 @@ __kernel_clock_gettime: j 6b 8: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ -9: lhi %r2,0 + lhi %r2,0 br %r14 /* CLOCK_REALTIME */ -10: ltr %r3,%r3 /* tp == NULL */ - jz 18f 11: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ tml %r4,0x0001 /* pending update ? loop */ jnz 11b @@ -111,7 +107,7 @@ __kernel_clock_gettime: j 15b 17: st %r2,0(%r3) /* store tp->tv_sec */ st %r1,4(%r3) /* store tp->tv_nsec */ -18: lhi %r2,0 + lhi %r2,0 br %r14 /* Fallback to system call */ diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S index 91940ed..3f34e09 100644 --- a/arch/s390/kernel/vdso64/clock_gettime.S +++ b/arch/s390/kernel/vdso64/clock_gettime.S @@ -21,7 +21,7 @@ __kernel_clock_gettime: .cfi_startproc larl %r5,_vdso_data cghi %r2,__CLOCK_REALTIME - je 4f + je 5f cghi %r2,__CLOCK_THREAD_CPUTIME_ID je 9f cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */ @@ -30,8 +30,6 @@ __kernel_clock_gettime: jne 12f /* CLOCK_MONOTONIC */ - ltgr %r3,%r3 - jz 3f /* tp == NULL */ 0: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 0b @@ -53,12 +51,10 @@ __kernel_clock_gettime: j 1b 2: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ -3: lghi %r2,0 + lghi %r2,0 br %r14 /* CLOCK_REALTIME */ -4: ltr %r3,%r3 /* tp == NULL */ - jz 8f 5: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ tmll %r4,0x0001 /* pending update ? loop */ jnz 5b @@ -80,7 +76,7 @@ __kernel_clock_gettime: j 6b 7: stg %r0,0(%r3) /* store tp->tv_sec */ stg %r1,8(%r3) /* store tp->tv_nsec */ -8: lghi %r2,0 + lghi %r2,0 br %r14 /* CLOCK_THREAD_CPUTIME_ID for this thread */ diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index ce81eb2..81b0e11 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -1317,19 +1317,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) return -EINVAL; } - switch (kvm_run->exit_reason) { - case KVM_EXIT_S390_SIEIC: - case KVM_EXIT_UNKNOWN: - case KVM_EXIT_INTR: - case KVM_EXIT_S390_RESET: - case KVM_EXIT_S390_UCONTROL: - case KVM_EXIT_S390_TSCH: - case KVM_EXIT_DEBUG: - break; - default: - BUG(); - } - vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask; vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr; if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) { diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index 19daa53..5404a62 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -986,11 +986,21 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep; down_read(&mm->mmap_sem); +retry: ptep = get_locked_pte(current->mm, addr, &ptl); if (unlikely(!ptep)) { up_read(&mm->mmap_sem); return -EFAULT; } + if (!(pte_val(*ptep) & _PAGE_INVALID) && + (pte_val(*ptep) & _PAGE_PROTECT)) { + pte_unmap_unlock(*ptep, ptl); + if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { + up_read(&mm->mmap_sem); + return -EFAULT; + } + goto retry; + } new = old = pgste_get_lock(ptep); pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT | diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index b319846..244fb4c 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -598,8 +598,6 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call (EXPERIMENTAL)" depends on SUPERH32 && MMU - select CRYPTO - select CRYPTO_SHA256 help kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. 
It is like a reboot diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 097c2cd..f770e39 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c @@ -229,6 +229,7 @@ void flush_icache_range(unsigned long start, unsigned long end) cacheop_on_each_cpu(local_flush_icache_range, (void *)&data, 1); } +EXPORT_SYMBOL(flush_icache_range); void flush_icache_page(struct vm_area_struct *vma, struct page *page) { diff --git a/arch/sh/mm/gup.c b/arch/sh/mm/gup.c index bf8daf9..37458f3 100644 --- a/arch/sh/mm/gup.c +++ b/arch/sh/mm/gup.c @@ -105,6 +105,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); get_page(page); + __flush_anon_page(page, addr); + flush_dcache_page(page); pages[*nr] = page; (*nr)++; diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index a3ffe2d..7fcd492 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -191,8 +191,6 @@ source "kernel/Kconfig.hz" config KEXEC bool "kexec system call" - select CRYPTO - select CRYPTO_SHA256 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c index 01e8ab2..19eaa62 100644 --- a/arch/tile/kernel/smp.c +++ b/arch/tile/kernel/smp.c @@ -183,6 +183,7 @@ void flush_icache_range(unsigned long start, unsigned long end) preempt_enable(); } } +EXPORT_SYMBOL(flush_icache_range); /* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */ diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c index 780d773..7c8fb70 100644 --- a/arch/unicore32/kernel/signal.c +++ b/arch/unicore32/kernel/signal.c @@ -254,7 +254,8 @@ static int setup_frame(struct ksignal *ksig, sigset_t *set, err |= setup_sigframe(frame, regs, set); if (err == 0) - err |= setup_return(regs, &ksig->ka, frame->retcode, frame, usig); + err |= setup_return(regs, &ksig->ka, frame->retcode, frame, + ksig->sig); return err; } @@ -276,7 +277,8 @@ static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, err |= __save_altstack(&frame->sig.uc.uc_stack, regs->UCreg_sp); err |= setup_sigframe(&frame->sig, regs, set); if (err == 0) - err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, usig); + err |= setup_return(regs, &ksig->ka, frame->sig.retcode, frame, + ksig->sig); if (err == 0) { /* @@ -303,7 +305,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs, int syscall) { struct thread_info *thread = current_thread_info(); - struct task_struct *tsk = current; sigset_t *oldset = sigmask_to_save(); int usig = ksig->sig; int ret; @@ -373,7 +374,7 @@ static void do_signal(struct pt_regs *regs, int syscall) if (!user_mode(regs)) return; - if (get_signsl(&ksig)) { + if (get_signal(&ksig)) { handle_signal(&ksig, regs, syscall); return; } diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild index 61b6d51..3942f74 100644 --- a/arch/x86/Kbuild +++ b/arch/x86/Kbuild @@ -17,6 +17,4 @@ obj-$(CONFIG_IA32_EMULATION) += ia32/ obj-y += platform/ obj-y += net/ -ifeq ($(CONFIG_X86_64),y) -obj-$(CONFIG_KEXEC) += purgatory/ -endif +obj-$(CONFIG_KEXEC_FILE) += purgatory/ diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 5d0bf1a..3632743 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -23,6 +23,7 @@ config X86 def_bool y select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS + select ARCH_HAS_FAST_MULTIPLIER select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO select HAVE_AOUT if 
X86_32 @@ -1585,9 +1586,6 @@ source kernel/Kconfig.hz config KEXEC bool "kexec system call" - select BUILD_BIN2C - select CRYPTO - select CRYPTO_SHA256 ---help--- kexec is a system call that implements the ability to shutdown your current kernel, and to start another kernel. It is like a reboot @@ -1602,9 +1600,22 @@ config KEXEC interface is strongly in flux, so no good recommendation can be made. +config KEXEC_FILE + bool "kexec file based system call" + select BUILD_BIN2C + depends on KEXEC + depends on X86_64 + depends on CRYPTO=y + depends on CRYPTO_SHA256=y + ---help--- + This is new version of kexec system call. This system call is + file based and takes file descriptors as system call argument + for kernel and initramfs as opposed to list of segments as + accepted by previous system call. + config KEXEC_VERIFY_SIG bool "Verify kernel signature during kexec_file_load() syscall" - depends on KEXEC + depends on KEXEC_FILE ---help--- This option makes kernel signature verification mandatory for kexec_file_load() syscall. If kernel is signature can not be diff --git a/arch/x86/Makefile b/arch/x86/Makefile index c1aa368..60087ca 100644 --- a/arch/x86/Makefile +++ b/arch/x86/Makefile @@ -184,11 +184,8 @@ archheaders: $(Q)$(MAKE) $(build)=arch/x86/syscalls all archprepare: -ifeq ($(CONFIG_KEXEC),y) -# Build only for 64bit. No loaders for 32bit yet. - ifeq ($(CONFIG_X86_64),y) +ifeq ($(CONFIG_KEXEC_FILE),y) $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c - endif endif ### @@ -254,6 +251,7 @@ archclean: $(Q)rm -rf $(objtree)/arch/x86_64 $(Q)$(MAKE) $(clean)=$(boot) $(Q)$(MAKE) $(clean)=arch/x86/tools + $(Q)$(MAKE) $(clean)=arch/x86/purgatory PHONY += kvmconfig kvmconfig: diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c index f277184..dca9842 100644 --- a/arch/x86/boot/compressed/eboot.c +++ b/arch/x86/boot/compressed/eboot.c @@ -1032,7 +1032,6 @@ struct boot_params *make_boot_params(struct efi_config *c) int i; unsigned long ramdisk_addr; unsigned long ramdisk_size; - unsigned long initrd_addr_max; efi_early = c; sys_table = (efi_system_table_t *)(unsigned long)efi_early->table; @@ -1095,15 +1094,20 @@ struct boot_params *make_boot_params(struct efi_config *c) memset(sdt, 0, sizeof(*sdt)); - if (hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) - initrd_addr_max = -1UL; - else - initrd_addr_max = hdr->initrd_addr_max; - status = handle_cmdline_files(sys_table, image, (char *)(unsigned long)hdr->cmd_line_ptr, - "initrd=", initrd_addr_max, + "initrd=", hdr->initrd_addr_max, &ramdisk_addr, &ramdisk_size); + + if (status != EFI_SUCCESS && + hdr->xloadflags & XLF_CAN_BE_LOADED_ABOVE_4G) { + efi_printk(sys_table, "Trying to load files to higher address\n"); + status = handle_cmdline_files(sys_table, image, + (char *)(unsigned long)hdr->cmd_line_ptr, + "initrd=", -1UL, + &ramdisk_addr, &ramdisk_size); + } + if (status != EFI_SUCCESS) goto fail2; hdr->ramdisk_image = ramdisk_addr & 0xffffffff; diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S index cbed140..d6b8aa4 100644 --- a/arch/x86/boot/compressed/head_32.S +++ b/arch/x86/boot/compressed/head_32.S @@ -30,6 +30,33 @@ #include <asm/boot.h> #include <asm/asm-offsets.h> +/* + * Adjust our own GOT + * + * The relocation base must be in %ebx + * + * It is safe to call this macro more than once, because in some of the + * code paths multiple invocations are inevitable, e.g. via the efi* + * entry points. + * + * Relocation is only performed the first time. 
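 *
 * A rough C sketch of the same operation (got_fixed, _got and _egot as
 * in this file and the linker script; "load_bias" here stands for the
 * runtime displacement held in %ebx):
 *
 *	if (!got_fixed) {
 *		for (u32 *p = _got; p < _egot; p++)
 *			*p += load_bias;
 *		got_fixed = 1;
 *	}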
+ */ +.macro FIXUP_GOT + cmpb $1, got_fixed(%ebx) + je 2f + + leal _got(%ebx), %edx + leal _egot(%ebx), %ecx +1: + cmpl %ecx, %edx + jae 2f + addl %ebx, (%edx) + addl $4, %edx + jmp 1b +2: + movb $1, got_fixed(%ebx) +.endm + __HEAD ENTRY(startup_32) #ifdef CONFIG_EFI_STUB @@ -56,6 +83,9 @@ ENTRY(efi_pe_entry) add %esi, 88(%eax) pushl %eax + movl %esi, %ebx + FIXUP_GOT + call make_boot_params cmpl $0, %eax je fail @@ -81,6 +111,10 @@ ENTRY(efi32_stub_entry) leal efi32_config(%esi), %eax add %esi, 88(%eax) pushl %eax + + movl %esi, %ebx + FIXUP_GOT + 2: call efi_main cmpl $0, %eax @@ -190,19 +224,7 @@ relocated: shrl $2, %ecx rep stosl -/* - * Adjust our own GOT - */ - leal _got(%ebx), %edx - leal _egot(%ebx), %ecx -1: - cmpl %ecx, %edx - jae 2f - addl %ebx, (%edx) - addl $4, %edx - jmp 1b -2: - + FIXUP_GOT /* * Do the decompression, and jump to the new kernel.. */ @@ -225,8 +247,12 @@ relocated: xorl %ebx, %ebx jmp *%eax -#ifdef CONFIG_EFI_STUB .data +/* Have we relocated the GOT? */ +got_fixed: + .byte 0 + +#ifdef CONFIG_EFI_STUB efi32_config: .fill 11,8,0 .long efi_call_phys diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S index 2884e0c..50f69c7 100644 --- a/arch/x86/boot/compressed/head_64.S +++ b/arch/x86/boot/compressed/head_64.S @@ -32,6 +32,33 @@ #include <asm/processor-flags.h> #include <asm/asm-offsets.h> +/* + * Adjust our own GOT + * + * The relocation base must be in %rbx + * + * It is safe to call this macro more than once, because in some of the + * code paths multiple invocations are inevitable, e.g. via the efi* + * entry points. + * + * Relocation is only performed the first time. + */ +.macro FIXUP_GOT + cmpb $1, got_fixed(%rip) + je 2f + + leaq _got(%rip), %rdx + leaq _egot(%rip), %rcx +1: + cmpq %rcx, %rdx + jae 2f + addq %rbx, (%rdx) + addq $8, %rdx + jmp 1b +2: + movb $1, got_fixed(%rip) +.endm + __HEAD .code32 ENTRY(startup_32) @@ -252,10 +279,13 @@ ENTRY(efi_pe_entry) subq $1b, %rbp /* - * Relocate efi_config->call(). + * Relocate efi_config->call() and the GOT entries. */ addq %rbp, efi64_config+88(%rip) + movq %rbp, %rbx + FIXUP_GOT + movq %rax, %rdi call make_boot_params cmpq $0,%rax @@ -271,10 +301,13 @@ handover_entry: subq $1b, %rbp /* - * Relocate efi_config->call(). + * Relocate efi_config->call() and the GOT entries. */ movq efi_config(%rip), %rax addq %rbp, 88(%rax) + + movq %rbp, %rbx + FIXUP_GOT 2: movq efi_config(%rip), %rdi call efi_main @@ -385,19 +418,8 @@ relocated: shrq $3, %rcx rep stosq -/* - * Adjust our own GOT - */ - leaq _got(%rip), %rdx - leaq _egot(%rip), %rcx -1: - cmpq %rcx, %rdx - jae 2f - addq %rbx, (%rdx) - addq $8, %rdx - jmp 1b -2: - + FIXUP_GOT + /* * Do the decompression, and jump to the new kernel.. */ @@ -437,6 +459,10 @@ gdt: .quad 0x0000000000000000 /* TS continued */ gdt_end: +/* Have we relocated the GOT? 
*/ +got_fixed: + .byte 0 + #ifdef CONFIG_EFI_STUB efi_config: .quad 0 diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h index afcd35d..cfe3b95 100644 --- a/arch/x86/include/asm/bitops.h +++ b/arch/x86/include/asm/bitops.h @@ -497,8 +497,6 @@ static __always_inline int fls64(__u64 x) #include <asm-generic/bitops/sched.h> -#define ARCH_HAS_FAST_MULTIPLIER 1 - #include <asm/arch_hweight.h> #include <asm-generic/bitops/const_hweight.h> diff --git a/arch/x86/include/asm/io_apic.h b/arch/x86/include/asm/io_apic.h index 0aeed5c..1733ab4 100644 --- a/arch/x86/include/asm/io_apic.h +++ b/arch/x86/include/asm/io_apic.h @@ -227,6 +227,8 @@ static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned extern void io_apic_eoi(unsigned int apic, unsigned int vector); +extern bool mp_should_keep_irq(struct device *dev); + #else /* !CONFIG_X86_IO_APIC */ #define io_apic_assign_pci_irqs 0 @@ -237,6 +239,7 @@ static inline int mp_find_ioapic(u32 gsi) { return 0; } static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; } static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; } static inline void mp_unmap_irq(int irq) { } +static inline bool mp_should_keep_irq(struct device *dev) { return 1; } static inline int save_ioapic_entries(void) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 0ec0560..aa97a07 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -131,8 +131,13 @@ static inline int pte_exec(pte_t pte) static inline int pte_special(pte_t pte) { - return (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_SPECIAL)) == - (_PAGE_PRESENT|_PAGE_SPECIAL); + /* + * See CONFIG_NUMA_BALANCING pte_numa in include/asm-generic/pgtable.h. + * On x86 we have _PAGE_BIT_NUMA == _PAGE_BIT_GLOBAL+1 == + * __PAGE_BIT_SOFTW1 == _PAGE_BIT_SPECIAL. 
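	 *
	 * The check below therefore reports a pte as special when
	 * _PAGE_SPECIAL is set together with either _PAGE_PRESENT or
	 * _PAGE_PROTNONE, so a protnone (not-present) mapping keeps being
	 * recognised as special.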
+ */ + return (pte_flags(pte) & _PAGE_SPECIAL) && + (pte_flags(pte) & (_PAGE_PRESENT|_PAGE_PROTNONE)); } static inline unsigned long pte_pfn(pte_t pte) diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h index 5be9063..3874693 100644 --- a/arch/x86/include/asm/pgtable_64.h +++ b/arch/x86/include/asm/pgtable_64.h @@ -19,6 +19,7 @@ extern pud_t level3_ident_pgt[512]; extern pmd_t level2_kernel_pgt[512]; extern pmd_t level2_fixmap_pgt[512]; extern pmd_t level2_ident_pgt[512]; +extern pte_t level1_fixmap_pgt[512]; extern pgd_t init_level4_pgt[]; #define swapper_pg_dir init_level4_pgt diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index b5ea75c..ada2e2d 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile @@ -71,6 +71,7 @@ obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o obj-$(CONFIG_X86_TSC) += trace_clock.o obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o +obj-$(CONFIG_KEXEC_FILE) += kexec-bzimage64.o obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o obj-y += kprobes/ obj-$(CONFIG_MODULES) += module.o @@ -118,5 +119,4 @@ ifeq ($(CONFIG_X86_64),y) obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o obj-y += vsmp_64.o - obj-$(CONFIG_KEXEC) += kexec-bzimage64.o endif diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 29290f5..337ce5a 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -1070,6 +1070,11 @@ static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin, } if (flags & IOAPIC_MAP_ALLOC) { + /* special handling for legacy IRQs */ + if (irq < nr_legacy_irqs() && info->count == 1 && + mp_irqdomain_map(domain, irq, pin) != 0) + irq = -1; + if (irq > 0) info->count++; else if (info->count == 0) @@ -3896,7 +3901,15 @@ int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq, info->polarity = 1; } info->node = NUMA_NO_NODE; - info->set = 1; + + /* + * setup_IO_APIC_irqs() programs all legacy IRQs with default + * trigger and polarity attributes. Don't set the flag for that + * case so the first legacy IRQ user could reprogram the pin + * with real trigger and polarity attributes. 
+ */ + if (virq >= nr_legacy_irqs() || info->count) + info->set = 1; } set_io_apic_irq_attr(&attr, ioapic, hwirq, info->trigger, info->polarity); @@ -3946,6 +3959,18 @@ int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node) return ret; } +bool mp_should_keep_irq(struct device *dev) +{ + if (dev->power.is_prepared) + return true; +#ifdef CONFIG_PM_RUNTIME + if (dev->power.runtime_status == RPM_SUSPENDING) + return true; +#endif + + return false; +} + /* Enable IOAPIC early just for system timer */ void __init pre_init_apic_IRQ0(void) { diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c index 0553a34..a618fcd 100644 --- a/arch/x86/kernel/crash.c +++ b/arch/x86/kernel/crash.c @@ -182,8 +182,7 @@ void native_machine_crash_shutdown(struct pt_regs *regs) crash_save_cpu(regs, safe_smp_processor_id()); } -#ifdef CONFIG_X86_64 - +#ifdef CONFIG_KEXEC_FILE static int get_nr_ram_ranges_callback(unsigned long start_pfn, unsigned long nr_pfn, void *arg) { @@ -696,5 +695,4 @@ int crash_load_segments(struct kimage *image) return ret; } - -#endif /* CONFIG_X86_64 */ +#endif /* CONFIG_KEXEC_FILE */ diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c index 1e6cff5..44f1ed4 100644 --- a/arch/x86/kernel/irqinit.c +++ b/arch/x86/kernel/irqinit.c @@ -203,7 +203,7 @@ void __init native_init_IRQ(void) set_intr_gate(i, interrupt[i - FIRST_EXTERNAL_VECTOR]); } - if (!acpi_ioapic && !of_ioapic) + if (!acpi_ioapic && !of_ioapic && nr_legacy_irqs()) setup_irq(2, &irq2); #ifdef CONFIG_X86_32 diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index f304773..f1314d0 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -338,8 +338,10 @@ int arch_prepare_optimized_kprobe(struct optimized_kprobe *op) * a relative jump. 
*/ rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE; - if (abs(rel) > 0x7fffffff) + if (abs(rel) > 0x7fffffff) { + __arch_remove_optimized_kprobe(op, 0); return -ERANGE; + } buf = (u8 *)op->optinsn.insn; diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 8b04018..4859810 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -25,9 +25,11 @@ #include <asm/debugreg.h> #include <asm/kexec-bzimage64.h> +#ifdef CONFIG_KEXEC_FILE static struct kexec_file_ops *kexec_file_loaders[] = { &kexec_bzImage64_ops, }; +#endif static void free_transition_pgtable(struct kimage *image) { @@ -178,6 +180,7 @@ static void load_segments(void) ); } +#ifdef CONFIG_KEXEC_FILE /* Update purgatory as needed after various image segments have been prepared */ static int arch_update_purgatory(struct kimage *image) { @@ -209,6 +212,12 @@ static int arch_update_purgatory(struct kimage *image) return ret; } +#else /* !CONFIG_KEXEC_FILE */ +static inline int arch_update_purgatory(struct kimage *image) +{ + return 0; +} +#endif /* CONFIG_KEXEC_FILE */ int machine_kexec_prepare(struct kimage *image) { @@ -329,6 +338,7 @@ void arch_crash_save_vmcoreinfo(void) /* arch-dependent functionality related to kexec file-based syscall */ +#ifdef CONFIG_KEXEC_FILE int arch_kexec_kernel_image_probe(struct kimage *image, void *buf, unsigned long buf_len) { @@ -522,3 +532,4 @@ overflow: (int)ELF64_R_TYPE(rel[i].r_info), value); return -ENOEXEC; } +#endif /* CONFIG_KEXEC_FILE */ diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index bf7ef5c..0fa2960 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c @@ -68,6 +68,8 @@ static struct irqaction irq0 = { void __init setup_default_timer_irq(void) { + if (!nr_legacy_irqs()) + return; setup_irq(0, &irq0); } diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c index 167ffca..95a427e 100644 --- a/arch/x86/mm/dump_pagetables.c +++ b/arch/x86/mm/dump_pagetables.c @@ -48,7 +48,9 @@ enum address_markers_idx { LOW_KERNEL_NR, VMALLOC_START_NR, VMEMMAP_START_NR, +# ifdef CONFIG_X86_ESPFIX64 ESPFIX_START_NR, +# endif HIGH_KERNEL_NR, MODULES_VADDR_NR, MODULES_END_NR, @@ -71,7 +73,9 @@ static struct addr_marker address_markers[] = { { PAGE_OFFSET, "Low Kernel Mapping" }, { VMALLOC_START, "vmalloc() Area" }, { VMEMMAP_START, "Vmemmap" }, +# ifdef CONFIG_X86_ESPFIX64 { ESPFIX_BASE_ADDR, "ESPfix Area", 16 }, +# endif { __START_KERNEL_map, "High Kernel Mapping" }, { MODULES_VADDR, "Modules" }, { MODULES_END, "End Modules" }, diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c index 25e7e13..919b912 100644 --- a/arch/x86/mm/mmap.c +++ b/arch/x86/mm/mmap.c @@ -31,7 +31,7 @@ #include <linux/sched.h> #include <asm/elf.h> -struct __read_mostly va_alignment va_align = { +struct va_alignment __read_mostly va_align = { .flags = -1, }; diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c index 3865116..b9958c3 100644 --- a/arch/x86/pci/intel_mid_pci.c +++ b/arch/x86/pci/intel_mid_pci.c @@ -229,7 +229,7 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev) static void intel_mid_pci_irq_disable(struct pci_dev *dev) { - if (!dev->dev.power.is_prepared && dev->irq > 0) + if (!mp_should_keep_irq(&dev->dev) && dev->irq > 0) mp_unmap_irq(dev->irq); } diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c index bc1a2c3..eb500c2 100644 --- a/arch/x86/pci/irq.c +++ b/arch/x86/pci/irq.c @@ -1256,7 +1256,7 @@ static int pirq_enable_irq(struct pci_dev *dev) static void 
pirq_disable_irq(struct pci_dev *dev) { - if (io_apic_assign_pci_irqs && !dev->dev.power.is_prepared && + if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) && dev->irq) { mp_unmap_irq(dev->irq); dev->irq = 0; diff --git a/arch/x86/purgatory/Makefile b/arch/x86/purgatory/Makefile index 7fde9ee..899dd24 100644 --- a/arch/x86/purgatory/Makefile +++ b/arch/x86/purgatory/Makefile @@ -11,6 +11,7 @@ targets += purgatory.ro # sure how to relocate those. Like kexec-tools, use custom flags. KBUILD_CFLAGS := -fno-strict-aliasing -Wall -Wstrict-prototypes -fno-zero-initialized-in-bss -fno-builtin -ffreestanding -c -MD -Os -mcmodel=large +KBUILD_CFLAGS += -m$(BITS) $(obj)/purgatory.ro: $(PURGATORY_OBJS) FORCE $(call if_changed,ld) @@ -24,7 +25,4 @@ $(obj)/kexec-purgatory.c: $(obj)/purgatory.ro FORCE $(call if_changed,bin2c) -# No loaders for 32bits yet. -ifeq ($(CONFIG_X86_64),y) - obj-$(CONFIG_KEXEC) += kexec-purgatory.o -endif +obj-$(CONFIG_KEXEC_FILE) += kexec-purgatory.o diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index e8a1201..16fb009 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -1866,12 +1866,11 @@ static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, * * We can construct this by grafting the Xen provided pagetable into * head_64.S's preconstructed pagetables. We copy the Xen L2's into - * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This - * means that only the kernel has a physical mapping to start with - - * but that's enough to get __va working. We need to fill in the rest - * of the physical mapping once some sort of allocator has been set - * up. - * NOTE: for PVH, the page tables are native. + * level2_ident_pgt, and level2_kernel_pgt. This means that only the + * kernel has a physical mapping to start with - but that's enough to + * get __va working. We need to fill in the rest of the physical + * mapping once some sort of allocator has been set up. NOTE: for + * PVH, the page tables are native. */ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) { @@ -1902,8 +1901,11 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) /* L3_i[0] -> level2_ident_pgt */ convert_pfn_mfn(level3_ident_pgt); /* L3_k[510] -> level2_kernel_pgt - * L3_i[511] -> level2_fixmap_pgt */ + * L3_k[511] -> level2_fixmap_pgt */ convert_pfn_mfn(level3_kernel_pgt); + + /* L3_k[511][506] -> level1_fixmap_pgt */ + convert_pfn_mfn(level2_fixmap_pgt); } /* We get [511][511] and have Xen's version of level2_kernel_pgt */ l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); @@ -1913,21 +1915,15 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) addr[1] = (unsigned long)l3; addr[2] = (unsigned long)l2; /* Graft it onto L4[272][0]. Note that we creating an aliasing problem: - * Both L4[272][0] and L4[511][511] have entries that point to the same + * Both L4[272][0] and L4[511][510] have entries that point to the same * L2 (PMD) tables. Meaning that if you modify it in __va space * it will be also modified in the __ka space! 
(But if you just * modify the PMD table to point to other PTE's or none, then you * are OK - which is what cleanup_highmap does) */ copy_page(level2_ident_pgt, l2); - /* Graft it onto L4[511][511] */ + /* Graft it onto L4[511][510] */ copy_page(level2_kernel_pgt, l2); - /* Get [511][510] and graft that in level2_fixmap_pgt */ - l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); - l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); - copy_page(level2_fixmap_pgt, l2); - /* Note that we don't do anything with level1_fixmap_pgt which - * we don't need. */ if (!xen_feature(XENFEAT_auto_translated_physmap)) { /* Make pagetable pieces RO */ set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); @@ -1937,6 +1933,7 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); /* Pin down new L4 */ pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 3a617af..49c6c3d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -4,24 +4,23 @@ config ZONE_DMA config XTENSA def_bool y select ARCH_WANT_FRAME_POINTERS - select HAVE_IDE - select GENERIC_ATOMIC64 - select GENERIC_CLOCKEVENTS - select VIRT_TO_BUS - select GENERIC_IRQ_SHOW - select GENERIC_SCHED_CLOCK - select MODULES_USE_ELF_RELA - select GENERIC_PCI_IOMAP select ARCH_WANT_IPC_PARSE_VERSION select ARCH_WANT_OPTIONAL_GPIOLIB select BUILDTIME_EXTABLE_SORT select CLONE_BACKWARDS - select IRQ_DOMAIN - select HAVE_OPROFILE + select COMMON_CLK + select GENERIC_ATOMIC64 + select GENERIC_CLOCKEVENTS + select GENERIC_IRQ_SHOW + select GENERIC_PCI_IOMAP + select GENERIC_SCHED_CLOCK select HAVE_FUNCTION_TRACER select HAVE_IRQ_TIME_ACCOUNTING + select HAVE_OPROFILE select HAVE_PERF_EVENTS - select COMMON_CLK + select IRQ_DOMAIN + select MODULES_USE_ELF_RELA + select VIRT_TO_BUS help Xtensa processors are 32-bit RISC machines designed by Tensilica primarily for embedded systems. These processors are both @@ -62,7 +61,9 @@ config TRACE_IRQFLAGS_SUPPORT def_bool y config MMU - def_bool n + bool + default n if !XTENSA_VARIANT_CUSTOM + default XTENSA_VARIANT_MMU if XTENSA_VARIANT_CUSTOM config VARIANT_IRQ_SWITCH def_bool n @@ -102,8 +103,40 @@ config XTENSA_VARIANT_S6000 select VARIANT_IRQ_SWITCH select ARCH_REQUIRE_GPIOLIB select XTENSA_CALIBRATE_CCOUNT + +config XTENSA_VARIANT_CUSTOM + bool "Custom Xtensa processor configuration" + select MAY_HAVE_SMP + select HAVE_XTENSA_GPIO32 + help + Select this variant to use a custom Xtensa processor configuration. + You will be prompted for a processor variant CORENAME. endchoice +config XTENSA_VARIANT_CUSTOM_NAME + string "Xtensa Processor Custom Core Variant Name" + depends on XTENSA_VARIANT_CUSTOM + help + Provide the name of a custom Xtensa processor variant. + This CORENAME selects arch/xtensa/variant/CORENAME. + Dont forget you have to select MMU if you have one. 
+ +config XTENSA_VARIANT_NAME + string + default "dc232b" if XTENSA_VARIANT_DC232B + default "dc233c" if XTENSA_VARIANT_DC233C + default "fsf" if XTENSA_VARIANT_FSF + default "s6000" if XTENSA_VARIANT_S6000 + default XTENSA_VARIANT_CUSTOM_NAME if XTENSA_VARIANT_CUSTOM + +config XTENSA_VARIANT_MMU + bool "Core variant has a Full MMU (TLB, Pages, Protection, etc)" + depends on XTENSA_VARIANT_CUSTOM + default y + help + Build a Conventional Kernel with full MMU support, + ie: it supports a TLB with auto-loading, page protection. + config XTENSA_UNALIGNED_USER bool "Unaligned memory access in use space" help @@ -156,13 +189,9 @@ config HOTPLUG_CPU Say N if you want to disable CPU hotplug. -config MATH_EMULATION - bool "Math emulation" - help - Can we use information of configuration file? - config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX bool "Initialize Xtensa MMU inside the Linux kernel code" + depends on MMU default y help Earlier version initialized the MMU in the exception vector @@ -192,6 +221,7 @@ config INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX config HIGHMEM bool "High Memory Support" + depends on MMU help Linux can use the full amount of RAM in the system by default. However, the default MMUv2 setup only maps the @@ -208,6 +238,32 @@ config HIGHMEM If unsure, say Y. +config FAST_SYSCALL_XTENSA + bool "Enable fast atomic syscalls" + default n + help + fast_syscall_xtensa is a syscall that can make atomic operations + on UP kernel when processor has no s32c1i support. + + This syscall is deprecated. It may have issues when called with + invalid arguments. It is provided only for backwards compatibility. + Only enable it if your userspace software requires it. + + If unsure, say N. + +config FAST_SYSCALL_SPILL_REGISTERS + bool "Enable spill registers syscall" + default n + help + fast_syscall_spill_registers is a syscall that spills all active + register windows of a calling userspace task onto its stack. + + This syscall is deprecated. It may have issues when called with + invalid arguments. It is provided only for backwards compatibility. + Only enable it if your userspace software requires it. + + If unsure, say N. + endmenu config XTENSA_CALIBRATE_CCOUNT @@ -250,12 +306,14 @@ config XTENSA_PLATFORM_ISS config XTENSA_PLATFORM_XT2000 bool "XT2000" + select HAVE_IDE help XT2000 is the name of Tensilica's feature-rich emulation platform. This hardware is capable of running a full Linux distribution. config XTENSA_PLATFORM_S6105 bool "S6105" + select HAVE_IDE select SERIAL_CONSOLE select NO_IOPORT_MAP diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile index 81250ec..4725330 100644 --- a/arch/xtensa/Makefile +++ b/arch/xtensa/Makefile @@ -4,6 +4,7 @@ # for more details. # # Copyright (C) 2001 - 2005 Tensilica Inc. +# Copyright (C) 2014 Cadence Design Systems Inc. # # This file is included by the global makefile so that you can add your own # architecture-specific flags and dependencies. Remember to do have actions @@ -13,11 +14,7 @@ # Core configuration. # (Use VAR=<xtensa_config> to use another default compiler.) 
-variant-$(CONFIG_XTENSA_VARIANT_FSF) := fsf -variant-$(CONFIG_XTENSA_VARIANT_DC232B) := dc232b -variant-$(CONFIG_XTENSA_VARIANT_DC233C) := dc233c -variant-$(CONFIG_XTENSA_VARIANT_S6000) := s6000 -variant-$(CONFIG_XTENSA_VARIANT_LINUX_CUSTOM) := custom +variant-y := $(patsubst "%",%,$(CONFIG_XTENSA_VARIANT_NAME)) VARIANT = $(variant-y) export VARIANT diff --git a/arch/xtensa/boot/dts/kc705.dts b/arch/xtensa/boot/dts/kc705.dts index 742a347..c4d17a3 100644 --- a/arch/xtensa/boot/dts/kc705.dts +++ b/arch/xtensa/boot/dts/kc705.dts @@ -4,8 +4,11 @@ / { compatible = "cdns,xtensa-kc705"; + chosen { + bootargs = "earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=0x38000000"; + }; memory@0 { device_type = "memory"; - reg = <0x00000000 0x08000000>; + reg = <0x00000000 0x38000000>; }; }; diff --git a/arch/xtensa/configs/common_defconfig b/arch/xtensa/configs/common_defconfig index f6000fe..721df12 100644 --- a/arch/xtensa/configs/common_defconfig +++ b/arch/xtensa/configs/common_defconfig @@ -66,7 +66,6 @@ CONFIG_XTENSA_ARCH_LINUX_BE=y CONFIG_MMU=y # CONFIG_XTENSA_UNALIGNED_USER is not set # CONFIG_PREEMPT is not set -# CONFIG_MATH_EMULATION is not set # CONFIG_HIGHMEM is not set # diff --git a/arch/xtensa/configs/iss_defconfig b/arch/xtensa/configs/iss_defconfig index 1493c68..b966baf 100644 --- a/arch/xtensa/configs/iss_defconfig +++ b/arch/xtensa/configs/iss_defconfig @@ -146,7 +146,6 @@ CONFIG_XTENSA_VARIANT_FSF=y # CONFIG_XTENSA_VARIANT_S6000 is not set # CONFIG_XTENSA_UNALIGNED_USER is not set # CONFIG_PREEMPT is not set -# CONFIG_MATH_EMULATION is not set CONFIG_XTENSA_CALIBRATE_CCOUNT=y CONFIG_SERIAL_CONSOLE=y CONFIG_XTENSA_ISS_NETWORK=y @@ -308,7 +307,7 @@ CONFIG_MISC_DEVICES=y # EEPROM support # # CONFIG_EEPROM_93CX6 is not set -CONFIG_HAVE_IDE=y +# CONFIG_HAVE_IDE is not set # CONFIG_IDE is not set # diff --git a/arch/xtensa/configs/s6105_defconfig b/arch/xtensa/configs/s6105_defconfig index 12a492a..9471265 100644 --- a/arch/xtensa/configs/s6105_defconfig +++ b/arch/xtensa/configs/s6105_defconfig @@ -109,7 +109,6 @@ CONFIG_VARIANT_IRQ_SWITCH=y CONFIG_XTENSA_VARIANT_S6000=y # CONFIG_XTENSA_UNALIGNED_USER is not set CONFIG_PREEMPT=y -# CONFIG_MATH_EMULATION is not set # CONFIG_HIGHMEM is not set CONFIG_XTENSA_CALIBRATE_CCOUNT=y CONFIG_SERIAL_CONSOLE=y diff --git a/arch/xtensa/include/asm/cacheflush.h b/arch/xtensa/include/asm/cacheflush.h index 555a98a..e72aaca 100644 --- a/arch/xtensa/include/asm/cacheflush.h +++ b/arch/xtensa/include/asm/cacheflush.h @@ -37,6 +37,7 @@ * specials for cache aliasing: * * __flush_invalidate_dcache_page_alias(vaddr,paddr) + * __invalidate_dcache_page_alias(vaddr,paddr) * __invalidate_icache_page_alias(vaddr,paddr) */ @@ -62,6 +63,7 @@ extern void __flush_invalidate_dcache_range(unsigned long, unsigned long); #if defined(CONFIG_MMU) && (DCACHE_WAY_SIZE > PAGE_SIZE) extern void __flush_invalidate_dcache_page_alias(unsigned long, unsigned long); +extern void __invalidate_dcache_page_alias(unsigned long, unsigned long); #else static inline void __flush_invalidate_dcache_page_alias(unsigned long virt, unsigned long phys) { } diff --git a/arch/xtensa/include/asm/fixmap.h b/arch/xtensa/include/asm/fixmap.h index 9f6c33d0..62b507d 100644 --- a/arch/xtensa/include/asm/fixmap.h +++ b/arch/xtensa/include/asm/fixmap.h @@ -23,8 +23,8 @@ * Here we define all the compile-time 'special' virtual * addresses. 
The point is to have a constant address at * compile time, but to set the physical address only - * in the boot process. We allocate these special addresses - * from the end of the consistent memory region backwards. + * in the boot process. We allocate these special addresses + * from the start of the consistent memory region upwards. * Also this lets us do fail-safe vmalloc(), we * can guarantee that these special addresses and * vmalloc()-ed addresses never overlap. @@ -38,7 +38,8 @@ enum fixed_addresses { #ifdef CONFIG_HIGHMEM /* reserved pte's for temporary kernel mappings */ FIX_KMAP_BEGIN, - FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1, + FIX_KMAP_END = FIX_KMAP_BEGIN + + (KM_TYPE_NR * NR_CPUS * DCACHE_N_COLORS) - 1, #endif __end_of_fixed_addresses }; @@ -47,7 +48,28 @@ enum fixed_addresses { #define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT) #define FIXADDR_START ((FIXADDR_TOP - FIXADDR_SIZE) & PMD_MASK) -#include <asm-generic/fixmap.h> +#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) +#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) + +#ifndef __ASSEMBLY__ +/* + * 'index to address' translation. If anyone tries to use the idx + * directly without translation, we catch the bug with a NULL-deference + * kernel oops. Illegal ranges of incoming indices are caught too. + */ +static __always_inline unsigned long fix_to_virt(const unsigned int idx) +{ + BUILD_BUG_ON(idx >= __end_of_fixed_addresses); + return __fix_to_virt(idx); +} + +static inline unsigned long virt_to_fix(const unsigned long vaddr) +{ + BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); + return __virt_to_fix(vaddr); +} + +#endif #define kmap_get_fixmap_pte(vaddr) \ pte_offset_kernel( \ diff --git a/arch/xtensa/include/asm/highmem.h b/arch/xtensa/include/asm/highmem.h index 2653ef5..2c7901e 100644 --- a/arch/xtensa/include/asm/highmem.h +++ b/arch/xtensa/include/asm/highmem.h @@ -12,19 +12,55 @@ #ifndef _XTENSA_HIGHMEM_H #define _XTENSA_HIGHMEM_H +#include <linux/wait.h> #include <asm/cacheflush.h> #include <asm/fixmap.h> #include <asm/kmap_types.h> #include <asm/pgtable.h> -#define PKMAP_BASE (FIXADDR_START - PMD_SIZE) -#define LAST_PKMAP PTRS_PER_PTE +#define PKMAP_BASE ((FIXADDR_START - \ + (LAST_PKMAP + 1) * PAGE_SIZE) & PMD_MASK) +#define LAST_PKMAP (PTRS_PER_PTE * DCACHE_N_COLORS) #define LAST_PKMAP_MASK (LAST_PKMAP - 1) #define PKMAP_NR(virt) (((virt) - PKMAP_BASE) >> PAGE_SHIFT) #define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT)) #define kmap_prot PAGE_KERNEL +#if DCACHE_WAY_SIZE > PAGE_SIZE +#define get_pkmap_color get_pkmap_color +static inline int get_pkmap_color(struct page *page) +{ + return DCACHE_ALIAS(page_to_phys(page)); +} + +extern unsigned int last_pkmap_nr_arr[]; + +static inline unsigned int get_next_pkmap_nr(unsigned int color) +{ + last_pkmap_nr_arr[color] = + (last_pkmap_nr_arr[color] + DCACHE_N_COLORS) & LAST_PKMAP_MASK; + return last_pkmap_nr_arr[color] + color; +} + +static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color) +{ + return pkmap_nr < DCACHE_N_COLORS; +} + +static inline int get_pkmap_entries_count(unsigned int color) +{ + return LAST_PKMAP / DCACHE_N_COLORS; +} + +extern wait_queue_head_t pkmap_map_wait_arr[]; + +static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color) +{ + return pkmap_map_wait_arr + color; +} +#endif + extern pte_t *pkmap_page_table; void *kmap_high(struct page *page); diff --git a/arch/xtensa/include/asm/page.h b/arch/xtensa/include/asm/page.h index 
47f5823..abe24c6 100644 --- a/arch/xtensa/include/asm/page.h +++ b/arch/xtensa/include/asm/page.h @@ -78,7 +78,9 @@ # define DCACHE_ALIAS_EQ(a,b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0) #else # define DCACHE_ALIAS_ORDER 0 +# define DCACHE_ALIAS(a) ((void)(a), 0) #endif +#define DCACHE_N_COLORS (1 << DCACHE_ALIAS_ORDER) #if ICACHE_WAY_SIZE > PAGE_SIZE # define ICACHE_ALIAS_ORDER (ICACHE_WAY_SHIFT - PAGE_SHIFT) @@ -134,6 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size) #endif struct page; +struct vm_area_struct; extern void clear_page(void *page); extern void copy_page(void *to, void *from); @@ -143,8 +146,15 @@ extern void copy_page(void *to, void *from); */ #if DCACHE_WAY_SIZE > PAGE_SIZE -extern void clear_user_page(void*, unsigned long, struct page*); -extern void copy_user_page(void*, void*, unsigned long, struct page*); +extern void clear_page_alias(void *vaddr, unsigned long paddr); +extern void copy_page_alias(void *to, void *from, + unsigned long to_paddr, unsigned long from_paddr); + +#define clear_user_highpage clear_user_highpage +void clear_user_highpage(struct page *page, unsigned long vaddr); +#define __HAVE_ARCH_COPY_USER_HIGHPAGE +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma); #else # define clear_user_page(page, vaddr, pg) clear_page(page) # define copy_user_page(to, from, vaddr, pg) copy_page(to, from) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 4b0ca35..b2173e5 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -67,7 +67,12 @@ #define VMALLOC_START 0xC0000000 #define VMALLOC_END 0xC7FEFFFF #define TLBTEMP_BASE_1 0xC7FF0000 -#define TLBTEMP_BASE_2 0xC7FF8000 +#define TLBTEMP_BASE_2 (TLBTEMP_BASE_1 + DCACHE_WAY_SIZE) +#if 2 * DCACHE_WAY_SIZE > ICACHE_WAY_SIZE +#define TLBTEMP_SIZE (2 * DCACHE_WAY_SIZE) +#else +#define TLBTEMP_SIZE ICACHE_WAY_SIZE +#endif /* * For the Xtensa architecture, the PTE layout is as follows: diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index fd686dc..c7211e7 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -52,7 +52,12 @@ */ .macro get_fs ad, sp GET_CURRENT(\ad,\sp) +#if THREAD_CURRENT_DS > 1020 + addi \ad, \ad, TASK_THREAD + l32i \ad, \ad, THREAD_CURRENT_DS - TASK_THREAD +#else l32i \ad, \ad, THREAD_CURRENT_DS +#endif .endm /* diff --git a/arch/xtensa/include/uapi/asm/ioctls.h b/arch/xtensa/include/uapi/asm/ioctls.h index b4cb110..a47909f 100644 --- a/arch/xtensa/include/uapi/asm/ioctls.h +++ b/arch/xtensa/include/uapi/asm/ioctls.h @@ -28,17 +28,17 @@ #define TCSETSW 0x5403 #define TCSETSF 0x5404 -#define TCGETA _IOR('t', 23, struct termio) -#define TCSETA _IOW('t', 24, struct termio) -#define TCSETAW _IOW('t', 25, struct termio) -#define TCSETAF _IOW('t', 28, struct termio) +#define TCGETA 0x80127417 /* _IOR('t', 23, struct termio) */ +#define TCSETA 0x40127418 /* _IOW('t', 24, struct termio) */ +#define TCSETAW 0x40127419 /* _IOW('t', 25, struct termio) */ +#define TCSETAF 0x4012741C /* _IOW('t', 28, struct termio) */ #define TCSBRK _IO('t', 29) #define TCXONC _IO('t', 30) #define TCFLSH _IO('t', 31) -#define TIOCSWINSZ _IOW('t', 103, struct winsize) -#define TIOCGWINSZ _IOR('t', 104, struct winsize) +#define TIOCSWINSZ 0x40087467 /* _IOW('t', 103, struct winsize) */ +#define TIOCGWINSZ 0x80087468 /* _IOR('t', 104, struct winsize) */ #define TIOCSTART _IO('t', 110) /* start output, like ^Q */ 
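/*
 * A worked example of where the hard-coded values above come from,
 * assuming xtensa uses the generic asm-generic/ioctl.h layout
 * (dir << 30 | size << 16 | type << 8 | nr), e.g. for
 * TCGETA = _IOR('t', 23, struct termio):
 *
 *	_IOC_READ << 30              = 0x80000000	(dir  = 2)
 *	sizeof(struct termio) << 16  = 0x00120000	(size = 18)
 *	't' << 8                     = 0x00007400
 *	23                           = 0x00000017
 *	                               ----------
 *	TCGETA                       = 0x80127417
 *
 * Spelling the numbers out means the header no longer needs the struct
 * definitions just to provide the ioctl numbers.
 */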
#define TIOCSTOP _IO('t', 111) /* stop output, like ^S */ #define TIOCOUTQ _IOR('t', 115, int) /* output queue size */ @@ -88,7 +88,6 @@ #define TIOCSETD _IOW('T', 35, int) #define TIOCGETD _IOR('T', 36, int) #define TCSBRKP _IOW('T', 37, int) /* Needed for POSIX tcsendbreak()*/ -#define TIOCTTYGSTRUCT _IOR('T', 38, struct tty_struct) /* For debugging only*/ #define TIOCSBRK _IO('T', 39) /* BSD compatibility */ #define TIOCCBRK _IO('T', 40) /* BSD compatibility */ #define TIOCGSID _IOR('T', 41, pid_t) /* Return the session ID of FD*/ @@ -114,8 +113,10 @@ #define TIOCSERGETLSR _IOR('T', 89, unsigned int) /* Get line status reg. */ /* ioctl (fd, TIOCSERGETLSR, &result) where result may be as below */ # define TIOCSER_TEMT 0x01 /* Transmitter physically empty */ -#define TIOCSERGETMULTI _IOR('T', 90, struct serial_multiport_struct) /* Get multiport config */ -#define TIOCSERSETMULTI _IOW('T', 91, struct serial_multiport_struct) /* Set multiport config */ +#define TIOCSERGETMULTI 0x80a8545a /* Get multiport config */ + /* _IOR('T', 90, struct serial_multiport_struct) */ +#define TIOCSERSETMULTI 0x40a8545b /* Set multiport config */ + /* _IOW('T', 91, struct serial_multiport_struct) */ #define TIOCMIWAIT _IO('T', 92) /* wait for a change on serial input line(s) */ #define TIOCGICOUNT 0x545D /* read serial port inline interrupt counts */ diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h index b939552..8883fc8 100644 --- a/arch/xtensa/include/uapi/asm/unistd.h +++ b/arch/xtensa/include/uapi/asm/unistd.h @@ -739,7 +739,10 @@ __SYSCALL(334, sys_sched_setattr, 2) #define __NR_sched_getattr 335 __SYSCALL(335, sys_sched_getattr, 3) -#define __NR_syscall_count 336 +#define __NR_renameat2 336 +__SYSCALL(336, sys_renameat2, 5) + +#define __NR_syscall_count 337 /* * sysxtensa syscall handler diff --git a/arch/xtensa/kernel/align.S b/arch/xtensa/kernel/align.S index d4cef60..890004a 100644 --- a/arch/xtensa/kernel/align.S +++ b/arch/xtensa/kernel/align.S @@ -8,6 +8,7 @@ * this archive for more details. * * Copyright (C) 2001 - 2005 Tensilica, Inc. + * Copyright (C) 2014 Cadence Design Systems Inc. * * Rewritten by Chris Zankel <chris@zankel.net> * @@ -174,6 +175,10 @@ ENTRY(fast_unaligned) s32i a0, a2, PT_AREG2 s32i a3, a2, PT_AREG3 + rsr a3, excsave1 + movi a4, fast_unaligned_fixup + s32i a4, a3, EXC_TABLE_FIXUP + /* Keep value of SAR in a0 */ rsr a0, sar @@ -225,10 +230,6 @@ ENTRY(fast_unaligned) addx8 a5, a6, a5 jx a5 # jump into table - /* Invalid instruction, CRITICAL! */ -.Linvalid_instruction_load: - j .Linvalid_instruction - /* Load: Load memory address. */ .Lload: movi a3, ~3 @@ -272,18 +273,6 @@ ENTRY(fast_unaligned) /* Set target register. */ 1: - -#if XCHAL_HAVE_LOOPS - rsr a5, lend # check if we reached LEND - bne a7, a5, 1f - rsr a5, lcount # and LCOUNT != 0 - beqz a5, 1f - addi a5, a5, -1 # decrement LCOUNT and set - rsr a7, lbeg # set PC to LBEGIN - wsr a5, lcount -#endif - -1: wsr a7, epc1 # skip load instruction extui a4, a4, INSN_T, 4 # extract target register movi a5, .Lload_table addx8 a4, a4, a5 @@ -326,6 +315,35 @@ ENTRY(fast_unaligned) mov a3, a14 ; _j 1f; .align 8 mov a3, a15 ; _j 1f; .align 8 + /* We cannot handle this exception. */ + + .extern _kernel_exception +.Linvalid_instruction_load: +.Linvalid_instruction_store: + + movi a4, 0 + rsr a3, excsave1 + s32i a4, a3, EXC_TABLE_FIXUP + + /* Restore a4...a8 and SAR, set SP, and jump to default exception. 
*/ + + l32i a8, a2, PT_AREG8 + l32i a7, a2, PT_AREG7 + l32i a6, a2, PT_AREG6 + l32i a5, a2, PT_AREG5 + l32i a4, a2, PT_AREG4 + wsr a0, sar + mov a1, a2 + + rsr a0, ps + bbsi.l a0, PS_UM_BIT, 2f # jump if user mode + + movi a0, _kernel_exception + jx a0 + +2: movi a0, _user_exception + jx a0 + 1: # a7: instruction pointer, a4: instruction, a3: value movi a6, 0 # mask: ffffffff:00000000 @@ -353,17 +371,6 @@ ENTRY(fast_unaligned) /* Get memory address */ 1: -#if XCHAL_HAVE_LOOPS - rsr a4, lend # check if we reached LEND - bne a7, a4, 1f - rsr a4, lcount # and LCOUNT != 0 - beqz a4, 1f - addi a4, a4, -1 # decrement LCOUNT and set - rsr a7, lbeg # set PC to LBEGIN - wsr a4, lcount -#endif - -1: wsr a7, epc1 # skip store instruction movi a4, ~3 and a4, a4, a8 # align memory address @@ -375,25 +382,25 @@ ENTRY(fast_unaligned) #endif __ssa8r a8 - __src_b a7, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) + __src_b a8, a5, a6 # lo-mask F..F0..0 (BE) 0..0F..F (LE) __src_b a6, a6, a5 # hi-mask 0..0F..F (BE) F..F0..0 (LE) #ifdef UNALIGNED_USER_EXCEPTION l32e a5, a4, -8 #else l32i a5, a4, 0 # load lower address word #endif - and a5, a5, a7 # mask - __sh a7, a3 # shift value - or a5, a5, a7 # or with original value + and a5, a5, a8 # mask + __sh a8, a3 # shift value + or a5, a5, a8 # or with original value #ifdef UNALIGNED_USER_EXCEPTION s32e a5, a4, -8 - l32e a7, a4, -4 + l32e a8, a4, -4 #else s32i a5, a4, 0 # store - l32i a7, a4, 4 # same for upper address word + l32i a8, a4, 4 # same for upper address word #endif __sl a5, a3 - and a6, a7, a6 + and a6, a8, a6 or a6, a6, a5 #ifdef UNALIGNED_USER_EXCEPTION s32e a6, a4, -4 @@ -401,9 +408,27 @@ ENTRY(fast_unaligned) s32i a6, a4, 4 #endif - /* Done. restore stack and return */ - .Lexit: +#if XCHAL_HAVE_LOOPS + rsr a4, lend # check if we reached LEND + bne a7, a4, 1f + rsr a4, lcount # and LCOUNT != 0 + beqz a4, 1f + addi a4, a4, -1 # decrement LCOUNT and set + rsr a7, lbeg # set PC to LBEGIN + wsr a4, lcount +#endif + +1: wsr a7, epc1 # skip emulated instruction + + /* Update icount if we're single-stepping in userspace. */ + rsr a4, icountlevel + beqz a4, 1f + bgeui a4, LOCKLEVEL + 1, 1f + rsr a4, icount + addi a4, a4, 1 + wsr a4, icount +1: movi a4, 0 rsr a3, excsave1 s32i a4, a3, EXC_TABLE_FIXUP @@ -424,31 +449,40 @@ ENTRY(fast_unaligned) l32i a2, a2, PT_AREG2 rfe - /* We cannot handle this exception. */ +ENDPROC(fast_unaligned) - .extern _kernel_exception -.Linvalid_instruction_store: -.Linvalid_instruction: +ENTRY(fast_unaligned_fixup) - /* Restore a4...a8 and SAR, set SP, and jump to default exception. 
*/ + l32i a2, a3, EXC_TABLE_DOUBLE_SAVE + wsr a3, excsave1 l32i a8, a2, PT_AREG8 l32i a7, a2, PT_AREG7 l32i a6, a2, PT_AREG6 l32i a5, a2, PT_AREG5 l32i a4, a2, PT_AREG4 + l32i a0, a2, PT_AREG2 + xsr a0, depc # restore depc and a0 wsr a0, sar - mov a1, a2 + + rsr a0, exccause + s32i a0, a2, PT_DEPC # mark as a regular exception rsr a0, ps - bbsi.l a2, PS_UM_BIT, 1f # jump if user mode + bbsi.l a0, PS_UM_BIT, 1f # jump if user mode - movi a0, _kernel_exception + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_KERNEL # load handler + l32i a3, a2, PT_AREG3 jx a0 - -1: movi a0, _user_exception +1: + rsr a0, exccause + addx4 a0, a0, a3 # find entry in table + l32i a0, a0, EXC_TABLE_FAST_USER # load handler + l32i a3, a2, PT_AREG3 jx a0 -ENDPROC(fast_unaligned) +ENDPROC(fast_unaligned_fixup) #endif /* XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION */ diff --git a/arch/xtensa/kernel/entry.S b/arch/xtensa/kernel/entry.S index ef7f499..82bbfa5 100644 --- a/arch/xtensa/kernel/entry.S +++ b/arch/xtensa/kernel/entry.S @@ -986,6 +986,8 @@ ENDPROC(fast_syscall_unrecoverable) * j done */ +#ifdef CONFIG_FAST_SYSCALL_XTENSA + #define TRY \ .section __ex_table, "a"; \ .word 66f, 67f; \ @@ -1001,9 +1003,8 @@ ENTRY(fast_syscall_xtensa) movi a7, 4 # sizeof(unsigned int) access_ok a3, a7, a0, a2, .Leac # a0: scratch reg, a2: sp - addi a6, a6, -1 # assuming SYS_XTENSA_ATOMIC_SET = 1 - _bgeui a6, SYS_XTENSA_COUNT - 1, .Lill - _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP - 1, .Lnswp + _bgeui a6, SYS_XTENSA_COUNT, .Lill + _bnei a6, SYS_XTENSA_ATOMIC_CMP_SWP, .Lnswp /* Fall through for ATOMIC_CMP_SWP. */ @@ -1015,27 +1016,26 @@ TRY s32i a5, a3, 0 # different, modify value l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 1 # and return 1 - addi a6, a6, 1 # restore a6 (really necessary?) rfe 1: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, 0 # return 0 (note that we cannot set - addi a6, a6, 1 # restore a6 (really necessary?) rfe .Lnswp: /* Atomic set, add, and exg_add. */ TRY l32i a7, a3, 0 # orig + addi a6, a6, -SYS_XTENSA_ATOMIC_SET add a0, a4, a7 # + arg moveqz a0, a4, a6 # set + addi a6, a6, SYS_XTENSA_ATOMIC_SET TRY s32i a0, a3, 0 # write new value mov a0, a2 mov a2, a7 l32i a7, a0, PT_AREG7 # restore a7 l32i a0, a0, PT_AREG0 # restore a0 - addi a6, a6, 1 # restore a6 (really necessary?) rfe CATCH @@ -1044,13 +1044,25 @@ CATCH movi a2, -EFAULT rfe -.Lill: l32i a7, a2, PT_AREG0 # restore a7 +.Lill: l32i a7, a2, PT_AREG7 # restore a7 l32i a0, a2, PT_AREG0 # restore a0 movi a2, -EINVAL rfe ENDPROC(fast_syscall_xtensa) +#else /* CONFIG_FAST_SYSCALL_XTENSA */ + +ENTRY(fast_syscall_xtensa) + + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, -ENOSYS + rfe + +ENDPROC(fast_syscall_xtensa) + +#endif /* CONFIG_FAST_SYSCALL_XTENSA */ + /* fast_syscall_spill_registers. * @@ -1066,6 +1078,8 @@ ENDPROC(fast_syscall_xtensa) * Note: We assume the stack pointer is EXC_TABLE_KSTK in the fixup handler. 
*/ +#ifdef CONFIG_FAST_SYSCALL_SPILL_REGISTERS + ENTRY(fast_syscall_spill_registers) /* Register a FIXUP handler (pass current wb as a parameter) */ @@ -1400,6 +1414,18 @@ ENTRY(fast_syscall_spill_registers_fixup_return) ENDPROC(fast_syscall_spill_registers_fixup_return) +#else /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ + +ENTRY(fast_syscall_spill_registers) + + l32i a0, a2, PT_AREG0 # restore a0 + movi a2, -ENOSYS + rfe + +ENDPROC(fast_syscall_spill_registers) + +#endif /* CONFIG_FAST_SYSCALL_SPILL_REGISTERS */ + #ifdef CONFIG_MMU /* * We should never get here. Bail out! @@ -1565,7 +1591,7 @@ ENTRY(fast_second_level_miss) rsr a0, excvaddr bltu a0, a3, 2f - addi a1, a0, -(2 << (DCACHE_ALIAS_ORDER + PAGE_SHIFT)) + addi a1, a0, -TLBTEMP_SIZE bgeu a1, a3, 2f /* Check if we have to restore an ITLB mapping. */ @@ -1820,7 +1846,6 @@ ENTRY(_switch_to) entry a1, 16 - mov a10, a2 # preserve 'prev' (a2) mov a11, a3 # and 'next' (a3) l32i a4, a2, TASK_THREAD_INFO @@ -1828,8 +1853,14 @@ ENTRY(_switch_to) save_xtregs_user a4 a6 a8 a9 a12 a13 THREAD_XTREGS_USER - s32i a0, a10, THREAD_RA # save return address - s32i a1, a10, THREAD_SP # save stack pointer +#if THREAD_RA > 1020 || THREAD_SP > 1020 + addi a10, a2, TASK_THREAD + s32i a0, a10, THREAD_RA - TASK_THREAD # save return address + s32i a1, a10, THREAD_SP - TASK_THREAD # save stack pointer +#else + s32i a0, a2, THREAD_RA # save return address + s32i a1, a2, THREAD_SP # save stack pointer +#endif /* Disable ints while we manipulate the stack pointer. */ @@ -1870,7 +1901,6 @@ ENTRY(_switch_to) load_xtregs_user a5 a6 a8 a9 a12 a13 THREAD_XTREGS_USER wsr a14, ps - mov a2, a10 # return 'prev' rsync retw diff --git a/arch/xtensa/kernel/pci-dma.c b/arch/xtensa/kernel/pci-dma.c index 2d9cc6d..e8b76b8 100644 --- a/arch/xtensa/kernel/pci-dma.c +++ b/arch/xtensa/kernel/pci-dma.c @@ -49,9 +49,8 @@ dma_alloc_coherent(struct device *dev,size_t size,dma_addr_t *handle,gfp_t flag) /* We currently don't support coherent memory outside KSEG */ - if (ret < XCHAL_KSEG_CACHED_VADDR - || ret >= XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(ret < XCHAL_KSEG_CACHED_VADDR || + ret > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); if (ret != 0) { @@ -68,10 +67,11 @@ EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { - long addr=(long)vaddr+XCHAL_KSEG_CACHED_VADDR-XCHAL_KSEG_BYPASS_VADDR; + unsigned long addr = (unsigned long)vaddr + + XCHAL_KSEG_CACHED_VADDR - XCHAL_KSEG_BYPASS_VADDR; - if (addr < 0 || addr >= XCHAL_KSEG_SIZE) - BUG(); + BUG_ON(addr < XCHAL_KSEG_CACHED_VADDR || + addr > XCHAL_KSEG_CACHED_VADDR + XCHAL_KSEG_SIZE - 1); free_pages(addr, get_order(size)); } diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c index 40b5a37..4d02e38 100644 --- a/arch/xtensa/kernel/smp.c +++ b/arch/xtensa/kernel/smp.c @@ -571,6 +571,7 @@ void flush_icache_range(unsigned long start, unsigned long end) }; on_each_cpu(ipi_flush_icache_range, &fd, 1); } +EXPORT_SYMBOL(flush_icache_range); /* ------------------------------------------------------------------------- */ diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c index eebbfd8..9d2f45f 100644 --- a/arch/xtensa/kernel/traps.c +++ b/arch/xtensa/kernel/traps.c @@ -101,9 +101,8 @@ static dispatch_init_table_t __initdata dispatch_init_table[] = { #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION #ifdef CONFIG_XTENSA_UNALIGNED_USER { EXCCAUSE_UNALIGNED, USER, fast_unaligned }, -#else -{ 
EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, #endif +{ EXCCAUSE_UNALIGNED, 0, do_unaligned_user }, { EXCCAUSE_UNALIGNED, KRNL, fast_unaligned }, #endif #ifdef CONFIG_MMU @@ -264,7 +263,6 @@ do_illegal_instruction(struct pt_regs *regs) */ #if XCHAL_UNALIGNED_LOAD_EXCEPTION || XCHAL_UNALIGNED_STORE_EXCEPTION -#ifndef CONFIG_XTENSA_UNALIGNED_USER void do_unaligned_user (struct pt_regs *regs) { @@ -286,7 +284,6 @@ do_unaligned_user (struct pt_regs *regs) } #endif -#endif void do_debug(struct pt_regs *regs) diff --git a/arch/xtensa/kernel/vectors.S b/arch/xtensa/kernel/vectors.S index 8453e6e..1b397a9 100644 --- a/arch/xtensa/kernel/vectors.S +++ b/arch/xtensa/kernel/vectors.S @@ -454,8 +454,14 @@ _DoubleExceptionVector_WindowOverflow: s32i a0, a2, PT_DEPC _DoubleExceptionVector_handle_exception: + addi a0, a0, -EXCCAUSE_UNALIGNED + beqz a0, 2f addx4 a0, a0, a3 - l32i a0, a0, EXC_TABLE_FAST_USER + l32i a0, a0, EXC_TABLE_FAST_USER + 4 * EXCCAUSE_UNALIGNED + xsr a3, excsave1 + jx a0 +2: + movi a0, user_exception xsr a3, excsave1 jx a0 diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S index d16db6d..fc1bc2b 100644 --- a/arch/xtensa/kernel/vmlinux.lds.S +++ b/arch/xtensa/kernel/vmlinux.lds.S @@ -269,13 +269,13 @@ SECTIONS .UserExceptionVector.literal) SECTION_VECTOR (_DoubleExceptionVector_literal, .DoubleExceptionVector.literal, - DOUBLEEXC_VECTOR_VADDR - 40, + DOUBLEEXC_VECTOR_VADDR - 48, SIZEOF(.UserExceptionVector.text), .UserExceptionVector.text) SECTION_VECTOR (_DoubleExceptionVector_text, .DoubleExceptionVector.text, DOUBLEEXC_VECTOR_VADDR, - 40, + 48, .DoubleExceptionVector.literal) . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3; diff --git a/arch/xtensa/mm/cache.c b/arch/xtensa/mm/cache.c index 63cbb86..d75aa14 100644 --- a/arch/xtensa/mm/cache.c +++ b/arch/xtensa/mm/cache.c @@ -59,9 +59,68 @@ * */ -#if (DCACHE_WAY_SIZE > PAGE_SIZE) && defined(CONFIG_HIGHMEM) -#error "HIGHMEM is not supported on cores with aliasing cache." 
-#endif +#if (DCACHE_WAY_SIZE > PAGE_SIZE) +static inline void kmap_invalidate_coherent(struct page *page, + unsigned long vaddr) +{ + if (!DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { + unsigned long kvaddr; + + if (!PageHighMem(page)) { + kvaddr = (unsigned long)page_to_virt(page); + + __invalidate_dcache_page(kvaddr); + } else { + kvaddr = TLBTEMP_BASE_1 + + (page_to_phys(page) & DCACHE_ALIAS_MASK); + + __invalidate_dcache_page_alias(kvaddr, + page_to_phys(page)); + } + } +} + +static inline void *coherent_kvaddr(struct page *page, unsigned long base, + unsigned long vaddr, unsigned long *paddr) +{ + if (PageHighMem(page) || !DCACHE_ALIAS_EQ(page_to_phys(page), vaddr)) { + *paddr = page_to_phys(page); + return (void *)(base + (vaddr & DCACHE_ALIAS_MASK)); + } else { + *paddr = 0; + return page_to_virt(page); + } +} + +void clear_user_highpage(struct page *page, unsigned long vaddr) +{ + unsigned long paddr; + void *kvaddr = coherent_kvaddr(page, TLBTEMP_BASE_1, vaddr, &paddr); + + pagefault_disable(); + kmap_invalidate_coherent(page, vaddr); + set_bit(PG_arch_1, &page->flags); + clear_page_alias(kvaddr, paddr); + pagefault_enable(); +} + +void copy_user_highpage(struct page *dst, struct page *src, + unsigned long vaddr, struct vm_area_struct *vma) +{ + unsigned long dst_paddr, src_paddr; + void *dst_vaddr = coherent_kvaddr(dst, TLBTEMP_BASE_1, vaddr, + &dst_paddr); + void *src_vaddr = coherent_kvaddr(src, TLBTEMP_BASE_2, vaddr, + &src_paddr); + + pagefault_disable(); + kmap_invalidate_coherent(dst, vaddr); + set_bit(PG_arch_1, &dst->flags); + copy_page_alias(dst_vaddr, src_vaddr, dst_paddr, src_paddr); + pagefault_enable(); +} + +#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */ #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK @@ -103,7 +162,8 @@ void flush_dcache_page(struct page *page) if (!alias && !mapping) return; - __flush_invalidate_dcache_page((long)page_address(page)); + virt = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(virt, phys); virt = TLBTEMP_BASE_1 + (temp & DCACHE_ALIAS_MASK); @@ -168,13 +228,12 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep) #if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) { - - unsigned long paddr = (unsigned long) page_address(page); unsigned long phys = page_to_phys(page); - unsigned long tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); - - __flush_invalidate_dcache_page(paddr); + unsigned long tmp; + tmp = TLBTEMP_BASE_1 + (phys & DCACHE_ALIAS_MASK); + __flush_invalidate_dcache_page_alias(tmp, phys); + tmp = TLBTEMP_BASE_1 + (addr & DCACHE_ALIAS_MASK); __flush_invalidate_dcache_page_alias(tmp, phys); __invalidate_icache_page_alias(tmp, phys); diff --git a/arch/xtensa/mm/highmem.c b/arch/xtensa/mm/highmem.c index 17a8c0d..8cfb71e 100644 --- a/arch/xtensa/mm/highmem.c +++ b/arch/xtensa/mm/highmem.c @@ -14,23 +14,45 @@ static pte_t *kmap_pte; +#if DCACHE_WAY_SIZE > PAGE_SIZE +unsigned int last_pkmap_nr_arr[DCACHE_N_COLORS]; +wait_queue_head_t pkmap_map_wait_arr[DCACHE_N_COLORS]; + +static void __init kmap_waitqueues_init(void) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(pkmap_map_wait_arr); ++i) + init_waitqueue_head(pkmap_map_wait_arr + i); +} +#else +static inline void kmap_waitqueues_init(void) +{ +} +#endif + +static inline enum fixed_addresses kmap_idx(int type, unsigned long color) +{ + return (type + KM_TYPE_NR * smp_processor_id()) * DCACHE_N_COLORS + + color; +} + void 
*kmap_atomic(struct page *page) { enum fixed_addresses idx; unsigned long vaddr; - int type; pagefault_disable(); if (!PageHighMem(page)) return page_address(page); - type = kmap_atomic_idx_push(); - idx = type + KM_TYPE_NR * smp_processor_id(); + idx = kmap_idx(kmap_atomic_idx_push(), + DCACHE_ALIAS(page_to_phys(page))); vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); #ifdef CONFIG_DEBUG_HIGHMEM - BUG_ON(!pte_none(*(kmap_pte - idx))); + BUG_ON(!pte_none(*(kmap_pte + idx))); #endif - set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL_EXEC)); + set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC)); return (void *)vaddr; } @@ -38,12 +60,10 @@ EXPORT_SYMBOL(kmap_atomic); void __kunmap_atomic(void *kvaddr) { - int idx, type; - if (kvaddr >= (void *)FIXADDR_START && kvaddr < (void *)FIXADDR_TOP) { - type = kmap_atomic_idx(); - idx = type + KM_TYPE_NR * smp_processor_id(); + int idx = kmap_idx(kmap_atomic_idx(), + DCACHE_ALIAS((unsigned long)kvaddr)); /* * Force other mappings to Oops if they'll try to access this @@ -51,7 +71,7 @@ void __kunmap_atomic(void *kvaddr) * is a bad idea also, in case the page changes cacheability * attributes or becomes a protected page in a hypervisor. */ - pte_clear(&init_mm, kvaddr, kmap_pte - idx); + pte_clear(&init_mm, kvaddr, kmap_pte + idx); local_flush_tlb_kernel_range((unsigned long)kvaddr, (unsigned long)kvaddr + PAGE_SIZE); @@ -69,4 +89,5 @@ void __init kmap_init(void) /* cache the first kmap pte */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); + kmap_waitqueues_init(); } diff --git a/arch/xtensa/mm/misc.S b/arch/xtensa/mm/misc.S index 1f68558..11a01c3 100644 --- a/arch/xtensa/mm/misc.S +++ b/arch/xtensa/mm/misc.S @@ -110,41 +110,24 @@ ENTRY(__tlbtemp_mapping_start) #if (DCACHE_WAY_SIZE > PAGE_SIZE) /* - * clear_user_page (void *addr, unsigned long vaddr, struct page *page) - * a2 a3 a4 + * clear_page_alias(void *addr, unsigned long paddr) + * a2 a3 */ -ENTRY(clear_user_page) +ENTRY(clear_page_alias) entry a1, 32 - /* Mark page dirty and determine alias. */ + /* Skip setting up a temporary DTLB if not aliased low page. */ - movi a7, (1 << PG_ARCH_1) - l32i a5, a4, PAGE_FLAGS - xor a6, a2, a3 - extui a3, a3, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER - or a5, a5, a7 - slli a3, a3, PAGE_SHIFT - s32i a5, a4, PAGE_FLAGS + movi a5, PAGE_OFFSET + movi a6, 0 + beqz a3, 1f - /* Skip setting up a temporary DTLB if not aliased. */ - - beqz a6, 1f - - /* Invalidate kernel page. */ - - mov a10, a2 - call8 __invalidate_dcache_page - - /* Setup a temporary DTLB with the color of the VPN */ - - movi a4, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff - movi a5, TLBTEMP_BASE_1 # virt - add a6, a2, a4 # ppn - add a2, a5, a3 # add 'color' + /* Setup a temporary DTLB for the addr. */ + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) + mov a4, a2 wdtlb a6, a2 dsync @@ -165,62 +148,43 @@ ENTRY(clear_user_page) /* We need to invalidate the temporary idtlb entry, if any. */ -1: addi a2, a2, -PAGE_SIZE - idtlb a2 +1: idtlb a4 dsync retw -ENDPROC(clear_user_page) +ENDPROC(clear_page_alias) /* - * copy_page_user (void *to, void *from, unsigned long vaddr, struct page *page) - * a2 a3 a4 a5 + * copy_page_alias(void *to, void *from, + * a2 a3 + * unsigned long to_paddr, unsigned long from_paddr) + * a4 a5 */ -ENTRY(copy_user_page) +ENTRY(copy_page_alias) entry a1, 32 - /* Mark page dirty and determine alias for destination. 
*/ - - movi a8, (1 << PG_ARCH_1) - l32i a9, a5, PAGE_FLAGS - xor a6, a2, a4 - xor a7, a3, a4 - extui a4, a4, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a6, a6, PAGE_SHIFT, DCACHE_ALIAS_ORDER - extui a7, a7, PAGE_SHIFT, DCACHE_ALIAS_ORDER - or a9, a9, a8 - slli a4, a4, PAGE_SHIFT - s32i a9, a5, PAGE_FLAGS - movi a5, ((PAGE_KERNEL | _PAGE_HW_WRITE) - PAGE_OFFSET) & 0xffffffff - - beqz a6, 1f - - /* Invalidate dcache */ - - mov a10, a2 - call8 __invalidate_dcache_page + /* Skip setting up a temporary DTLB for destination if not aliased. */ - /* Setup a temporary DTLB with a matching color. */ + movi a6, 0 + movi a7, 0 + beqz a4, 1f - movi a8, TLBTEMP_BASE_1 # base - add a6, a2, a5 # ppn - add a2, a8, a4 # add 'color' + /* Setup a temporary DTLB for destination. */ + addi a6, a4, (PAGE_KERNEL | _PAGE_HW_WRITE) wdtlb a6, a2 dsync - /* Skip setting up a temporary DTLB for destination if not aliased. */ + /* Skip setting up a temporary DTLB for source if not aliased. */ -1: beqz a7, 1f +1: beqz a5, 1f - /* Setup a temporary DTLB with a matching color. */ + /* Setup a temporary DTLB for source. */ - movi a8, TLBTEMP_BASE_2 # base - add a7, a3, a5 # ppn - add a3, a8, a4 + addi a7, a5, PAGE_KERNEL addi a8, a3, 1 # way1 wdtlb a7, a8 @@ -271,7 +235,7 @@ ENTRY(copy_user_page) retw -ENDPROC(copy_user_page) +ENDPROC(copy_page_alias) #endif @@ -300,6 +264,30 @@ ENTRY(__flush_invalidate_dcache_page_alias) retw ENDPROC(__flush_invalidate_dcache_page_alias) + +/* + * void __invalidate_dcache_page_alias (addr, phys) + * a2 a3 + */ + +ENTRY(__invalidate_dcache_page_alias) + + entry sp, 16 + + movi a7, 0 # required for exception handler + addi a6, a3, (PAGE_KERNEL | _PAGE_HW_WRITE) + mov a4, a2 + wdtlb a6, a2 + dsync + + ___invalidate_dcache_page a2 a3 + + idtlb a4 + dsync + + retw + +ENDPROC(__invalidate_dcache_page_alias) #endif ENTRY(__tlbtemp_mapping_itlb) diff --git a/arch/xtensa/mm/mmu.c b/arch/xtensa/mm/mmu.c index 3429b48..abe4513 100644 --- a/arch/xtensa/mm/mmu.c +++ b/arch/xtensa/mm/mmu.c @@ -18,32 +18,38 @@ #include <asm/io.h> #if defined(CONFIG_HIGHMEM) -static void * __init init_pmd(unsigned long vaddr) +static void * __init init_pmd(unsigned long vaddr, unsigned long n_pages) { pgd_t *pgd = pgd_offset_k(vaddr); pmd_t *pmd = pmd_offset(pgd, vaddr); + pte_t *pte; + unsigned long i; - if (pmd_none(*pmd)) { - unsigned i; - pte_t *pte = alloc_bootmem_low_pages(PAGE_SIZE); + n_pages = ALIGN(n_pages, PTRS_PER_PTE); - for (i = 0; i < 1024; i++) - pte_clear(NULL, 0, pte + i); + pr_debug("%s: vaddr: 0x%08lx, n_pages: %ld\n", + __func__, vaddr, n_pages); - set_pmd(pmd, __pmd(((unsigned long)pte) & PAGE_MASK)); - BUG_ON(pte != pte_offset_kernel(pmd, 0)); - pr_debug("%s: vaddr: 0x%08lx, pmd: 0x%p, pte: 0x%p\n", - __func__, vaddr, pmd, pte); - return pte; - } else { - return pte_offset_kernel(pmd, 0); + pte = alloc_bootmem_low_pages(n_pages * sizeof(pte_t)); + + for (i = 0; i < n_pages; ++i) + pte_clear(NULL, 0, pte + i); + + for (i = 0; i < n_pages; i += PTRS_PER_PTE, ++pmd) { + pte_t *cur_pte = pte + i; + + BUG_ON(!pmd_none(*pmd)); + set_pmd(pmd, __pmd(((unsigned long)cur_pte) & PAGE_MASK)); + BUG_ON(cur_pte != pte_offset_kernel(pmd, 0)); + pr_debug("%s: pmd: 0x%p, pte: 0x%p\n", + __func__, pmd, cur_pte); } + return pte; } static void __init fixedrange_init(void) { - BUILD_BUG_ON(FIXADDR_SIZE > PMD_SIZE); - init_pmd(__fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK); + init_pmd(__fix_to_virt(0), __end_of_fixed_addresses); } #endif @@ -52,7 +58,7 @@ void __init paging_init(void) memset(swapper_pg_dir, 0, 
PAGE_SIZE); #ifdef CONFIG_HIGHMEM fixedrange_init(); - pkmap_page_table = init_pmd(PKMAP_BASE); + pkmap_page_table = init_pmd(PKMAP_BASE, LAST_PKMAP); kmap_init(); #endif }
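A few notes on the xtensa hunks above. First, a rough C rendering of what the fast_syscall_xtensa paths implement: the operation code is now range-checked directly against SYS_XTENSA_COUNT (the earlier off-by-one adjustment around SYS_XTENSA_ATOMIC_SET is gone), and the .Lill error path restores a7 from PT_AREG7 instead of PT_AREG0. The function and argument names below are placeholders; this is a sketch of the semantics as read from the assembly, not kernel source:

    /* Sketch only: mirrors the fast-path assembly above. */
    static int xtensa_fast_atomic(int op, unsigned int *addr,
                                  unsigned int arg1, unsigned int arg2)
    {
            unsigned int old = *addr;

            switch (op) {
            case SYS_XTENSA_ATOMIC_CMP_SWP:
                    if (old != arg1)
                            return 0;        /* no match, nothing written */
                    *addr = arg2;
                    return 1;                /* matched, value swapped */
            case SYS_XTENSA_ATOMIC_SET:
            case SYS_XTENSA_ATOMIC_ADD:
            case SYS_XTENSA_ATOMIC_EXG_ADD:
                    *addr = (op == SYS_XTENSA_ATOMIC_SET) ? arg1 : old + arg1;
                    return old;              /* previous value */
            default:
                    return -EINVAL;          /* out-of-range op (.Lill) */
            }
    }

With CONFIG_FAST_SYSCALL_XTENSA (or CONFIG_FAST_SYSCALL_SPILL_REGISTERS) disabled, the stubs added above simply restore a0 and return -ENOSYS.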
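The mm/cache.c change drops the "HIGHMEM is not supported on cores with aliasing cache" #error and instead adds colour-aware helpers (kmap_invalidate_coherent(), coherent_kvaddr(), clear_user_highpage(), copy_user_highpage()). For orientation, the aliasing macros they rely on are assumed to follow the usual arch/xtensa/include/asm/page.h shape; these definitions are not part of this diff:

    /* Assumed definitions, for reference only: */
    #define DCACHE_ALIAS_ORDER    (DCACHE_WAY_SHIFT - PAGE_SHIFT)
    #define DCACHE_ALIAS_MASK     (PAGE_MASK & (DCACHE_WAY_SIZE - 1))
    #define DCACHE_ALIAS(a)       (((a) & DCACHE_ALIAS_MASK) >> PAGE_SHIFT)
    #define DCACHE_ALIAS_EQ(a, b) ((((a) ^ (b)) & DCACHE_ALIAS_MASK) == 0)
    #define DCACHE_N_COLORS       (1 << DCACHE_ALIAS_ORDER)

Two mappings of a page share cache lines only when their DCACHE_ALIAS() colours match; when they differ, coherent_kvaddr() hands back a TLBTEMP_BASE_1/TLBTEMP_BASE_2 address of the matching colour, and clear_page_alias()/copy_page_alias() work through that temporary mapping instead of the plain lowmem address.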
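In mm/highmem.c, kmap_atomic() now folds the page's colour into the fixmap slot and indexes the kmap PTEs upwards (kmap_pte + idx rather than kmap_pte - idx), so the temporary mapping gets the same colour as the page's physical address. Condensed from the hunks above:

    unsigned long color = DCACHE_ALIAS(page_to_phys(page));
    int idx = (kmap_atomic_idx_push() + KM_TYPE_NR * smp_processor_id())
                      * DCACHE_N_COLORS + color;
    unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);

    set_pte(kmap_pte + idx, mk_pte(page, PAGE_KERNEL_EXEC));

Keeping the colours equal is what makes the atomic kmap coherent with the alias-based flush helpers elsewhere in the series; the new per-colour last_pkmap_nr_arr[] and pkmap_map_wait_arr[] arrays appear to serve the same purpose for the regular kmap()/pkmap path.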
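Finally, init_pmd() in mm/mmu.c now takes the number of pages to cover and can populate several consecutive pmd entries, which lets paging_init() size the pkmap table with init_pmd(PKMAP_BASE, LAST_PKMAP) and fixedrange_init() cover the whole fixmap range. A worked example with hypothetical numbers (PTRS_PER_PTE is 1024 with 4 KiB pages):

    n_pages = ALIGN(LAST_PKMAP, PTRS_PER_PTE);  /* round up to whole page tables */
    n_pmds  = n_pages / PTRS_PER_PTE;           /* one pmd entry per 1024 PTEs   */
    /* e.g. LAST_PKMAP == 2048 -> n_pages == 2048, n_pmds == 2: two consecutive
     * page tables covering 8 MiB of virtual space starting at PKMAP_BASE. */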