From 7e91c7df29b5e196de3dc6f086c8937973bd0b88 Mon Sep 17 00:00:00 2001 From: Stefano Stabellini Date: Tue, 7 Feb 2017 19:58:02 +0200 Subject: swiotlb-xen: implement xen_swiotlb_dma_mmap callback This function creates a userspace mapping for the DMA-coherent memory. Signed-off-by: Stefano Stabellini Signed-off-by: Oleksandr Dmytryshyn Signed-off-by: Andrii Anisov Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/xen/mm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index bd62d94..cd1684e 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -198,6 +198,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = { .unmap_page = xen_swiotlb_unmap_page, .dma_supported = xen_swiotlb_dma_supported, .set_dma_mask = xen_swiotlb_set_dma_mask, + .mmap = xen_swiotlb_dma_mmap, }; int __init xen_mm_init(void) -- cgit v1.1 From 69369f52d28a34c84acb6f2a8a585e743441566a Mon Sep 17 00:00:00 2001 From: Andrii Anisov Date: Tue, 7 Feb 2017 19:58:03 +0200 Subject: swiotlb-xen: implement xen_swiotlb_get_sgtable callback Signed-off-by: Andrii Anisov Signed-off-by: Stefano Stabellini Signed-off-by: Konrad Rzeszutek Wilk --- arch/arm/xen/mm.c | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c index cd1684e..76ea48a 100644 --- a/arch/arm/xen/mm.c +++ b/arch/arm/xen/mm.c @@ -199,6 +199,7 @@ static struct dma_map_ops xen_swiotlb_dma_ops = { .dma_supported = xen_swiotlb_dma_supported, .set_dma_mask = xen_swiotlb_set_dma_mask, .mmap = xen_swiotlb_dma_mmap, + .get_sgtable = xen_swiotlb_get_sgtable, }; int __init xen_mm_init(void) -- cgit v1.1 From 179125085bd4ca70e8e028913193a93653bd12f7 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Tue, 14 Feb 2017 10:26:03 -0800 Subject: ARM: OMAP3: Fix smartreflex platform data regression Commit d9d9cec02835 ("ARM: OMAP2+: Remove legacy data from hwmod for omap3") dropped platform data that should no longer be used as we're booting with device tree. It turns out that smartreflex is still using platform data and produces the following errors during probe: smartreflex smartreflex.0: invalid resource smartreflex smartreflex.0: omap_sr_probe: ioremap fail smartreflex: probe of smartreflex.0 failed with error -22 smartreflex smartreflex.1: invalid resource smartreflex smartreflex.1: omap_sr_probe: ioremap fail smartreflex: probe of smartreflex.1 failed with error -22 Let's fix the regression by adding back the smartreflex hwmod data. The long term plan is to update the smartreflex driver to use device tree based probing, as sketched below. 
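For illustration, a minimal sketch of what device tree based probing could look like on the driver side. This is hypothetical code, not part of this series; it only shows the standard platform device resource pattern, with the rest of the probe elided:

static int omap_sr_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	/* With DT-based probing the MMIO region comes from the device
	 * tree "reg" property, so no hwmod address space platform data
	 * is needed at all. */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... the rest of the probe would use "base" ... */
	return 0;
}
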
Fixes: d9d9cec02835 ("ARM: OMAP2+: Remove legacy data from hwmod for omap3") Reported-by: Adam Ford Tested-by: Adam Ford Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 56f917e..507ff07 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -2112,11 +2112,20 @@ static struct omap_hwmod_ocp_if omap3_l4_core__i2c3 = { }; /* L4 CORE -> SR1 interface */ +static struct omap_hwmod_addr_space omap3_sr1_addr_space[] = { + { + .pa_start = OMAP34XX_SR1_BASE, + .pa_end = OMAP34XX_SR1_BASE + SZ_1K - 1, + .flags = ADDR_TYPE_RT, + }, + { }, +}; static struct omap_hwmod_ocp_if omap34xx_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr1_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr1_addr_space, .user = OCP_USER_MPU, }; @@ -2124,15 +2133,25 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr1 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap36xx_sr1_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr1_addr_space, .user = OCP_USER_MPU, }; /* L4 CORE -> SR1 interface */ +static struct omap_hwmod_addr_space omap3_sr2_addr_space[] = { + { + .pa_start = OMAP34XX_SR2_BASE, + .pa_end = OMAP34XX_SR2_BASE + SZ_1K - 1, + .flags = ADDR_TYPE_RT, + }, + { }, +}; static struct omap_hwmod_ocp_if omap34xx_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap34xx_sr2_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr2_addr_space, .user = OCP_USER_MPU, }; @@ -2140,6 +2159,7 @@ static struct omap_hwmod_ocp_if omap36xx_l4_core__sr2 = { .master = &omap3xxx_l4_core_hwmod, .slave = &omap36xx_sr2_hwmod, .clk = "sr_l4_ick", + .addr = omap3_sr2_addr_space, .user = OCP_USER_MPU, }; -- cgit v1.1 From 448c077eeb02240c430db2a2c3bf5285a4c65d66 Mon Sep 17 00:00:00 2001 From: Matthijs van Duin Date: Thu, 16 Feb 2017 01:05:04 +0100 Subject: ARM: OMAP5 / DRA7: Fix HYP mode boot for thumb2 build 'adr' yields a data-pointer, not a function-pointer. Fixes: 999f934de195 ("ARM: omap5/dra7xx: Enable booting secondary CPU in HYP mode") Signed-off-by: Matthijs van Duin Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap-headsmp.S | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap-headsmp.S b/arch/arm/mach-omap2/omap-headsmp.S index fe36ce2..4c6f14c 100644 --- a/arch/arm/mach-omap2/omap-headsmp.S +++ b/arch/arm/mach-omap2/omap-headsmp.S @@ -17,6 +17,7 @@ #include #include +#include #include "omap44xx.h" @@ -66,7 +67,7 @@ wait_2: ldr r2, =AUX_CORE_BOOT0_PA @ read from AuxCoreBoot0 cmp r0, r4 bne wait_2 ldr r12, =API_HYP_ENTRY - adr r0, hyp_boot + badr r0, hyp_boot smc #0 hyp_boot: b omap_secondary_startup -- cgit v1.1 From c74fd80f2f41d05f350bb478151021f88551afe8 Mon Sep 17 00:00:00 2001 From: Dan Streetman Date: Fri, 13 Jan 2017 15:07:51 -0500 Subject: xen: do not re-use pirq number cached in pci device msi msg data Revert the main part of commit: af42b8d12f8a ("xen: fix MSI setup and teardown for PV on HVM guests") That commit introduced reading the pci device's msi message data to see if a pirq was previously configured for the device's msi/msix, and re-use that pirq. At the time, that was the correct behavior. 
However, a later change to Qemu caused it to call into the Xen hypervisor to unmap all pirqs for a pci device, when the pci device disables its MSI/MSIX vectors; specifically the Qemu commit: c976437c7dba9c7444fb41df45468968aaa326ad ("qemu-xen: free all the pirqs for msi/msix when driver unload") Once Qemu added this pirq unmapping, it was no longer correct for the kernel to re-use the pirq number cached in the pci device msi message data. All Qemu releases since 2.1.0 contain the patch that unmaps the pirqs when the pci device disables its MSI/MSIX vectors. This bug is causing failures to initialize multiple NVMe controllers under Xen, because the NVMe driver sets up a single MSIX vector for each controller (concurrently), and then after using that to talk to the controller for some configuration data, it disables the single MSIX vector and re-configures all the MSIX vectors it needs. So the MSIX setup code tries to re-use the cached pirq from the first vector for each controller, but the hypervisor has already given away that pirq to another controller, and its initialization fails. This is discussed in more detail at: https://lists.xen.org/archives/html/xen-devel/2017-01/msg00447.html Fixes: af42b8d12f8a ("xen: fix MSI setup and teardown for PV on HVM guests") Signed-off-by: Dan Streetman Reviewed-by: Stefano Stabellini Acked-by: Konrad Rzeszutek Wilk Signed-off-by: Boris Ostrovsky --- arch/x86/pci/xen.c | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c index e1fb269..292ab03 100644 --- a/arch/x86/pci/xen.c +++ b/arch/x86/pci/xen.c @@ -234,23 +234,14 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) return 1; for_each_pci_msi_entry(msidesc, dev) { - __pci_read_msi_msg(msidesc, &msg); - pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | - ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (msg.data != XEN_PIRQ_MSI_DATA || - xen_irq_from_pirq(pirq) < 0) { - pirq = xen_allocate_pirq_msi(dev, msidesc); - if (pirq < 0) { - irq = -ENODEV; - goto error; - } - xen_msi_compose_msg(dev, pirq, &msg); - __pci_write_msi_msg(msidesc, &msg); - dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); - } else { - dev_dbg(&dev->dev, - "xen: msi already bound to pirq=%d\n", pirq); + pirq = xen_allocate_pirq_msi(dev, msidesc); + if (pirq < 0) { + irq = -ENODEV; + goto error; } + xen_msi_compose_msg(dev, pirq, &msg); + __pci_write_msi_msg(msidesc, &msg); + dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); irq = xen_bind_pirq_msi_to_irq(dev, msidesc, pirq, (type == PCI_CAP_ID_MSI) ? nvec : 1, (type == PCI_CAP_ID_MSIX) ? -- cgit v1.1 From f5432f01240ef69a391940d623b6a51768aefd65 Mon Sep 17 00:00:00 2001 From: Sekhar Nori Date: Wed, 15 Feb 2017 20:42:52 +0530 Subject: ARM: dts: am57xx-idk: tpic2810 is on I2C bus, not SPI commit 50e95b6b854c ("ARM: dts: am57xx-idk: Add Industrial output support") added the TPIC2810 device-tree node under SPI bus instead of I2C1. Fix it. Tested on AM572x IDK by driving on-board LEDs connected to TPIC2810 Fixes: 50e95b6b854c ("ARM: dts: am57xx-idk: Add Industrial output support") Signed-off-by: Sekhar Nori Acked-by: Andrew F. 
Davis Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am57xx-idk-common.dtsi | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/am57xx-idk-common.dtsi b/arch/arm/boot/dts/am57xx-idk-common.dtsi index 814a720..d0a55b8 100644 --- a/arch/arm/boot/dts/am57xx-idk-common.dtsi +++ b/arch/arm/boot/dts/am57xx-idk-common.dtsi @@ -311,6 +311,13 @@ /* ID & VBUS GPIOs provided in board dts */ }; }; + + tpic2810: tpic2810@60 { + compatible = "ti,tpic2810"; + reg = <0x60>; + gpio-controller; + #gpio-cells = <2>; + }; }; &mcspi3 { @@ -326,13 +333,6 @@ spi-max-frequency = <1000000>; spi-cpol; }; - - tpic2810: tpic2810@60 { - compatible = "ti,tpic2810"; - reg = <0x60>; - gpio-controller; - #gpio-cells = <2>; - }; }; &uart3 { -- cgit v1.1 From 0341735226dcfa9d3727ff001e030f879ab91ca2 Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 17 Feb 2017 06:51:17 -0800 Subject: ARM: omap2plus_defconfig: Enable INPUT_MOUSEDEV as loadable modules Otherwise mice won't be happy. Signed-off-by: Tony Lindgren --- arch/arm/configs/omap2plus_defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 195c98b..77ffccf 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig @@ -188,6 +188,7 @@ CONFIG_WL12XX=m CONFIG_WL18XX=m CONFIG_WLCORE_SPI=m CONFIG_WLCORE_SDIO=m +CONFIG_INPUT_MOUSEDEV=m CONFIG_INPUT_JOYDEV=m CONFIG_INPUT_EVDEV=m CONFIG_KEYBOARD_ATKBD=m -- cgit v1.1 From 48385896e9c53e1061f103e1271a6be9a7ea00c4 Mon Sep 17 00:00:00 2001 From: Teresa Remmet Date: Fri, 10 Feb 2017 13:29:07 +0100 Subject: ARM: dts: am335x-pcm953: Fix legacy wakeup source binding Replaced the legacy binding "gpio-key,wakeup" with "wakeup-source" as noted in the kernel documentation. Signed-off-by: Teresa Remmet Reported-by: Sudeep Holla Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-pcm-953.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/am335x-pcm-953.dtsi b/arch/arm/boot/dts/am335x-pcm-953.dtsi index 02981ea..1ec8e0d 100644 --- a/arch/arm/boot/dts/am335x-pcm-953.dtsi +++ b/arch/arm/boot/dts/am335x-pcm-953.dtsi @@ -63,14 +63,14 @@ label = "home"; linux,code = ; gpios = <&gpio3 7 GPIO_ACTIVE_HIGH>; - gpio-key,wakeup; + wakeup-source; }; button@1 { label = "menu"; linux,code = ; gpios = <&gpio3 8 GPIO_ACTIVE_HIGH>; - gpio-key,wakeup; + wakeup-source; }; }; -- cgit v1.1 From 7807e086a2d1f69cc1a57958cac04fea79fc2112 Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Sat, 11 Feb 2017 14:02:49 +0100 Subject: ARM: OMAP2+: gpmc-onenand: propagate error on initialization failure gpmc_probe_onenand_child returns success even on gpmc_onenand_init failure. Fix that. 
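The value of returning an error code is that the device tree probe path can then propagate it. Roughly, on the caller side (an illustrative sketch only; the real caller lives in the GPMC driver and the node parsing is elided):

static int gpmc_probe_onenand_child(struct platform_device *pdev,
				    struct device_node *child)
{
	struct omap_onenand_platform_data *gpmc_onenand_data;

	/* ... allocate gpmc_onenand_data and fill it from "child" ... */

	/* With gpmc_onenand_init() returning an int, a failure now
	 * propagates up instead of being silently ignored. */
	return gpmc_onenand_init(gpmc_onenand_data);
}
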
Signed-off-by: Ladislav Michl Acked-by: Roger Quadros Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/gpmc-onenand.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c index 8633c70..2944af8 100644 --- a/arch/arm/mach-omap2/gpmc-onenand.c +++ b/arch/arm/mach-omap2/gpmc-onenand.c @@ -367,7 +367,7 @@ static int gpmc_onenand_setup(void __iomem *onenand_base, int *freq_ptr) return ret; } -void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) +int gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) { int err; struct device *dev = &gpmc_onenand_device.dev; @@ -393,15 +393,17 @@ void gpmc_onenand_init(struct omap_onenand_platform_data *_onenand_data) if (err < 0) { dev_err(dev, "Cannot request GPMC CS %d, error %d\n", gpmc_onenand_data->cs, err); - return; + return err; } gpmc_onenand_resource.end = gpmc_onenand_resource.start + ONENAND_IO_SIZE - 1; - if (platform_device_register(&gpmc_onenand_device) < 0) { + err = platform_device_register(&gpmc_onenand_device); + if (err) { dev_err(dev, "Unable to register OneNAND device\n"); gpmc_cs_free(gpmc_onenand_data->cs); - return; } + + return err; } -- cgit v1.1 From ac28e47ccc3ff8dabce1aec6b224760c3e524044 Mon Sep 17 00:00:00 2001 From: Ladislav Michl Date: Tue, 21 Feb 2017 10:44:45 +0100 Subject: ARM: OMAP2+: Remove legacy gpmc-nand.c This code is no longer used and can be removed as we are using device tree. Removing this code also removes a dependency between drivers/mtd and arch/arm/mach-omap2, making further driver changes easier. Signed-off-by: Ladislav Michl [tony@atomide.com: removed from header too, updated comments] Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/Makefile | 3 - arch/arm/mach-omap2/gpmc-nand.c | 154 ---------------------------------------- 2 files changed, 157 deletions(-) delete mode 100644 arch/arm/mach-omap2/gpmc-nand.c (limited to 'arch') diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 093458b..c89757a 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile @@ -241,6 +241,3 @@ obj-$(CONFIG_MACH_OMAP2_TUSB6010) += usb-tusb6010.o onenand-$(CONFIG_MTD_ONENAND_OMAP2) := gpmc-onenand.o obj-y += $(onenand-m) $(onenand-y) - -nand-$(CONFIG_MTD_NAND_OMAP2) := gpmc-nand.o -obj-y += $(nand-m) $(nand-y) diff --git a/arch/arm/mach-omap2/gpmc-nand.c b/arch/arm/mach-omap2/gpmc-nand.c deleted file mode 100644 index f6ac027..0000000 --- a/arch/arm/mach-omap2/gpmc-nand.c +++ /dev/null @@ -1,154 +0,0 @@ -/* - * gpmc-nand.c - * - * Copyright (C) 2009 Texas Instruments - * Vimal Singh - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License version 2 as - * published by the Free Software Foundation. 
- */ - -#include -#include -#include -#include -#include -#include - -#include - -#include "soc.h" - -/* minimum size for IO mapping */ -#define NAND_IO_SIZE 4 - -static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt) -{ - /* platforms which support all ECC schemes */ - if (soc_is_am33xx() || soc_is_am43xx() || cpu_is_omap44xx() || - soc_is_omap54xx() || soc_is_dra7xx()) - return 1; - - if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW || - ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) { - if (cpu_is_omap24xx()) - return 0; - else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0)) - return 0; - else - return 1; - } - - /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes - * which require H/W based ECC error detection */ - if ((cpu_is_omap34xx() || cpu_is_omap3630()) && - ((ecc_opt == OMAP_ECC_BCH4_CODE_HW) || - (ecc_opt == OMAP_ECC_BCH8_CODE_HW))) - return 0; - - /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */ - if (ecc_opt == OMAP_ECC_HAM1_CODE_HW || - ecc_opt == OMAP_ECC_HAM1_CODE_SW) - return 1; - else - return 0; -} - -/* This function will go away once the device-tree convertion is complete */ -static void gpmc_set_legacy(struct omap_nand_platform_data *gpmc_nand_data, - struct gpmc_settings *s) -{ - /* Enable RD PIN Monitoring Reg */ - if (gpmc_nand_data->dev_ready) { - s->wait_on_read = true; - s->wait_on_write = true; - } - - if (gpmc_nand_data->devsize == NAND_BUSWIDTH_16) - s->device_width = GPMC_DEVWIDTH_16BIT; - else - s->device_width = GPMC_DEVWIDTH_8BIT; -} - -int gpmc_nand_init(struct omap_nand_platform_data *gpmc_nand_data, - struct gpmc_timings *gpmc_t) -{ - int err = 0; - struct gpmc_settings s; - struct platform_device *pdev; - struct resource gpmc_nand_res[] = { - { .flags = IORESOURCE_MEM, }, - { .flags = IORESOURCE_IRQ, }, - { .flags = IORESOURCE_IRQ, }, - }; - - BUG_ON(gpmc_nand_data->cs >= GPMC_CS_NUM); - - err = gpmc_cs_request(gpmc_nand_data->cs, NAND_IO_SIZE, - (unsigned long *)&gpmc_nand_res[0].start); - if (err < 0) { - pr_err("omap2-gpmc: Cannot request GPMC CS %d, error %d\n", - gpmc_nand_data->cs, err); - return err; - } - gpmc_nand_res[0].end = gpmc_nand_res[0].start + NAND_IO_SIZE - 1; - gpmc_nand_res[1].start = gpmc_get_client_irq(GPMC_IRQ_FIFOEVENTENABLE); - gpmc_nand_res[2].start = gpmc_get_client_irq(GPMC_IRQ_COUNT_EVENT); - - memset(&s, 0, sizeof(struct gpmc_settings)); - gpmc_set_legacy(gpmc_nand_data, &s); - - s.device_nand = true; - - if (gpmc_t) { - err = gpmc_cs_set_timings(gpmc_nand_data->cs, gpmc_t, &s); - if (err < 0) { - pr_err("omap2-gpmc: Unable to set gpmc timings: %d\n", - err); - return err; - } - } - - err = gpmc_cs_program_settings(gpmc_nand_data->cs, &s); - if (err < 0) - goto out_free_cs; - - err = gpmc_configure(GPMC_CONFIG_WP, 0); - if (err < 0) - goto out_free_cs; - - if (!gpmc_hwecc_bch_capable(gpmc_nand_data->ecc_opt)) { - pr_err("omap2-nand: Unsupported NAND ECC scheme selected\n"); - err = -EINVAL; - goto out_free_cs; - } - - - pdev = platform_device_alloc("omap2-nand", gpmc_nand_data->cs); - if (pdev) { - err = platform_device_add_resources(pdev, gpmc_nand_res, - ARRAY_SIZE(gpmc_nand_res)); - if (!err) - pdev->dev.platform_data = gpmc_nand_data; - } else { - err = -ENOMEM; - } - if (err) - goto out_free_pdev; - - err = platform_device_add(pdev); - if (err) { - dev_err(&pdev->dev, "Unable to register NAND device\n"); - goto out_free_pdev; - } - - return 0; - -out_free_pdev: - platform_device_put(pdev); -out_free_cs: - gpmc_cs_free(gpmc_nand_data->cs); - - return err; -} -- cgit v1.1 From 
75013fb16f8484898eaa8d0b08fed942d790f029 Mon Sep 17 00:00:00 2001 From: Masami Hiramatsu Date: Wed, 1 Mar 2017 01:23:24 +0900 Subject: kprobes/x86: Fix kernel panic when certain exception-handling addresses are probed Fix the exception table entry check by using the probed address instead of the address of the copied instruction. This bug may cause an unexpected kernel panic if a user probes an address where an exception can happen and which should be fixed up by __ex_table (e.g. copy_from_user). Unless a user puts a kprobe on such an address, this doesn't cause any problem. This bug was introduced years ago, by commit: 464846888d9a ("x86/kprobes: Fix a bug which can modify kernel code permanently"). Signed-off-by: Masami Hiramatsu Cc: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 464846888d9a ("x86/kprobes: Fix a bug which can modify kernel code permanently") Link: http://lkml.kernel.org/r/148829899399.28855.12581062400757221722.stgit@devbox Signed-off-by: Ingo Molnar --- arch/x86/kernel/kprobes/common.h | 2 +- arch/x86/kernel/kprobes/core.c | 6 +++--- arch/x86/kernel/kprobes/opt.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/kprobes/common.h b/arch/x86/kernel/kprobes/common.h index c6ee63f..d688826 100644 --- a/arch/x86/kernel/kprobes/common.h +++ b/arch/x86/kernel/kprobes/common.h @@ -67,7 +67,7 @@ #endif /* Ensure if the instruction can be boostable */ -extern int can_boost(kprobe_opcode_t *instruction); +extern int can_boost(kprobe_opcode_t *instruction, void *addr); /* Recover instruction if given address is probed */ extern unsigned long recover_probed_instruction(kprobe_opcode_t *buf, unsigned long addr); diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index 520b8df..88b3c94 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -166,12 +166,12 @@ NOKPROBE_SYMBOL(skip_prefixes); * Returns non-zero if opcode is boostable. * RIP relative instructions are adjusted at copying time in 64 bits mode */ -int can_boost(kprobe_opcode_t *opcodes) +int can_boost(kprobe_opcode_t *opcodes, void *addr) { kprobe_opcode_t opcode; kprobe_opcode_t *orig_opcodes = opcodes; - if (search_exception_tables((unsigned long)opcodes)) + if (search_exception_tables((unsigned long)addr)) return 0; /* Page fault may occur on this address. */ retry: @@ -416,7 +416,7 @@ static int arch_copy_kprobe(struct kprobe *p) * __copy_instruction can modify the displacement of the instruction, * but it doesn't affect boostable check. */ - if (can_boost(p->ainsn.insn)) + if (can_boost(p->ainsn.insn, p->addr)) p->ainsn.boostable = 0; else p->ainsn.boostable = -1; diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c index 3d1bee9..3e7c6e5 100644 --- a/arch/x86/kernel/kprobes/opt.c +++ b/arch/x86/kernel/kprobes/opt.c @@ -178,7 +178,7 @@ static int copy_optimized_instructions(u8 *dest, u8 *src) while (len < RELATIVEJUMP_SIZE) { ret = __copy_instruction(dest + len, src + len); - if (!ret || !can_boost(dest + len)) + if (!ret || !can_boost(dest + len, src + len)) return -EINVAL; len += ret; } -- cgit v1.1 From 10bce8410607a18eb3adf5d2739db8c8593e110d Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Mon, 27 Feb 2017 23:50:58 +0100 Subject: x86/kdebugfs: Move boot params hierarchy under (debugfs)/x86/ ... since this is all x86-specific data and it makes sense to have it under x86/ logically instead of in the toplevel debugfs dir. 
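As background, arch_debugfs_dir is the dentry of the <debugfs>/x86 directory that the kernel creates early during boot. Hanging an entry off it instead of the debugfs top level looks roughly like this (an illustrative sketch with made-up names, not code from this patch):

#include <linux/debugfs.h>
#include <linux/errno.h>
#include <linux/init.h>

static u32 example_value;

static int __init example_debugfs_init(void)
{
	struct dentry *dir;

	/* Parent the new directory under <debugfs>/x86/ rather than
	 * the debugfs top-level directory. */
	dir = debugfs_create_dir("example", arch_debugfs_dir);
	if (!dir)
		return -ENOMEM;
	debugfs_create_u32("value", 0444, dir, &example_value);
	return 0;
}
fs_initcall(example_debugfs_init);
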
Signed-off-by: Borislav Petkov Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Link: http://lkml.kernel.org/r/20170227225058.27289-1-bp@alien8.de Signed-off-by: Ingo Molnar --- arch/x86/kernel/kdebugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c index bdb83e4..38b6458 100644 --- a/arch/x86/kernel/kdebugfs.c +++ b/arch/x86/kernel/kdebugfs.c @@ -167,7 +167,7 @@ static int __init boot_params_kdebugfs_init(void) struct dentry *dbp, *version, *data; int error = -ENOMEM; - dbp = debugfs_create_dir("boot_params", NULL); + dbp = debugfs_create_dir("boot_params", arch_debugfs_dir); if (!dbp) return -ENOMEM; -- cgit v1.1 From e7c95effcd3a7a0c9535c809141ca499fede2c31 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 28 Feb 2017 07:05:59 +0100 Subject: s390/crypt: fix missing unlock in ctr_paes_crypt on error path The ctr mode of protected key aes uses the ctrblk page if the ctrblk_lock could be acquired. If the protected key has to be reestablished and this operation fails, the unlock for the ctrblk_lock is missing. Add it. Signed-off-by: Martin Schwidefsky --- arch/s390/crypto/paes_s390.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/crypto/paes_s390.c b/arch/s390/crypto/paes_s390.c index d69ea49..716b172 100644 --- a/arch/s390/crypto/paes_s390.c +++ b/arch/s390/crypto/paes_s390.c @@ -474,8 +474,11 @@ static int ctr_paes_crypt(struct blkcipher_desc *desc, unsigned long modifier, ret = blkcipher_walk_done(desc, walk, nbytes - n); } if (k < n) { - if (__ctr_paes_set_key(ctx) != 0) + if (__ctr_paes_set_key(ctx) != 0) { + if (locked) + spin_unlock(&ctrblk_lock); return blkcipher_walk_done(desc, walk, -EIO); + } } } if (locked) -- cgit v1.1 From d9fcf2a1cbd1ff65d0109b1b400938808007fcd5 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Tue, 28 Feb 2017 07:42:01 +0100 Subject: s390: fix in-kernel program checks A program check inside the kernel takes a slightly different path in entry.S compared to a normal user fault. A recent change moved the store of the breaking event address into the path taken for in-kernel program checks as well, but %r14 has not been set up to point to the correct location. A wild store is the consequence. Move the store of the breaking event address to the code path for user space faults. 
Fixes: 34525e1f7e8d ("s390: store breaking event address only for program checks") Reported-by: Michael Holzheu Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/entry.S | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S index dff2152..6a7d737 100644 --- a/arch/s390/kernel/entry.S +++ b/arch/s390/kernel/entry.S @@ -490,7 +490,7 @@ ENTRY(pgm_check_handler) jnz .Lpgm_svcper # -> single stepped svc 1: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) - j 3f + j 4f 2: UPDATE_VTIME %r14,%r15,__LC_SYNC_ENTER_TIMER lg %r15,__LC_KERNEL_STACK lgr %r14,%r12 @@ -499,8 +499,8 @@ ENTRY(pgm_check_handler) tm __LC_PGM_ILC+2,0x02 # check for transaction abort jz 3f mvc __THREAD_trap_tdb(256,%r14),0(%r13) -3: la %r11,STACK_FRAME_OVERHEAD(%r15) - stg %r10,__THREAD_last_break(%r14) +3: stg %r10,__THREAD_last_break(%r14) +4: la %r11,STACK_FRAME_OVERHEAD(%r15) stmg %r0,%r7,__PT_R0(%r11) mvc __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC stmg %r8,%r9,__PT_PSW(%r11) @@ -509,14 +509,14 @@ ENTRY(pgm_check_handler) xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) stg %r10,__PT_ARGS(%r11) tm __LC_PGM_ILC+3,0x80 # check for per exception - jz 4f + jz 5f tmhh %r8,0x0001 # kernel per event ? jz .Lpgm_kprobe oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE mvc __THREAD_per_paid(1,%r14),__LC_PER_ACCESS_ID -4: REENABLE_IRQS +5: REENABLE_IRQS xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) larl %r1,pgm_check_table llgh %r10,__PT_INT_CODE+2(%r11) -- cgit v1.1 From e69ca822ce0ed3ba006ce384d7d205c81d92373f Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 1 Mar 2017 09:16:03 +0100 Subject: s390/cputime: remove last traces of cputime_t The cputime_t type is a thing of the past; replace the last occurrences of the type in the s390 code with a simple u64. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cputime.h | 14 ++------------ arch/s390/kernel/vtime.c | 2 +- 2 files changed, 3 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index d1c407d..e5672d6 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -8,32 +8,22 @@ #define _S390_CPUTIME_H #include -#include #define CPUTIME_PER_USEC 4096ULL #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) /* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ -typedef unsigned long long __nocast cputime_t; -typedef unsigned long long __nocast cputime64_t; - #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) -static inline unsigned long __div(unsigned long long n, unsigned long base) -{ - return n / base; -} - /* * Convert cputime to microseconds and back. 
*/ -static inline unsigned int cputime_to_usecs(const cputime_t cputime) +static inline u64 cputime_to_usecs(const u64 cputime) { - return (__force unsigned long long) cputime >> 12; + return cputime >> 12; } - u64 arch_cpu_idle_time(int cpu); #define arch_idle_time(cpu) arch_cpu_idle_time(cpu) diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 31bd96e..8f5f59a 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -111,7 +111,7 @@ static inline u64 scale_vtime(u64 vtime) } static void account_system_index_scaled(struct task_struct *p, - cputime_t cputime, cputime_t scaled, + u64 cputime, u64 scaled, enum cpu_usage_stat index) { p->stimescaled += cputime_to_nsecs(scaled); -- cgit v1.1 From 3c915bdc1775acfa214195da1ffb39dabdd1a389 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 1 Mar 2017 09:18:34 +0100 Subject: s390/cputime: reset all accounting fields on fork copy_thread has to reset all cputime related field in the task struct, not only user_timer and system_timer. Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/process.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c index 5428166..249deaf 100644 --- a/arch/s390/kernel/process.c +++ b/arch/s390/kernel/process.c @@ -121,7 +121,10 @@ int copy_thread_tls(unsigned long clone_flags, unsigned long new_stackp, clear_tsk_thread_flag(p, TIF_SINGLE_STEP); /* Initialize per thread user and system timer values */ p->thread.user_timer = 0; + p->thread.guest_timer = 0; p->thread.system_timer = 0; + p->thread.hardirq_timer = 0; + p->thread.softirq_timer = 0; frame->sf.back_chain = 0; /* new return point is ret_from_fork */ -- cgit v1.1 From e53051e757d6cd66741955b93581e54415e48a70 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 1 Mar 2017 09:21:10 +0100 Subject: s390/cputime: provide archicture specific cputime_to_nsecs The generic cputime_to_nsecs function first converts the cputime to micro-seconds and then multiplies the result with 1000. This looses some bits of accuracy, provide our own version of cputime_to_nsecs that does not loose precision. Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/cputime.h | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h index e5672d6..9072bf6 100644 --- a/arch/s390/include/asm/cputime.h +++ b/arch/s390/include/asm/cputime.h @@ -8,6 +8,7 @@ #define _S390_CPUTIME_H #include +#include #define CPUTIME_PER_USEC 4096ULL #define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC) @@ -17,13 +18,18 @@ #define cmpxchg_cputime(ptr, old, new) cmpxchg64(ptr, old, new) /* - * Convert cputime to microseconds and back. + * Convert cputime to microseconds. */ static inline u64 cputime_to_usecs(const u64 cputime) { return cputime >> 12; } +/* + * Convert cputime to nanoseconds. + */ +#define cputime_to_nsecs(cputime) tod_to_ns(cputime) + u64 arch_cpu_idle_time(int cpu); #define arch_idle_time(cpu) arch_cpu_idle_time(cpu) -- cgit v1.1 From d03bd0454b101adb94d0b5a9cc11396182943cb4 Mon Sep 17 00:00:00 2001 From: Martin Schwidefsky Date: Wed, 1 Mar 2017 09:47:57 +0100 Subject: s390/timex: micro optimization for tod_to_ns The conversion of a TOD value to nano-seconds currently uses a 32/32 bit split with the calculation for "nsecs = (TOD * 125) >> 9". Using a 55/9 bit split saves an instruction. 
Signed-off-by: Martin Schwidefsky --- arch/s390/include/asm/timex.h | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h index 354344d..1185351 100644 --- a/arch/s390/include/asm/timex.h +++ b/arch/s390/include/asm/timex.h @@ -206,20 +206,16 @@ static inline unsigned long long get_tod_clock_monotonic(void) * ns = (todval * 125) >> 9; * * In order to avoid an overflow with the multiplication we can rewrite this. - * With a split todval == 2^32 * th + tl (th upper 32 bits, tl lower 32 bits) + * With a split todval == 2^9 * th + tl (th upper 55 bits, tl lower 9 bits) * we end up with * - * ns = ((2^32 * th + tl) * 125 ) >> 9; - * -> ns = (2^23 * th * 125) + ((tl * 125) >> 9); + * ns = ((2^9 * th + tl) * 125 ) >> 9; + * -> ns = (th * 125) + ((tl * 125) >> 9); * */ static inline unsigned long long tod_to_ns(unsigned long long todval) { - unsigned long long ns; - - ns = ((todval >> 32) << 23) * 125; - ns += ((todval & 0xffffffff) * 125) >> 9; - return ns; + return ((todval >> 9) * 125) + (((todval & 0x1ff) * 125) >> 9); } #endif -- cgit v1.1 From bb3f0a52630c84807fca9bdd76ac2f5dcec82689 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Tue, 28 Feb 2017 13:50:52 +0800 Subject: x86/apic: Fix a warning message in logical CPU IDs allocation The current warning message in allocate_logical_cpuid() is somewhat confusing: Only 1 processors supported.Processor 2/0x2 and the rest are ignored. As it might imply that there's only one CPU in the system - while what we ran into here is a kernel limitation. Fix the warning message to clarify all that: APIC: NR_CPUS/possible_cpus limit of 2 reached. Processor 2/0x2 and the rest are ignored. ( Also update the error return from -1 to -EINVAL, which is the more canonical return value. ) Signed-off-by: Dou Liyang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bp@alien8.de Cc: nicstange@gmail.com Cc: wanpeng.li@hotmail.com Link: http://lkml.kernel.org/r/1488261052-25753-1-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/apic.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 4261b32..11088b8 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2062,10 +2062,10 @@ static int allocate_logical_cpuid(int apicid) /* Allocate a new cpuid. */ if (nr_logical_cpuids >= nr_cpu_ids) { - WARN_ONCE(1, "Only %d processors supported." + WARN_ONCE(1, "APIC: NR_CPUS/possible_cpus limit of %i reached. " "Processor %d/0x%x and the rest are ignored.\n", - nr_cpu_ids - 1, nr_logical_cpuids, apicid); - return -1; + nr_cpu_ids, nr_logical_cpuids, apicid); + return -EINVAL; } cpuid_to_apicid[nr_logical_cpuids] = apicid; -- cgit v1.1 From 11277aabcbbe13916151af897d29a5e9f71ca73f Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Thu, 23 Feb 2017 17:16:41 +0800 Subject: x86/apic: Simplify enable_IR_x2apic(), remove try_to_enable_IR() The following commit: 2e63ad4bd5dd ("x86/apic: Do not init irq remapping if ioapic is disabled") ... added a check for skipped IO-APIC setup to enable_IR_x2apic(), but this check is also duplicated in try_to_enable_IR() - and it will never succeed in calling irq_remapping_enable(). Remove the whole irq_remapping_enable() complication: if the IO-APIC is disabled we cannot enable IRQ remapping. 
Signed-off-by: Dou Liyang Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: bp@alien8.de Cc: nicstange@gmail.com Cc: wanpeng.li@hotmail.com Link: http://lkml.kernel.org/r/1487841401-1543-1-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Ingo Molnar --- arch/x86/kernel/apic/apic.c | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index 11088b8..aee7ded 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -1610,24 +1610,15 @@ static inline void try_to_enable_x2apic(int remap_mode) { } static inline void __x2apic_enable(void) { } #endif /* !CONFIG_X86_X2APIC */ -static int __init try_to_enable_IR(void) -{ -#ifdef CONFIG_X86_IO_APIC - if (!x2apic_enabled() && skip_ioapic_setup) { - pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); - return -1; - } -#endif - return irq_remapping_enable(); -} - void __init enable_IR_x2apic(void) { unsigned long flags; int ret, ir_stat; - if (skip_ioapic_setup) + if (skip_ioapic_setup) { + pr_info("Not enabling interrupt remapping due to skipped IO-APIC setup\n"); return; + } ir_stat = irq_remapping_prepare(); if (ir_stat < 0 && !x2apic_supported()) @@ -1645,7 +1636,7 @@ void __init enable_IR_x2apic(void) /* If irq_remapping_prepare() succeeded, try to enable it */ if (ir_stat >= 0) - ir_stat = try_to_enable_IR(); + ir_stat = irq_remapping_enable(); /* ir_stat contains the remap mode or an error code */ try_to_enable_x2apic(ir_stat); -- cgit v1.1 From 6b0b7551428e4caae1e2c023a529465a9a9ae2d4 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Thu, 16 Feb 2017 17:00:50 +1100 Subject: perf/core: Rename CONFIG_[UK]PROBE_EVENT to CONFIG_[UK]PROBE_EVENTS We have uses of CONFIG_UPROBE_EVENT and CONFIG_KPROBE_EVENT as well as CONFIG_UPROBE_EVENTS and CONFIG_KPROBE_EVENTS. Consistently use the plurals. Signed-off-by: Anton Blanchard Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: acme@kernel.org Cc: alexander.shishkin@linux.intel.com Cc: davem@davemloft.net Cc: sparclinux@vger.kernel.org Link: http://lkml.kernel.org/r/20170216060050.20866-1-anton@ozlabs.org Signed-off-by: Ingo Molnar --- arch/powerpc/configs/85xx/kmp204x_defconfig | 2 +- arch/s390/configs/default_defconfig | 2 +- arch/s390/configs/gcov_defconfig | 2 +- arch/s390/configs/performance_defconfig | 2 +- arch/s390/defconfig | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/configs/85xx/kmp204x_defconfig b/arch/powerpc/configs/85xx/kmp204x_defconfig index aaaaa60..34a4da2 100644 --- a/arch/powerpc/configs/85xx/kmp204x_defconfig +++ b/arch/powerpc/configs/85xx/kmp204x_defconfig @@ -210,7 +210,7 @@ CONFIG_DEBUG_SHIRQ=y CONFIG_DETECT_HUNG_TASK=y CONFIG_SCHEDSTATS=y CONFIG_RCU_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_CRYPTO_NULL=y CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_MD4=y diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig index 143b1e0..4b176fe 100644 --- a/arch/s390/configs/default_defconfig +++ b/arch/s390/configs/default_defconfig @@ -609,7 +609,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig index f05d2d6..0de46cc 100644 --- a/arch/s390/configs/gcov_defconfig +++ b/arch/s390/configs/gcov_defconfig @@ -560,7 +560,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig index 2358bf3..e167557 100644 --- a/arch/s390/configs/performance_defconfig +++ b/arch/s390/configs/performance_defconfig @@ -558,7 +558,7 @@ CONFIG_SCHED_TRACER=y CONFIG_FTRACE_SYSCALLS=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_HIST_TRIGGERS=y CONFIG_TRACE_ENUM_MAP_FILE=y diff --git a/arch/s390/defconfig b/arch/s390/defconfig index 68bfd09..97189db 100644 --- a/arch/s390/defconfig +++ b/arch/s390/defconfig @@ -179,7 +179,7 @@ CONFIG_FTRACE_SYSCALLS=y CONFIG_TRACER_SNAPSHOT_PER_CPU_SWAP=y CONFIG_STACK_TRACER=y CONFIG_BLK_DEV_IO_TRACE=y -CONFIG_UPROBE_EVENT=y +CONFIG_UPROBE_EVENTS=y CONFIG_FUNCTION_PROFILER=y CONFIG_TRACE_ENUM_MAP_FILE=y CONFIG_KPROBES_SANITY_TEST=y -- cgit v1.1 From 1b17c6df852851b40c3c27c66b8fa2fd99cf25d8 Mon Sep 17 00:00:00 2001 From: Andrew Banman Date: Fri, 17 Feb 2017 11:07:49 -0600 Subject: x86/platform/uv/BAU: Fix HUB errors by removing the initial write to the sw-ack register Writing to the software acknowledge clear register when there are no pending messages causes a HUB error to assert. The original intent of this write was to clear the pending bits before the start of operation, but this is an incorrect method and has been determined to be unnecessary. Signed-off-by: Andrew Banman Acked-by: Mike Travis Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. 
Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: akpm@linux-foundation.org Cc: rja@hpe.com Cc: sivanich@hpe.com Link: http://lkml.kernel.org/r/1487351269-181133-1-git-send-email-abanman@hpe.com Signed-off-by: Ingo Molnar --- arch/x86/platform/uv/tlb_uv.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c index 766d4d3..f25982c 100644 --- a/arch/x86/platform/uv/tlb_uv.c +++ b/arch/x86/platform/uv/tlb_uv.c @@ -1847,7 +1847,6 @@ static void pq_init(int node, int pnode) ops.write_payload_first(pnode, first); ops.write_payload_last(pnode, last); - ops.write_g_sw_ack(pnode, 0xffffUL); /* in effect, all msg_type's are set to MSG_NOOP */ memset(pqp, 0, sizeof(struct bau_pq_entry) * DEST_Q_SIZE); -- cgit v1.1 From 90b28ded88dda8bea82b4a86923e73ba0746d884 Mon Sep 17 00:00:00 2001 From: Matjaz Hegedic Date: Sun, 19 Feb 2017 19:32:48 +0100 Subject: x86/reboot/quirks: Add ASUS EeeBook X205TA reboot quirk Without the parameter reboot=a, ASUS EeeBook X205TA will hang when it should reboot. This adds the appropriate quirk, thus fixing the problem. Signed-off-by: Matjaz Hegedic Cc: Andy Lutomirski Cc: Borislav Petkov Cc: Brian Gerst Cc: Denys Vlasenko Cc: H. Peter Anvin Cc: Josh Poimboeuf Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Signed-off-by: Ingo Molnar --- arch/x86/kernel/reboot.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index e244c19..01c9cda 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -223,6 +223,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_BOARD_NAME, "P4S800"), }, }, + { /* Handle problems with rebooting on ASUS EeeBook X205TA */ + .callback = set_acpi_reboot, + .ident = "ASUS EeeBook X205TA", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), + }, + }, /* Certec */ { /* Handle problems with rebooting on Certec BPC600 */ -- cgit v1.1 From 73667e31a153a66da97feb1584726222504924f8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Feb 2017 22:17:17 +0100 Subject: x86/hyperv: Hide unused label This new 32-bit warning just showed up: arch/x86/hyperv/hv_init.c: In function 'hyperv_init': arch/x86/hyperv/hv_init.c:167:1: error: label 'register_msr_cs' defined but not used [-Werror=unused-label] The easiest solution is to move the label up into the existing #ifdef that has the goto. Fixes: dee863b571b0 ("hv: export current Hyper-V clocksource") Signed-off-by: Arnd Bergmann Acked-by: Stephen Hemminger Cc: Greg Kroah-Hartman Cc: Haiyang Zhang Cc: devel@linuxdriverproject.org Cc: Vitaly Kuznetsov Cc: "K. Y. Srinivasan" Link: http://lkml.kernel.org/r/20170214211736.2641241-1-arnd@arndb.de Signed-off-by: Thomas Gleixner --- arch/x86/hyperv/hv_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/hyperv/hv_init.c b/arch/x86/hyperv/hv_init.c index db64baf0..8bef70e 100644 --- a/arch/x86/hyperv/hv_init.c +++ b/arch/x86/hyperv/hv_init.c @@ -158,13 +158,13 @@ void hyperv_init(void) clocksource_register_hz(&hyperv_cs_tsc, NSEC_PER_SEC/100); return; } +register_msr_cs: #endif /* * For 32 bit guests just use the MSR based mechanism for reading * the partition counter. 
*/ -register_msr_cs: hyperv_cs = &hyperv_cs_msr; if (ms_hyperv.features & HV_X64_MSR_TIME_REF_COUNT_AVAILABLE) clocksource_register_hz(&hyperv_cs_msr, NSEC_PER_SEC/100); -- cgit v1.1 From aa5ec3f715d576353c7d8f4f8085634bd845b73f Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Mon, 27 Feb 2017 21:29:22 +0900 Subject: x86/vmware: Remove duplicate inclusion of asm/timer.h Signed-off-by: Masanari Iida Cc: akataria@vmware.com Cc: virtualization@lists.linux-foundation.org Link: http://lkml.kernel.org/r/20170227122922.26230-1-standby24x7@gmail.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/vmware.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c index 891f4da..22403a2 100644 --- a/arch/x86/kernel/cpu/vmware.c +++ b/arch/x86/kernel/cpu/vmware.c @@ -30,7 +30,6 @@ #include #include #include -#include #undef pr_fmt #define pr_fmt(fmt) "vmware: " fmt -- cgit v1.1 From e86a2d2d34e20289ae7d46692e16d43c3a785db9 Mon Sep 17 00:00:00 2001 From: Masanari Iida Date: Mon, 27 Feb 2017 22:07:03 +0900 Subject: x86/intel_rdt: Remove duplicate inclusion of linux/cpu.h Signed-off-by: Masanari Iida Cc: fenghua.yu@intel.com Link: http://lkml.kernel.org/r/20170227130703.26968-1-standby24x7@gmail.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 1 - 1 file changed, 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index 8af04afd..759577d 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -27,7 +27,6 @@ #include #include #include -#include #include #include -- cgit v1.1 From 153654dbe595a68845ba14d5b0bfe299fa6a7e99 Mon Sep 17 00:00:00 2001 From: Rui Wang Date: Tue, 28 Feb 2017 21:34:28 +0800 Subject: x86/PCI: Implement pcibios_release_device to release IRQ from IOAPIC The revert of 991de2e59090 ("PCI, x86: Implement pcibios_alloc_irq() and pcibios_free_irq()") causes a problem for IOAPIC hotplug. The problem is that IRQs are allocated and freed in pci_enable_device() and pci_disable_device(). But there are some drivers which don't call pci_disable_device(), and they have good reasons for not calling it, so if they're using IOAPIC their IRQs won't have a chance to be released from the IOAPIC. When this happens, IOAPIC hot-removal fails with a kernel stack dump and an error message like this: [149335.697989] pin16 on IOAPIC2 is still in use. It turns out that we can fix it in a different way without moving IRQ allocation into pcibios_alloc_irq(), thus avoiding the regression of 991de2e59090. We can keep the allocation and freeing of IRQs as is within pci_enable_device()/pci_disable_device(), without breaking any previous assumption of the rest of the system, keeping compatibility with both the legacy and the modern drivers. We can accomplish this by implementing the existing __weak hook pcibios_release_device(), so that when a pci device is about to be deleted we get notified in the hook and take the chance to release its IRQ, if any, from the IOAPIC. Implement pcibios_release_device() for x86 to release any IRQ not released by the driver. 
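For context, pcibios_release_device() is one of the __weak hooks the PCI core exposes: the core provides an empty default, an architecture may override it, and the core calls it when the device is released. The pattern, roughly (an illustrative, simplified sketch of the core side, not code from this patch):

/* Default in the PCI core; an architecture's non-weak definition,
 * like the x86 one below, takes precedence at link time. */
void __weak pcibios_release_device(struct pci_dev *dev)
{
}

/* Simplified core release path, run when the last reference
 * to the pci_dev is dropped: */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	pcibios_release_device(pci_dev);
	/* ... release capabilities and free the pci_dev ... */
}
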
Signed-off-by: Rui Wang Cc: tony.luck@intel.com Cc: linux-pci@vger.kernel.org Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: fengguang.wu@intel.com Cc: helgaas@kernel.org Cc: kbuild-all@01.org Cc: bhelgaas@google.com Link: http://lkml.kernel.org/r/1488288869-31290-2-git-send-email-rui.y.wang@intel.com Signed-off-by: Thomas Gleixner --- arch/x86/pci/common.c | 9 +++++++++ 1 file changed, 9 insertions(+) (limited to 'arch') diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c index 0cb52ae..190e718 100644 --- a/arch/x86/pci/common.c +++ b/arch/x86/pci/common.c @@ -735,6 +735,15 @@ void pcibios_disable_device (struct pci_dev *dev) pcibios_disable_irq(dev); } +#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC +void pcibios_release_device(struct pci_dev *dev) +{ + if (atomic_dec_return(&dev->enable_cnt) >= 0) + pcibios_disable_device(dev); + +} +#endif + int pci_ext_cfg_avail(void) { if (raw_pci_ext_ops) -- cgit v1.1 From 58ab9a088ddac4efe823471275859d64f735577e Mon Sep 17 00:00:00 2001 From: Dave Hansen Date: Thu, 23 Feb 2017 14:26:03 -0800 Subject: x86/pkeys: Check against max pkey to avoid overflows Kirill reported a warning from UBSAN about undefined behavior when using protection keys. He is running on hardware that actually has support for it, which is not widely available. The warning triggers because of very large shifts of integers when doing a pkey_free() of a large, invalid value. This happens because we never check that the pkey "fits" into the mm_pkey_allocation_map(). I do not believe there is any danger here of anything bad happening other than some aliasing issues where somebody could do: pkey_free(35); and the kernel would effectively execute: pkey_free(8); While this might be confusing to an app that was doing something stupid, it has to do something stupid and the effects are limited to the app shooting itself in the foot. Signed-off-by: Dave Hansen Cc: stable@vger.kernel.org Cc: linux-kselftest@vger.kernel.org Cc: shuah@kernel.org Cc: kirill.shutemov@linux.intel.com Link: http://lkml.kernel.org/r/20170223222603.A022ED65@viggo.jf.intel.com Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/pkeys.h | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h index 34684ad..b3b09b9 100644 --- a/arch/x86/include/asm/pkeys.h +++ b/arch/x86/include/asm/pkeys.h @@ -46,6 +46,15 @@ extern int __arch_set_user_pkey_access(struct task_struct *tsk, int pkey, static inline bool mm_pkey_is_allocated(struct mm_struct *mm, int pkey) { + /* + * "Allocated" pkeys are those that have been returned + * from pkey_alloc(). pkey 0 is special, and never + * returned from pkey_alloc(). + */ + if (pkey <= 0) + return false; + if (pkey >= arch_max_pkey()) + return false; return mm_pkey_allocation_map(mm) & (1U << pkey); } @@ -82,12 +91,6 @@ int mm_pkey_alloc(struct mm_struct *mm) static inline int mm_pkey_free(struct mm_struct *mm, int pkey) { - /* - * pkey 0 is special, always allocated and can never - * be freed. - */ - if (!pkey) - return -EINVAL; if (!mm_pkey_is_allocated(mm, pkey)) return -EINVAL; -- cgit v1.1 From 940b2f2fd963c043418ce8af64605783b2b19140 Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Sat, 18 Feb 2017 12:31:40 +0100 Subject: x86/events: Remove last remnants of old filenames Update to the new file paths, remove them from introductory comments. 
Signed-off-by: Borislav Petkov Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170218113140.8051-1-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/events/amd/core.c | 2 +- arch/x86/events/intel/cstate.c | 2 +- arch/x86/events/intel/rapl.c | 2 +- arch/x86/events/intel/uncore.h | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c index afb222b..c84584b 100644 --- a/arch/x86/events/amd/core.c +++ b/arch/x86/events/amd/core.c @@ -604,7 +604,7 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, int idx, return &amd_f15_PMC20; } case AMD_EVENT_NB: - /* moved to perf_event_amd_uncore.c */ + /* moved to uncore.c */ return &emptyconstraint; default: return &emptyconstraint; diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c index aff4b5b..238ae32 100644 --- a/arch/x86/events/intel/cstate.c +++ b/arch/x86/events/intel/cstate.c @@ -1,5 +1,5 @@ /* - * perf_event_intel_cstate.c: support cstate residency counters + * Support cstate residency counters * * Copyright (C) 2015, Intel Corp. * Author: Kan Liang (kan.liang@intel.com) diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c index 22054ca..9d05c7e 100644 --- a/arch/x86/events/intel/rapl.c +++ b/arch/x86/events/intel/rapl.c @@ -1,5 +1,5 @@ /* - * perf_event_intel_rapl.c: support Intel RAPL energy consumption counters + * Support Intel RAPL energy consumption counters * Copyright (C) 2013 Google, Inc., Stephane Eranian * * Intel RAPL interface is specified in the IA-32 Manual Vol3b diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h index ad986c1..df5989f 100644 --- a/arch/x86/events/intel/uncore.h +++ b/arch/x86/events/intel/uncore.h @@ -360,7 +360,7 @@ extern struct list_head pci2phy_map_head; extern struct pci_extra_dev *uncore_extra_pci_dev; extern struct event_constraint uncore_constraint_empty; -/* perf_event_intel_uncore_snb.c */ +/* uncore_snb.c */ int snb_uncore_pci_init(void); int ivb_uncore_pci_init(void); int hsw_uncore_pci_init(void); @@ -371,7 +371,7 @@ void nhm_uncore_cpu_init(void); void skl_uncore_cpu_init(void); int snb_pci2phy_map_init(int devid); -/* perf_event_intel_uncore_snbep.c */ +/* uncore_snbep.c */ int snbep_uncore_pci_init(void); void snbep_uncore_cpu_init(void); int ivbep_uncore_pci_init(void); @@ -385,5 +385,5 @@ void knl_uncore_cpu_init(void); int skx_uncore_pci_init(void); void skx_uncore_cpu_init(void); -/* perf_event_intel_uncore_nhmex.c */ +/* uncore_nhmex.c */ void nhmex_uncore_cpu_init(void); -- cgit v1.1 From 72042a8c7b01048a36ece216aaf206b7d60ca661 Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 20 Feb 2017 10:12:35 +1100 Subject: x86/purgatory: Make functions and variables static Sparse emits several 'symbol not declared' warnings for various functions and variables. Add static keyword to functions and variables which have file scope only. Signed-off-by: Tobin C. 
Harding Link: http://lkml.kernel.org/r/1487545956-2547-2-git-send-email-me@tobin.cc Signed-off-by: Thomas Gleixner --- arch/x86/purgatory/purgatory.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 25e068b..2a5f437 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -18,11 +18,11 @@ struct sha_region { unsigned long len; }; -unsigned long backup_dest = 0; -unsigned long backup_src = 0; -unsigned long backup_sz = 0; +static unsigned long backup_dest; +static unsigned long backup_src; +static unsigned long backup_sz; -u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; +static u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; struct sha_region sha_regions[16] = {}; @@ -39,7 +39,7 @@ static int copy_backup_region(void) return 0; } -int verify_sha256_digest(void) +static int verify_sha256_digest(void) { struct sha_region *ptr, *end; u8 digest[SHA256_DIGEST_SIZE]; -- cgit v1.1 From e98fe5127b9cc8ab33d1094b81c19deb1f9082bf Mon Sep 17 00:00:00 2001 From: "Tobin C. Harding" Date: Mon, 20 Feb 2017 10:12:36 +1100 Subject: x86/purgatory: Fix sparse warning, symbol not declared Sparse emits a 'symbol not declared' warning for a function that has neither file scope nor a forward declaration. The function's only call site is an ASM file. Add a header file with the function declaration. Include the header file in the C source file defining the function in order to fix the sparse warning. Include the header file in the ASM file containing the call site to document the usage. Signed-off-by: Tobin C. Harding Link: http://lkml.kernel.org/r/1487545956-2547-3-git-send-email-me@tobin.cc Signed-off-by: Thomas Gleixner --- arch/x86/purgatory/purgatory.c | 1 + arch/x86/purgatory/purgatory.h | 8 ++++++++ arch/x86/purgatory/setup-x86_64.S | 1 + 3 files changed, 10 insertions(+) create mode 100644 arch/x86/purgatory/purgatory.h (limited to 'arch') diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index 2a5f437..b6d5c89 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -11,6 +11,7 @@ */ #include "sha256.h" +#include "purgatory.h" #include "../boot/string.h" struct sha_region { diff --git a/arch/x86/purgatory/purgatory.h b/arch/x86/purgatory/purgatory.h new file mode 100644 index 0000000..e2e365a --- /dev/null +++ b/arch/x86/purgatory/purgatory.h @@ -0,0 +1,8 @@ +#ifndef PURGATORY_H +#define PURGATORY_H + +#ifndef __ASSEMBLY__ +extern void purgatory(void); +#endif /* __ASSEMBLY__ */ + +#endif /* PURGATORY_H */ diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S index fe3c91b..f90e9df 100644 --- a/arch/x86/purgatory/setup-x86_64.S +++ b/arch/x86/purgatory/setup-x86_64.S @@ -9,6 +9,7 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ +#include "purgatory.h" .text .globl purgatory_start -- cgit v1.1 From 8392f16d38bb5222c03073a3906b7fd272386faf Mon Sep 17 00:00:00 2001 From: Borislav Petkov Date: Tue, 21 Feb 2017 19:36:39 +0100 Subject: x86/boot: Correct setup_header.start_sys name It is called start_sys_seg elsewhere so rename it to that. It is an obsolete field so we could just as well directly call it __u16 __pad... No functional change. 
Signed-off-by: Borislav Petkov Link: http://lkml.kernel.org/r/20170221183639.16554-1-bp@alien8.de Signed-off-by: Thomas Gleixner --- arch/x86/include/uapi/asm/bootparam.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/uapi/asm/bootparam.h b/arch/x86/include/uapi/asm/bootparam.h index 5138dac..07244ea 100644 --- a/arch/x86/include/uapi/asm/bootparam.h +++ b/arch/x86/include/uapi/asm/bootparam.h @@ -58,7 +58,7 @@ struct setup_header { __u32 header; __u16 version; __u32 realmode_swtch; - __u16 start_sys; + __u16 start_sys_seg; __u16 kernel_version; __u8 type_of_loader; __u8 loadflags; -- cgit v1.1 From f94c8d116997597fc00f0812b0ab9256e7b0c58f Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Wed, 1 Mar 2017 15:53:38 +0100 Subject: sched/clock, x86/tsc: Rework the x86 'unstable' sched_clock() interface Wanpeng Li reported that since the following commit: acb04058de49 ("sched/clock: Fix hotplug crash") ... KVM always runs with unstable sched-clock even though KVM's kvm_clock _is_ stable. The problem is that we've tied clear_sched_clock_stable() to the TSC state, and overlooked that sched_clock() is a paravirt function. Solve this by doing two things: - tie the sched_clock() stable state more clearly to the TSC stable state for the normal (!paravirt) case. - only call clear_sched_clock_stable() when we mark TSC unstable when we use native_sched_clock(). The first means we can actually run with stable sched_clock in more situations than before, which is good. And since commit: 12907fbb1a69 ("sched/clock, clocksource: Add optional cs::mark_unstable() method") ... this should be reliable, since any detection of TSC failure now results in marking the TSC unstable. Reported-by: Wanpeng Li Signed-off-by: Peter Zijlstra (Intel) Cc: Borislav Petkov Cc: Linus Torvalds Cc: Mike Galbraith Cc: Paolo Bonzini Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Fixes: acb04058de49 ("sched/clock: Fix hotplug crash") Signed-off-by: Ingo Molnar --- arch/x86/kernel/cpu/amd.c | 4 ---- arch/x86/kernel/cpu/centaur.c | 2 -- arch/x86/kernel/cpu/common.c | 3 --- arch/x86/kernel/cpu/cyrix.c | 1 - arch/x86/kernel/cpu/intel.c | 4 ---- arch/x86/kernel/cpu/transmeta.c | 2 -- arch/x86/kernel/tsc.c | 35 +++++++++++++++++++++++------------ 7 files changed, 23 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index 4e95b2e..30d924a 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -555,10 +555,6 @@ static void early_init_amd(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (check_tsc_unstable()) - clear_sched_clock_stable(); - } else { - clear_sched_clock_stable(); } /* Bit 12 of 8000_0007 edx is accumulated power mechanism. 
*/ diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c index 2c234a6..bad8ff0 100644 --- a/arch/x86/kernel/cpu/centaur.c +++ b/arch/x86/kernel/cpu/centaur.c @@ -104,8 +104,6 @@ static void early_init_centaur(struct cpuinfo_x86 *c) #ifdef CONFIG_X86_64 set_cpu_cap(c, X86_FEATURE_SYSENTER32); #endif - - clear_sched_clock_stable(); } static void init_centaur(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index c64ca592..9d98e2e 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -86,7 +86,6 @@ static void default_init(struct cpuinfo_x86 *c) strcpy(c->x86_model_id, "386"); } #endif - clear_sched_clock_stable(); } static const struct cpu_dev default_cpu = { @@ -1075,8 +1074,6 @@ static void identify_cpu(struct cpuinfo_x86 *c) */ if (this_cpu->c_init) this_cpu->c_init(c); - else - clear_sched_clock_stable(); /* Disable the PN if appropriate */ squash_the_stupid_serial_number(c); diff --git a/arch/x86/kernel/cpu/cyrix.c b/arch/x86/kernel/cpu/cyrix.c index 47416f9..31e6792 100644 --- a/arch/x86/kernel/cpu/cyrix.c +++ b/arch/x86/kernel/cpu/cyrix.c @@ -184,7 +184,6 @@ static void early_init_cyrix(struct cpuinfo_x86 *c) set_cpu_cap(c, X86_FEATURE_CYRIX_ARR); break; } - clear_sched_clock_stable(); } static void init_cyrix(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c index 017ecd3..2388baf 100644 --- a/arch/x86/kernel/cpu/intel.c +++ b/arch/x86/kernel/cpu/intel.c @@ -161,10 +161,6 @@ static void early_init_intel(struct cpuinfo_x86 *c) if (c->x86_power & (1 << 8)) { set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC); set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC); - if (check_tsc_unstable()) - clear_sched_clock_stable(); - } else { - clear_sched_clock_stable(); } /* Penwell and Cloverview have the TSC which doesn't sleep on S3 */ diff --git a/arch/x86/kernel/cpu/transmeta.c b/arch/x86/kernel/cpu/transmeta.c index c1ea5b9..6a9ad73 100644 --- a/arch/x86/kernel/cpu/transmeta.c +++ b/arch/x86/kernel/cpu/transmeta.c @@ -15,8 +15,6 @@ static void early_init_transmeta(struct cpuinfo_x86 *c) if (xlvl >= 0x80860001) c->x86_capability[CPUID_8086_0001_EDX] = cpuid_edx(0x80860001); } - - clear_sched_clock_stable(); } static void init_transmeta(struct cpuinfo_x86 *c) diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 2724dc8..911129f 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -326,9 +326,16 @@ unsigned long long sched_clock(void) { return paravirt_sched_clock(); } + +static inline bool using_native_sched_clock(void) +{ + return pv_time_ops.sched_clock == native_sched_clock; +} #else unsigned long long sched_clock(void) __attribute__((alias("native_sched_clock"))); + +static inline bool using_native_sched_clock(void) { return true; } #endif int check_tsc_unstable(void) @@ -1111,8 +1118,10 @@ static void tsc_cs_mark_unstable(struct clocksource *cs) { if (tsc_unstable) return; + tsc_unstable = 1; - clear_sched_clock_stable(); + if (using_native_sched_clock()) + clear_sched_clock_stable(); disable_sched_clock_irqtime(); pr_info("Marking TSC unstable due to clocksource watchdog\n"); } @@ -1134,18 +1143,20 @@ static struct clocksource clocksource_tsc = { void mark_tsc_unstable(char *reason) { - if (!tsc_unstable) { - tsc_unstable = 1; + if (tsc_unstable) + return; + + tsc_unstable = 1; + if (using_native_sched_clock()) clear_sched_clock_stable(); - disable_sched_clock_irqtime(); - pr_info("Marking TSC unstable due to %s\n", reason); - /* Change only the 
rating, when not registered */ - if (clocksource_tsc.mult) - clocksource_mark_unstable(&clocksource_tsc); - else { - clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; - clocksource_tsc.rating = 0; - } + disable_sched_clock_irqtime(); + pr_info("Marking TSC unstable due to %s\n", reason); + /* Change only the rating, when not registered */ + if (clocksource_tsc.mult) { + clocksource_mark_unstable(&clocksource_tsc); + } else { + clocksource_tsc.flags |= CLOCK_SOURCE_UNSTABLE; + clocksource_tsc.rating = 0; } } -- cgit v1.1 From bb1a2c26165640ba2cbcfe06c81e9f9d6db4e643 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 1 Mar 2017 21:10:17 +0100 Subject: x86/hpet: Prevent might sleep splat on resume Sergey reported a might sleep warning triggered from the hpet resume path. It's caused by the call to disable_irq() from interrupt disabled context. The problem with the low level resume code is that it is not accounted as a special system_state like we do during the boot process. Calling the same code during system boot would not trigger the warning. That's inconsistent at best. In this particular case it's trivial to replace the disable_irq() with disable_hardirq() because this particular code path is solely used from system resume and the involved hpet interrupts can never be force threaded. Reported-and-tested-by: Sergey Senozhatsky Signed-off-by: Thomas Gleixner Cc: Peter Zijlstra Cc: "Rafael J. Wysocki" Cc: Sergey Senozhatsky Cc: Borislav Petkov Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1703012108460.3684@nanos Signed-off-by: Thomas Gleixner --- arch/x86/kernel/hpet.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c index dc6ba5b..89ff7af 100644 --- a/arch/x86/kernel/hpet.c +++ b/arch/x86/kernel/hpet.c @@ -354,7 +354,7 @@ static int hpet_resume(struct clock_event_device *evt, int timer) irq_domain_deactivate_irq(irq_get_irq_data(hdev->irq)); irq_domain_activate_irq(irq_get_irq_data(hdev->irq)); - disable_irq(hdev->irq); + disable_hardirq(hdev->irq); irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu)); enable_irq(hdev->irq); } -- cgit v1.1 From 7afbeb6df2aa5f9e3a0fc228817a85c16dea0faa Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Fri, 24 Feb 2017 12:56:12 +0100 Subject: s390/ipl: always use load normal for CCW-type re-IPL commit 14890678687c ("s390/ipl: use load normal for LPAR re-ipl") failed to convert one code path to use load normal semantics for re-IPL. Convert the missing code path as well. Fixes: 14890678687c ("s390/ipl: use load normal for LPAR re-ipl") Reported-by: Michael Holzheu Acked-by: Michael Holzheu Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/kernel/ipl.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c index b67dafb..e545ffe 100644 --- a/arch/s390/kernel/ipl.c +++ b/arch/s390/kernel/ipl.c @@ -564,6 +564,8 @@ static struct kset *ipl_kset; static void __ipl_run(void *unused) { + if (MACHINE_IS_LPAR && ipl_info.type == IPL_TYPE_CCW) + diag308(DIAG308_LOAD_NORMAL_DUMP, NULL); diag308(DIAG308_LOAD_CLEAR, NULL); if (MACHINE_IS_VM) __cpcmd("IPL", NULL, 0, NULL); -- cgit v1.1 From 2e4d88009f57057df7672fa69a32b5224af54d37 Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Thu, 2 Mar 2017 15:23:42 +0100 Subject: KVM: s390: Fix guest migration for huge guests resulting in panic While we technically cannot run huge page guests right now, we can set up a guest with huge pages.
Trying to migrate it will trigger a VM_BUG_ON and, if the kernel is not configured to panic on a BUG, it will happily try to work on non-existing page table entries. With this patch, we always return "dirty" if we encounter a large page when migrating. This at least fixes the immediate problem until we have proper handling for both kinds of pages. Fixes: 15f36eb ("KVM: s390: Add proper dirty bitmap support to S390 kvm.") Cc: # 3.16+ Signed-off-by: Janosch Frank Acked-by: Christian Borntraeger Signed-off-by: Martin Schwidefsky --- arch/s390/mm/pgtable.c | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index b48dc5f..463e5ef 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -608,12 +608,29 @@ void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep) bool test_and_clear_guest_dirty(struct mm_struct *mm, unsigned long addr) { spinlock_t *ptl; + pgd_t *pgd; + pud_t *pud; + pmd_t *pmd; pgste_t pgste; pte_t *ptep; pte_t pte; bool dirty; - ptep = get_locked_pte(mm, addr, &ptl); + pgd = pgd_offset(mm, addr); + pud = pud_alloc(mm, pgd, addr); + if (!pud) + return false; + pmd = pmd_alloc(mm, pud, addr); + if (!pmd) + return false; + /* We can't run guests backed by huge pages, but userspace can + * still set them up and then try to migrate them without any + * migration support. + */ + if (pmd_large(*pmd)) + return true; + + ptep = pte_alloc_map_lock(mm, pmd, addr, &ptl); if (unlikely(!ptep)) return false; -- cgit v1.1 From e148bd17f48bd17fca2f4f089ec879fa6e47e34c Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 14 Feb 2017 14:46:42 +0530 Subject: powerpc: Emulation support for load/store instructions on LE emulate_step() uses a number of underlying kernel functions that were initially not enabled for LE. This has been rectified since. So, fix emulate_step() for LE for the corresponding instructions.
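[note: for readers coming to this from the diff, a minimal sketch of how a caller typically uses emulate_step(); every name here other than emulate_step() and pt_regs is hypothetical and not part of this patch:]

/*
 * emulate_step() returns 1 when it emulated the instruction (regs,
 * including the NIP, already advanced) and 0 when the caller must
 * fall back to hardware single-stepping.
 */
static int step_over_instruction(struct pt_regs *regs, unsigned int instr)
{
	if (emulate_step(regs, instr) == 1)
		return 0;			/* emulated in software */
	return arrange_single_step(regs);	/* hypothetical fallback */
}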
Cc: stable@vger.kernel.org # v3.18+ Reported-by: Anton Blanchard Signed-off-by: Ravi Bangoria Signed-off-by: Michael Ellerman --- arch/powerpc/lib/sstep.c | 20 -------------------- 1 file changed, 20 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index 846dba2..9c542ec 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c @@ -1799,8 +1799,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto instr_done; case LARX: - if (regs->msr & MSR_LE) - return 0; if (op.ea & (size - 1)) break; /* can't handle misaligned */ if (!address_ok(regs, op.ea, size)) @@ -1823,8 +1821,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto ldst_done; case STCX: - if (regs->msr & MSR_LE) - return 0; if (op.ea & (size - 1)) break; /* can't handle misaligned */ if (!address_ok(regs, op.ea, size)) @@ -1849,8 +1845,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto ldst_done; case LOAD: - if (regs->msr & MSR_LE) - return 0; err = read_mem(&regs->gpr[op.reg], op.ea, size, regs); if (!err) { if (op.type & SIGNEXT) @@ -1862,8 +1856,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #ifdef CONFIG_PPC_FPU case LOAD_FP: - if (regs->msr & MSR_LE) - return 0; if (size == 4) err = do_fp_load(op.reg, do_lfs, op.ea, size, regs); else @@ -1872,15 +1864,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #endif #ifdef CONFIG_ALTIVEC case LOAD_VMX: - if (regs->msr & MSR_LE) - return 0; err = do_vec_load(op.reg, do_lvx, op.ea & ~0xfUL, regs); goto ldst_done; #endif #ifdef CONFIG_VSX case LOAD_VSX: - if (regs->msr & MSR_LE) - return 0; err = do_vsx_load(op.reg, do_lxvd2x, op.ea, regs); goto ldst_done; #endif @@ -1903,8 +1891,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) goto instr_done; case STORE: - if (regs->msr & MSR_LE) - return 0; if ((op.type & UPDATE) && size == sizeof(long) && op.reg == 1 && op.update_reg == 1 && !(regs->msr & MSR_PR) && @@ -1917,8 +1903,6 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #ifdef CONFIG_PPC_FPU case STORE_FP: - if (regs->msr & MSR_LE) - return 0; if (size == 4) err = do_fp_store(op.reg, do_stfs, op.ea, size, regs); else @@ -1927,15 +1911,11 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) #endif #ifdef CONFIG_ALTIVEC case STORE_VMX: - if (regs->msr & MSR_LE) - return 0; err = do_vec_store(op.reg, do_stvx, op.ea & ~0xfUL, regs); goto ldst_done; #endif #ifdef CONFIG_VSX case STORE_VSX: - if (regs->msr & MSR_LE) - return 0; err = do_vsx_store(op.reg, do_stxvd2x, op.ea, regs); goto ldst_done; #endif -- cgit v1.1 From 4ceae137bdab2232e57b2e6fd5631a22a0160938 Mon Sep 17 00:00:00 2001 From: Ravi Bangoria Date: Tue, 14 Feb 2017 14:46:43 +0530 Subject: powerpc: emulate_step() tests for load/store instructions Add a new selftest that tests emulate_step for Normal, Floating Point, Vector and Vector Scalar load/store instructions. The test runs at boot time if CONFIG_KPROBES_SANITY_TEST and CONFIG_PPC64 are set. Sample log: emulate_step_test: ld : PASS emulate_step_test: lwz : PASS emulate_step_test: lwzx : PASS emulate_step_test: std : PASS emulate_step_test: ldarx / stdcx.
: PASS emulate_step_test: lfsx : PASS emulate_step_test: stfsx : PASS emulate_step_test: lfdx : PASS emulate_step_test: stfdx : PASS emulate_step_test: lvx : PASS emulate_step_test: stvx : PASS emulate_step_test: lxvd2x : PASS emulate_step_test: stxvd2x : PASS Signed-off-by: Ravi Bangoria [mpe: Drop start/complete lines, make it all __init] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/ppc-opcode.h | 7 + arch/powerpc/lib/Makefile | 1 + arch/powerpc/lib/test_emulate_step.c | 434 ++++++++++++++++++++++++++++++++++ 3 files changed, 442 insertions(+) create mode 100644 arch/powerpc/lib/test_emulate_step.c (limited to 'arch') diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index d99bd44..e7d6d86 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h @@ -284,6 +284,13 @@ #define PPC_INST_BRANCH_COND 0x40800000 #define PPC_INST_LBZCIX 0x7c0006aa #define PPC_INST_STBCIX 0x7c0007aa +#define PPC_INST_LWZX 0x7c00002e +#define PPC_INST_LFSX 0x7c00042e +#define PPC_INST_STFSX 0x7c00052e +#define PPC_INST_LFDX 0x7c0004ae +#define PPC_INST_STFDX 0x7c0005ae +#define PPC_INST_LVX 0x7c0000ce +#define PPC_INST_STVX 0x7c0001ce /* macros to insert fields into opcodes */ #define ___PPC_RA(a) (((a) & 0x1f) << 16) diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 0e649d7..2b5e090 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -20,6 +20,7 @@ obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ obj64-$(CONFIG_SMP) += locks.o obj64-$(CONFIG_ALTIVEC) += vmx-helper.o +obj64-$(CONFIG_KPROBES_SANITY_TEST) += test_emulate_step.o obj-y += checksum_$(BITS).o checksum_wrappers.o diff --git a/arch/powerpc/lib/test_emulate_step.c b/arch/powerpc/lib/test_emulate_step.c new file mode 100644 index 0000000..2534c14 --- /dev/null +++ b/arch/powerpc/lib/test_emulate_step.c @@ -0,0 +1,434 @@ +/* + * Simple sanity test for emulate_step load/store instructions. + * + * Copyright IBM Corp. 2016 + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#define pr_fmt(fmt) "emulate_step_test: " fmt + +#include +#include +#include + +#define IMM_L(i) ((uintptr_t)(i) & 0xffff) + +/* + * Defined with TEST_ prefix so it does not conflict with other + * definitions. 
+ */ +#define TEST_LD(r, base, i) (PPC_INST_LD | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) +#define TEST_LWZ(r, base, i) (PPC_INST_LWZ | ___PPC_RT(r) | \ + ___PPC_RA(base) | IMM_L(i)) +#define TEST_LWZX(t, a, b) (PPC_INST_LWZX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STD(r, base, i) (PPC_INST_STD | ___PPC_RS(r) | \ + ___PPC_RA(base) | ((i) & 0xfffc)) +#define TEST_LDARX(t, a, b, eh) (PPC_INST_LDARX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b) | \ + __PPC_EH(eh)) +#define TEST_STDCX(s, a, b) (PPC_INST_STDCX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LFSX(t, a, b) (PPC_INST_LFSX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STFSX(s, a, b) (PPC_INST_STFSX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LFDX(t, a, b) (PPC_INST_LFDX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STFDX(s, a, b) (PPC_INST_STFDX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LVX(t, a, b) (PPC_INST_LVX | ___PPC_RT(t) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_STVX(s, a, b) (PPC_INST_STVX | ___PPC_RS(s) | \ + ___PPC_RA(a) | ___PPC_RB(b)) +#define TEST_LXVD2X(s, a, b) (PPC_INST_LXVD2X | VSX_XX1((s), R##a, R##b)) +#define TEST_STXVD2X(s, a, b) (PPC_INST_STXVD2X | VSX_XX1((s), R##a, R##b)) + + +static void __init init_pt_regs(struct pt_regs *regs) +{ + static unsigned long msr; + static bool msr_cached; + + memset(regs, 0, sizeof(struct pt_regs)); + + if (likely(msr_cached)) { + regs->msr = msr; + return; + } + + asm volatile("mfmsr %0" : "=r"(regs->msr)); + + regs->msr |= MSR_FP; + regs->msr |= MSR_VEC; + regs->msr |= MSR_VSX; + + msr = regs->msr; + msr_cached = true; +} + +static void __init show_result(char *ins, char *result) +{ + pr_info("%-14s : %s\n", ins, result); +} + +static void __init test_ld(void) +{ + struct pt_regs regs; + unsigned long a = 0x23; + int stepped = -1; + + init_pt_regs(&regs); + regs.gpr[3] = (unsigned long) &a; + + /* ld r5, 0(r3) */ + stepped = emulate_step(&regs, TEST_LD(5, 3, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("ld", "PASS"); + else + show_result("ld", "FAIL"); +} + +static void __init test_lwz(void) +{ + struct pt_regs regs; + unsigned int a = 0x4545; + int stepped = -1; + + init_pt_regs(&regs); + regs.gpr[3] = (unsigned long) &a; + + /* lwz r5, 0(r3) */ + stepped = emulate_step(&regs, TEST_LWZ(5, 3, 0)); + + if (stepped == 1 && regs.gpr[5] == a) + show_result("lwz", "PASS"); + else + show_result("lwz", "FAIL"); +} + +static void __init test_lwzx(void) +{ + struct pt_regs regs; + unsigned int a[3] = {0x0, 0x0, 0x1234}; + int stepped = -1; + + init_pt_regs(&regs); + regs.gpr[3] = (unsigned long) a; + regs.gpr[4] = 8; + regs.gpr[5] = 0x8765; + + /* lwzx r5, r3, r4 */ + stepped = emulate_step(&regs, TEST_LWZX(5, 3, 4)); + if (stepped == 1 && regs.gpr[5] == a[2]) + show_result("lwzx", "PASS"); + else + show_result("lwzx", "FAIL"); +} + +static void __init test_std(void) +{ + struct pt_regs regs; + unsigned long a = 0x1234; + int stepped = -1; + + init_pt_regs(&regs); + regs.gpr[3] = (unsigned long) &a; + regs.gpr[5] = 0x5678; + + /* std r5, 0(r3) */ + stepped = emulate_step(&regs, TEST_STD(5, 3, 0)); + if (stepped == 1 || regs.gpr[5] == a) + show_result("std", "PASS"); + else + show_result("std", "FAIL"); +} + +static void __init test_ldarx_stdcx(void) +{ + struct pt_regs regs; + unsigned long a = 0x1234; + int stepped = -1; + unsigned long cr0_eq = 0x1 << 29; /* eq bit of CR0 */ + + init_pt_regs(&regs); + asm volatile("mfcr %0" : "=r"(regs.ccr)); + + + /*** ldarx
***/ + + regs.gpr[3] = (unsigned long) &a; + regs.gpr[4] = 0; + regs.gpr[5] = 0x5678; + + /* ldarx r5, r3, r4, 0 */ + stepped = emulate_step(&regs, TEST_LDARX(5, 3, 4, 0)); + + /* + * Don't touch 'a' here. Touching 'a' can do Load/store + * of 'a' which result in failure of subsequent stdcx. + * Instead, use hardcoded value for comparison. + */ + if (stepped <= 0 || regs.gpr[5] != 0x1234) { + show_result("ldarx / stdcx.", "FAIL (ldarx)"); + return; + } + + + /*** stdcx. ***/ + + regs.gpr[5] = 0x9ABC; + + /* stdcx. r5, r3, r4 */ + stepped = emulate_step(&regs, TEST_STDCX(5, 3, 4)); + + /* + * Two possible scenarios that indicates successful emulation + * of stdcx. : + * 1. Reservation is active and store is performed. In this + * case cr0.eq bit will be set to 1. + * 2. Reservation is not active and store is not performed. + * In this case cr0.eq bit will be set to 0. + */ + if (stepped == 1 && ((regs.gpr[5] == a && (regs.ccr & cr0_eq)) + || (regs.gpr[5] != a && !(regs.ccr & cr0_eq)))) + show_result("ldarx / stdcx.", "PASS"); + else + show_result("ldarx / stdcx.", "FAIL (stdcx.)"); +} + +#ifdef CONFIG_PPC_FPU +static void __init test_lfsx_stfsx(void) +{ + struct pt_regs regs; + union { + float a; + int b; + } c; + int cached_b; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lfsx ***/ + + c.a = 123.45; + cached_b = c.b; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lfsx frt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LFSX(10, 3, 4)); + + if (stepped == 1) + show_result("lfsx", "PASS"); + else + show_result("lfsx", "FAIL"); + + + /*** stfsx ***/ + + c.a = 678.91; + + /* stfsx frs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STFSX(10, 3, 4)); + + if (stepped == 1 && c.b == cached_b) + show_result("stfsx", "PASS"); + else + show_result("stfsx", "FAIL"); +} + +static void __init test_lfdx_stfdx(void) +{ + struct pt_regs regs; + union { + double a; + long b; + } c; + long cached_b; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lfdx ***/ + + c.a = 123456.78; + cached_b = c.b; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lfdx frt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LFDX(10, 3, 4)); + + if (stepped == 1) + show_result("lfdx", "PASS"); + else + show_result("lfdx", "FAIL"); + + + /*** stfdx ***/ + + c.a = 987654.32; + + /* stfdx frs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STFDX(10, 3, 4)); + + if (stepped == 1 && c.b == cached_b) + show_result("stfdx", "PASS"); + else + show_result("stfdx", "FAIL"); +} +#else +static void __init test_lfsx_stfsx(void) +{ + show_result("lfsx", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("stfsx", "SKIP (CONFIG_PPC_FPU is not set)"); +} + +static void __init test_lfdx_stfdx(void) +{ + show_result("lfdx", "SKIP (CONFIG_PPC_FPU is not set)"); + show_result("stfdx", "SKIP (CONFIG_PPC_FPU is not set)"); +} +#endif /* CONFIG_PPC_FPU */ + +#ifdef CONFIG_ALTIVEC +static void __init test_lvx_stvx(void) +{ + struct pt_regs regs; + union { + vector128 a; + u32 b[4]; + } c; + u32 cached_b[4]; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lvx ***/ + + cached_b[0] = c.b[0] = 923745; + cached_b[1] = c.b[1] = 2139478; + cached_b[2] = c.b[2] = 9012; + cached_b[3] = c.b[3] = 982134; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lvx vrt10, r3, r4 */ + stepped = emulate_step(&regs, TEST_LVX(10, 3, 4)); + + if (stepped == 1) + show_result("lvx", "PASS"); + else + show_result("lvx", "FAIL"); + + + /*** stvx ***/ + + c.b[0] = 4987513; + c.b[1] = 84313948; + c.b[2] = 71; + c.b[3] = 498532; + 
/* stvx vrs10, r3, r4 */ + stepped = emulate_step(&regs, TEST_STVX(10, 3, 4)); + + if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && + cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) + show_result("stvx", "PASS"); + else + show_result("stvx", "FAIL"); +} +#else +static void __init test_lvx_stvx(void) +{ + show_result("lvx", "SKIP (CONFIG_ALTIVEC is not set)"); + show_result("stvx", "SKIP (CONFIG_ALTIVEC is not set)"); +} +#endif /* CONFIG_ALTIVEC */ + +#ifdef CONFIG_VSX +static void __init test_lxvd2x_stxvd2x(void) +{ + struct pt_regs regs; + union { + vector128 a; + u32 b[4]; + } c; + u32 cached_b[4]; + int stepped = -1; + + init_pt_regs(&regs); + + + /*** lxvd2x ***/ + + cached_b[0] = c.b[0] = 18233; + cached_b[1] = c.b[1] = 34863571; + cached_b[2] = c.b[2] = 834; + cached_b[3] = c.b[3] = 6138911; + + regs.gpr[3] = (unsigned long) &c.a; + regs.gpr[4] = 0; + + /* lxvd2x vsr39, r3, r4 */ + stepped = emulate_step(&regs, TEST_LXVD2X(39, 3, 4)); + + if (stepped == 1) + show_result("lxvd2x", "PASS"); + else + show_result("lxvd2x", "FAIL"); + + + /*** stxvd2x ***/ + + c.b[0] = 21379463; + c.b[1] = 87; + c.b[2] = 374234; + c.b[3] = 4; + + /* stxvd2x vsr39, r3, r4 */ + stepped = emulate_step(&regs, TEST_STXVD2X(39, 3, 4)); + + if (stepped == 1 && cached_b[0] == c.b[0] && cached_b[1] == c.b[1] && + cached_b[2] == c.b[2] && cached_b[3] == c.b[3]) + show_result("stxvd2x", "PASS"); + else + show_result("stxvd2x", "FAIL"); +} +#else +static void __init test_lxvd2x_stxvd2x(void) +{ + show_result("lxvd2x", "SKIP (CONFIG_VSX is not set)"); + show_result("stxvd2x", "SKIP (CONFIG_VSX is not set)"); +} +#endif /* CONFIG_VSX */ + +static int __init test_emulate_step(void) +{ + test_ld(); + test_lwz(); + test_lwzx(); + test_std(); + test_ldarx_stdcx(); + test_lfsx_stfsx(); + test_lfdx_stfdx(); + test_lvx_stvx(); + test_lxvd2x_stxvd2x(); + + return 0; +} +late_initcall(test_emulate_step); -- cgit v1.1 From 7a70d7288c926ae88e0c773fbb506aa374e99c2d Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Mon, 27 Feb 2017 14:32:41 +1100 Subject: powerpc/64: Invalidate process table caching after setting process table The POWER9 MMU reads and caches entries from the process table. When we kexec from one kernel to another, the second kernel sets its process table pointer but doesn't currently do anything to make the CPU invalidate any cached entries from the old process table. This adds a tlbie (TLB invalidate entry) instruction with parameters to invalidate caching of the process table after the new process table is installed. Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/mm/pgtable-radix.c | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/mm/pgtable-radix.c b/arch/powerpc/mm/pgtable-radix.c index feeda90..01c94f1 100644 --- a/arch/powerpc/mm/pgtable-radix.c +++ b/arch/powerpc/mm/pgtable-radix.c @@ -186,6 +186,10 @@ static void __init radix_init_pgtable(void) */ register_process_table(__pa(process_tb), 0, PRTB_SIZE_SHIFT - 12); pr_info("Process table %p and radix root for kernel: %p\n", process_tb, init_mm.pgd); + asm volatile("ptesync" : : : "memory"); + asm volatile(PPC_TLBIE_5(%0,%1,2,1,1) : : + "r" (TLBIEL_INVAL_SET_LPID), "r" (0)); + asm volatile("eieio; tlbsync; ptesync" : : : "memory"); } static void __init radix_init_partition_table(void) -- cgit v1.1 From 424f8acd328a111319ae30bf384e5dfb9bc8f8cb Mon Sep 17 00:00:00 2001 From: "Gautham R.
Shenoy" Date: Mon, 27 Feb 2017 11:10:07 +0530 Subject: powerpc/powernv: Fix bug due to labeling ambiguity in power_enter_stop Commit 09206b600c76 ("powernv: Pass PSSCR value and mask to power9_idle_stop") added additional code in power_enter_stop() to distinguish between stop requests whose PSSCR had ESL=EC=1 from those which did not. When ESL=EC=1, we do a forward-jump to a location labelled by "1", which had the code to handle the ESL=EC=1 case. Unfortunately just a couple of instructions before this label, is the macro IDLE_STATE_ENTER_SEQ() which also has a label "1" in its expansion. As a result, the current code can result in directly executing stop instruction for deep stop requests with PSSCR ESL=EC=1, without saving the hypervisor state. Fix this BUG by labeling the location that handles ESL=EC=1 case with a more descriptive label ".Lhandle_esl_ec_set" (local label suggestion a la .Lxx from Anton Blanchard). While at it, rename the label "2" labelling the location of the code handling entry into deep stop states with ".Lhandle_deep_stop". For a good measure, change the label in IDLE_STATE_ENTER_SEQ() macro to an not-so commonly used value in order to avoid similar mishaps in the future. Fixes: 09206b600c76 ("powernv: Pass PSSCR value and mask to power9_idle_stop") Signed-off-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/cpuidle.h | 4 ++-- arch/powerpc/kernel/idle_book3s.S | 10 ++++++---- 2 files changed, 8 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/cpuidle.h b/arch/powerpc/include/asm/cpuidle.h index fd321eb4..1557315 100644 --- a/arch/powerpc/include/asm/cpuidle.h +++ b/arch/powerpc/include/asm/cpuidle.h @@ -70,8 +70,8 @@ static inline void report_invalid_psscr_val(u64 psscr_val, int err) std r0,0(r1); \ ptesync; \ ld r0,0(r1); \ -1: cmpd cr0,r0,r0; \ - bne 1b; \ +236: cmpd cr0,r0,r0; \ + bne 236b; \ IDLE_INST; \ #define IDLE_STATE_ENTER_SEQ_NORET(IDLE_INST) \ diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 5f61cc0..9957287 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -276,19 +276,21 @@ power_enter_stop: */ andis. r4,r3,PSSCR_EC_ESL_MASK_SHIFTED clrldi r3,r3,60 /* r3 = Bits[60:63] = Requested Level (RL) */ - bne 1f + bne .Lhandle_esl_ec_set IDLE_STATE_ENTER_SEQ(PPC_STOP) li r3,0 /* Since we didn't lose state, return 0 */ b pnv_wakeup_noloss + +.Lhandle_esl_ec_set: /* * Check if the requested state is a deep idle state. */ -1: LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) + LOAD_REG_ADDRBASE(r5,pnv_first_deep_stop_state) ld r4,ADDROFF(pnv_first_deep_stop_state)(r5) cmpd r3,r4 - bge 2f + bge .Lhandle_deep_stop IDLE_STATE_ENTER_SEQ_NORET(PPC_STOP) -2: +.Lhandle_deep_stop: /* * Entering deep idle state. * Clear thread bit in PACA_CORE_IDLE_STATE, save SPRs to -- cgit v1.1 From 4dc831aa88132f835cefe876aa0206977c4d7710 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Sun, 27 Nov 2016 13:46:20 +1100 Subject: powerpc: Fix compiling a BE kernel with a powerpc64le toolchain GCC can compile with either endian, but the default ABI version is set based on the default endianness of the toolchain. Alan Modra says: you need both -mbig and -mabi=elfv1 to make a powerpc64le gcc generate powerpc64 code The opposite is true for powerpc64 when generating -mlittle it requires -mabi=elfv2 to generate v2 ABI, which we were already doing. This change adds ABI annotations together with endianness for all cases, LE and BE. 
This fixes the case of building a BE kernel with a toolchain that is LE by default. Signed-off-by: Nicholas Piggin Tested-by: Naveen N. Rao Signed-off-by: Michael Ellerman --- arch/powerpc/Makefile | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index 31286fa..19b0d1a 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -72,8 +72,15 @@ GNUTARGET := powerpc MULTIPLEWORD := -mmultiple endif -cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) +ifdef CONFIG_PPC64 +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mcall-aixdesc) +aflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mabi=elfv1) +aflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mabi=elfv2 +endif + cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mlittle-endian +cflags-$(CONFIG_CPU_BIG_ENDIAN) += $(call cc-option,-mbig-endian) ifneq ($(cc-name),clang) cflags-$(CONFIG_CPU_LITTLE_ENDIAN) += -mno-strict-align endif @@ -113,7 +120,9 @@ ifeq ($(CONFIG_CPU_LITTLE_ENDIAN),y) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2,$(call cc-option,-mcall-aixdesc)) AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv2) else +CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcall-aixdesc) +AFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mabi=elfv1) endif CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mcmodel=medium,$(call cc-option,-mminimal-toc)) CFLAGS-$(CONFIG_PPC64) += $(call cc-option,-mno-pointers-to-nested-functions) -- cgit v1.1 From 3fb66a70a4ae886445743354e4b60e54058bb3ff Mon Sep 17 00:00:00 2001 From: Laurentiu Tudor Date: Thu, 16 Feb 2017 09:11:29 -0600 Subject: powerpc/booke: Fix boot crash due to null hugepd On 32-bit book-e machines, hugepd_ok() no longer takes into account null hugepd values, causing this crash at boot: Unable to handle kernel paging request for data at address 0x80000000 ... NIP [c0018378] follow_huge_addr+0x38/0xf0 LR [c001836c] follow_huge_addr+0x2c/0xf0 Call Trace: follow_huge_addr+0x2c/0xf0 (unreliable) follow_page_mask+0x40/0x3e0 __get_user_pages+0xc8/0x450 get_user_pages_remote+0x8c/0x250 copy_strings+0x110/0x390 copy_strings_kernel+0x2c/0x50 do_execveat_common+0x478/0x630 do_execve+0x2c/0x40 try_to_run_init_process+0x18/0x60 kernel_init+0xbc/0x110 ret_from_kernel_thread+0x5c/0x64 This impacts all nxp (ex-freescale) 32-bit booke platforms. This was caused by the change of hugepd_t.pd from signed to unsigned, and the update to the nohash version of hugepd_ok(). Previously hugepd_ok() could exclude all non-huge and NULL pgds using > 0, whereas now we need to explicitly check that the value is not zero and also that PD_HUGE is *clear*. This isn't protected by the pgd_none() check in __find_linux_pte_or_hugepte() because on 32-bit we use pgtable-nopud.h, which causes the pgd_none() check to be always false. Fixes: 20717e1ff526 ("powerpc/mm: Fix little-endian 4K hugetlb") Cc: stable@vger.kernel.org # v4.7+ Reported-by: Madalin-Cristian Bucur Signed-off-by: Laurentiu Tudor [mpe: Flesh out change log details.] 
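[note: a worked illustration of the failure mode described above, not from the patch itself; PD_HUGE marks the top address bit on these platforms:]

/*
 * Old check:  (hpd_val(hpd) & PD_HUGE) == 0
 *   A cleared entry has hpd_val(hpd) == 0, and (0 & PD_HUGE) == 0 is
 *   true, so a null hugepd was wrongly treated as a huge page directory
 *   and the walker dereferenced it.
 * New check:  hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0
 *   The empty entry is rejected first; only a non-zero value with
 *   PD_HUGE clear is accepted as a hugepd.
 */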
Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/nohash/pgtable.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h index 0cd8a38..e5805ad 100644 --- a/arch/powerpc/include/asm/nohash/pgtable.h +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -230,7 +230,7 @@ static inline int hugepd_ok(hugepd_t hpd) return ((hpd_val(hpd) & 0x4) != 0); #else /* We clear the top bit to indicate hugepd */ - return ((hpd_val(hpd) & PD_HUGE) == 0); + return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); #endif } -- cgit v1.1 From 2a9c4f40ab2c281f95d41577ff0f7f78668feb73 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Thu, 2 Mar 2017 17:41:24 +1100 Subject: powerpc/powernv: Fix opal tracepoints with JUMP_LABEL=n The recent commit to allow calling OPAL calls in real mode, commit ab9bad0ead9a ("powerpc/powernv: Remove separate entry for OPAL real mode calls"), introduced a bug when CONFIG_JUMP_LABEL=n. The commit moved the "mfmsr r12" prior to the call to OPAL_BRANCH, but we missed that OPAL_BRANCH clobbers r12 when jump labels are disabled. This leads to us using the tracepoint refcount as the MSR value, typically zero, and saving that into PACASAVEDMSR. When we return from OPAL we use that value as the MSR value for rfid, meaning we switch to 32-bit BE real mode - hilarity ensues. Fix it by using r11 in OPAL_BRANCH, which is not live at the time the macro is used in OPAL_CALL. Fixes: ab9bad0ead9a ("powerpc/powernv: Remove separate entry for OPAL real mode calls") Suggested-by: Paul Mackerras Signed-off-by: Alexey Kardashevskiy Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/opal-wrappers.S | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S index 6693f75..da8a0f7 100644 --- a/arch/powerpc/platforms/powernv/opal-wrappers.S +++ b/arch/powerpc/platforms/powernv/opal-wrappers.S @@ -39,8 +39,8 @@ opal_tracepoint_refcount: BEGIN_FTR_SECTION; \ b 1f; \ END_FTR_SECTION(0, 1); \ - ld r12,opal_tracepoint_refcount@toc(r2); \ - cmpdi r12,0; \ + ld r11,opal_tracepoint_refcount@toc(r2); \ + cmpdi r11,0; \ bne- LABEL; \ 1: -- cgit v1.1 From 6ad966d7303b70165228dba1ee8da1a05c10eefe Mon Sep 17 00:00:00 2001 From: Shile Zhang Date: Sat, 4 Feb 2017 17:03:40 +0800 Subject: powerpc/64: Fix checksum folding in csum_add() Paul's patch to fix checksum folding, commit b492f7e4e07a ("powerpc/64: Fix checksum folding in csum_tcpudp_nofold and ip_fast_csum_nofold") missed a case in csum_add(). Fix it. 
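[note: a worked example of the carry that a single fold loses; the sketch below mirrors what from64to32() does and is illustrative rather than the exact kernel source:]

/*
 * res = 0xffffffff00000001:
 *   one fold : 0x00000001 + 0xffffffff = 0x100000000; truncating to
 *              32 bits drops the carry and yields 0
 *   two folds: 0x00000000 + 0x00000001 = 1; the end-around carry survives
 */
static inline u32 fold64(u64 x)
{
	x = (x & 0xffffffffULL) + (x >> 32);	/* may carry into bit 32 */
	x = (x & 0xffffffffULL) + (x >> 32);	/* second fold absorbs it */
	return (u32)x;
}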
Signed-off-by: Shile Zhang Acked-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/checksum.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 4e63787..842124b 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h @@ -112,7 +112,7 @@ static inline __wsum csum_add(__wsum csum, __wsum addend) #ifdef __powerpc64__ res += (__force u64)addend; - return (__force __wsum)((u32)res + (res >> 32)); + return (__force __wsum) from64to32(res); #else asm("addc %0,%0,%1;" "addze %0,%0;" -- cgit v1.1 From 10e5778f54765c96fe0c8f104b7a030e5b35bc72 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 4 Mar 2017 07:02:10 -0800 Subject: ARM: OMAP2+: Fix device node reference counts After commit 0549bde0fcb1 ("of: fix of_node leak caused in of_find_node_opts_by_path"), the following error may be reported when running omap images. OF: ERROR: Bad of_node_put() on /ocp@68000000 CPU: 0 PID: 0 Comm: swapper Not tainted 4.10.0-rc7-next-20170210 #1 Hardware name: Generic OMAP3-GP (Flattened Device Tree) [] (unwind_backtrace) from [] (show_stack+0x10/0x14) [] (show_stack) from [] (dump_stack+0x98/0xac) [] (dump_stack) from [] (kobject_release+0x48/0x7c) [] (kobject_release) from [] (of_find_node_by_name+0x74/0x94) [] (of_find_node_by_name) from [] (omap3xxx_hwmod_is_hs_ip_block_usable+0x24/0x2c) [] (omap3xxx_hwmod_is_hs_ip_block_usable) from [] (omap3xxx_hwmod_init+0x180/0x274) [] (omap3xxx_hwmod_init) from [] (omap3_init_early+0xa0/0x11c) [] (omap3_init_early) from [] (omap3430_init_early+0x8/0x30) [] (omap3430_init_early) from [] (setup_arch+0xc04/0xc34) [] (setup_arch) from [] (start_kernel+0x68/0x38c) [] (start_kernel) from [<8020807c>] (0x8020807c) of_find_node_by_name() drops the reference to the passed device node. The commit referenced above exposes this problem. To fix the problem, use of_get_child_by_name() instead of of_find_node_by_name(); of_get_child_by_name() does not drop the reference count of passed device nodes. While semantically different, we only look for immediate children of the passed device node, so of_get_child_by_name() is a more appropriate function to use anyway. Release the reference to the device node obtained with of_get_child_by_name() after it is no longer needed to avoid another device node leak. While at it, clean up the code and change the return type of omap3xxx_hwmod_is_hs_ip_block_usable() to bool to match its use and the return type of of_device_is_available(). Cc: Qi Hou Cc: Peter Rosin Cc: Rob Herring Signed-off-by: Guenter Roeck Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 507ff07..0fa08c1 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -3131,16 +3131,20 @@ static struct omap_hwmod_ocp_if *omap3xxx_dss_hwmod_ocp_ifs[] __initdata = { * Return: 0 if device named @dev_name is not likely to be accessible, * or 1 if it is likely to be accessible. 
*/ -static int __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, - const char *dev_name) +static bool __init omap3xxx_hwmod_is_hs_ip_block_usable(struct device_node *bus, + const char *dev_name) { + struct device_node *node; + bool available; + if (!bus) - return (omap_type() == OMAP2_DEVICE_TYPE_GP) ? 1 : 0; + return omap_type() == OMAP2_DEVICE_TYPE_GP; - if (of_device_is_available(of_find_node_by_name(bus, dev_name))) - return 1; + node = of_get_child_by_name(bus, dev_name); + available = of_device_is_available(node); + of_node_put(node); - return 0; + return available; } int __init omap3xxx_hwmod_init(void) -- cgit v1.1 From b92675d998a9fa37fe9e0e35053a95b4a23c158b Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sat, 4 Mar 2017 07:02:11 -0800 Subject: ARM: OMAP2+: Release device node after it is no longer needed. The device node returned by of_find_node_by_name() needs to be released after it is no longer needed to avoid a device node leak. Signed-off-by: Guenter Roeck Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c index 0fa08c1..1435fee 100644 --- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c +++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c @@ -3213,15 +3213,20 @@ int __init omap3xxx_hwmod_init(void) if (h_sham && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "sham")) { r = omap_hwmod_register_links(h_sham); - if (r < 0) + if (r < 0) { + of_node_put(bus); return r; + } } if (h_aes && omap3xxx_hwmod_is_hs_ip_block_usable(bus, "aes")) { r = omap_hwmod_register_links(h_aes); - if (r < 0) + if (r < 0) { + of_node_put(bus); return r; + } } + of_node_put(bus); /* * Register hwmod links specific to certain ES levels of a -- cgit v1.1 From 9e10889a3177340dcda7d29c6d8fbd97247b007b Mon Sep 17 00:00:00 2001 From: Romain Izard Date: Fri, 17 Feb 2017 16:12:50 +0100 Subject: Revert "ARM: at91/dt: sama5d2: Use new compatible for ohci node" This reverts commit cab43282682e ("ARM: at91/dt: sama5d2: Use new compatible for ohci node"). It depends on commit 7150bc9b4d43 ("usb: ohci-at91: Forcibly suspend ports while USB suspend"), which was reverted and implemented differently. With the new implementation, the compatible string must remain the same. The compatible string introduced by this commit has been used in the default SAMA5D2 dtsi starting from Linux 4.8. As it has never been working correctly in an official release, removing it should not be breaking the stability rules.
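[note: background, not part of the revert: a device tree "compatible" property is an ordered list, most specific entry first, and the OF core binds the driver entry that best matches it, so keeping "atmel,at91rm9200-ohci" lets the ohci-at91 driver apply its SoC-specific handling while "usb-ohci" remains a generic fallback. A minimal sketch of such a match table, illustrative rather than the exact driver source:]

static const struct of_device_id at91_ohci_dt_ids[] = {
	{ .compatible = "atmel,at91rm9200-ohci" },
	{ /* sentinel */ }
};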
Fixes: cab43282682e ("ARM: at91/dt: sama5d2: Use new compatible for ohci node") Signed-off-by: Romain Izard cc: Signed-off-by: Alexandre Belloni --- arch/arm/boot/dts/sama5d2.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/sama5d2.dtsi b/arch/arm/boot/dts/sama5d2.dtsi index 22332be..528b4e9 100644 --- a/arch/arm/boot/dts/sama5d2.dtsi +++ b/arch/arm/boot/dts/sama5d2.dtsi @@ -266,7 +266,7 @@ }; usb1: ohci@00400000 { - compatible = "atmel,sama5d2-ohci", "usb-ohci"; + compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00400000 0x100000>; interrupts = <41 IRQ_TYPE_LEVEL_HIGH 2>; clocks = <&uhphs_clk>, <&uhphs_clk>, <&uhpck>; -- cgit v1.1 From e51b999e11b84a766d78347afe9287e2c5b91c97 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Wed, 8 Feb 2017 15:37:11 -0500 Subject: ARM: dts: BCM5301X: Fix UARTs on bcm953012k The UARTs are outputting garbage on the console. This is due to a speed issue. We can simply use the clock speed (which is now defined in the DTSI file) and everything works fine. Signed-off-by: Jon Mason Fixes: cdc36b22 ("ARM: dts: enable clock support for BCM5301X") Signed-off-by: Florian Fainelli --- arch/arm/boot/dts/bcm953012k.dts | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts index bfd9230..da5aa8f 100644 --- a/arch/arm/boot/dts/bcm953012k.dts +++ b/arch/arm/boot/dts/bcm953012k.dts @@ -53,10 +53,9 @@ }; &uart0 { - clock-frequency = <62499840>; + status = "okay"; }; &uart1 { - clock-frequency = <62499840>; status = "okay"; }; -- cgit v1.1 From 88d1fa70c21d7b431386cfe70cdc514d98b0c9c4 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Wed, 8 Feb 2017 15:37:12 -0500 Subject: ARM: dts: BCM5301X: Fix memory start address Memory starts at 0x80000000, not 0. 0 "works" due to mirroring of the first 128M of RAM to that address. Anything greater than 128M will quickly find nothing there. Correcting the starting address has everything working again.
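[note: the arithmetic behind the fix: reg = <base size> claims 0x10000000 bytes (256M); with base 0x00000000 only the mirrored first 128M (PA 0x00000000-0x07ffffff) is actually backed, so any access beyond 128M into the claimed range hits nothing, while base 0x80000000 covers the full 256M of real RAM.]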
Signed-off-by: Jon Mason Fixes: 7eb05f6d ("ARM: dts: bcm5301x: Add BCM SVK DT files") Signed-off-by: Florian Fainelli --- arch/arm/boot/dts/bcm953012k.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/bcm953012k.dts b/arch/arm/boot/dts/bcm953012k.dts index da5aa8f..ae31a58 100644 --- a/arch/arm/boot/dts/bcm953012k.dts +++ b/arch/arm/boot/dts/bcm953012k.dts @@ -48,7 +48,7 @@ }; memory { - reg = <0x00000000 0x10000000>; + reg = <0x80000000 0x10000000>; }; }; -- cgit v1.1 From 0c2bf9f95983fe30aa2f6463cb761cd42c2d521a Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Thu, 2 Mar 2017 19:21:32 -0500 Subject: ARM: dts: BCM5301X: Correct GIC_PPI interrupt flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GIC_PPI flags were misconfigured for the timers, resulting in errors like: [ 0.000000] GIC: PPI11 is secure or misconfigured Changing them to being edge triggered corrects the issue Suggested-by: Rafał Miłecki Signed-off-by: Jon Mason Fixes: d27509f1 ("ARM: BCM5301X: add dts files for BCM4708 SoC") Signed-off-by: Florian Fainelli --- arch/arm/boot/dts/bcm5301x.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/bcm5301x.dtsi b/arch/arm/boot/dts/bcm5301x.dtsi index 4fbb089..00de62d 100644 --- a/arch/arm/boot/dts/bcm5301x.dtsi +++ b/arch/arm/boot/dts/bcm5301x.dtsi @@ -66,14 +66,14 @@ timer@20200 { compatible = "arm,cortex-a9-global-timer"; reg = <0x20200 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; local-timer@20600 { compatible = "arm,cortex-a9-twd-timer"; reg = <0x20600 0x100>; - interrupts = ; + interrupts = ; clocks = <&periph_clk>; }; -- cgit v1.1 From 5e6039d8a307d8411422c154f3d446b44fa32b6d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 18:00:15 -0500 Subject: uaccess: move VERIFY_{READ,WRITE} definitions to linux/uaccess.h Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 3 --- arch/arm/include/asm/uaccess.h | 3 --- arch/arm64/include/asm/uaccess.h | 3 --- arch/avr32/include/asm/uaccess.h | 3 --- arch/blackfin/include/asm/uaccess.h | 3 --- arch/cris/include/asm/uaccess.h | 3 --- arch/frv/include/asm/uaccess.h | 3 --- arch/hexagon/include/asm/uaccess.h | 2 -- arch/ia64/include/asm/uaccess.h | 3 --- arch/m32r/include/asm/uaccess.h | 3 --- arch/m68k/include/asm/uaccess_mm.h | 3 --- arch/m68k/include/asm/uaccess_no.h | 3 --- arch/metag/include/asm/uaccess.h | 3 --- arch/microblaze/include/asm/uaccess.h | 3 --- arch/mips/include/asm/uaccess.h | 3 --- arch/mn10300/include/asm/uaccess.h | 3 --- arch/nios2/include/asm/uaccess.h | 3 --- arch/openrisc/include/asm/uaccess.h | 3 --- arch/parisc/include/asm/uaccess.h | 3 --- arch/powerpc/include/asm/uaccess.h | 3 --- arch/s390/include/asm/uaccess.h | 3 --- arch/score/include/asm/uaccess.h | 3 --- arch/sh/include/asm/uaccess.h | 3 --- arch/sparc/include/asm/uaccess_32.h | 3 --- arch/sparc/include/asm/uaccess_64.h | 3 --- arch/tile/include/asm/uaccess.h | 3 --- arch/x86/include/asm/uaccess.h | 3 --- arch/xtensa/include/asm/asm-uaccess.h | 3 --- arch/xtensa/include/asm/uaccess.h | 3 --- 29 files changed, 86 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 94f5875..a37267a 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -20,9 +20,6 @@ #define KERNEL_DS ((mm_segment_t) { 0UL }) #define USER_DS ((mm_segment_t) { -0x40000000000UL }) -#define 
VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_fs() (current_thread_info()->addr_limit) #define get_ds() (KERNEL_DS) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index b7e0125..a13f39b 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -26,9 +26,6 @@ #define __put_user_unaligned __put_user #endif -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 5308d69..f5e1e09 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -36,9 +36,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The exception table consists of pairs of relative offsets: the first * is the relative offset to an instruction that is allowed to fault, diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index b1ec1fa..1c7f234 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -11,9 +11,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - typedef struct { unsigned int is_user_space; } mm_segment_t; diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 0eff88a..d9a9110 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -29,9 +29,6 @@ static inline void set_fs(mm_segment_t fs) #define segment_eq(a, b) ((a) == (b)) -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define access_ok(type, addr, size) _access_ok((unsigned long)(addr), (size)) /* diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 56c7d57..82bfcb5 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -21,9 +21,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index c0f4057e..9e01bd7 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -22,9 +22,6 @@ #define __ptr(x) ((unsigned long __force *)(x)) -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * check that a range of addresses falls within the current address limit */ diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index f61cfb2..21f6359 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -50,8 +50,6 @@ * reasonably simple and not *too* slow. After all, we've got the * MMU for backup. */ -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 #define __access_ok(addr, size) \ ((get_fs().seg == KERNEL_DS.seg) || \ diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 471044b..c60ff6c 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -48,9 +48,6 @@ #define KERNEL_DS ((mm_segment_t) { ~0UL }) /* cf. access_ok() */ #define USER_DS ((mm_segment_t) { TASK_SIZE-1 }) /* cf. 
access_ok() */ -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 6f89821..7d993a8 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -16,9 +16,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index d228601..fa84e9c 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -10,9 +10,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* We let the MMU do all checking */ static inline int access_ok(int type, const void __user *addr, unsigned long size) diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index 36deeb3..fab489a 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -10,9 +10,6 @@ #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define access_ok(type,addr,size) _access_ok((unsigned long)(addr),(size)) /* diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 273e612..46c1f6c 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -6,9 +6,6 @@ */ #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 253a67e..70cf5f3 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -24,9 +24,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * On Microblaze the fs value is actually the top of the corresponding * address space. diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 5347cfe..a058c04 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -71,9 +71,6 @@ extern u64 __ua_limit; #define USER_DS ((mm_segment_t) { __UA_LIMIT }) #endif -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define set_fs(x) (current_thread_info()->addr_limit = (x)) diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 2eedf6f..3e16850 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -19,9 +19,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. 
If get_fs() == USER_DS, checking is performed, with diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index 0ab8232..07fc68c 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -19,9 +19,6 @@ #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The exception table consists of pairs of addresses: the first is the * address of an instruction that is allowed to fault, and the second is diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 140faa1..6f88cf8 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -28,9 +28,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index fb4382c..598b52e 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -13,9 +13,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 0e6add3..8130763 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -11,9 +11,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 136932f..0e1f515 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -17,9 +17,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index db58ab9..5191424 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -6,9 +6,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_ds() (KERNEL_DS) #define get_fs() (current_thread_info()->addr_limit) #define segment_eq(a, b) ((a).seg == (b).seg) diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index c4f0fee..6b66d67 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -5,9 +5,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define __addr_ok(addr) \ ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index ea55f86..d8857f5 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -30,9 +30,6 @@ #define KERNEL_DS ((mm_segment_t) { 0 }) #define USER_DS ((mm_segment_t) { -1 }) -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_ds() (KERNEL_DS) #define get_fs() (current->thread.current_ds) #define set_fs(val) ((current->thread.current_ds) = (val)) diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 5373136..619223d 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -36,9 +36,6 @@ #define KERNEL_DS ((mm_segment_t) { ASI_P }) #define USER_DS 
((mm_segment_t) { ASI_AIUS }) /* har har har */ -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #define get_fs() ((mm_segment_t){(current_thread_info()->current_ds)}) #define get_ds() (KERNEL_DS) diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index a77369e..7300733 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -24,9 +24,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index ea14831..8dffb8b 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -13,9 +13,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - /* * The fs value determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/xtensa/include/asm/asm-uaccess.h b/arch/xtensa/include/asm/asm-uaccess.h index a7a1100..dfdf9fa 100644 --- a/arch/xtensa/include/asm/asm-uaccess.h +++ b/arch/xtensa/include/asm/asm-uaccess.h @@ -19,9 +19,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #include #include #include diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 848a3d7..dd6b136 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -20,9 +20,6 @@ #include #include -#define VERIFY_READ 0 -#define VERIFY_WRITE 1 - #include /* -- cgit v1.1 From af1d5b37d6211c814fac0d5d0b71ec695618054a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 18:14:09 -0500 Subject: uaccess: drop duplicate includes from asm/uaccess.h Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 4 ---- arch/arc/include/asm/uaccess.h | 2 -- arch/arm/include/asm/uaccess.h | 2 -- arch/arm64/include/asm/uaccess.h | 2 -- arch/avr32/include/asm/uaccess.h | 3 --- arch/blackfin/include/asm/uaccess.h | 1 - arch/cris/include/asm/uaccess.h | 2 -- arch/frv/include/asm/uaccess.h | 1 - arch/hexagon/include/asm/uaccess.h | 1 - arch/ia64/include/asm/uaccess.h | 2 -- arch/m32r/include/asm/uaccess.h | 2 -- arch/m68k/include/asm/uaccess_mm.h | 2 -- arch/m68k/include/asm/uaccess_no.h | 1 - arch/metag/include/asm/uaccess.h | 1 - arch/microblaze/include/asm/uaccess.h | 2 -- arch/mips/include/asm/uaccess.h | 2 -- arch/mn10300/include/asm/uaccess.h | 2 -- arch/nios2/include/asm/uaccess.h | 2 -- arch/openrisc/include/asm/uaccess.h | 2 -- arch/parisc/include/asm/uaccess.h | 2 -- arch/powerpc/include/asm/uaccess.h | 2 -- arch/s390/include/asm/uaccess.h | 2 -- arch/score/include/asm/uaccess.h | 2 -- arch/sh/include/asm/uaccess.h | 2 -- arch/sparc/include/asm/uaccess_32.h | 2 -- arch/sparc/include/asm/uaccess_64.h | 2 -- arch/tile/include/asm/uaccess.h | 1 - arch/um/include/asm/uaccess.h | 1 - arch/unicore32/include/asm/uaccess.h | 3 --- arch/x86/include/asm/uaccess.h | 2 -- arch/x86/include/asm/uaccess_32.h | 2 -- arch/x86/include/asm/uaccess_64.h | 1 - arch/xtensa/include/asm/uaccess.h | 3 --- 33 files changed, 63 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index a37267a..77c55ce 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -1,10 +1,6 @@ #ifndef __ALPHA_UACCESS_H #define __ALPHA_UACCESS_H -#include -#include - - /* * The fs value 
determines whether argument validity checking should be * performed or not. If get_fs() == USER_DS, checking is performed, with diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 41faf17..0431f56 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -24,8 +24,6 @@ #ifndef _ASM_ARC_UACCESS_H #define _ASM_ARC_UACCESS_H -#include -#include #include /* for generic string functions */ diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index a13f39b..9677a7c 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -12,8 +12,6 @@ * User space memory access functions */ #include -#include -#include #include #include #include diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index f5e1e09..7c514e1 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -28,11 +28,9 @@ #include #include #include -#include #include #include -#include #include #include diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 1c7f234..7ca5cb3 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -8,9 +8,6 @@ #ifndef __ASM_AVR32_UACCESS_H #define __ASM_AVR32_UACCESS_H -#include -#include - typedef struct { unsigned int is_user_space; } mm_segment_t; diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index d9a9110..c9fedc3 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -12,7 +12,6 @@ /* * User space memory access functions */ -#include #include #include diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 82bfcb5..bb3004a 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -16,8 +16,6 @@ #define _CRIS_UACCESS_H #ifndef __ASSEMBLY__ -#include -#include #include #include diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 9e01bd7..55b3a69 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -15,7 +15,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 21f6359..3a7f818 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -23,7 +23,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index c60ff6c..d471d1a 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -33,8 +33,6 @@ */ #include -#include -#include #include #include diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 7d993a8..96b0efd 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -11,8 +11,6 @@ /* * User space memory access functions */ -#include -#include #include #include diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index fa84e9c..fb72b71 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -5,9 +5,7 @@ * User space memory access functions */ #include -#include #include -#include #include /* We let the MMU do all checking */ diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index fab489a..e77ce66 100644 --- 
a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -4,7 +4,6 @@ /* * User space memory access functions */ -#include #include #include diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 46c1f6c..7fc5277 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -4,7 +4,6 @@ /* * User space memory access functions */ -#include /* * The fs value determines whether argument validity checking should be diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 70cf5f3..a3c0a06 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -15,8 +15,6 @@ #ifndef __ASSEMBLY__ #include -#include -#include /* RLIMIT_FSIZE */ #include #include diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index a058c04..dd25b31 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -12,8 +12,6 @@ #define _ASM_UACCESS_H #include -#include -#include #include #include #include diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 3e16850..2da7b0f 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -14,10 +14,8 @@ /* * User space memory access functions */ -#include #include #include -#include /* * The fs value determines whether argument validity checking should be diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index 07fc68c..198bbf1 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -13,8 +13,6 @@ #ifndef _ASM_NIOS2_UACCESS_H #define _ASM_NIOS2_UACCESS_H -#include -#include #include #include diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 6f88cf8..0b0f604 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -22,8 +22,6 @@ /* * User space memory access functions */ -#include -#include #include #include #include diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index 598b52e..a0b4613 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -6,12 +6,10 @@ */ #include #include -#include #include #include #include -#include #define KERNEL_DS ((mm_segment_t){0}) #define USER_DS ((mm_segment_t){1}) diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 8130763..2ec70aa 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -4,8 +4,6 @@ #ifdef __KERNEL__ #ifndef __ASSEMBLY__ -#include -#include #include #include #include diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 0e1f515..9e9a5e8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -12,8 +12,6 @@ /* * User space memory access functions */ -#include -#include #include #include diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 5191424..7a6c698 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -2,8 +2,6 @@ #define __SCORE_UACCESS_H #include -#include -#include #include #define get_ds() (KERNEL_DS) diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 6b66d67..89a28df 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -1,8 +1,6 @@ #ifndef __ASM_SH_UACCESS_H #define 
__ASM_SH_UACCESS_H -#include -#include #include #define __addr_ok(addr) \ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index d8857f5..b10f7d6 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -9,9 +9,7 @@ #ifdef __KERNEL__ #include -#include #include -#include #endif #ifndef __ASSEMBLY__ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 619223d..d76362c 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -6,10 +6,8 @@ */ #ifdef __KERNEL__ -#include #include #include -#include #include #include #include diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index 7300733..14ea3d1 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -18,7 +18,6 @@ /* * User space memory access functions */ -#include #include #include #include diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 3705620..076bdcb 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -7,7 +7,6 @@ #ifndef __UM_UACCESS_H #define __UM_UACCESS_H -#include #include #define __under_task_size(addr, size) \ diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index 897e11ad..1622f37 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -12,9 +12,6 @@ #ifndef __UNICORE_UACCESS_H__ #define __UNICORE_UACCESS_H__ -#include -#include - #include #define __copy_from_user __copy_from_user diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 8dffb8b..0522d88 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -3,10 +3,8 @@ /* * User space memory access functions */ -#include #include #include -#include #include #include #include diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 7d3bdd1..5268ecc 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -4,8 +4,6 @@ /* * User space memory access functions */ -#include -#include #include #include #include diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 673059a..8ddadd93 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -5,7 +5,6 @@ * User space memory access functions */ #include -#include #include #include #include diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index dd6b136..bd8861c 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -16,12 +16,9 @@ #ifndef _XTENSA_UACCESS_H #define _XTENSA_UACCESS_H -#include #include #include -#include - /* * The fs value determines whether argument validity checking should * be performed or not. If get_fs() == USER_DS, checking is -- cgit v1.1 From 444f02c458db00bd6049cc1bfe4254e80f57459e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 18:19:09 -0500 Subject: uaccess: drop pointless ifdefs None of those file is ever included from uapi stuff, so __KERNEL__ is always defined. None of them is ever included from assembler (they are only pulled from linux/uaccess.h, which _can't_ be included from assembler), so __ASSEMBLY__ is never defined. 
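As an editorial aside (not part of the patch), the now-redundant guard pattern removed by these hunks looked like the sketch below; given the constraints above, both conditions are always true in every context these headers can be included from:

#ifdef __KERNEL__        /* always defined: never included from uapi */
#ifndef __ASSEMBLY__     /* always true: never included from .S files */
/* ... uaccess declarations ... */
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */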
Signed-off-by: Al Viro --- arch/cris/include/asm/uaccess.h | 3 --- arch/microblaze/include/asm/uaccess.h | 6 ------ arch/powerpc/include/asm/uaccess.h | 6 ------ arch/sparc/include/asm/uaccess_32.h | 6 ------ arch/sparc/include/asm/uaccess_64.h | 6 ------ 5 files changed, 27 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index bb3004a..c2462ef 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -15,7 +15,6 @@ #ifndef _CRIS_UACCESS_H #define _CRIS_UACCESS_H -#ifndef __ASSEMBLY__ #include #include @@ -411,6 +410,4 @@ __generic_clear_user_nocheck(void __user *to, unsigned long n) #define strlen_user(str) strnlen_user((str), 0x7ffffffe) -#endif /* __ASSEMBLY__ */ - #endif /* _CRIS_UACCESS_H */ diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index a3c0a06..b132cd3 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -11,9 +11,6 @@ #ifndef _ASM_MICROBLAZE_UACCESS_H #define _ASM_MICROBLAZE_UACCESS_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - #include #include @@ -417,7 +414,4 @@ static inline long strnlen_user(const char __user *src, long n) return __strnlen_user(src, n); } -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _ASM_MICROBLAZE_UACCESS_H */ diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 2ec70aa..3904040 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -1,9 +1,6 @@ #ifndef _ARCH_POWERPC_UACCESS_H #define _ARCH_POWERPC_UACCESS_H -#ifdef __KERNEL__ -#ifndef __ASSEMBLY__ - #include #include #include @@ -417,7 +414,4 @@ extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -#endif /* __ASSEMBLY__ */ -#endif /* __KERNEL__ */ - #endif /* _ARCH_POWERPC_UACCESS_H */ diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index b10f7d6..952d512 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -7,12 +7,8 @@ #ifndef _ASM_UACCESS_H #define _ASM_UACCESS_H -#ifdef __KERNEL__ #include #include -#endif - -#ifndef __ASSEMBLY__ #include @@ -307,6 +303,4 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n) __must_check long strlen_user(const char __user *str); __must_check long strnlen_user(const char __user *str, long n); -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_UACCESS_H */ diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index d76362c..7afb4f6 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -5,16 +5,12 @@ * User space memory access functions */ -#ifdef __KERNEL__ #include #include #include #include #include #include -#endif - -#ifndef __ASSEMBLY__ #include @@ -229,6 +225,4 @@ unsigned long compute_effective_address(struct pt_regs *, unsigned int insn, unsigned int rd); -#endif /* __ASSEMBLY__ */ - #endif /* _ASM_UACCESS_H */ -- cgit v1.1 From 39319f504b5d91e853f5ec7753a56e43915fcaf4 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Wed, 30 Nov 2016 12:05:03 +0100 Subject: ARM: sun8i: Fix the mali clock rate The Mali clock rate was improperly assumed to be 408MHz, while it was really 384Mhz, 408MHz being the "extreme" frequency, and definitely not stable. 
Switch for the stable, correct frequency for the GPU. Signed-off-by: Maxime Ripard --- arch/arm/boot/dts/sun8i-a23-a33.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/sun8i-a23-a33.dtsi b/arch/arm/boot/dts/sun8i-a23-a33.dtsi index a952cc0..8a3ed21 100644 --- a/arch/arm/boot/dts/sun8i-a23-a33.dtsi +++ b/arch/arm/boot/dts/sun8i-a23-a33.dtsi @@ -495,7 +495,7 @@ resets = <&ccu RST_BUS_GPU>; assigned-clocks = <&ccu CLK_GPU>; - assigned-clock-rates = <408000000>; + assigned-clock-rates = <384000000>; }; gic: interrupt-controller@01c81000 { -- cgit v1.1 From 68925176296a8b995e503349200e256674bfe5ac Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Fri, 17 Feb 2017 14:32:18 +0000 Subject: arm64: KVM: VHE: Clear HCR_TGE when invalidating guest TLBs When invalidating guest TLBs, special care must be taken to actually shoot the guest TLBs and not the host ones if we're running on a VHE system. This is controlled by the HCR_EL2.TGE bit, which we forget to clear before invalidating TLBs. Address the issue by introducing two wrappers (__tlb_switch_to_guest and __tlb_switch_to_host) that take care of both the VTTBR_EL2 and HCR_EL2.TGE switching. Reported-by: Tomasz Nowicki Tested-by: Tomasz Nowicki Reviewed-by: Christoffer Dall Cc: stable@vger.kernel.org Signed-off-by: Marc Zyngier --- arch/arm64/kvm/hyp/tlb.c | 64 +++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 55 insertions(+), 9 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c index e8e7ba2..9e1d2b7 100644 --- a/arch/arm64/kvm/hyp/tlb.c +++ b/arch/arm64/kvm/hyp/tlb.c @@ -18,14 +18,62 @@ #include #include +static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm) +{ + u64 val; + + /* + * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and + * most TLB operations target EL2/EL0. In order to affect the + * guest TLBs (EL1/EL0), we need to change one of these two + * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so + * let's flip TGE before executing the TLB operation. + */ + write_sysreg(kvm->arch.vttbr, vttbr_el2); + val = read_sysreg(hcr_el2); + val &= ~HCR_TGE; + write_sysreg(val, hcr_el2); + isb(); +} + +static void __hyp_text __tlb_switch_to_guest_nvhe(struct kvm *kvm) +{ + write_sysreg(kvm->arch.vttbr, vttbr_el2); + isb(); +} + +static hyp_alternate_select(__tlb_switch_to_guest, + __tlb_switch_to_guest_nvhe, + __tlb_switch_to_guest_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + +static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm) +{ + /* + * We're done with the TLB operation, let's restore the host's + * view of HCR_EL2. + */ + write_sysreg(0, vttbr_el2); + write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2); +} + +static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm) +{ + write_sysreg(0, vttbr_el2); +} + +static hyp_alternate_select(__tlb_switch_to_host, + __tlb_switch_to_host_nvhe, + __tlb_switch_to_host_vhe, + ARM64_HAS_VIRT_HOST_EXTN); + void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) { dsb(ishst); /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); /* * We could do so much better if we had the VA as well. 
@@ -46,7 +94,7 @@ void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa) dsb(ish); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) @@ -55,14 +103,13 @@ void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm) /* Switch to requested VMID */ kvm = kern_hyp_va(kvm); - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); __tlbi(vmalls12e1is); dsb(ish); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) @@ -70,14 +117,13 @@ void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu) struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm); /* Switch to requested VMID */ - write_sysreg(kvm->arch.vttbr, vttbr_el2); - isb(); + __tlb_switch_to_guest()(kvm); __tlbi(vmalle1); dsb(nsh); isb(); - write_sysreg(0, vttbr_el2); + __tlb_switch_to_host()(kvm); } void __hyp_text __kvm_flush_vm_context(void) -- cgit v1.1 From a69e2fb70350a66f91175cd2625f1e8215c5b6e9 Mon Sep 17 00:00:00 2001 From: Balbir Singh Date: Fri, 3 Mar 2017 11:58:44 +1100 Subject: powerpc/xics: Work around limitations of OPAL XICS priority handling The CPPR (Current Processor Priority Register) of a XICS interrupt presentation controller contains a value N, such that only interrupts with a priority "more favoured" than N will be received by the CPU, where "more favoured" means "less than". So if the CPPR has the value 5 then only interrupts with a priority of 0-4 inclusive will be received. In theory the CPPR can support a value of 0 to 255 inclusive. In practice Linux only uses values of 0, 4, 5 and 0xff. Setting the CPPR to 0 rejects all interrupts, setting it to 0xff allows all interrupts. The values 4 and 5 are used to differentiate IPIs from external interrupts. Setting the CPPR to 5 allows IPIs to be received but not external interrupts. The CPPR emulation in the OPAL XICS implementation only directly supports priorities 0 and 0xff. All other priorities are considered equivalent, and mapped to a single priority value internally. This means when using icp-opal we can not allow IPIs but not externals. This breaks Linux's use of priority values when a CPU is hot unplugged. After migrating IRQs away from the CPU that is being offlined, we set the priority to 5, meaning we still want the offline CPU to receive IPIs. But the effect of the OPAL XICS emulation's use of a single priority value is that all interrupts are rejected by the CPU. With the CPU offline, and not receiving IPIs, we may not be able to wake it up to bring it back online. The first part of the fix is in icp_opal_set_cpu_priority(). CPPR values of 0 to 4 inclusive will correctly cause all interrupts to be rejected, so we pass those CPPR values through to OPAL. However if we are called with a CPPR of 5 or greater, the caller is expecting to be able to allow IPIs but not external interrupts. We know this doesn't work, so instead of rejecting all interrupts we choose the opposite which is to allow all interrupts. This is still not correct behaviour, but we know for the only existing caller (xics_migrate_irqs_away()), that it is the better option. The other part of the fix is in xics_migrate_irqs_away(). Instead of setting priority (CPPR) to 0, and then back to 5 before migrating IRQs, we migrate the IRQs before setting the priority back to 5. 
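(Editorial sketch, not part of the patch: the reordered offline sequence in xics_migrate_irqs_away() described above, matching the diff below; locking and error handling omitted.)

/* simplified flow after this change */
xics_set_cpu_giq(xics_default_distrib_server, 0);  /* leave the global queue */
for_each_irq_desc(virq, desc) {
        /* ... retarget each IRQ away from this CPU ... */
}
mdelay(5);                                /* let in-flight IRQs drain */
icp_ops->set_priority(DEFAULT_PRIORITY);  /* allow IPIs again, last of all */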
This should have no effect on an ICP backend with a working set_priority(), and on icp-opal it means we will keep all interrupts blocked until after we've finished doing the IRQ migration. Additionally we wait for 5ms after doing the migration to make sure there are no IRQs in flight. Fixes: d74361881f0d ("powerpc/xics: Add ICP OPAL backend") Cc: stable@vger.kernel.org # v4.8+ Suggested-by: Michael Ellerman Reported-by: Vaidyanathan Srinivasan Tested-by: Vaidyanathan Srinivasan Signed-off-by: Balbir Singh [mpe: Rewrote comments and change log, change delay to 5ms] Signed-off-by: Michael Ellerman --- arch/powerpc/sysdev/xics/icp-opal.c | 10 ++++++++++ arch/powerpc/sysdev/xics/xics-common.c | 17 ++++++++++++++--- 2 files changed, 24 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c index f9670ea..b53f80f 100644 --- a/arch/powerpc/sysdev/xics/icp-opal.c +++ b/arch/powerpc/sysdev/xics/icp-opal.c @@ -91,6 +91,16 @@ static unsigned int icp_opal_get_irq(void) static void icp_opal_set_cpu_priority(unsigned char cppr) { + /* + * Here be dragons. The caller has asked to allow only IPI's and not + * external interrupts. But OPAL XIVE doesn't support that. So instead + * of allowing no interrupts allow all. That's still not right, but + * currently the only caller who does this is xics_migrate_irqs_away() + * and it works in that case. + */ + if (cppr >= DEFAULT_PRIORITY) + cppr = LOWEST_PRIORITY; + xics_set_base_cppr(cppr); opal_int_set_cppr(cppr); iosync(); diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c index 69d858e..23efe4e 100644 --- a/arch/powerpc/sysdev/xics/xics-common.c +++ b/arch/powerpc/sysdev/xics/xics-common.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include @@ -198,9 +199,6 @@ void xics_migrate_irqs_away(void) /* Remove ourselves from the global interrupt queue */ xics_set_cpu_giq(xics_default_distrib_server, 0); - /* Allow IPIs again... */ - icp_ops->set_priority(DEFAULT_PRIORITY); - for_each_irq_desc(virq, desc) { struct irq_chip *chip; long server; @@ -255,6 +253,19 @@ void xics_migrate_irqs_away(void) unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); } + + /* Allow "sufficient" time to drop any inflight IRQ's */ + mdelay(5); + + /* + * Allow IPIs again. This is done at the very end, after migrating all + * interrupts, the expectation is that we'll only get woken up by an IPI + * interrupt beyond this point, but leave externals masked just to be + * safe. If we're using icp-opal this may actually allow all + * interrupts anyway, but that should be OK. + */ + icp_ops->set_priority(DEFAULT_PRIORITY); + } #endif /* CONFIG_HOTPLUG_CPU */ -- cgit v1.1 From 12cc9fd6b2d8ee307a735b3b9faed0d17b719463 Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Tue, 28 Feb 2017 17:03:47 +1100 Subject: powerpc: Parse the command line before calling CAS On POWER9 the hypervisor requires the guest to decide whether it would like to use a hash or radix mmu model at the time it calls ibm,client-architecture-support (CAS) based on what the hypervisor has said it's allowed to do. It is possible to disable radix by passing "disable_radix" on the command line. The next patch will add support for the new CAS format, thus we need to parse the command line before calling CAS so we can correctly select which mmu we would like to use. 
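(Editorial sketch, not part of the patch: the resulting early-boot ordering in prom_init(), matching the diff below; unrelated steps omitted.)

/* simplified flow in prom_init() after this change */
prom_check_initrd(r3, r4);
early_cmdline_parse();           /* e.g. notice "disable_radix" */
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
prom_send_capabilities();        /* CAS: the mmu choice can now honour it */
#endif
prom_init_mem();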
Signed-off-by: Suraj Jitindar Singh Reviewed-by: Paul Mackerras Acked-by: Balbir Singh Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/prom_init.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index a394454..bf3966d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -2993,6 +2993,11 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, */ prom_check_initrd(r3, r4); + /* + * Do early parsing of command line + */ + early_cmdline_parse(); + #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) /* * On pSeries, inform the firmware about our capabilities @@ -3009,11 +3014,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, copy_and_flush(0, kbase, 0x100, 0); /* - * Do early parsing of command line - */ - early_cmdline_parse(); - - /* * Initialize memory management within prom_init */ prom_init_mem(); -- cgit v1.1 From 014d02cbf16b3106dc8e93281d2a9c189751ed5e Mon Sep 17 00:00:00 2001 From: Suraj Jitindar Singh Date: Tue, 28 Feb 2017 17:03:48 +1100 Subject: powerpc: Update to new option-vector-5 format for CAS On POWER9 the ibm,client-architecture-support (CAS) negotiation process has been updated to change how the host to guest negotiation is done for the new hash/radix mmu as well as the nest mmu, process tables and guest translation shootdown (GTSE). This is documented in the unreleased PAPR ACR "CAS option vector additions for P9". The host tells the guest which options it supports in ibm,arch-vec-5-platform-support. The guest then chooses a subset of these to request in the CAS call and these are agreed to in the ibm,architecture-vec-5 property of the chosen node. Thus we read ibm,arch-vec-5-platform-support and make our selection before calling CAS. We then parse the ibm,architecture-vec-5 property of the chosen node to check whether we should run as hash or radix. ibm,arch-vec-5-platform-support format: index value pairs: ... 
index: Option vector 5 byte number val: Some representation of supported values Signed-off-by: Suraj Jitindar Singh Acked-by: Paul Mackerras [mpe: Don't print about unknown options, be consistent with OV5_FEAT] Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/prom.h | 18 ++++--- arch/powerpc/kernel/prom_init.c | 110 +++++++++++++++++++++++++++++++++++++++- arch/powerpc/mm/init_64.c | 36 ++++++++++--- 3 files changed, 150 insertions(+), 14 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index 4a90634..35c00d7 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h @@ -160,12 +160,18 @@ struct of_drconf_cell { #define OV5_PFO_HW_ENCR 0x1120 /* PFO Encryption Accelerator */ #define OV5_SUB_PROCESSORS 0x1501 /* 1,2,or 4 Sub-Processors supported */ #define OV5_XIVE_EXPLOIT 0x1701 /* XIVE exploitation supported */ -#define OV5_MMU_RADIX_300 0x1880 /* ISA v3.00 radix MMU supported */ -#define OV5_MMU_HASH_300 0x1840 /* ISA v3.00 hash MMU supported */ -#define OV5_MMU_SEGM_RADIX 0x1820 /* radix mode (no segmentation) */ -#define OV5_MMU_PROC_TBL 0x1810 /* hcall selects SLB or proc table */ -#define OV5_MMU_SLB 0x1800 /* always use SLB */ -#define OV5_MMU_GTSE 0x1808 /* Guest translation shootdown */ +/* MMU Base Architecture */ +#define OV5_MMU_SUPPORT 0x18C0 /* MMU Mode Support Mask */ +#define OV5_MMU_HASH 0x1800 /* Hash MMU Only */ +#define OV5_MMU_RADIX 0x1840 /* Radix MMU Only */ +#define OV5_MMU_EITHER 0x1880 /* Hash or Radix Supported */ +#define OV5_MMU_DYNAMIC 0x18C0 /* Hash or Radix Can Switch Later */ +#define OV5_NMMU 0x1820 /* Nest MMU Available */ +/* Hash Table Extensions */ +#define OV5_HASH_SEG_TBL 0x1980 /* In Memory Segment Tables Available */ +#define OV5_HASH_GTSE 0x1940 /* Guest Translation Shoot Down Avail */ +/* Radix Table Extensions */ +#define OV5_RADIX_GTSE 0x1A40 /* Guest Translation Shoot Down Avail */ /* Option Vector 6: IBM PAPR hints */ #define OV6_LINUX 0x02 /* Linux is our OS */ diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index bf3966d..1c1b44e 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c @@ -168,6 +168,14 @@ static unsigned long __initdata prom_tce_alloc_start; static unsigned long __initdata prom_tce_alloc_end; #endif +static bool __initdata prom_radix_disable; + +struct platform_support { + bool hash_mmu; + bool radix_mmu; + bool radix_gtse; +}; + /* Platforms codes are now obsolete in the kernel. Now only used within this * file and ultimately gone too. 
Feel free to change them if you need, they * are not shared with anything outside of this file anymore @@ -626,6 +634,12 @@ static void __init early_cmdline_parse(void) prom_memory_limit = ALIGN(prom_memory_limit, 0x1000000); #endif } + + opt = strstr(prom_cmd_line, "disable_radix"); + if (opt) { + prom_debug("Radix disabled from cmdline\n"); + prom_radix_disable = true; + } } #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) @@ -695,6 +709,8 @@ struct option_vector5 { u8 byte22; u8 intarch; u8 mmu; + u8 hash_ext; + u8 radix_ext; } __packed; struct option_vector6 { @@ -850,8 +866,9 @@ struct ibm_arch_vec __cacheline_aligned ibm_architecture_vec = { .reserved3 = 0, .subprocessors = 1, .intarch = 0, - .mmu = OV5_FEAT(OV5_MMU_RADIX_300) | OV5_FEAT(OV5_MMU_HASH_300) | - OV5_FEAT(OV5_MMU_PROC_TBL) | OV5_FEAT(OV5_MMU_GTSE), + .mmu = 0, + .hash_ext = 0, + .radix_ext = 0, }, /* option vector 6: IBM PAPR hints */ @@ -990,6 +1007,92 @@ static int __init prom_count_smt_threads(void) } +static void __init prom_parse_mmu_model(u8 val, + struct platform_support *support) +{ + switch (val) { + case OV5_FEAT(OV5_MMU_DYNAMIC): + case OV5_FEAT(OV5_MMU_EITHER): /* Either Available */ + prom_debug("MMU - either supported\n"); + support->radix_mmu = !prom_radix_disable; + support->hash_mmu = true; + break; + case OV5_FEAT(OV5_MMU_RADIX): /* Only Radix */ + prom_debug("MMU - radix only\n"); + if (prom_radix_disable) { + /* + * If we __have__ to do radix, we're better off ignoring + * the command line rather than not booting. + */ + prom_printf("WARNING: Ignoring cmdline option disable_radix\n"); + } + support->radix_mmu = true; + break; + case OV5_FEAT(OV5_MMU_HASH): + prom_debug("MMU - hash only\n"); + support->hash_mmu = true; + break; + default: + prom_debug("Unknown mmu support option: 0x%x\n", val); + break; + } +} + +static void __init prom_parse_platform_support(u8 index, u8 val, + struct platform_support *support) +{ + switch (index) { + case OV5_INDX(OV5_MMU_SUPPORT): /* MMU Model */ + prom_parse_mmu_model(val & OV5_FEAT(OV5_MMU_SUPPORT), support); + break; + case OV5_INDX(OV5_RADIX_GTSE): /* Radix Extensions */ + if (val & OV5_FEAT(OV5_RADIX_GTSE)) { + prom_debug("Radix - GTSE supported\n"); + support->radix_gtse = true; + } + break; + } +} + +static void __init prom_check_platform_support(void) +{ + struct platform_support supported = { + .hash_mmu = false, + .radix_mmu = false, + .radix_gtse = false + }; + int prop_len = prom_getproplen(prom.chosen, + "ibm,arch-vec-5-platform-support"); + if (prop_len > 1) { + int i; + u8 vec[prop_len]; + prom_debug("Found ibm,arch-vec-5-platform-support, len: %d\n", + prop_len); + prom_getprop(prom.chosen, "ibm,arch-vec-5-platform-support", + &vec, sizeof(vec)); + for (i = 0; i < prop_len; i += 2) { + prom_debug("%d: index = 0x%x val = 0x%x\n", i / 2 + , vec[i] + , vec[i + 1]); + prom_parse_platform_support(vec[i], vec[i + 1], + &supported); + } + } + + if (supported.radix_mmu && supported.radix_gtse) { + /* Radix preferred - but we require GTSE for now */ + prom_debug("Asking for radix with GTSE\n"); + ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_RADIX); + ibm_architecture_vec.vec5.radix_ext = OV5_FEAT(OV5_RADIX_GTSE); + } else if (supported.hash_mmu) { + /* Default to hash mmu (if we can) */ + prom_debug("Asking for hash\n"); + ibm_architecture_vec.vec5.mmu = OV5_FEAT(OV5_MMU_HASH); + } else { + /* We're probably on a legacy hypervisor */ + prom_debug("Assuming legacy hash support\n"); + } +} static void __init prom_send_capabilities(void) { @@ 
-997,6 +1100,9 @@ static void __init prom_send_capabilities(void) prom_arg_t ret; u32 cores; + /* Check ibm,arch-vec-5-platform-support and fixup vec5 if required */ + prom_check_platform_support(); + root = call_prom("open", 1, 1, ADDR("/")); if (root != 0) { /* We need to tell the FW about the number of cores we support. diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 6aa3b76..9be9920 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -356,18 +356,42 @@ static void early_check_vec5(void) unsigned long root, chosen; int size; const u8 *vec5; + u8 mmu_supported; root = of_get_flat_dt_root(); chosen = of_get_flat_dt_subnode_by_name(root, "chosen"); - if (chosen == -FDT_ERR_NOTFOUND) + if (chosen == -FDT_ERR_NOTFOUND) { + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; + } vec5 = of_get_flat_dt_prop(chosen, "ibm,architecture-vec-5", &size); - if (!vec5) + if (!vec5) { + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; return; - if (size <= OV5_INDX(OV5_MMU_RADIX_300) || - !(vec5[OV5_INDX(OV5_MMU_RADIX_300)] & OV5_FEAT(OV5_MMU_RADIX_300))) - /* Hypervisor doesn't support radix */ + } + if (size <= OV5_INDX(OV5_MMU_SUPPORT)) { cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; + return; + } + + /* Check for supported configuration */ + mmu_supported = vec5[OV5_INDX(OV5_MMU_SUPPORT)] & + OV5_FEAT(OV5_MMU_SUPPORT); + if (mmu_supported == OV5_FEAT(OV5_MMU_RADIX)) { + /* Hypervisor only supports radix - check enabled && GTSE */ + if (!early_radix_enabled()) { + pr_warn("WARNING: Ignoring cmdline option disable_radix\n"); + } + if (!(vec5[OV5_INDX(OV5_RADIX_GTSE)] & + OV5_FEAT(OV5_RADIX_GTSE))) { + pr_warn("WARNING: Hypervisor doesn't support RADIX with GTSE\n"); + } + /* Do radix anyway - the hypervisor said we had to */ + cur_cpu_spec->mmu_features |= MMU_FTR_TYPE_RADIX; + } else if (mmu_supported == OV5_FEAT(OV5_MMU_HASH)) { + /* Hypervisor only supports hash - disable radix */ + cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; + } } void __init mmu_early_init_devtree(void) @@ -383,7 +407,7 @@ void __init mmu_early_init_devtree(void) * even though the ibm,architecture-vec-5 property created by * skiboot doesn't have the necessary bits set. */ - if (early_radix_enabled() && !(mfmsr() & MSR_HV)) + if (!(mfmsr() & MSR_HV)) early_check_vec5(); if (early_radix_enabled()) -- cgit v1.1 From 6ba422c75facb1b1e0e206c464ee121b8073f7e0 Mon Sep 17 00:00:00 2001 From: Anton Blanchard Date: Sun, 5 Mar 2017 10:54:34 +1100 Subject: powerpc/64: Avoid panic during boot due to divide by zero in init_cache_info() I see a panic in early boot when building with a recent gcc toolchain. The issue is a divide by zero, which is undefined. Older toolchains let us get away with it: int foo(int a) { return a / 0; } foo: li 9,0 divw 3,3,9 extsw 3,3 blr But newer ones catch it: foo: trap Add a check to avoid the divide by zero. 
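(Editorial note with a sketch, not part of the patch: division by zero is undefined behaviour in C, so both toolchain results above are legal; the guarded form below is the portable pattern the fix adopts. That a zero block size can come from missing firmware data is this editor's assumption, not the commit's claim.)

static u32 blocks_per_page(u32 page_size, u32 bsize)
{
        /* bsize may be 0, e.g. if firmware reported no block size */
        return bsize ? page_size / bsize : 0;
}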
Fixes: e2827fe5c156 ("powerpc/64: Clean up ppc64_caches using a struct per cache") Signed-off-by: Anton Blanchard Acked-by: Benjamin Herrenschmidt Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/setup_64.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index adf2084..9cfaa8b 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -408,7 +408,10 @@ static void init_cache_info(struct ppc_cache_info *info, u32 size, u32 lsize, info->line_size = lsize; info->block_size = bsize; info->log_block_size = __ilog2(bsize); - info->blocks_per_page = PAGE_SIZE / bsize; + if (bsize) + info->blocks_per_page = PAGE_SIZE / bsize; + else + info->blocks_per_page = 0; if (sets == 0) info->assoc = 0xffff; -- cgit v1.1 From 3b3e78552d3077ec70d2640e629e07e3ab416a6a Mon Sep 17 00:00:00 2001 From: Matjaz Hegedic Date: Sun, 5 Mar 2017 19:16:44 +0100 Subject: x86/reboot/quirks: Add ASUS EeeBook X205TA/W reboot quirk Without the parameter reboot=a, ASUS EeeBook X205TA/W will hang when it should reboot. This adds the appropriate quirk, thus fixing the problem. Signed-off-by: Matjaz Hegedic Link: http://lkml.kernel.org/r/1488737804-20681-1-git-send-email-matjaz.hegedic@gmail.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/reboot.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 01c9cda..4194d6f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -231,6 +231,14 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), }, }, + { /* Handle problems with rebooting on ASUS EeeBook X205TAW */ + .callback = set_acpi_reboot, + .ident = "ASUS EeeBook X205TAW", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), + DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), + }, + }, /* Certec */ { /* Handle problems with rebooting on Certec BPC600 */ -- cgit v1.1 From f2853308b6409b97799b0beceacd9da43a82fe43 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Mon, 6 Mar 2017 10:57:48 +0200 Subject: x86/build/x86_64_defconfig: Enable CONFIG_R8169 Very common PCIe ethernet card. Already enabled in i386_defconfig. Signed-off-by: Andy Shevchenko Cc: Peter Zijlstra Cc: Konstantin Khlebnikov Link: http://lkml.kernel.org/r/20170306085748.85957-1-andriy.shevchenko@linux.intel.com Signed-off-by: Thomas Gleixner --- arch/x86/configs/x86_64_defconfig | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index 7ef4a09..6205d3b 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig @@ -176,6 +176,7 @@ CONFIG_E1000E=y CONFIG_SKY2=y CONFIG_FORCEDETH=y CONFIG_8139TOO=y +CONFIG_R8169=y CONFIG_FDDI=y CONFIG_INPUT_POLLDEV=y # CONFIG_INPUT_MOUSEDEV_PSAUX is not set -- cgit v1.1 From 9c7a00868c3a77c86ab07a2c51f3bb4897bd8550 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 6 Mar 2017 21:51:32 +1100 Subject: powerpc/64: Fix L1D cache shape vector reporting L1I values It seems we didn't pay quite enough attention when testing the new cache shape vectors, which means we didn't notice the bug where the vector for the L1D was using the L1I values. 
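(Editorial sketch, not part of the patch: the aux vector values can be inspected from userspace roughly as below, which is how the copy/paste bug shows up as L1D reporting the L1I geometry. AT_L1I_CACHESIZE and AT_L1D_CACHESIZE are assumed here to come from the powerpc uapi asm/auxvec.h. The corrected output is shown next.)

#include <stdio.h>
#include <sys/auxv.h>   /* getauxval() */
#include <asm/auxvec.h> /* assumed: AT_L1I_CACHESIZE, AT_L1D_CACHESIZE */

int main(void)
{
        printf("L1I cache size: %#lx\n", getauxval(AT_L1I_CACHESIZE));
        printf("L1D cache size: %#lx\n", getauxval(AT_L1D_CACHESIZE));
        return 0;
}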
Fix it, resulting in eg: L1I cache size: 0x8000 32768B 32K L1I line size: 0x80 8-way associative L1D cache size: 0x10000 65536B 64K L1D line size: 0x80 8-way associative Fixes: 98a5f361b862 ("powerpc: Add new cache geometry aux vectors") Cut-and-paste-bug-by: Benjamin Herrenschmidt Badly-reviewed-by: Michael Ellerman Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/elf.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index 93b9b84..09bde6e 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h @@ -144,8 +144,8 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm, #define ARCH_DLINFO_CACHE_GEOMETRY \ NEW_AUX_ENT(AT_L1I_CACHESIZE, ppc64_caches.l1i.size); \ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, get_cache_geometry(l1i)); \ - NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1i.size); \ - NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1i)); \ + NEW_AUX_ENT(AT_L1D_CACHESIZE, ppc64_caches.l1d.size); \ + NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, get_cache_geometry(l1d)); \ NEW_AUX_ENT(AT_L2_CACHESIZE, ppc64_caches.l2.size); \ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, get_cache_geometry(l2)); \ NEW_AUX_ENT(AT_L3_CACHESIZE, ppc64_caches.l3.size); \ -- cgit v1.1 From a7d2475af7aedcb9b5c6343989a8bfadbf84429b Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Mon, 6 Mar 2017 22:53:59 +1100 Subject: powerpc: Sort the selects under CONFIG_PPC We have a big list of selects under CONFIG_PPC, and currently they're completely unsorted. This means people tend to add new selects at the bottom of the list, and so two commits which both add a new select will often conflict. Instead sort it alphabetically. This is nicer in and of itself, but also means two commits that add a new select will have a greater chance of not conflicting. Add a note at the top and bottom asking people to keep it sorted. And while we're here pad out the 'if' expressions to make them stand out. Suggested-by: Stephen Rothwell Signed-off-by: Michael Ellerman --- arch/powerpc/Kconfig | 138 +++++++++++++++++++++++++++------------------------ 1 file changed, 72 insertions(+), 66 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 4940917..97a8bc8 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -80,93 +80,99 @@ config ARCH_HAS_DMA_SET_COHERENT_MASK config PPC bool default y - select BUILDTIME_EXTABLE_SORT + # + # Please keep this list sorted alphabetically. 
+ # + select ARCH_HAS_DEVMEM_IS_ALLOWED + select ARCH_HAS_DMA_SET_COHERENT_MASK + select ARCH_HAS_ELF_RANDOMIZE + select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE + select ARCH_HAS_SG_CHAIN + select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST + select ARCH_HAS_UBSAN_SANITIZE_ALL + select ARCH_HAVE_NMI_SAFE_CMPXCHG select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_SERIO + select ARCH_SUPPORTS_ATOMIC_RMW + select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT + select ARCH_USE_BUILTIN_BSWAP + select ARCH_USE_CMPXCHG_LOCKREF if PPC64 + select ARCH_WANT_IPC_PARSE_VERSION select BINFMT_ELF - select ARCH_HAS_ELF_RANDOMIZE - select OF - select OF_EARLY_FLATTREE - select OF_RESERVED_MEM - select HAVE_FTRACE_MCOUNT_RECORD + select BUILDTIME_EXTABLE_SORT + select CLONE_BACKWARDS + select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN + select EDAC_ATOMIC_SCRUB + select EDAC_SUPPORT + select GENERIC_ATOMIC64 if PPC32 + select GENERIC_CLOCKEVENTS + select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_CMOS_UPDATE + select GENERIC_CPU_AUTOPROBE + select GENERIC_IRQ_SHOW + select GENERIC_IRQ_SHOW_LEVEL + select GENERIC_SMP_IDLE_THREAD + select GENERIC_STRNCPY_FROM_USER + select GENERIC_STRNLEN_USER + select GENERIC_TIME_VSYSCALL_OLD + select HAVE_ARCH_AUDITSYSCALL + select HAVE_ARCH_HARDENED_USERCOPY + select HAVE_ARCH_JUMP_LABEL + select HAVE_ARCH_KGDB + select HAVE_ARCH_SECCOMP_FILTER + select HAVE_ARCH_TRACEHOOK + select HAVE_CBPF_JIT if !PPC64 + select HAVE_CONTEXT_TRACKING if PPC64 + select HAVE_DEBUG_KMEMLEAK + select HAVE_DEBUG_STACKOVERFLOW + select HAVE_DMA_API_DEBUG select HAVE_DYNAMIC_FTRACE - select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL - select HAVE_FUNCTION_TRACER + select HAVE_DYNAMIC_FTRACE_WITH_REGS if MPROFILE_KERNEL + select HAVE_EBPF_JIT if PPC64 + select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) + select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FUNCTION_GRAPH_TRACER + select HAVE_FUNCTION_TRACER select HAVE_GCC_PLUGINS - select SYSCTL_EXCEPTION_TRACE - select VIRT_TO_BUS if !PPC64 + select HAVE_GENERIC_RCU_GUP + select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) select HAVE_IDE select HAVE_IOREMAP_PROT - select HAVE_EFFICIENT_UNALIGNED_ACCESS if !(CPU_LITTLE_ENDIAN && POWER7_CPU) + select HAVE_IRQ_EXIT_ON_IRQ_STACK + select HAVE_KERNEL_GZIP select HAVE_KPROBES - select HAVE_OPTPROBES if PPC64 - select HAVE_ARCH_KGDB select HAVE_KRETPROBES - select HAVE_ARCH_TRACEHOOK + select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS select HAVE_MEMBLOCK select HAVE_MEMBLOCK_NODE_MAP - select HAVE_DMA_API_DEBUG + select HAVE_MOD_ARCH_SPECIFIC + select HAVE_NMI if PERF_EVENTS select HAVE_OPROFILE - select HAVE_DEBUG_KMEMLEAK - select ARCH_HAS_SG_CHAIN - select GENERIC_ATOMIC64 if PPC32 + select HAVE_OPTPROBES if PPC64 select HAVE_PERF_EVENTS + select HAVE_PERF_EVENTS_NMI if PPC64 select HAVE_PERF_REGS select HAVE_PERF_USER_STACK_DUMP + select HAVE_RCU_TABLE_FREE if SMP select HAVE_REGS_AND_STACK_ACCESS_API - select HAVE_HW_BREAKPOINT if PERF_EVENTS && (PPC_BOOK3S || PPC_8xx) - select ARCH_WANT_IPC_PARSE_VERSION - select SPARSE_IRQ + select HAVE_SYSCALL_TRACEPOINTS + select HAVE_VIRT_CPU_ACCOUNTING select IRQ_DOMAIN - select GENERIC_IRQ_SHOW - select GENERIC_IRQ_SHOW_LEVEL select IRQ_FORCED_THREADING - select HAVE_RCU_TABLE_FREE if SMP - select HAVE_SYSCALL_TRACEPOINTS - select HAVE_CBPF_JIT if !PPC64 - select HAVE_EBPF_JIT if PPC64 - select HAVE_ARCH_JUMP_LABEL - select 
ARCH_HAVE_NMI_SAFE_CMPXCHG - select ARCH_HAS_GCOV_PROFILE_ALL - select GENERIC_SMP_IDLE_THREAD - select GENERIC_CMOS_UPDATE - select GENERIC_TIME_VSYSCALL_OLD - select GENERIC_CLOCKEVENTS - select GENERIC_CLOCKEVENTS_BROADCAST if SMP - select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST - select GENERIC_STRNCPY_FROM_USER - select GENERIC_STRNLEN_USER - select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA - select CLONE_BACKWARDS - select ARCH_USE_BUILTIN_BSWAP - select OLD_SIGSUSPEND - select OLD_SIGACTION if PPC32 - select HAVE_DEBUG_STACKOVERFLOW - select HAVE_IRQ_EXIT_ON_IRQ_STACK - select ARCH_USE_CMPXCHG_LOCKREF if PPC64 - select HAVE_ARCH_AUDITSYSCALL - select ARCH_SUPPORTS_ATOMIC_RMW - select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN select NO_BOOTMEM - select HAVE_GENERIC_RCU_GUP - select HAVE_PERF_EVENTS_NMI if PPC64 - select HAVE_NMI if PERF_EVENTS - select EDAC_SUPPORT - select EDAC_ATOMIC_SCRUB - select ARCH_HAS_DMA_SET_COHERENT_MASK - select ARCH_HAS_DEVMEM_IS_ALLOWED - select HAVE_ARCH_SECCOMP_FILTER - select ARCH_HAS_UBSAN_SANITIZE_ALL - select ARCH_SUPPORTS_DEFERRED_STRUCT_PAGE_INIT - select HAVE_LIVEPATCH if HAVE_DYNAMIC_FTRACE_WITH_REGS - select GENERIC_CPU_AUTOPROBE - select HAVE_VIRT_CPU_ACCOUNTING - select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE - select HAVE_ARCH_HARDENED_USERCOPY - select HAVE_KERNEL_GZIP - select HAVE_CONTEXT_TRACKING if PPC64 + select OF + select OF_EARLY_FLATTREE + select OF_RESERVED_MEM + select OLD_SIGACTION if PPC32 + select OLD_SIGSUSPEND + select SPARSE_IRQ + select SYSCTL_EXCEPTION_TRACE + select VIRT_TO_BUS if !PPC64 + # + # Please keep this list sorted alphabetically. + # config GENERIC_CSUM def_bool n -- cgit v1.1 From 587d7e72aedca91cee80c0a56811649c3efab765 Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Thu, 2 Mar 2017 12:41:48 -0800 Subject: kvm: nVMX: VMCLEAR should not cause the vCPU to shut down MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit VMCLEAR should silently ignore a failure to clear the launch state of the VMCS referenced by the operand. Signed-off-by: Jim Mattson [Changed "kvm_write_guest(vcpu->kvm" to "kvm_vcpu_write_guest(vcpu".] Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 283aa86..3b626d6 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -7258,9 +7258,8 @@ static int handle_vmoff(struct kvm_vcpu *vcpu) static int handle_vmclear(struct kvm_vcpu *vcpu) { struct vcpu_vmx *vmx = to_vmx(vcpu); + u32 zero = 0; gpa_t vmptr; - struct vmcs12 *vmcs12; - struct page *page; if (!nested_vmx_check_permission(vcpu)) return 1; @@ -7271,22 +7270,9 @@ static int handle_vmclear(struct kvm_vcpu *vcpu) if (vmptr == vmx->nested.current_vmptr) nested_release_vmcs12(vmx); - page = nested_get_page(vcpu, vmptr); - if (page == NULL) { - /* - * For accurate processor emulation, VMCLEAR beyond available - * physical memory should do nothing at all. 
However, it is - * possible that a nested vmx bug, not a guest hypervisor bug, - * resulted in this case, so let's shut down before doing any - * more damage: - */ - kvm_make_request(KVM_REQ_TRIPLE_FAULT, vcpu); - return 1; - } - vmcs12 = kmap(page); - vmcs12->launch_state = 0; - kunmap(page); - nested_release_page(page); + kvm_vcpu_write_guest(vcpu, + vmptr + offsetof(struct vmcs12, launch_state), + &zero, sizeof(zero)); nested_free_vmcs02(vmx, vmptr); -- cgit v1.1 From 1fbdbcea80056acfc8c8506594744f5ec52208c1 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 5 Mar 2017 17:05:57 -0800 Subject: avr32: Fix build error caused by include file reshuffling Various avr32 builds fail: arch/avr32/oprofile/backtrace.c:58: error: dereferencing pointer to incomplete type arch/avr32/oprofile/backtrace.c:60: error: implicit declaration of function 'user_mode' Signed-off-by: Guenter Roeck Acked-by: Hans-Christian Noren Egtvedt Cc: Haavard Skinnemoen Cc: Hans-Christian Egtvedt Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Robert Richter Cc: Thomas Gleixner Cc: oprofile-list@lists.sf.net Fixes: f780d89a0e82 ("sched/headers: Remove from ...") Link: http://lkml.kernel.org/r/1488762357-4500-1-git-send-email-linux@roeck-us.net Signed-off-by: Ingo Molnar --- arch/avr32/oprofile/backtrace.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/avr32/oprofile/backtrace.c b/arch/avr32/oprofile/backtrace.c index 75d9ad6..29cf2f1 100644 --- a/arch/avr32/oprofile/backtrace.c +++ b/arch/avr32/oprofile/backtrace.c @@ -14,7 +14,7 @@ */ #include -#include +#include #include /* The first two words of each frame on the stack look like this if we have -- cgit v1.1 From 80aa1a54f054a1c71cc88e0cad6cfa0d20a10f23 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 5 Mar 2017 10:27:14 -0800 Subject: h8300: Fix build breakage caused by header file changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the following h8300 build failures: arch/h8300/kernel/ptrace_h.c: In function ‘trace_trap’: arch/h8300/kernel/ptrace_h.c:253:3: error: implicit declaration of function ‘force_sig’ Signed-off-by: Guenter Roeck Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: Yoshinori Sato Cc: uclinux-h8-devel@lists.sourceforge.jp Fixes: c3edc4010e9d ("sched/headers: Move task_struct::signal and ...") Link: http://lkml.kernel.org/r/1488738434-3504-1-git-send-email-linux@roeck-us.net Signed-off-by: Ingo Molnar --- arch/h8300/kernel/ptrace_h.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/h8300/kernel/ptrace_h.c b/arch/h8300/kernel/ptrace_h.c index fe3b567..f5ff3b7 100644 --- a/arch/h8300/kernel/ptrace_h.c +++ b/arch/h8300/kernel/ptrace_h.c @@ -9,7 +9,7 @@ */ #include -#include +#include #include #define BREAKINST 0x5730 /* trapa #3 */ -- cgit v1.1 From 90922a2d03d84de36bf8a9979d62580102f31a92 Mon Sep 17 00:00:00 2001 From: Shanker Donthineni Date: Tue, 7 Mar 2017 08:20:38 -0600 Subject: irqchip/gicv3-its: Add workaround for QDF2400 ITS erratum 0065 On Qualcomm Datacenter Technologies QDF2400 SoCs, the ITS hardware implementation uses 16Bytes for Interrupt Translation Entry (ITE), but reports an incorrect value of 8Bytes in GITS_TYPER.ITTE_size. It might cause kernel memory corruption depending on the number of MSI(x) that are configured and the amount of memory that has been allocated for ITEs in its_create_device(). 
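(Editorial sketch, not part of the patch, with simplified and approximate names: roughly how the reported entry size drives the ITT allocation in its_create_device(). With the erratum, the size is computed from an under-reported field, so the buffer is half as large as the 16-byte entries the hardware actually writes.)

/* simplified from drivers/irqchip/irq-gic-v3-its.c (names approximate) */
ite_size = ((typer >> 4) & 0xf) + 1;   /* 8 with the erratum, 16 in reality */
sz = nr_ites * ite_size;
itt = kzalloc(sz, GFP_KERNEL);         /* under-sized: the ITS writes past it */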
This patch fixes the potential memory corruption by setting the correct ITE size to 16Bytes. Cc: stable@vger.kernel.org Signed-off-by: Shanker Donthineni Signed-off-by: Marc Zyngier --- arch/arm64/Kconfig | 10 ++++++++++ 1 file changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a39029b..8c7c244 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -508,6 +508,16 @@ config QCOM_FALKOR_ERRATUM_1009 If unsure, say Y. +config QCOM_QDF2400_ERRATUM_0065 + bool "QDF2400 E0065: Incorrect GITS_TYPER.ITT_Entry_size" + default y + help + On Qualcomm Datacenter Technologies QDF2400 SoC, ITS hardware reports + ITE size incorrectly. The GITS_TYPER.ITT_Entry_size field should have + been indicated as 16Bytes (0xf), not 8Bytes (0x7). + + If unsure, say Y. + endmenu -- cgit v1.1 From 2f707d97982286b307ef2a9b034e19aabc1abb56 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 6 Mar 2017 04:03:28 -0800 Subject: KVM: nVMX: reset nested_run_pending if the vCPU is going to be reset MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Reported by syzkaller: WARNING: CPU: 1 PID: 27742 at arch/x86/kvm/vmx.c:11029 nested_vmx_vmexit+0x5c35/0x74d0 arch/x86/kvm/vmx.c:11029 CPU: 1 PID: 27742 Comm: a.out Not tainted 4.10.0+ #229 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:15 [inline] dump_stack+0x2ee/0x3ef lib/dump_stack.c:51 panic+0x1fb/0x412 kernel/panic.c:179 __warn+0x1c4/0x1e0 kernel/panic.c:540 warn_slowpath_null+0x2c/0x40 kernel/panic.c:583 nested_vmx_vmexit+0x5c35/0x74d0 arch/x86/kvm/vmx.c:11029 vmx_leave_nested arch/x86/kvm/vmx.c:11136 [inline] vmx_set_msr+0x1565/0x1910 arch/x86/kvm/vmx.c:3324 kvm_set_msr+0xd4/0x170 arch/x86/kvm/x86.c:1099 do_set_msr+0x11e/0x190 arch/x86/kvm/x86.c:1128 __msr_io arch/x86/kvm/x86.c:2577 [inline] msr_io+0x24b/0x450 arch/x86/kvm/x86.c:2614 kvm_arch_vcpu_ioctl+0x35b/0x46a0 arch/x86/kvm/x86.c:3497 kvm_vcpu_ioctl+0x232/0x1120 arch/x86/kvm/../../../virt/kvm/kvm_main.c:2721 vfs_ioctl fs/ioctl.c:43 [inline] do_vfs_ioctl+0x1bf/0x1790 fs/ioctl.c:683 SYSC_ioctl fs/ioctl.c:698 [inline] SyS_ioctl+0x8f/0xc0 fs/ioctl.c:689 entry_SYSCALL_64_fastpath+0x1f/0xc2 The syzkaller folks reported a nested_run_pending warning when userspace clears the VMX capability that was previously exposed to L1. The warning gets thrown while doing (*(uint32_t*)0x20aecfe8 = (uint32_t)0x1); (*(uint32_t*)0x20aecfec = (uint32_t)0x0); (*(uint32_t*)0x20aecff0 = (uint32_t)0x3a); (*(uint32_t*)0x20aecff4 = (uint32_t)0x0); (*(uint64_t*)0x20aecff8 = (uint64_t)0x0); r[29] = syscall(__NR_ioctl, r[4], 0x4008ae89ul, 0x20aecfe8ul, 0, 0, 0, 0, 0, 0); i.e. the KVM_SET_MSRS ioctl with struct kvm_msrs { .nmsrs = 1, .pad = 0, .entries = { {.index = MSR_IA32_FEATURE_CONTROL, .reserved = 0, .data = 0} } } The VMLAUNCH/VMRESUME emulation should be stopped since the CPU is going to be reset here. This patch resets nested_run_pending: since the CPU is going to be reset, there should be nothing pending.
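(Editorial restatement of the raw reproducer above as plain C, assuming the standard KVM uapi: 0x4008ae89 is KVM_SET_MSRS, and index 0x3a is MSR_IA32_FEATURE_CONTROL.)

#include <linux/kvm.h>
#include <sys/ioctl.h>

static void clear_vmx_capability(int vcpu_fd)
{
        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } req = {
                .hdr   = { .nmsrs = 1 },
                .entry = { .index = 0x3a, .data = 0 }, /* MSR_IA32_FEATURE_CONTROL := 0 */
        };

        /* pulls VMX out from under a nested guest with a pending VMLAUNCH */
        ioctl(vcpu_fd, KVM_SET_MSRS, &req.hdr);
}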
Reported-by: Dmitry Vyukov Suggested-by: Radim Krčmář Cc: Paolo Bonzini Cc: Radim Krčmář Cc: Dmitry Vyukov Cc: David Hildenbrand Signed-off-by: Wanpeng Li Reviewed-by: David Hildenbrand Reviewed-by: Jim Mattson Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 3b626d6..ab33858 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -11107,8 +11107,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, */ static void vmx_leave_nested(struct kvm_vcpu *vcpu) { - if (is_guest_mode(vcpu)) + if (is_guest_mode(vcpu)) { + to_vmx(vcpu)->nested.nested_run_pending = 0; nested_vmx_vmexit(vcpu, -1, 0, 0); + } free_nested(to_vmx(vcpu)); } -- cgit v1.1 From f050fe7a9164945dd1c28be05bf00e8cfb082ccf Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 20 Feb 2017 12:30:11 +0000 Subject: arm: KVM: Survive unknown traps from guests Currently we BUG() if we see a HSR.EC value we don't recognise. As configurable disables/enables are added to the architecture (controlled by RES1/RES0 bits respectively), with associated synchronous exceptions, it may be possible for a guest to trigger exceptions with classes that we don't recognise. While we can't service these exceptions in a manner useful to the guest, we can avoid bringing down the host. Per ARM DDI 0406C.c, all currently unallocated HSR EC encodings are reserved, and per ARM DDI 0487A.k_iss10775, page G6-4395, EC values within the range 0x00 - 0x2c are reserved for future use with synchronous exceptions, and EC values within the range 0x2d - 0x3f may be used for either synchronous or asynchronous exceptions. The patch makes KVM handle any unknown EC by injecting an UNDEFINED exception into the guest, with a corresponding (ratelimited) warning in the host dmesg. We could later improve on this with a new (opt-in) exit to the host userspace. Cc: Dave Martin Cc: Suzuki K Poulose Reviewed-by: Christoffer Dall Signed-off-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_arm.h | 1 + arch/arm/kvm/handle_exit.c | 19 ++++++++++++------- 2 files changed, 13 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_arm.h b/arch/arm/include/asm/kvm_arm.h index e22089f..a3f0b3d 100644 --- a/arch/arm/include/asm/kvm_arm.h +++ b/arch/arm/include/asm/kvm_arm.h @@ -209,6 +209,7 @@ #define HSR_EC_IABT_HYP (0x21) #define HSR_EC_DABT (0x24) #define HSR_EC_DABT_HYP (0x25) +#define HSR_EC_MAX (0x3f) #define HSR_WFI_IS_WFE (_AC(1, UL) << 0) diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c index 4e40d19..96af65a 100644 --- a/arch/arm/kvm/handle_exit.c +++ b/arch/arm/kvm/handle_exit.c @@ -79,7 +79,19 @@ static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run) return 1; } +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + u32 hsr = kvm_vcpu_get_hsr(vcpu); + + kvm_pr_unimpl("Unknown exception class: hsr: %#08x\n", + hsr); + + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { [0 ...
HSR_EC_MAX] = kvm_handle_unknown_ec, [HSR_EC_WFI] = kvm_handle_wfx, [HSR_EC_CP15_32] = kvm_handle_cp15_32, [HSR_EC_CP15_64] = kvm_handle_cp15_64, @@ -98,13 +110,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) { u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu); - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || - !arm_exit_handlers[hsr_ec]) { - kvm_err("Unknown exception class: hsr: %#08x\n", - (unsigned int)kvm_vcpu_get_hsr(vcpu)); - BUG(); - } - return arm_exit_handlers[hsr_ec]; } -- cgit v1.1 From ba4dd156eabdca93501d92a980ba27fa5f4bbd27 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 20 Feb 2017 12:30:12 +0000 Subject: arm64: KVM: Survive unknown traps from guests Currently we BUG() if we see an ESR_EL2.EC value we don't recognise. As configurable disables/enables are added to the architecture (controlled by RES1/RES0 bits respectively), with associated synchronous exceptions, it may be possible for a guest to trigger exceptions with classes that we don't recognise. While we can't service these exceptions in a manner useful to the guest, we can avoid bringing down the host. Per ARM DDI 0487A.k_iss10775, page D7-1937, EC values within the range 0x00 - 0x2c are reserved for future use with synchronous exceptions, and EC values within the range 0x2d - 0x3f may be used for either synchronous or asynchronous exceptions. The patch makes KVM handle any unknown EC by injecting an UNDEFINED exception into the guest, with a corresponding (ratelimited) warning in the host dmesg. We could later improve on this with a new (opt-in) exit to the host userspace. Cc: Dave Martin Cc: Suzuki K Poulose Reviewed-by: Christoffer Dall Signed-off-by: Mark Rutland Signed-off-by: Marc Zyngier --- arch/arm64/kvm/handle_exit.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c index 1bfe30d..fa1b18e 100644 --- a/arch/arm64/kvm/handle_exit.c +++ b/arch/arm64/kvm/handle_exit.c @@ -135,7 +135,19 @@ static int kvm_handle_guest_debug(struct kvm_vcpu *vcpu, struct kvm_run *run) return ret; } +static int kvm_handle_unknown_ec(struct kvm_vcpu *vcpu, struct kvm_run *run) +{ + u32 hsr = kvm_vcpu_get_hsr(vcpu); + + kvm_pr_unimpl("Unknown exception class: hsr: %#08x -- %s\n", + hsr, esr_get_class_string(hsr)); + + kvm_inject_undefined(vcpu); + return 1; +} + static exit_handle_fn arm_exit_handlers[] = { + [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec, [ESR_ELx_EC_WFx] = kvm_handle_wfx, [ESR_ELx_EC_CP15_32] = kvm_handle_cp15_32, [ESR_ELx_EC_CP15_64] = kvm_handle_cp15_64, @@ -162,13 +174,6 @@ static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu) u32 hsr = kvm_vcpu_get_hsr(vcpu); u8 hsr_ec = ESR_ELx_EC(hsr); - if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) || - !arm_exit_handlers[hsr_ec]) { - kvm_err("Unknown exception class: hsr: %#08x -- %s\n", - hsr, esr_get_class_string(hsr)); - BUG(); - } - return arm_exit_handlers[hsr_ec]; } -- cgit v1.1 From 97ee351b50a49717543533cfb85b4bf9d88c9680 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Tue, 7 Mar 2017 16:14:49 +1100 Subject: powerpc/boot: Fix zImage TOC alignment Recent toolchains force the TOC to be 256-byte aligned. We need to enforce this alignment in the zImage linker script, otherwise pointers to our TOC variables (__toc_start) could be incorrect. If the actual start of the TOC and __toc_start don't have the same value we crash early in the zImage wrapper.
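The mechanism is worth sketching: __toc_start is assigned at the top of the .got output section, and if the incoming .toc data carries 256-byte alignment the linker pads after that assignment, so the symbol no longer matches the real start of the TOC. Schematically, and not the verbatim zImage script:

    .got : {
            __toc_start = .;        /* may point into pre-TOC padding...      */
            *(.toc)                 /* ...if the linker aligns the input here */
    }

    /* the fix hoists the alignment so symbol and data agree again */
    . = ALIGN(256);
    .got : { __toc_start = .; *(.toc) }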
Cc: stable@vger.kernel.org Suggested-by: Alan Modra Signed-off-by: Michael Ellerman --- arch/powerpc/boot/zImage.lds.S | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S index 861e721..f080abf 100644 --- a/arch/powerpc/boot/zImage.lds.S +++ b/arch/powerpc/boot/zImage.lds.S @@ -68,6 +68,7 @@ SECTIONS } #ifdef CONFIG_PPC64_BOOT_WRAPPER + . = ALIGN(256); .got : { __toc_start = .; -- cgit v1.1 From aa2be9b3d6d2d699e9ca7cbfc00867c80e5da213 Mon Sep 17 00:00:00 2001 From: Daniel Axtens Date: Fri, 3 Mar 2017 17:56:55 +1100 Subject: crypto: powerpc - Fix initialisation of crc32c context Turning on crypto self-tests on a POWER8 shows: alg: hash: Test 1 failed for crc32c-vpmsum 00000000: ff ff ff ff Comparing the code with the Intel CRC32c implementation on which ours is based shows that we are doing an init with 0, not ~0 as CRC32c requires. This probably wasn't caught because btrfs does its own weird open-coded initialisation. Initialise our internal context to ~0 on init. This makes the self-tests pass, and btrfs continues to work. Fixes: 6dd7a82cc54e ("crypto: powerpc - Add POWER8 optimised crc32c") Cc: Anton Blanchard Cc: stable@vger.kernel.org Signed-off-by: Daniel Axtens Acked-by: Anton Blanchard Signed-off-by: Herbert Xu --- arch/powerpc/crypto/crc32c-vpmsum_glue.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/crypto/crc32c-vpmsum_glue.c b/arch/powerpc/crypto/crc32c-vpmsum_glue.c index 9fa046d..4119945 100644 --- a/arch/powerpc/crypto/crc32c-vpmsum_glue.c +++ b/arch/powerpc/crypto/crc32c-vpmsum_glue.c @@ -52,7 +52,7 @@ static int crc32c_vpmsum_cra_init(struct crypto_tfm *tfm) { u32 *key = crypto_tfm_ctx(tfm); - *key = 0; + *key = ~0; return 0; } -- cgit v1.1 From fc69910f329d61821897871e0e957eda39beb3d8 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Wed, 8 Mar 2017 08:29:31 +0100 Subject: MIPS: Add missing include files After the split of linux/sched.h, several platforms in arch/mips stopped building. Add the respective additional #include statements to fix the problem. I first tried adding these into asm/processor.h, but ran into circular header dependencies there that I could not figure out. The commit I listed as causing the problem is the branch merge, as there is likely a combination of multiple patches in that branch.
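The shape of each fix is the same, so one hedged example can stand for all fourteen files: code that touches task stacks (or similar task internals) used to get everything through <linux/sched.h> and now also needs the topic header. The function below is invented for illustration; the header is the real post-split home of task_stack_page():

    #include <linux/sched.h>
    #include <linux/sched/task_stack.h>     /* no longer pulled in by sched.h */

    static unsigned long *demo_stack_base(struct task_struct *tsk)
    {
            return (unsigned long *)task_stack_page(tsk);
    }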
Signed-off-by: Arnd Bergmann Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-mips@linux-mips.org Cc: ralf@linux-mips.org Fixes: 1827adb11ad2 ("Merge branch 'WIP.sched-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip") Link: http://lkml.kernel.org/r/20170308072931.3836696-1-arnd@arndb.de Signed-off-by: Ingo Molnar --- arch/mips/cavium-octeon/cpu.c | 2 ++ arch/mips/cavium-octeon/crypto/octeon-crypto.c | 1 + arch/mips/cavium-octeon/smp.c | 1 + arch/mips/include/asm/fpu.h | 1 + arch/mips/kernel/smp-bmips.c | 1 + arch/mips/kernel/smp-mt.c | 1 + arch/mips/loongson64/loongson-3/cop2-ex.c | 1 + arch/mips/netlogic/common/smp.c | 1 + arch/mips/netlogic/xlp/cop2-ex.c | 3 +++ arch/mips/sgi-ip22/ip28-berr.c | 1 + arch/mips/sgi-ip27/ip27-berr.c | 2 ++ arch/mips/sgi-ip27/ip27-smp.c | 3 +++ arch/mips/sgi-ip32/ip32-berr.c | 1 + arch/mips/sgi-ip32/ip32-reset.c | 1 + 14 files changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/mips/cavium-octeon/cpu.c b/arch/mips/cavium-octeon/cpu.c index a5b4279..036d56c 100644 --- a/arch/mips/cavium-octeon/cpu.c +++ b/arch/mips/cavium-octeon/cpu.c @@ -10,7 +10,9 @@ #include #include #include +#include #include +#include #include #include diff --git a/arch/mips/cavium-octeon/crypto/octeon-crypto.c b/arch/mips/cavium-octeon/crypto/octeon-crypto.c index 4d22365..cfb4a14 100644 --- a/arch/mips/cavium-octeon/crypto/octeon-crypto.c +++ b/arch/mips/cavium-octeon/crypto/octeon-crypto.c @@ -9,6 +9,7 @@ #include #include #include +#include #include "octeon-crypto.h" diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c index 4b94b7f..3de7865 100644 --- a/arch/mips/cavium-octeon/smp.c +++ b/arch/mips/cavium-octeon/smp.c @@ -12,6 +12,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h index 321752b..f94455f 100644 --- a/arch/mips/include/asm/fpu.h +++ b/arch/mips/include/asm/fpu.h @@ -12,6 +12,7 @@ #include #include +#include #include #include diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c index 3daa2ca..1b070a7 100644 --- a/arch/mips/kernel/smp-bmips.c +++ b/arch/mips/kernel/smp-bmips.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c index e077ea3..e398cbc 100644 --- a/arch/mips/kernel/smp-mt.c +++ b/arch/mips/kernel/smp-mt.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/loongson64/loongson-3/cop2-ex.c b/arch/mips/loongson64/loongson-3/cop2-ex.c index ea13764..621d6af 100644 --- a/arch/mips/loongson64/loongson-3/cop2-ex.c +++ b/arch/mips/loongson64/loongson-3/cop2-ex.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c index 10d86d5..bddf1ef 100644 --- a/arch/mips/netlogic/common/smp.c +++ b/arch/mips/netlogic/common/smp.c @@ -35,6 +35,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/netlogic/xlp/cop2-ex.c b/arch/mips/netlogic/xlp/cop2-ex.c index 52bc5de..21e439b 100644 --- a/arch/mips/netlogic/xlp/cop2-ex.c +++ b/arch/mips/netlogic/xlp/cop2-ex.c @@ -9,11 +9,14 @@ * Copyright (C) 2009 Wind River Systems, * written by Ralf Baechle */ +#include #include #include #include #include +#include #include +#include #include #include diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c index 1f2a5bc..75460e1 
100644 --- a/arch/mips/sgi-ip22/ip28-berr.c +++ b/arch/mips/sgi-ip22/ip28-berr.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include diff --git a/arch/mips/sgi-ip27/ip27-berr.c b/arch/mips/sgi-ip27/ip27-berr.c index d12879e..83efe03 100644 --- a/arch/mips/sgi-ip27/ip27-berr.c +++ b/arch/mips/sgi-ip27/ip27-berr.c @@ -12,7 +12,9 @@ #include /* for SIGBUS */ #include /* schow_regs(), force_sig() */ #include +#include +#include #include #include #include diff --git a/arch/mips/sgi-ip27/ip27-smp.c b/arch/mips/sgi-ip27/ip27-smp.c index f5ed45e..4cd47d2 100644 --- a/arch/mips/sgi-ip27/ip27-smp.c +++ b/arch/mips/sgi-ip27/ip27-smp.c @@ -8,10 +8,13 @@ */ #include #include +#include #include #include + #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-berr.c b/arch/mips/sgi-ip32/ip32-berr.c index 57d8c74..c1f12a9 100644 --- a/arch/mips/sgi-ip32/ip32-berr.c +++ b/arch/mips/sgi-ip32/ip32-berr.c @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/arch/mips/sgi-ip32/ip32-reset.c b/arch/mips/sgi-ip32/ip32-reset.c index 8bd415c..b3b442d 100644 --- a/arch/mips/sgi-ip32/ip32-reset.c +++ b/arch/mips/sgi-ip32/ip32-reset.c @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include -- cgit v1.1 From 12aff99723901bcc0e2a6a34343a4f62c371fdd9 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Tue, 7 Feb 2017 17:14:14 -0200 Subject: ARM: dts: imx6sx-udoo-neo: Fix reboot hang After issuing a 'reboot' command the imx6sx-udoo-neo board does not reboot as expected and it just hangs instead. In mainline kernel only LDO enabled mode is supported. Do not provide arm-supply/soc-supply nodes in the device tree, so that the board operates in LDO enabled mode and can then successfully reboot via watchdog. Fixes: 76e691fc7653b85d39 ("ARM: dts: imx6sx: Add UDOO Neo support") Signed-off-by: Fabio Estevam Tested-by: Breno Lima Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6sx-udoo-neo.dtsi | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi index 49f466f..dcfc975 100644 --- a/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi +++ b/arch/arm/boot/dts/imx6sx-udoo-neo.dtsi @@ -121,11 +121,6 @@ }; }; -&cpu0 { - arm-supply = <&sw1a_reg>; - soc-supply = <&sw1c_reg>; -}; - &fec1 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_enet1>; -- cgit v1.1 From 672a2c87c83649fb0167202342ce85af9a3b4f1c Mon Sep 17 00:00:00 2001 From: Jan Kara Date: Wed, 8 Mar 2017 14:56:05 +0100 Subject: axonram: Fix gendisk handling It is invalid to call del_gendisk() when disk->queue is NULL. Fix error handling in axon_ram_probe() to avoid doing that. Also del_gendisk() does not drop a reference to gendisk allocated by alloc_disk(). That has to be done by put_disk(). Add that call where needed. 
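The rule being applied generalizes beyond axonram: del_gendisk() undoes add_disk() but does not drop the reference taken by alloc_disk(), and it must not run for a disk that was never registered. A compressed sketch of the unwind, assuming a driver of this era where GENHD_FL_UP marks a registered disk:

    /* error-path teardown for a gendisk */
    if (disk->flags & GENHD_FL_UP)
            del_gendisk(disk);      /* undo add_disk() only if it happened */
    put_disk(disk);                 /* always drop alloc_disk()'s reference */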
Reported-by: Dan Carpenter Signed-off-by: Jan Kara Signed-off-by: Jens Axboe --- arch/powerpc/sysdev/axonram.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index ada29ea..f523ac8 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c @@ -274,7 +274,9 @@ failed: if (bank->disk->major > 0) unregister_blkdev(bank->disk->major, bank->disk->disk_name); - del_gendisk(bank->disk); + if (bank->disk->flags & GENHD_FL_UP) + del_gendisk(bank->disk); + put_disk(bank->disk); } device->dev.platform_data = NULL; if (bank->io_addr != 0) @@ -299,6 +301,7 @@ axon_ram_remove(struct platform_device *device) device_remove_file(&device->dev, &dev_attr_ecc); free_irq(bank->irq_id, device); del_gendisk(bank->disk); + put_disk(bank->disk); iounmap((void __iomem *) bank->io_addr); kfree(bank); -- cgit v1.1 From f04d108029063a8a67528a88449c412aecf4d3d8 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Mon, 20 Feb 2017 19:26:30 +0530 Subject: powerpc/perf: Fix perf_get_data_addr() for power9 DD1 Power9 DD1 does not support the PMU_HAS_SIER flag, so sdsync in perf_get_data_addr() defaults to MMCRA_SDSYNC, which is wrong. Since the power9 MMCRA does not support the SDSYNC bit, the patch adds the PPMU_NO_SIAR flag to the check and sets sdsync to MMCRA_SAMPLE_ENABLE. Fixes: 27593d72c4ad ("powerpc/perf: Use MSR to report privilege level on P9 DD1") Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/core-book3s.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c index 595dd71..2ff1324 100644 --- a/arch/powerpc/perf/core-book3s.c +++ b/arch/powerpc/perf/core-book3s.c @@ -188,6 +188,8 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) sdsync = POWER7P_MMCRA_SDAR_VALID; else if (ppmu->flags & PPMU_ALT_SIPR) sdsync = POWER6_MMCRA_SDSYNC; + else if (ppmu->flags & PPMU_NO_SIAR) + sdsync = MMCRA_SAMPLE_ENABLE; else sdsync = MMCRA_SDSYNC; -- cgit v1.1 From 78b4416aa249365dd3c1b64da4d3a232014320b0 Mon Sep 17 00:00:00 2001 From: Madhavan Srinivasan Date: Mon, 20 Feb 2017 19:29:03 +0530 Subject: powerpc/perf: Handle sdar_mode for marked event in power9 MMCRA[SDAR_MODE] specifies how the SDAR should be updated in continuous sampling mode. On P9 it must be set to 0b00 when MMCRA[63] is set.
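In bit terms, SDAR_MODE is the two-bit field at shift 42, and MMCRA[63] (IBM numbering) is assumed here to be the sample-enable bit that the hunk below tests via MMCRA_SAMPLE_ENABLE. A sketch of the masking the fix performs, with the macro values taken from the patch and a made-up demo function:

    #define MMCRA_SDAR_MODE_SHIFT      42
    #define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT)

    /* marked events must sample with SDAR_MODE = 0b00 ("no updates") */
    static unsigned long demo_mmcra_for_marked_event(unsigned long mmcra)
    {
            return mmcra & MMCRA_SDAR_MODE_NO_UPDATES;
    }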
Fixes: c7c3f568beff2 ('powerpc/perf: macros for power9 format encoding') Signed-off-by: Madhavan Srinivasan Signed-off-by: Michael Ellerman --- arch/powerpc/perf/isa207-common.c | 43 ++++++++++++++++++++++++++++++++------- arch/powerpc/perf/isa207-common.h | 1 + 2 files changed, 37 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/perf/isa207-common.c b/arch/powerpc/perf/isa207-common.c index e79fb5f..cd951fd 100644 --- a/arch/powerpc/perf/isa207-common.c +++ b/arch/powerpc/perf/isa207-common.c @@ -65,12 +65,41 @@ static bool is_event_valid(u64 event) return !(event & ~valid_mask); } -static u64 mmcra_sdar_mode(u64 event) +static inline bool is_event_marked(u64 event) { - if (cpu_has_feature(CPU_FTR_ARCH_300) && !cpu_has_feature(CPU_FTR_POWER9_DD1)) - return p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + if (event & EVENT_IS_MARKED) + return true; + + return false; +} - return MMCRA_SDAR_MODE_TLB; +static void mmcra_sdar_mode(u64 event, unsigned long *mmcra) +{ + /* + * MMCRA[SDAR_MODE] specifices how the SDAR should be updated in + * continous sampling mode. + * + * Incase of Power8: + * MMCRA[SDAR_MODE] will be programmed as "0b01" for continous sampling + * mode and will be un-changed when setting MMCRA[63] (Marked events). + * + * Incase of Power9: + * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'), + * or if group already have any marked events. + * Non-Marked events (for DD1): + * MMCRA[SDAR_MODE] will be set to 0b01 + * For rest + * MMCRA[SDAR_MODE] will be set from event code. + */ + if (cpu_has_feature(CPU_FTR_ARCH_300)) { + if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE)) + *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES; + else if (!cpu_has_feature(CPU_FTR_POWER9_DD1)) + *mmcra |= p9_SDAR_MODE(event) << MMCRA_SDAR_MODE_SHIFT; + else if (cpu_has_feature(CPU_FTR_POWER9_DD1)) + *mmcra |= MMCRA_SDAR_MODE_TLB; + } else + *mmcra |= MMCRA_SDAR_MODE_TLB; } static u64 thresh_cmp_val(u64 value) @@ -180,7 +209,7 @@ int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp) value |= CNST_L1_QUAL_VAL(cache); } - if (event & EVENT_IS_MARKED) { + if (is_event_marked(event)) { mask |= CNST_SAMPLE_MASK; value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT); } @@ -276,7 +305,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, } /* In continuous sampling mode, update SDAR on TLB miss */ - mmcra |= mmcra_sdar_mode(event[i]); + mmcra_sdar_mode(event[i], &mmcra); if (event[i] & EVENT_IS_L1) { cache = event[i] >> EVENT_CACHE_SEL_SHIFT; @@ -285,7 +314,7 @@ int isa207_compute_mmcr(u64 event[], int n_ev, mmcr1 |= (cache & 1) << MMCR1_DC_QUAL_SHIFT; } - if (event[i] & EVENT_IS_MARKED) { + if (is_event_marked(event[i])) { mmcra |= MMCRA_SAMPLE_ENABLE; val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK; diff --git a/arch/powerpc/perf/isa207-common.h b/arch/powerpc/perf/isa207-common.h index cf9bd899..899210f 100644 --- a/arch/powerpc/perf/isa207-common.h +++ b/arch/powerpc/perf/isa207-common.h @@ -246,6 +246,7 @@ #define MMCRA_THR_CMP_SHIFT 32 #define MMCRA_SDAR_MODE_SHIFT 42 #define MMCRA_SDAR_MODE_TLB (1ull << MMCRA_SDAR_MODE_SHIFT) +#define MMCRA_SDAR_MODE_NO_UPDATES ~(0x3ull << MMCRA_SDAR_MODE_SHIFT) #define MMCRA_IFM_SHIFT 30 /* MMCR1 Threshold Compare bit constant for power9 */ -- cgit v1.1 From 7aafac11e308d37ed3c509829bb43d80c1811ac3 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Wed, 22 Feb 2017 15:43:59 +1100 Subject: powerpc/powernv/ioda2: Gracefully fail if too many TCE levels requested The IODA2 
specification says that a 64-bit DMA address cannot use the top 4 bits (3 are reserved and one is a "TVE select"); the bottom page_shift bits cannot be used for multilevel table addressing either. The existing IODA2 table allocation code aligns the minimum TCE table size to PAGE_SIZE, so in the case of 64K system pages and 4K IOMMU pages we have 64-4-12=48 bits. Since a 64K page stores 8192 TCEs, i.e. needs 13 bits, the maximum number of levels is 48/13 = 3, so we physically cannot address more and EEH happens on DMA accesses. This adds a check that rejects a request for too many levels. It is still possible to have 5 levels in the case of a 4K system page size. Signed-off-by: Alexey Kardashevskiy Acked-by: Gavin Shan Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/pci-ioda.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 6901a06..957a57a 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -2624,6 +2624,9 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset, level_shift = entries_shift + 3; level_shift = max_t(unsigned, level_shift, PAGE_SHIFT); + if ((level_shift - 3) * levels + page_shift >= 60) + return -EINVAL; + /* Allocate TCE table */ addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, levels, tce_table_size, &offset, &total_allocated); -- cgit v1.1 From 7af4df85796589e60a2dfc0f821eca0c4bbce4d2 Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Wed, 8 Mar 2017 11:38:33 +0530 Subject: KVM: arm/arm64: Enable KVM_CAP_NR_MEMSLOTS on arm/arm64 Return KVM_USER_MEM_SLOTS for a userspace capability query on NR_MEMSLOTS. Reviewed-by: Christoffer Dall Signed-off-by: Linu Cherian Signed-off-by: Marc Zyngier --- arch/arm/kvm/arm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c index c9a2103..96dba7c 100644 --- a/arch/arm/kvm/arm.c +++ b/arch/arm/kvm/arm.c @@ -221,6 +221,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) case KVM_CAP_MAX_VCPUS: r = KVM_MAX_VCPUS; break; + case KVM_CAP_NR_MEMSLOTS: + r = KVM_USER_MEM_SLOTS; + break; case KVM_CAP_MSI_DEVID: if (!kvm) r = -EINVAL; -- cgit v1.1 From 3e92f94a3b8e925a6dd7ec88a5794b2084b5fb65 Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Wed, 8 Mar 2017 11:38:34 +0530 Subject: KVM: arm/arm64: Remove KVM_PRIVATE_MEM_SLOTS definitions that are unused The arm/arm64 architecture doesn't use private memslots, hence removing the KVM_PRIVATE_MEM_SLOTS macro definition.
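The definition can go away because of the fallback in generic code: include/linux/kvm_host.h of this era defaults the private count to zero and only uses it to size the slot array, roughly as below (quoted from memory, so treat it as a sketch):

    #ifndef KVM_PRIVATE_MEM_SLOTS
    #define KVM_PRIVATE_MEM_SLOTS 0
    #endif

    #define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)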
Reviewed-by: Christoffer Dall Signed-off-by: Linu Cherian Signed-off-by: Marc Zyngier --- arch/arm/include/asm/kvm_host.h | 1 - arch/arm64/include/asm/kvm_host.h | 1 - 2 files changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h index cc495d79..31ee468 100644 --- a/arch/arm/include/asm/kvm_host.h +++ b/arch/arm/include/asm/kvm_host.h @@ -30,7 +30,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED #define KVM_USER_MEM_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HAVE_ONE_REG #define KVM_HALT_POLL_NS_DEFAULT 500000 diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index f21fd38..6ac17ee 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -31,7 +31,6 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED #define KVM_USER_MEM_SLOTS 32 -#define KVM_PRIVATE_MEM_SLOTS 4 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HALT_POLL_NS_DEFAULT 500000 -- cgit v1.1 From 955a3fc6d2a1c11d6d00bce4f3816100ce0530cf Mon Sep 17 00:00:00 2001 From: Linu Cherian Date: Wed, 8 Mar 2017 11:38:35 +0530 Subject: KVM: arm64: Increase number of user memslots to 512 Having only 32 memslots is a real constraint for the maximum number of PCI devices that can be assigned to a single guest. Assuming each PCI device/virtual function having two memory BAR regions, we could assign only 15 devices/virtual functions to a guest. Hence increase KVM_USER_MEM_SLOTS to 512 as done in other archs like powerpc. Reviewed-by: Christoffer Dall Signed-off-by: Linu Cherian Signed-off-by: Marc Zyngier --- arch/arm64/include/asm/kvm_host.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h index 6ac17ee..e7705e7 100644 --- a/arch/arm64/include/asm/kvm_host.h +++ b/arch/arm64/include/asm/kvm_host.h @@ -30,7 +30,7 @@ #define __KVM_HAVE_ARCH_INTC_INITIALIZED -#define KVM_USER_MEM_SLOTS 32 +#define KVM_USER_MEM_SLOTS 512 #define KVM_COALESCED_MMIO_PAGE_OFFSET 1 #define KVM_HALT_POLL_NS_DEFAULT 500000 -- cgit v1.1 From db08e1d53034a54fe177ced70476fda73954b9e9 Mon Sep 17 00:00:00 2001 From: Alexey Kardashevskiy Date: Tue, 21 Feb 2017 13:41:31 +1100 Subject: powerpc/powernv/ioda2: Update iommu table base on ownership change On POWERNV platform, in order to do DMA via IOMMU (i.e. 32bit DMA in our case), a device needs an iommu_table pointer set via set_iommu_table_base(). The codeflow is: - pnv_pci_ioda2_setup_dma_pe() - pnv_pci_ioda2_setup_default_config() - pnv_ioda_setup_bus_dma() [1] pnv_pci_ioda2_setup_dma_pe() creates IOMMU groups, pnv_pci_ioda2_setup_default_config() does default DMA setup, pnv_ioda_setup_bus_dma() takes a bus PE (on IODA2, all physical function PEs as bus PEs except NPU), walks through all underlying buses and devices, adds all devices to an IOMMU group and sets iommu_table. On IODA2, when VFIO is used, it takes ownership over a PE which means it removes all tables and creates new ones (with a possibility of sharing them among PEs). So when the ownership is returned from VFIO to the kernel, the iommu_table pointer written to a device at [1] is stale and needs an update. This adds an "add_to_group" parameter to pnv_ioda_setup_bus_dma() (in fact re-adds as it used to be there a while ago for different reasons) to tell the helper if a device needs to be added to an IOMMU group with an iommu_table update or just the latter. 
This calls pnv_ioda_setup_bus_dma(..., false) from pnv_ioda2_release_ownership() so when the ownership is restored, 32bit DMA can work again for a device. This does the same thing when obtaining ownership, as the iommu_table pointer is stale at that point anyway and it is safer to have NULL there. We did not hit this earlier as all tested devices in recent years were only using 64bit DMA; the rare exception is the MPT3 SAS adapter, which uses both 32bit and 64bit DMA access, and it has not been tested with VFIO much. Signed-off-by: Alexey Kardashevskiy Acked-by: Gavin Shan Reviewed-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/pci-ioda.c | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c index 957a57a..e367382 100644 --- a/arch/powerpc/platforms/powernv/pci-ioda.c +++ b/arch/powerpc/platforms/powernv/pci-ioda.c @@ -1775,17 +1775,20 @@ static u64 pnv_pci_ioda_dma_get_required_mask(struct pci_dev *pdev) } static void pnv_ioda_setup_bus_dma(struct pnv_ioda_pe *pe, - struct pci_bus *bus) + struct pci_bus *bus, + bool add_to_group) { struct pci_dev *dev; list_for_each_entry(dev, &bus->devices, bus_list) { set_iommu_table_base(&dev->dev, pe->table_group.tables[0]); set_dma_offset(&dev->dev, pe->tce_bypass_base); - iommu_add_device(&dev->dev); + if (add_to_group) + iommu_add_device(&dev->dev); if ((pe->flags & PNV_IODA_PE_BUS_ALL) && dev->subordinate) - pnv_ioda_setup_bus_dma(pe, dev->subordinate); + pnv_ioda_setup_bus_dma(pe, dev->subordinate, + add_to_group); } } @@ -2191,7 +2194,7 @@ found: set_iommu_table_base(&pe->pdev->dev, tbl); iommu_add_device(&pe->pdev->dev); } else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); + pnv_ioda_setup_bus_dma(pe, pe->pbus, true); return; fail: @@ -2426,6 +2429,8 @@ static void pnv_ioda2_take_ownership(struct iommu_table_group *table_group) pnv_pci_ioda2_set_bypass(pe, false); pnv_pci_ioda2_unset_window(&pe->table_group, 0); + if (pe->pbus) + pnv_ioda_setup_bus_dma(pe, pe->pbus, false); pnv_ioda2_table_free(tbl); } @@ -2435,6 +2440,8 @@ static void pnv_ioda2_release_ownership(struct iommu_table_group *table_group) table_group); pnv_pci_ioda2_setup_default_config(pe); + if (pe->pbus) + pnv_ioda_setup_bus_dma(pe, pe->pbus, false); } static struct iommu_table_group_ops pnv_pci_ioda2_ops = { @@ -2731,7 +2738,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb, if (pe->flags & PNV_IODA_PE_DEV) iommu_add_device(&pe->pdev->dev); else if (pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)) - pnv_ioda_setup_bus_dma(pe, pe->pbus); + pnv_ioda_setup_bus_dma(pe, pe->pbus, true); } #ifdef CONFIG_PCI_MSI -- cgit v1.1 From 05d8d34611139f8435af90ac54b65eb31e82e388 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Tue, 7 Mar 2017 17:51:49 +0100 Subject: KVM: nVMX: do not warn when MSR bitmap address is not backed MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Before trying to do nested_get_page() in nested_vmx_merge_msr_bitmap(), we have already checked that the MSR bitmap address is valid (4k aligned and within physical limits). The SDM doesn't specify what happens if there is no memory mapped at the valid address, but Intel CPUs treat the situation as if the bitmap was configured to trap all MSRs.
KVM already does that by returning false, and correct handling doesn't need the guest-triggerable warning that was reported by syzkaller: (The warning was originally there to catch some possible bugs in nVMX.) ------------[ cut here ]------------ WARNING: CPU: 0 PID: 7832 at arch/x86/kvm/vmx.c:9709 nested_vmx_merge_msr_bitmap arch/x86/kvm/vmx.c:9709 [inline] WARNING: CPU: 0 PID: 7832 at arch/x86/kvm/vmx.c:9709 nested_get_vmcs12_pages+0xfb6/0x15c0 arch/x86/kvm/vmx.c:9640 Kernel panic - not syncing: panic_on_warn set ... CPU: 0 PID: 7832 Comm: syz-executor1 Not tainted 4.10.0+ #229 Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS Bochs 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:15 [inline] dump_stack+0x2ee/0x3ef lib/dump_stack.c:51 panic+0x1fb/0x412 kernel/panic.c:179 __warn+0x1c4/0x1e0 kernel/panic.c:540 warn_slowpath_null+0x2c/0x40 kernel/panic.c:583 nested_vmx_merge_msr_bitmap arch/x86/kvm/vmx.c:9709 [inline] nested_get_vmcs12_pages+0xfb6/0x15c0 arch/x86/kvm/vmx.c:9640 enter_vmx_non_root_mode arch/x86/kvm/vmx.c:10471 [inline] nested_vmx_run+0x6186/0xaab0 arch/x86/kvm/vmx.c:10561 handle_vmlaunch+0x1a/0x20 arch/x86/kvm/vmx.c:7312 vmx_handle_exit+0xfc0/0x3f00 arch/x86/kvm/vmx.c:8526 vcpu_enter_guest arch/x86/kvm/x86.c:6982 [inline] vcpu_run arch/x86/kvm/x86.c:7044 [inline] kvm_arch_vcpu_ioctl_run+0x1418/0x4840 arch/x86/kvm/x86.c:7205 kvm_vcpu_ioctl+0x673/0x1120 arch/x86/kvm/../../../virt/kvm/kvm_main.c:2570 Reported-by: Dmitry Vyukov Reviewed-by: Jim Mattson [Jim Mattson explained the bare metal behavior: "I believe this behavior would be documented in the chipset data sheet rather than the SDM, since the chipset returns all 1s for an unclaimed read."] Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index ab33858..98e82ee 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9680,10 +9680,8 @@ static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu, return false; page = nested_get_page(vcpu, vmcs12->msr_bitmap); - if (!page) { - WARN_ON(1); + if (!page) return false; - } msr_bitmap_l1 = (unsigned long *)kmap(page); memset(msr_bitmap_l0, 0xff, PAGE_SIZE); -- cgit v1.1 From 6fb895692a034393d58679cd8d00c9e229719a5f Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 9 Mar 2017 17:24:02 +0300 Subject: x86/cpufeature: Add 5-level paging detection Look for 'la57' in /proc/cpuinfo to see if your machine supports 5-level paging. Signed-off-by: Kirill A.
Shutemov Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- arch/x86/include/asm/cpufeatures.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h index 4e77723..b04bb6d 100644 --- a/arch/x86/include/asm/cpufeatures.h +++ b/arch/x86/include/asm/cpufeatures.h @@ -289,7 +289,8 @@ #define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */ #define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */ #define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */ -#define X86_FEATURE_RDPID (16*32+ 22) /* RDPID instruction */ +#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */ +#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */ /* AMD-defined CPU features, CPUID level 0x80000007 (ebx), word 17 */ #define X86_FEATURE_OVERFLOW_RECOV (17*32+0) /* MCA overflow recovery support */ -- cgit v1.1 From 9849a5697d3defb2087cb6b9be5573a142697889 Mon Sep 17 00:00:00 2001 From: "Kirill A. Shutemov" Date: Thu, 9 Mar 2017 17:24:05 +0300 Subject: arch, mm: convert all architectures to use 5level-fixup.h If an architecture uses 4level-fixup.h we don't need to do anything as it includes 5level-fixup.h. If an architecture uses pgtable-nop*d.h, define __ARCH_USE_5LEVEL_HACK before inclusion of the header. It makes asm-generic code to use 5level-fixup.h. If an architecture has 4-level paging or folds levels on its own, include 5level-fixup.h directly. Signed-off-by: Kirill A. Shutemov Acked-by: Michal Hocko Signed-off-by: Linus Torvalds --- arch/arc/include/asm/hugepage.h | 1 + arch/arc/include/asm/pgtable.h | 1 + arch/arm/include/asm/pgtable.h | 1 + arch/arm64/include/asm/pgtable-types.h | 4 ++++ arch/avr32/include/asm/pgtable-2level.h | 1 + arch/cris/include/asm/pgtable.h | 1 + arch/frv/include/asm/pgtable.h | 1 + arch/h8300/include/asm/pgtable.h | 1 + arch/hexagon/include/asm/pgtable.h | 1 + arch/ia64/include/asm/pgtable.h | 2 ++ arch/metag/include/asm/pgtable.h | 1 + arch/microblaze/include/asm/page.h | 3 ++- arch/mips/include/asm/pgtable-32.h | 1 + arch/mips/include/asm/pgtable-64.h | 1 + arch/mn10300/include/asm/page.h | 1 + arch/nios2/include/asm/pgtable.h | 1 + arch/openrisc/include/asm/pgtable.h | 1 + arch/powerpc/include/asm/book3s/32/pgtable.h | 1 + arch/powerpc/include/asm/book3s/64/pgtable.h | 3 +++ arch/powerpc/include/asm/nohash/32/pgtable.h | 1 + arch/powerpc/include/asm/nohash/64/pgtable-4k.h | 3 +++ arch/powerpc/include/asm/nohash/64/pgtable-64k.h | 1 + arch/s390/include/asm/pgtable.h | 1 + arch/score/include/asm/pgtable.h | 1 + arch/sh/include/asm/pgtable-2level.h | 1 + arch/sh/include/asm/pgtable-3level.h | 1 + arch/sparc/include/asm/pgtable_64.h | 1 + arch/tile/include/asm/pgtable_32.h | 1 + arch/tile/include/asm/pgtable_64.h | 1 + arch/um/include/asm/pgtable-2level.h | 1 + arch/um/include/asm/pgtable-3level.h | 1 + arch/unicore32/include/asm/pgtable.h | 1 + arch/x86/include/asm/pgtable_types.h | 4 ++++ arch/xtensa/include/asm/pgtable.h | 1 + 34 files changed, 46 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arc/include/asm/hugepage.h b/arch/arc/include/asm/hugepage.h index 317ff77..b18fcb6 100644 --- a/arch/arc/include/asm/hugepage.h +++ b/arch/arc/include/asm/hugepage.h @@ -11,6 +11,7 @@ #define _ASM_ARC_HUGEPAGE_H #include +#define __ARCH_USE_5LEVEL_HACK #include static inline pte_t pmd_pte(pmd_t pmd) diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h index e94ca72..ee22d40 
100644 --- a/arch/arc/include/asm/pgtable.h +++ b/arch/arc/include/asm/pgtable.h @@ -37,6 +37,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h index a8d656d..1c46238 100644 --- a/arch/arm/include/asm/pgtable.h +++ b/arch/arm/include/asm/pgtable.h @@ -20,6 +20,7 @@ #else +#define __ARCH_USE_5LEVEL_HACK #include #include #include diff --git a/arch/arm64/include/asm/pgtable-types.h b/arch/arm64/include/asm/pgtable-types.h index 69b2fd4..345a072 100644 --- a/arch/arm64/include/asm/pgtable-types.h +++ b/arch/arm64/include/asm/pgtable-types.h @@ -55,9 +55,13 @@ typedef struct { pteval_t pgprot; } pgprot_t; #define __pgprot(x) ((pgprot_t) { (x) } ) #if CONFIG_PGTABLE_LEVELS == 2 +#define __ARCH_USE_5LEVEL_HACK #include #elif CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include +#elif CONFIG_PGTABLE_LEVELS == 4 +#include #endif #endif /* __ASM_PGTABLE_TYPES_H */ diff --git a/arch/avr32/include/asm/pgtable-2level.h b/arch/avr32/include/asm/pgtable-2level.h index 425dd56..d5b1c63 100644 --- a/arch/avr32/include/asm/pgtable-2level.h +++ b/arch/avr32/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __ASM_AVR32_PGTABLE_2LEVEL_H #define __ASM_AVR32_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/cris/include/asm/pgtable.h b/arch/cris/include/asm/pgtable.h index 2a3210b..fa3a730 100644 --- a/arch/cris/include/asm/pgtable.h +++ b/arch/cris/include/asm/pgtable.h @@ -6,6 +6,7 @@ #define _CRIS_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/frv/include/asm/pgtable.h b/arch/frv/include/asm/pgtable.h index a0513d4..ab6e7e9 100644 --- a/arch/frv/include/asm/pgtable.h +++ b/arch/frv/include/asm/pgtable.h @@ -16,6 +16,7 @@ #ifndef _ASM_PGTABLE_H #define _ASM_PGTABLE_H +#include #include #include #include diff --git a/arch/h8300/include/asm/pgtable.h b/arch/h8300/include/asm/pgtable.h index 8341db6..7d265d2 100644 --- a/arch/h8300/include/asm/pgtable.h +++ b/arch/h8300/include/asm/pgtable.h @@ -1,5 +1,6 @@ #ifndef _H8300_PGTABLE_H #define _H8300_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #define pgtable_cache_init() do { } while (0) diff --git a/arch/hexagon/include/asm/pgtable.h b/arch/hexagon/include/asm/pgtable.h index 49eab81..24a9177 100644 --- a/arch/hexagon/include/asm/pgtable.h +++ b/arch/hexagon/include/asm/pgtable.h @@ -26,6 +26,7 @@ */ #include #include +#define __ARCH_USE_5LEVEL_HACK #include /* A handy thing to have if one has the RAM. 
Declared in head.S */ diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h index 384794e..6cc22c8d 100644 --- a/arch/ia64/include/asm/pgtable.h +++ b/arch/ia64/include/asm/pgtable.h @@ -587,8 +587,10 @@ extern struct page *zero_page_memmap_ptr; #if CONFIG_PGTABLE_LEVELS == 3 +#define __ARCH_USE_5LEVEL_HACK #include #endif +#include #include #endif /* _ASM_IA64_PGTABLE_H */ diff --git a/arch/metag/include/asm/pgtable.h b/arch/metag/include/asm/pgtable.h index ffa3a3a..0c151e5 100644 --- a/arch/metag/include/asm/pgtable.h +++ b/arch/metag/include/asm/pgtable.h @@ -6,6 +6,7 @@ #define _METAG_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include /* Invalid regions on Meta: 0x00000000-0x001FFFFF and 0xFFFF0000-0xFFFFFFFF */ diff --git a/arch/microblaze/include/asm/page.h b/arch/microblaze/include/asm/page.h index fd85087..d506bb0 100644 --- a/arch/microblaze/include/asm/page.h +++ b/arch/microblaze/include/asm/page.h @@ -95,7 +95,8 @@ typedef struct { unsigned long pgd; } pgd_t; # else /* CONFIG_MMU */ typedef struct { unsigned long ste[64]; } pmd_t; typedef struct { pmd_t pue[1]; } pud_t; -typedef struct { pud_t pge[1]; } pgd_t; +typedef struct { pud_t p4e[1]; } p4d_t; +typedef struct { p4d_t pge[1]; } pgd_t; # endif /* CONFIG_MMU */ # define pte_val(x) ((x).pte) diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h index d21f3da..6f94bed 100644 --- a/arch/mips/include/asm/pgtable-32.h +++ b/arch/mips/include/asm/pgtable-32.h @@ -16,6 +16,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include extern int temp_tlb_entry; diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h index 514cbc0..130a2a6 100644 --- a/arch/mips/include/asm/pgtable-64.h +++ b/arch/mips/include/asm/pgtable-64.h @@ -17,6 +17,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48) #include #else diff --git a/arch/mn10300/include/asm/page.h b/arch/mn10300/include/asm/page.h index 3810a6f..dfe730a 100644 --- a/arch/mn10300/include/asm/page.h +++ b/arch/mn10300/include/asm/page.h @@ -57,6 +57,7 @@ typedef struct page *pgtable_t; #define __pgd(x) ((pgd_t) { (x) }) #define __pgprot(x) ((pgprot_t) { (x) }) +#define __ARCH_USE_5LEVEL_HACK #include #endif /* !__ASSEMBLY__ */ diff --git a/arch/nios2/include/asm/pgtable.h b/arch/nios2/include/asm/pgtable.h index 298393c..db4f7d1 100644 --- a/arch/nios2/include/asm/pgtable.h +++ b/arch/nios2/include/asm/pgtable.h @@ -22,6 +22,7 @@ #include #include +#define __ARCH_USE_5LEVEL_HACK #include #define FIRST_USER_ADDRESS 0UL diff --git a/arch/openrisc/include/asm/pgtable.h b/arch/openrisc/include/asm/pgtable.h index 3567aa7..ff97374 100644 --- a/arch/openrisc/include/asm/pgtable.h +++ b/arch/openrisc/include/asm/pgtable.h @@ -25,6 +25,7 @@ #ifndef __ASM_OPENRISC_PGTABLE_H #define __ASM_OPENRISC_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/book3s/32/pgtable.h b/arch/powerpc/include/asm/book3s/32/pgtable.h index 0122236..26ed228 100644 --- a/arch/powerpc/include/asm/book3s/32/pgtable.h +++ b/arch/powerpc/include/asm/book3s/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_BOOK3S_32_PGTABLE_H #define _ASM_POWERPC_BOOK3S_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 1eeeb72..13c39b6 100644 --- 
a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -1,9 +1,12 @@ #ifndef _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ #define _ASM_POWERPC_BOOK3S_64_PGTABLE_H_ +#include + #ifndef __ASSEMBLY__ #include #endif + /* * Common bits between hash and Radix page table */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h index ba9921b..5134ade 100644 --- a/arch/powerpc/include/asm/nohash/32/pgtable.h +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H #define _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #ifndef __ASSEMBLY__ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h index d0db987..9f4de0a 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -1,5 +1,8 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H + +#include + /* * Entries per page directory level. The PTE level must use a 64b record * for each page table entry. The PMD and PGD level use a 32b record for diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h index 55b28ef..1facb58 100644 --- a/arch/powerpc/include/asm/nohash/64/pgtable-64k.h +++ b/arch/powerpc/include/asm/nohash/64/pgtable-64k.h @@ -1,6 +1,7 @@ #ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_64K_H +#define __ARCH_USE_5LEVEL_HACK #include diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 7ed1972..93e37b1 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -24,6 +24,7 @@ * the S390 page table tree. */ #ifndef __ASSEMBLY__ +#include #include #include #include diff --git a/arch/score/include/asm/pgtable.h b/arch/score/include/asm/pgtable.h index 0553e5c..46ff8fd 100644 --- a/arch/score/include/asm/pgtable.h +++ b/arch/score/include/asm/pgtable.h @@ -2,6 +2,7 @@ #define _ASM_SCORE_PGTABLE_H #include +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/sh/include/asm/pgtable-2level.h b/arch/sh/include/asm/pgtable-2level.h index 19bd89d..f75cf43 100644 --- a/arch/sh/include/asm/pgtable-2level.h +++ b/arch/sh/include/asm/pgtable-2level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_2LEVEL_H #define __ASM_SH_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sh/include/asm/pgtable-3level.h b/arch/sh/include/asm/pgtable-3level.h index 249a985..9b1e776 100644 --- a/arch/sh/include/asm/pgtable-3level.h +++ b/arch/sh/include/asm/pgtable-3level.h @@ -1,6 +1,7 @@ #ifndef __ASM_SH_PGTABLE_3LEVEL_H #define __ASM_SH_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 56e49c8..8a59852 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -12,6 +12,7 @@ * the SpitFire page tables. 
*/ +#include #include #include #include diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h index d26a422..5f8c615 100644 --- a/arch/tile/include/asm/pgtable_32.h +++ b/arch/tile/include/asm/pgtable_32.h @@ -74,6 +74,7 @@ extern unsigned long VMALLOC_RESERVE /* = CONFIG_VMALLOC_RESERVE */; #define MAXMEM (_VMALLOC_START - PAGE_OFFSET) /* We have no pmd or pud since we are strictly a two-level page table */ +#define __ARCH_USE_5LEVEL_HACK #include static inline int pud_huge_page(pud_t pud) { return 0; } diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h index e96cec5..96fe58b 100644 --- a/arch/tile/include/asm/pgtable_64.h +++ b/arch/tile/include/asm/pgtable_64.h @@ -59,6 +59,7 @@ #ifndef __ASSEMBLY__ /* We have no pud since we are a three-level page table. */ +#define __ARCH_USE_5LEVEL_HACK #include /* diff --git a/arch/um/include/asm/pgtable-2level.h b/arch/um/include/asm/pgtable-2level.h index cfbe597..179c0ea 100644 --- a/arch/um/include/asm/pgtable-2level.h +++ b/arch/um/include/asm/pgtable-2level.h @@ -8,6 +8,7 @@ #ifndef __UM_PGTABLE_2LEVEL_H #define __UM_PGTABLE_2LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h index bae8523..c4d876d 100644 --- a/arch/um/include/asm/pgtable-3level.h +++ b/arch/um/include/asm/pgtable-3level.h @@ -7,6 +7,7 @@ #ifndef __UM_PGTABLE_3LEVEL_H #define __UM_PGTABLE_3LEVEL_H +#define __ARCH_USE_5LEVEL_HACK #include /* PGDIR_SHIFT determines what a third-level page table entry can map */ diff --git a/arch/unicore32/include/asm/pgtable.h b/arch/unicore32/include/asm/pgtable.h index 818d0f5..a4f2bef 100644 --- a/arch/unicore32/include/asm/pgtable.h +++ b/arch/unicore32/include/asm/pgtable.h @@ -12,6 +12,7 @@ #ifndef __UNICORE_PGTABLE_H__ #define __UNICORE_PGTABLE_H__ +#define __ARCH_USE_5LEVEL_HACK #include #include diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h index 8b4de22..6248433 100644 --- a/arch/x86/include/asm/pgtable_types.h +++ b/arch/x86/include/asm/pgtable_types.h @@ -273,6 +273,8 @@ static inline pgdval_t pgd_flags(pgd_t pgd) } #if CONFIG_PGTABLE_LEVELS > 3 +#include + typedef struct { pudval_t pud; } pud_t; static inline pud_t native_make_pud(pmdval_t val) @@ -285,6 +287,7 @@ static inline pudval_t native_pud_val(pud_t pud) return pud.pud; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pudval_t native_pud_val(pud_t pud) @@ -306,6 +309,7 @@ static inline pmdval_t native_pmd_val(pmd_t pmd) return pmd.pmd; } #else +#define __ARCH_USE_5LEVEL_HACK #include static inline pmdval_t native_pmd_val(pmd_t pmd) diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h index 8aa0e0d..30dd5b2 100644 --- a/arch/xtensa/include/asm/pgtable.h +++ b/arch/xtensa/include/asm/pgtable.h @@ -11,6 +11,7 @@ #ifndef _XTENSA_PGTABLE_H #define _XTENSA_PGTABLE_H +#define __ARCH_USE_5LEVEL_HACK #include #include #include -- cgit v1.1 From 8bd49ac86677ca43d64f08c45864e438283d6a76 Mon Sep 17 00:00:00 2001 From: Heiko Carstens Date: Mon, 6 Mar 2017 09:57:17 +0100 Subject: s390: wire up statx system call Signed-off-by: Heiko Carstens Signed-off-by: Martin Schwidefsky --- arch/s390/include/uapi/asm/unistd.h | 4 +++- arch/s390/kernel/compat_wrapper.c | 1 + arch/s390/kernel/syscalls.S | 2 ++ 3 files changed, 6 insertions(+), 1 deletion(-) (limited to 'arch') diff --git 
a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h index 4384bc7..152de9b 100644 --- a/arch/s390/include/uapi/asm/unistd.h +++ b/arch/s390/include/uapi/asm/unistd.h @@ -313,7 +313,9 @@ #define __NR_copy_file_range 375 #define __NR_preadv2 376 #define __NR_pwritev2 377 -#define NR_syscalls 378 +/* Number 378 is reserved for guarded storage */ +#define __NR_statx 379 +#define NR_syscalls 380 /* * There are some system calls that are not present on 64 bit, some diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c index ae2cda5..e89cc2e 100644 --- a/arch/s390/kernel/compat_wrapper.c +++ b/arch/s390/kernel/compat_wrapper.c @@ -178,3 +178,4 @@ COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len); COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags); COMPAT_SYSCALL_WRAP6(copy_file_range, int, fd_in, loff_t __user *, off_in, int, fd_out, loff_t __user *, off_out, size_t, len, unsigned int, flags); +COMPAT_SYSCALL_WRAP5(statx, int, dfd, const char __user *, path, unsigned, flags, unsigned, mask, struct statx __user *, buffer); diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S index 9b59e62..2659b5c 100644 --- a/arch/s390/kernel/syscalls.S +++ b/arch/s390/kernel/syscalls.S @@ -386,3 +386,5 @@ SYSCALL(sys_mlock2,compat_sys_mlock2) SYSCALL(sys_copy_file_range,compat_sys_copy_file_range) /* 375 */ SYSCALL(sys_preadv2,compat_sys_preadv2) SYSCALL(sys_pwritev2,compat_sys_pwritev2) +NI_SYSCALL +SYSCALL(sys_statx,compat_sys_statx) -- cgit v1.1 From 8a1115ff6b6d90cf1066ec3a0c4e51276553eebe Mon Sep 17 00:00:00 2001 From: Masahiro Yamada Date: Thu, 9 Mar 2017 16:16:31 -0800 Subject: scripts/spelling.txt: add "disble(d)" pattern and fix typo instances Fix typos and add the following to the scripts/spelling.txt: disble||disable disbled||disabled I kept the TSL2563_INT_DISBLED in /drivers/iio/light/tsl2563.c untouched. The macro is not referenced at all, but this commit is touching only comment blocks just in case. Link: http://lkml.kernel.org/r/1481573103-11329-20-git-send-email-yamada.masahiro@socionext.com Signed-off-by: Masahiro Yamada Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/cris/arch-v32/drivers/cryptocop.c | 2 +- arch/x86/kernel/ftrace.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c index ae6903d..14970f1 100644 --- a/arch/cris/arch-v32/drivers/cryptocop.c +++ b/arch/cris/arch-v32/drivers/cryptocop.c @@ -2086,7 +2086,7 @@ static void cryptocop_job_queue_close(void) dma_in_cfg.en = regk_dma_no; REG_WR(dma, IN_DMA_INST, rw_cfg, dma_in_cfg); - /* Disble the cryptocop. */ + /* Disable the cryptocop. */ rw_cfg = REG_RD(strcop, regi_strcop, rw_cfg); rw_cfg.en = 0; REG_WR(strcop, regi_strcop, rw_cfg, rw_cfg); diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c index 8639bb2..8f3d9cf 100644 --- a/arch/x86/kernel/ftrace.c +++ b/arch/x86/kernel/ftrace.c @@ -535,7 +535,7 @@ static void run_sync(void) { int enable_irqs = irqs_disabled(); - /* We may be called with interrupts disbled (on bootup). */ + /* We may be called with interrupts disabled (on bootup). 
*/ if (enable_irqs) local_irq_enable(); on_each_cpu(do_sync_core, NULL, 1); -- cgit v1.1 From 52c50ca75c534c0772b71900a29b3a71439b32ef Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Mar 2017 16:16:36 -0800 Subject: powerpc/mm: handle protnone ptes on fork We need to mark pages of parent process read only on fork. Numa fault pte needs a protnone ptes variant with saved write flag set. On fork we need to make sure we remove the saved write bit. Instead of adding the protnone check in the caller update ptep_set_wrprotect variants to clear savedwrite bit. Without this we see random segfaults in application on fork. Fixes: c137a2757b886 ("powerpc/mm/autonuma: switch ppc64 to its own implementation of saved write") Link: http://lkml.kernel.org/r/1488203787-17849-1-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Rik van Riel Cc: Mel Gorman Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/book3s/64/pgtable.h | 73 ++++++++++++++++------------ 1 file changed, 42 insertions(+), 31 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index 1eeeb72..f0b08ac 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -347,23 +347,53 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, __r; \ }) +static inline int pte_write(pte_t pte) +{ + return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); +} + +#ifdef CONFIG_NUMA_BALANCING +#define pte_savedwrite pte_savedwrite +static inline bool pte_savedwrite(pte_t pte) +{ + /* + * Saved write ptes are prot none ptes that doesn't have + * privileged bit sit. We mark prot none as one which has + * present and pviliged bit set and RWX cleared. To mark + * protnone which used to have _PAGE_WRITE set we clear + * the privileged bit. + */ + return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); +} +#else +#define pte_savedwrite pte_savedwrite +static inline bool pte_savedwrite(pte_t pte) +{ + return false; +} +#endif + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); + if (pte_write(*ptep)) + pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); + else if (unlikely(pte_savedwrite(*ptep))) + pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); } static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if ((pte_raw(*ptep) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); + /* + * We should not find protnone for hugetlb, but this complete the + * interface. 
+ */ + if (pte_write(*ptep)) + pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); + else if (unlikely(pte_savedwrite(*ptep))) + pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1); } #define __HAVE_ARCH_PTEP_GET_AND_CLEAR @@ -397,11 +427,6 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_update(mm, addr, ptep, ~0UL, 0, 0); } -static inline int pte_write(pte_t pte) -{ - return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); -} - static inline int pte_dirty(pte_t pte) { return !!(pte_raw(pte) & cpu_to_be64(_PAGE_DIRTY)); @@ -466,19 +491,6 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) return __pte(pte_val(pte) | _PAGE_PRIVILEGED); } -#define pte_savedwrite pte_savedwrite -static inline bool pte_savedwrite(pte_t pte) -{ - /* - * Saved write ptes are prot none ptes that doesn't have - * privileged bit sit. We mark prot none as one which has - * present and pviliged bit set and RWX cleared. To mark - * protnone which used to have _PAGE_WRITE set we clear - * the privileged bit. - */ - VM_BUG_ON(!pte_protnone(pte)); - return !(pte_raw(pte) & cpu_to_be64(_PAGE_RWX | _PAGE_PRIVILEGED)); -} #endif /* CONFIG_NUMA_BALANCING */ static inline int pte_present(pte_t pte) @@ -982,11 +994,10 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - - if ((pmd_raw(*pmdp) & cpu_to_be64(_PAGE_WRITE)) == 0) - return; - - pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); + if (pmd_write((*pmdp))) + pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); + else if (unlikely(pmd_savedwrite(*pmdp))) + pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); } static inline int pmd_trans_huge(pmd_t pmd) -- cgit v1.1 From d19469e8415813cceaa494b6f538e327b9a95f3b Mon Sep 17 00:00:00 2001 From: "Aneesh Kumar K.V" Date: Thu, 9 Mar 2017 16:16:39 -0800 Subject: power/mm: update pte_write and pte_wrprotect to handle savedwrite We use pte_write() to check whether the pte entry is writable. This is mostly used to later mark the pte read-only if it is writable. The other use of pte_write() is to check whether the pte entry is writable so that the hardware page table entry can be marked accordingly. This is used in kvm, where we look at the qemu page table entry and update the hardware hash page table for the guest with the correct write enable bit. With the above, for the first usage we should also check the savedwrite bit so that we can correctly clear the savedwrite bit. For the latter, we add a new variant __pte_write(). With this we can revert the write_protect_page part of 595cd8f256d2 ("mm/ksm: handle protnone saved writes when making page write protect"). But I left it as it is, as example code for the savedwrite check.
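Taken together with the previous patch, the book3s64 encoding is compact enough to restate in one table; this only summarizes the commit's own comments and adds no new semantics:

    /*
     * pte state                PRESENT  PRIVILEGED  RWX   predicate
     * ordinary writable           1         0       ..W   __pte_write()
     * protnone                    1         1       000   pte_protnone()
     * protnone + savedwrite       1         0       000   pte_savedwrite()
     *
     * pte_write() as seen by the fork/wrprotect paths is
     * __pte_write() || pte_savedwrite().
     */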
Fixes: c137a2757b886 ("powerpc/mm/autonuma: switch ppc64 to its own implementation of saved write") Link: http://lkml.kernel.org/r/1488203787-17849-2-git-send-email-aneesh.kumar@linux.vnet.ibm.com Signed-off-by: Aneesh Kumar K.V Cc: Rik van Riel Cc: Mel Gorman Cc: Paul Mackerras Cc: Benjamin Herrenschmidt Cc: Michael Ellerman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/powerpc/include/asm/book3s/64/pgtable.h | 24 +++++++++++++++++++----- arch/powerpc/kvm/book3s_64_mmu_hv.c | 2 +- arch/powerpc/kvm/book3s_hv_rm_mmu.c | 2 +- 3 files changed, 21 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h index f0b08ac..ec1e731 100644 --- a/arch/powerpc/include/asm/book3s/64/pgtable.h +++ b/arch/powerpc/include/asm/book3s/64/pgtable.h @@ -347,7 +347,7 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, __r; \ }) -static inline int pte_write(pte_t pte) +static inline int __pte_write(pte_t pte) { return !!(pte_raw(pte) & cpu_to_be64(_PAGE_WRITE)); } @@ -373,11 +373,16 @@ static inline bool pte_savedwrite(pte_t pte) } #endif +static inline int pte_write(pte_t pte) +{ + return __pte_write(pte) || pte_savedwrite(pte); +} + #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) { - if (pte_write(*ptep)) + if (__pte_write(*ptep)) pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 0); else if (unlikely(pte_savedwrite(*ptep))) pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 0); @@ -390,7 +395,7 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, * We should not find protnone for hugetlb, but this complete the * interface. */ - if (pte_write(*ptep)) + if (__pte_write(*ptep)) pte_update(mm, addr, ptep, _PAGE_WRITE, 0, 1); else if (unlikely(pte_savedwrite(*ptep))) pte_update(mm, addr, ptep, 0, _PAGE_PRIVILEGED, 1); @@ -490,7 +495,13 @@ static inline pte_t pte_clear_savedwrite(pte_t pte) VM_BUG_ON(!pte_protnone(pte)); return __pte(pte_val(pte) | _PAGE_PRIVILEGED); } - +#else +#define pte_clear_savedwrite pte_clear_savedwrite +static inline pte_t pte_clear_savedwrite(pte_t pte) +{ + VM_WARN_ON(1); + return __pte(pte_val(pte) & ~_PAGE_WRITE); +} #endif /* CONFIG_NUMA_BALANCING */ static inline int pte_present(pte_t pte) @@ -518,6 +529,8 @@ static inline unsigned long pte_pfn(pte_t pte) /* Generic modifiers for PTE bits */ static inline pte_t pte_wrprotect(pte_t pte) { + if (unlikely(pte_savedwrite(pte))) + return pte_clear_savedwrite(pte); return __pte(pte_val(pte) & ~_PAGE_WRITE); } @@ -938,6 +951,7 @@ static inline int pmd_protnone(pmd_t pmd) #define __HAVE_ARCH_PMD_WRITE #define pmd_write(pmd) pte_write(pmd_pte(pmd)) +#define __pmd_write(pmd) __pte_write(pmd_pte(pmd)) #define pmd_savedwrite(pmd) pte_savedwrite(pmd_pte(pmd)) #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -994,7 +1008,7 @@ static inline int __pmdp_test_and_clear_young(struct mm_struct *mm, static inline void pmdp_set_wrprotect(struct mm_struct *mm, unsigned long addr, pmd_t *pmdp) { - if (pmd_write((*pmdp))) + if (__pmd_write((*pmdp))) pmd_hugepage_update(mm, addr, pmdp, _PAGE_WRITE, 0); else if (unlikely(pmd_savedwrite(*pmdp))) pmd_hugepage_update(mm, addr, pmdp, 0, _PAGE_PRIVILEGED); diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c index f3158fb..8c68145 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_hv.c +++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c @@ -601,7 +601,7 @@ int 
kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu, hva, NULL, NULL); if (ptep) { pte = kvmppc_read_update_linux_pte(ptep, 1); - if (pte_write(pte)) + if (__pte_write(pte)) write_ok = 1; } local_irq_restore(flags); diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c index 6fca970..ce6f212 100644 --- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c +++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c @@ -256,7 +256,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags, } pte = kvmppc_read_update_linux_pte(ptep, writing); if (pte_present(pte) && !pte_protnone(pte)) { - if (writing && !pte_write(pte)) + if (writing && !__pte_write(pte)) /* make the actual HPTE be read-only */ ptel = hpte_make_readonly(ptel); is_ci = pte_ci(pte); -- cgit v1.1 From ef947b2529f918d9606533eb9c32b187ed6a5ede Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 9 Mar 2017 16:16:42 -0800 Subject: x86, mm: fix gup_pte_range() vs DAX mappings gup_pte_range() fails to check pte_allows_gup() before translating a DAX pte entry, pte_devmap(), to a page. This allows writes to read-only mappings, and bypasses the DAX cacheline dirty tracking due to missed 'mkwrite' faults. The gup_huge_pmd() path and the gup_huge_pud() path correctly check pte_allows_gup() before checking for _devmap() entries. Fixes: 3565fce3a659 ("mm, x86: get_user_pages() for dax mappings") Link: http://lkml.kernel.org/r/148804251312.36605.12665024794196605053.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Ross Zwisler Signed-off-by: Dan Williams Reported-by: Dave Hansen Reported-by: Ross Zwisler Cc: Xiong Zhou Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Gleixner Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 99c7805..9d32ee6 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -120,6 +120,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, return 0; } + if (!pte_allows_gup(pte_val(pte), write)) { + pte_unmap(ptep); + return 0; + } + if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { @@ -127,8 +132,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, pte_unmap(ptep); return 0; } - } else if (!pte_allows_gup(pte_val(pte), write) || - pte_special(pte)) { + } else if (pte_special(pte)) { pte_unmap(ptep); return 0; } -- cgit v1.1 From b2e593e271b0760ebc8999e5f9dd068ae2b9d30a Mon Sep 17 00:00:00 2001 From: Dan Williams Date: Thu, 9 Mar 2017 16:16:45 -0800 Subject: x86, mm: unify exit paths in gup_pte_range() All exit paths from gup_pte_range() require pte_unmap() of the original pte page before returning. Refactor the code to have a single exit point to do the unmap. This mirrors the flow of the generic gup_pte_range() in mm/gup.c. Link: http://lkml.kernel.org/r/148804251828.36605.14910389618497006945.stgit@dwillia2-desk3.amr.corp.intel.com Signed-off-by: Dan Williams Cc: Ingo Molnar Cc: "H. 
Peter Anvin" Cc: Thomas Gleixner Cc: Dave Hansen Cc: Ross Zwisler Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/gup.c | 39 ++++++++++++++++++++------------------- 1 file changed, 20 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c index 9d32ee6..1f3b6ef 100644 --- a/arch/x86/mm/gup.c +++ b/arch/x86/mm/gup.c @@ -106,36 +106,35 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end, int write, struct page **pages, int *nr) { struct dev_pagemap *pgmap = NULL; - int nr_start = *nr; - pte_t *ptep; + int nr_start = *nr, ret = 0; + pte_t *ptep, *ptem; - ptep = pte_offset_map(&pmd, addr); + /* + * Keep the original mapped PTE value (ptem) around since we + * might increment ptep off the end of the page when finishing + * our loop iteration. + */ + ptem = ptep = pte_offset_map(&pmd, addr); do { pte_t pte = gup_get_pte(ptep); struct page *page; /* Similar to the PMD case, NUMA hinting must take slow path */ - if (pte_protnone(pte)) { - pte_unmap(ptep); - return 0; - } + if (pte_protnone(pte)) + break; - if (!pte_allows_gup(pte_val(pte), write)) { - pte_unmap(ptep); - return 0; - } + if (!pte_allows_gup(pte_val(pte), write)) + break; if (pte_devmap(pte)) { pgmap = get_dev_pagemap(pte_pfn(pte), pgmap); if (unlikely(!pgmap)) { undo_dev_pagemap(nr, nr_start, pages); - pte_unmap(ptep); - return 0; + break; } - } else if (pte_special(pte)) { - pte_unmap(ptep); - return 0; - } + } else if (pte_special(pte)) + break; + VM_BUG_ON(!pfn_valid(pte_pfn(pte))); page = pte_page(pte); get_page(page); @@ -145,9 +144,11 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, (*nr)++; } while (ptep++, addr += PAGE_SIZE, addr != end); - pte_unmap(ptep - 1); + if (addr == end) + ret = 1; + pte_unmap(ptem); - return 1; + return ret; } static inline void get_head_page_multiple(struct page *page, int nr) -- cgit v1.1 From ca5b58ea3db88b5e69ba2a1f6bc3cf239cdcc64f Mon Sep 17 00:00:00 2001 From: Bartlomiej Zolnierkiewicz Date: Thu, 9 Mar 2017 16:17:34 -0800 Subject: sh: cayman: IDE support fix Remove incorrect CONFIG_IDE ifdef (CONFIG_IDE config option is for internal drivers/ide/ use) and make IDE hardware interface always initialized (not only when IDE subsystem is built-in). This patch allows Cayman board to work with modular IDE subsystem support and removes the requirement of having the whole core IDE subsystem built-in when using libata PATA support. 
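As a hedged aside (not part of the patch), the underlying Kconfig rule is worth spelling out: for a tristate symbol built as a module, the preprocessor sees CONFIG_IDE_MODULE, not CONFIG_IDE, so in always-built-in board code a guard like the one removed here silently compiles away:

/* In built-in board setup code (illustrative sketch): */
#ifdef CONFIG_IDE			/* defined only for CONFIG_IDE=y */
	smsc_superio_setup_ide();	/* hypothetical helper */
#endif
/*
 * With CONFIG_IDE=m (or with libata PATA instead of the IDE
 * subsystem) the block above vanishes and the SuperIO IDE interface
 * is never programmed, even though a modular driver could use it.
 */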
Link: http://lkml.kernel.org/r/1990884.yFoE6lSB9G@amdc3058 Signed-off-by: Bartlomiej Zolnierkiewicz Cc: Yoshinori Sato Cc: Rich Felker Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/sh/boards/mach-cayman/setup.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/sh/boards/mach-cayman/setup.c b/arch/sh/boards/mach-cayman/setup.c index 340fd40..9c292c2 100644 --- a/arch/sh/boards/mach-cayman/setup.c +++ b/arch/sh/boards/mach-cayman/setup.c @@ -128,7 +128,6 @@ static int __init smsc_superio_setup(void) SMSC_SUPERIO_WRITE_INDEXED(1, SMSC_PRIMARY_INT_INDEX); SMSC_SUPERIO_WRITE_INDEXED(12, SMSC_SECONDARY_INT_INDEX); -#ifdef CONFIG_IDE /* * Only IDE1 exists on the Cayman */ @@ -158,7 +157,6 @@ static int __init smsc_superio_setup(void) SMSC_SUPERIO_WRITE_INDEXED(0x01, 0xc5); /* GP45 = IDE1_IRQ */ SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc6); /* GP46 = nIOROP */ SMSC_SUPERIO_WRITE_INDEXED(0x00, 0xc7); /* GP47 = nIOWOP */ -#endif /* Exit the configuration state */ outb(SMSC_EXIT_CONFIG_KEY, SMSC_CONFIG_PORT_ADDR); -- cgit v1.1 From 1363875bdb6317a2d0798284d7aaf320f0782f6d Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 28 Feb 2017 12:00:46 +1000 Subject: powerpc/64s: fix handling of non-synchronous machine checks A synchronous machine check is an exception raised by the attempt to execute the current instruction. If the error can't be corrected, it can make sense to SIGBUS the currently running process. In other cases, the error condition is not related to the current instruction, so killing the current process is not the right thing to do. Today, all machine checks are MCE_SEV_ERROR_SYNC, so this has no practical change. It will be used to handle POWER9 asynchronous machine checks. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/powernv/opal.c | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c index 86d9fde..e0f856b 100644 --- a/arch/powerpc/platforms/powernv/opal.c +++ b/arch/powerpc/platforms/powernv/opal.c @@ -395,7 +395,6 @@ static int opal_recover_mce(struct pt_regs *regs, struct machine_check_event *evt) { int recovered = 0; - uint64_t ea = get_mce_fault_addr(evt); if (!(regs->msr & MSR_RI)) { /* If MSR_RI isn't set, we cannot recover */ @@ -404,26 +403,18 @@ static int opal_recover_mce(struct pt_regs *regs, } else if (evt->disposition == MCE_DISPOSITION_RECOVERED) { /* Platform corrected itself */ recovered = 1; - } else if (ea && !is_kernel_addr(ea)) { + } else if (evt->severity == MCE_SEV_FATAL) { + /* Fatal machine check */ + pr_err("Machine check interrupt is fatal\n"); + recovered = 0; + } else if ((evt->severity == MCE_SEV_ERROR_SYNC) && + (user_mode(regs) && !is_global_init(current))) { /* - * Faulting address is not in kernel text. We should be fine. - * We need to find which process uses this address. * For now, kill the task if we have received exception when * in userspace. * * TODO: Queue up this address for hwpoisioning later. */ - if (user_mode(regs) && !is_global_init(current)) { - _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); - recovered = 1; - } else - recovered = 0; - } else if (user_mode(regs) && !is_global_init(current) && - evt->severity == MCE_SEV_ERROR_SYNC) { - /* - * If we have received a synchronous error when in userspace - * kill the task. 
- */ _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); recovered = 1; } -- cgit v1.1 From c1bbf387d6191e6e18f3adc4db45b922822c2ba4 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 28 Feb 2017 12:00:47 +1000 Subject: powerpc/64s: allow machine check handler to set severity and initiator Currently severity and initiator are always set to MCE_SEV_ERROR_SYNC and MCE_INITIATOR_CPU in the core mce code. Allow them to be set by the machine specific mce handlers. No functional change for existing handlers. Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/mce.h | 3 ++- arch/powerpc/kernel/mce.c | 5 +++-- arch/powerpc/kernel/mce_power.c | 6 ++++++ 3 files changed, 11 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index f97d8cb..b2a5865 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -177,7 +177,8 @@ struct mce_error_info { enum MCE_EratErrorType erat_error_type:8; enum MCE_TlbErrorType tlb_error_type:8; } u; - uint8_t reserved[2]; + enum MCE_Severity severity:8; + enum MCE_Initiator initiator:8; }; #define MAX_MC_EVT 100 diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index c6923ff..9495072 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -90,13 +90,14 @@ void save_mce_event(struct pt_regs *regs, long handled, mce->gpr3 = regs->gpr[3]; mce->in_use = 1; - mce->initiator = MCE_INITIATOR_CPU; /* Mark it recovered if we have handled it and MSR(RI=1). */ if (handled && (regs->msr & MSR_RI)) mce->disposition = MCE_DISPOSITION_RECOVERED; else mce->disposition = MCE_DISPOSITION_NOT_RECOVERED; - mce->severity = MCE_SEV_ERROR_SYNC; + + mce->initiator = mce_err->initiator; + mce->severity = mce_err->severity; /* * Populate the mce error_type and type-specific error_type. diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index 7353991..c37fc5f 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -281,6 +281,9 @@ long __machine_check_early_realmode_p7(struct pt_regs *regs) long handled = 1; struct mce_error_info mce_error_info = { 0 }; + mce_error_info.severity = MCE_SEV_ERROR_SYNC; + mce_error_info.initiator = MCE_INITIATOR_CPU; + srr1 = regs->msr; nip = regs->nip; @@ -352,6 +355,9 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) long handled = 1; struct mce_error_info mce_error_info = { 0 }; + mce_error_info.severity = MCE_SEV_ERROR_SYNC; + mce_error_info.initiator = MCE_INITIATOR_CPU; + srr1 = regs->msr; nip = regs->nip; -- cgit v1.1 From 7b9f71f974a12740e79e918cfd58c2fce0b5b580 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Tue, 28 Feb 2017 12:00:48 +1000 Subject: powerpc/64s: POWER9 machine check handler Add POWER9 machine check handler. There are several new types of errors added, so logging messages for those are also added. This doesn't attempt to reuse any of the P7/8 defines or functions, because that becomes too complex. The better option in future is to use a table driven approach. 
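To make that suggested direction concrete, here is a hedged sketch of what a table-driven decoder could look like (the structure and names are invented for illustration and are not part of this patch):

/*
 * One entry per architected error: match bits in SRR1/DSISR, carry
 * the decoded classification and the required flush, and replace the
 * per-CPU switch/if cascades with a single lookup loop.
 */
struct mce_decode_entry {
	uint64_t mask;			/* bits selecting this entry */
	uint64_t value;
	enum MCE_ErrorType error_type;	/* e.g. MCE_ERROR_TYPE_SLB */
	unsigned int subtype;		/* e.g. MCE_SLB_ERROR_PARITY */
	int flush;			/* MCE_FLUSH_SLB/TLB/ERAT or 0 */
};

static const struct mce_decode_entry p9_ierror_table[] = {
	/* { mask, value, MCE_ERROR_TYPE_SLB, MCE_SLB_ERROR_PARITY, MCE_FLUSH_SLB }, ... */
};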
Signed-off-by: Nicholas Piggin Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/bitops.h | 4 + arch/powerpc/include/asm/mce.h | 105 +++++++++++++++++ arch/powerpc/kernel/cputable.c | 3 + arch/powerpc/kernel/mce.c | 83 ++++++++++++++ arch/powerpc/kernel/mce_power.c | 231 ++++++++++++++++++++++++++++++++++++++ 5 files changed, 426 insertions(+) (limited to 'arch') diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 73eb794..bc5fdfd 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h @@ -51,6 +51,10 @@ #define PPC_BIT(bit) (1UL << PPC_BITLSHIFT(bit)) #define PPC_BITMASK(bs, be) ((PPC_BIT(bs) - PPC_BIT(be)) | PPC_BIT(bs)) +/* Put a PPC bit into a "normal" bit position */ +#define PPC_BITEXTRACT(bits, ppc_bit, dst_bit) \ + ((((bits) >> PPC_BITLSHIFT(ppc_bit)) & 1) << (dst_bit)) + #include /* Macro for generating the ***_bits() functions */ diff --git a/arch/powerpc/include/asm/mce.h b/arch/powerpc/include/asm/mce.h index b2a5865..ed62efe 100644 --- a/arch/powerpc/include/asm/mce.h +++ b/arch/powerpc/include/asm/mce.h @@ -66,6 +66,55 @@ #define P8_DSISR_MC_SLB_ERRORS (P7_DSISR_MC_SLB_ERRORS | \ P8_DSISR_MC_ERAT_MULTIHIT_SEC) + +/* + * Machine Check bits on power9 + */ +#define P9_SRR1_MC_LOADSTORE(srr1) (((srr1) >> PPC_BITLSHIFT(42)) & 1) + +#define P9_SRR1_MC_IFETCH(srr1) ( \ + PPC_BITEXTRACT(srr1, 45, 0) | \ + PPC_BITEXTRACT(srr1, 44, 1) | \ + PPC_BITEXTRACT(srr1, 43, 2) | \ + PPC_BITEXTRACT(srr1, 36, 3) ) + +/* 0 is reserved */ +#define P9_SRR1_MC_IFETCH_UE 1 +#define P9_SRR1_MC_IFETCH_SLB_PARITY 2 +#define P9_SRR1_MC_IFETCH_SLB_MULTIHIT 3 +#define P9_SRR1_MC_IFETCH_ERAT_MULTIHIT 4 +#define P9_SRR1_MC_IFETCH_TLB_MULTIHIT 5 +#define P9_SRR1_MC_IFETCH_UE_TLB_RELOAD 6 +/* 7 is reserved */ +#define P9_SRR1_MC_IFETCH_LINK_TIMEOUT 8 +#define P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT 9 +/* 10 ? 
*/ +#define P9_SRR1_MC_IFETCH_RA 11 +#define P9_SRR1_MC_IFETCH_RA_TABLEWALK 12 +#define P9_SRR1_MC_IFETCH_RA_ASYNC_STORE 13 +#define P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT 14 +#define P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN 15 + +/* DSISR bits for machine check (On Power9) */ +#define P9_DSISR_MC_UE (PPC_BIT(48)) +#define P9_DSISR_MC_UE_TABLEWALK (PPC_BIT(49)) +#define P9_DSISR_MC_LINK_LOAD_TIMEOUT (PPC_BIT(50)) +#define P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT (PPC_BIT(51)) +#define P9_DSISR_MC_ERAT_MULTIHIT (PPC_BIT(52)) +#define P9_DSISR_MC_TLB_MULTIHIT_MFTLB (PPC_BIT(53)) +#define P9_DSISR_MC_USER_TLBIE (PPC_BIT(54)) +#define P9_DSISR_MC_SLB_PARITY_MFSLB (PPC_BIT(55)) +#define P9_DSISR_MC_SLB_MULTIHIT_MFSLB (PPC_BIT(56)) +#define P9_DSISR_MC_RA_LOAD (PPC_BIT(57)) +#define P9_DSISR_MC_RA_TABLEWALK (PPC_BIT(58)) +#define P9_DSISR_MC_RA_TABLEWALK_FOREIGN (PPC_BIT(59)) +#define P9_DSISR_MC_RA_FOREIGN (PPC_BIT(60)) + +/* SLB error bits */ +#define P9_DSISR_MC_SLB_ERRORS (P9_DSISR_MC_ERAT_MULTIHIT | \ + P9_DSISR_MC_SLB_PARITY_MFSLB | \ + P9_DSISR_MC_SLB_MULTIHIT_MFSLB) + enum MCE_Version { MCE_V1 = 1, }; @@ -93,6 +142,9 @@ enum MCE_ErrorType { MCE_ERROR_TYPE_SLB = 2, MCE_ERROR_TYPE_ERAT = 3, MCE_ERROR_TYPE_TLB = 4, + MCE_ERROR_TYPE_USER = 5, + MCE_ERROR_TYPE_RA = 6, + MCE_ERROR_TYPE_LINK = 7, }; enum MCE_UeErrorType { @@ -121,6 +173,32 @@ enum MCE_TlbErrorType { MCE_TLB_ERROR_MULTIHIT = 2, }; +enum MCE_UserErrorType { + MCE_USER_ERROR_INDETERMINATE = 0, + MCE_USER_ERROR_TLBIE = 1, +}; + +enum MCE_RaErrorType { + MCE_RA_ERROR_INDETERMINATE = 0, + MCE_RA_ERROR_IFETCH = 1, + MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH = 2, + MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN = 3, + MCE_RA_ERROR_LOAD = 4, + MCE_RA_ERROR_STORE = 5, + MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 6, + MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN = 7, + MCE_RA_ERROR_LOAD_STORE_FOREIGN = 8, +}; + +enum MCE_LinkErrorType { + MCE_LINK_ERROR_INDETERMINATE = 0, + MCE_LINK_ERROR_IFETCH_TIMEOUT = 1, + MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT = 2, + MCE_LINK_ERROR_LOAD_TIMEOUT = 3, + MCE_LINK_ERROR_STORE_TIMEOUT = 4, + MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT = 5, +}; + struct machine_check_event { enum MCE_Version version:8; /* 0x00 */ uint8_t in_use; /* 0x01 */ @@ -166,6 +244,30 @@ struct machine_check_event { uint64_t effective_address; uint8_t reserved_2[16]; } tlb_error; + + struct { + enum MCE_UserErrorType user_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } user_error; + + struct { + enum MCE_RaErrorType ra_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } ra_error; + + struct { + enum MCE_LinkErrorType link_error_type:8; + uint8_t effective_address_provided; + uint8_t reserved_1[6]; + uint64_t effective_address; + uint8_t reserved_2[16]; + } link_error; } u; }; @@ -176,6 +278,9 @@ struct mce_error_info { enum MCE_SlbErrorType slb_error_type:8; enum MCE_EratErrorType erat_error_type:8; enum MCE_TlbErrorType tlb_error_type:8; + enum MCE_UserErrorType user_error_type:8; + enum MCE_RaErrorType ra_error_type:8; + enum MCE_LinkErrorType link_error_type:8; } u; enum MCE_Severity severity:8; enum MCE_Initiator initiator:8; diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index bb7a189..e79b9da 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c @@ -77,6 +77,7 @@ extern void 
__flush_tlb_power8(unsigned int action); extern void __flush_tlb_power9(unsigned int action); extern long __machine_check_early_realmode_p7(struct pt_regs *regs); extern long __machine_check_early_realmode_p8(struct pt_regs *regs); +extern long __machine_check_early_realmode_p9(struct pt_regs *regs); #endif /* CONFIG_PPC64 */ #if defined(CONFIG_E500) extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); @@ -540,6 +541,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_setup = __setup_cpu_power9, .cpu_restore = __restore_cpu_power9, .flush_tlb = __flush_tlb_power9, + .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, { /* Power9 */ @@ -559,6 +561,7 @@ static struct cpu_spec __initdata cpu_specs[] = { .cpu_setup = __setup_cpu_power9, .cpu_restore = __restore_cpu_power9, .flush_tlb = __flush_tlb_power9, + .machine_check_early = __machine_check_early_realmode_p9, .platform = "power9", }, { /* Cell Broadband Engine */ diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c index 9495072..a1475e6 100644 --- a/arch/powerpc/kernel/mce.c +++ b/arch/powerpc/kernel/mce.c @@ -58,6 +58,15 @@ static void mce_set_error_info(struct machine_check_event *mce, case MCE_ERROR_TYPE_TLB: mce->u.tlb_error.tlb_error_type = mce_err->u.tlb_error_type; break; + case MCE_ERROR_TYPE_USER: + mce->u.user_error.user_error_type = mce_err->u.user_error_type; + break; + case MCE_ERROR_TYPE_RA: + mce->u.ra_error.ra_error_type = mce_err->u.ra_error_type; + break; + case MCE_ERROR_TYPE_LINK: + mce->u.link_error.link_error_type = mce_err->u.link_error_type; + break; case MCE_ERROR_TYPE_UNKNOWN: default: break; @@ -116,6 +125,15 @@ void save_mce_event(struct pt_regs *regs, long handled, } else if (mce->error_type == MCE_ERROR_TYPE_ERAT) { mce->u.erat_error.effective_address_provided = true; mce->u.erat_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_USER) { + mce->u.user_error.effective_address_provided = true; + mce->u.user_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_RA) { + mce->u.ra_error.effective_address_provided = true; + mce->u.ra_error.effective_address = addr; + } else if (mce->error_type == MCE_ERROR_TYPE_LINK) { + mce->u.link_error.effective_address_provided = true; + mce->u.link_error.effective_address = addr; } else if (mce->error_type == MCE_ERROR_TYPE_UE) { mce->u.ue_error.effective_address_provided = true; mce->u.ue_error.effective_address = addr; @@ -240,6 +258,29 @@ void machine_check_print_event_info(struct machine_check_event *evt) "Parity", "Multihit", }; + static const char *mc_user_types[] = { + "Indeterminate", + "tlbie(l) invalid", + }; + static const char *mc_ra_types[] = { + "Indeterminate", + "Instruction fetch (bad)", + "Page table walk ifetch (bad)", + "Page table walk ifetch (foreign)", + "Load (bad)", + "Store (bad)", + "Page table walk Load/Store (bad)", + "Page table walk Load/Store (foreign)", + "Load/Store (foreign)", + }; + static const char *mc_link_types[] = { + "Indeterminate", + "Instruction fetch (timeout)", + "Page table walk ifetch (timeout)", + "Load (timeout)", + "Store (timeout)", + "Page table walk Load/Store (timeout)", + }; /* Print things out */ if (evt->version != MCE_V1) { @@ -316,6 +357,36 @@ void machine_check_print_event_info(struct machine_check_event *evt) printk("%s Effective address: %016llx\n", level, evt->u.tlb_error.effective_address); break; + case MCE_ERROR_TYPE_USER: + subtype = evt->u.user_error.user_error_type < + 
ARRAY_SIZE(mc_user_types) ? + mc_user_types[evt->u.user_error.user_error_type] + : "Unknown"; + printk("%s Error type: User [%s]\n", level, subtype); + if (evt->u.user_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.user_error.effective_address); + break; + case MCE_ERROR_TYPE_RA: + subtype = evt->u.ra_error.ra_error_type < + ARRAY_SIZE(mc_ra_types) ? + mc_ra_types[evt->u.ra_error.ra_error_type] + : "Unknown"; + printk("%s Error type: Real address [%s]\n", level, subtype); + if (evt->u.ra_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.ra_error.effective_address); + break; + case MCE_ERROR_TYPE_LINK: + subtype = evt->u.link_error.link_error_type < + ARRAY_SIZE(mc_link_types) ? + mc_link_types[evt->u.link_error.link_error_type] + : "Unknown"; + printk("%s Error type: Link [%s]\n", level, subtype); + if (evt->u.link_error.effective_address_provided) + printk("%s Effective address: %016llx\n", + level, evt->u.link_error.effective_address); + break; default: case MCE_ERROR_TYPE_UNKNOWN: printk("%s Error type: Unknown\n", level); @@ -342,6 +413,18 @@ uint64_t get_mce_fault_addr(struct machine_check_event *evt) if (evt->u.tlb_error.effective_address_provided) return evt->u.tlb_error.effective_address; break; + case MCE_ERROR_TYPE_USER: + if (evt->u.user_error.effective_address_provided) + return evt->u.user_error.effective_address; + break; + case MCE_ERROR_TYPE_RA: + if (evt->u.ra_error.effective_address_provided) + return evt->u.ra_error.effective_address; + break; + case MCE_ERROR_TYPE_LINK: + if (evt->u.link_error.effective_address_provided) + return evt->u.link_error.effective_address; + break; default: case MCE_ERROR_TYPE_UNKNOWN: break; diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c index c37fc5f..763d6f5 100644 --- a/arch/powerpc/kernel/mce_power.c +++ b/arch/powerpc/kernel/mce_power.c @@ -116,6 +116,51 @@ static void flush_and_reload_slb(void) } #endif +static void flush_erat(void) +{ + asm volatile(PPC_INVALIDATE_ERAT : : :"memory"); +} + +#define MCE_FLUSH_SLB 1 +#define MCE_FLUSH_TLB 2 +#define MCE_FLUSH_ERAT 3 + +static int mce_flush(int what) +{ +#ifdef CONFIG_PPC_STD_MMU_64 + if (what == MCE_FLUSH_SLB) { + flush_and_reload_slb(); + return 1; + } +#endif + if (what == MCE_FLUSH_ERAT) { + flush_erat(); + return 1; + } + if (what == MCE_FLUSH_TLB) { + if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { + cur_cpu_spec->flush_tlb(TLB_INVAL_SCOPE_GLOBAL); + return 1; + } + } + + return 0; +} + +static int mce_handle_flush_derrors(uint64_t dsisr, uint64_t slb, uint64_t tlb, uint64_t erat) +{ + if ((dsisr & slb) && mce_flush(MCE_FLUSH_SLB)) + dsisr &= ~slb; + if ((dsisr & erat) && mce_flush(MCE_FLUSH_ERAT)) + dsisr &= ~erat; + if ((dsisr & tlb) && mce_flush(MCE_FLUSH_TLB)) + dsisr &= ~tlb; + /* Any other errors we don't understand? 
*/ + if (dsisr) + return 0; + return 1; +} + static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits) { long handled = 1; @@ -378,3 +423,189 @@ long __machine_check_early_realmode_p8(struct pt_regs *regs) save_mce_event(regs, handled, &mce_error_info, nip, addr); return handled; } + +static int mce_handle_derror_p9(struct pt_regs *regs) +{ + uint64_t dsisr = regs->dsisr; + + return mce_handle_flush_derrors(dsisr, + P9_DSISR_MC_SLB_PARITY_MFSLB | + P9_DSISR_MC_SLB_MULTIHIT_MFSLB, + + P9_DSISR_MC_TLB_MULTIHIT_MFTLB, + + P9_DSISR_MC_ERAT_MULTIHIT); +} + +static int mce_handle_ierror_p9(struct pt_regs *regs) +{ + uint64_t srr1 = regs->msr; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_SLB_PARITY: + case P9_SRR1_MC_IFETCH_SLB_MULTIHIT: + return mce_flush(MCE_FLUSH_SLB); + case P9_SRR1_MC_IFETCH_TLB_MULTIHIT: + return mce_flush(MCE_FLUSH_TLB); + case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT: + return mce_flush(MCE_FLUSH_ERAT); + default: + return 0; + } +} + +static void mce_get_derror_p9(struct pt_regs *regs, + struct mce_error_info *mce_err, uint64_t *addr) +{ + uint64_t dsisr = regs->dsisr; + + mce_err->severity = MCE_SEV_ERROR_SYNC; + mce_err->initiator = MCE_INITIATOR_CPU; + + if (dsisr & P9_DSISR_MC_USER_TLBIE) + *addr = regs->nip; + else + *addr = regs->dar; + + if (dsisr & P9_DSISR_MC_UE) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_LOAD_STORE; + } else if (dsisr & P9_DSISR_MC_UE_TABLEWALK) { + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + } else if (dsisr & P9_DSISR_MC_LINK_LOAD_TIMEOUT) { + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_LOAD_TIMEOUT; + } else if (dsisr & P9_DSISR_MC_LINK_TABLEWALK_TIMEOUT) { + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_LOAD_STORE_TIMEOUT; + } else if (dsisr & P9_DSISR_MC_ERAT_MULTIHIT) { + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_TLB_MULTIHIT_MFTLB) { + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_USER_TLBIE) { + mce_err->error_type = MCE_ERROR_TYPE_USER; + mce_err->u.user_error_type = MCE_USER_ERROR_TLBIE; + } else if (dsisr & P9_DSISR_MC_SLB_PARITY_MFSLB) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + } else if (dsisr & P9_DSISR_MC_SLB_MULTIHIT_MFSLB) { + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + } else if (dsisr & P9_DSISR_MC_RA_LOAD) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD; + } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE; + } else if (dsisr & P9_DSISR_MC_RA_TABLEWALK_FOREIGN) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_LOAD_STORE_FOREIGN; + } else if (dsisr & P9_DSISR_MC_RA_FOREIGN) { + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_LOAD_STORE_FOREIGN; + } +} + +static void mce_get_ierror_p9(struct pt_regs *regs, + struct mce_error_info *mce_err, uint64_t *addr) +{ + uint64_t srr1 = regs->msr; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE: + case 
P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT: + mce_err->severity = MCE_SEV_FATAL; + break; + default: + mce_err->severity = MCE_SEV_ERROR_SYNC; + break; + } + + mce_err->initiator = MCE_INITIATOR_CPU; + + *addr = regs->nip; + + switch (P9_SRR1_MC_IFETCH(srr1)) { + case P9_SRR1_MC_IFETCH_UE: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_IFETCH; + break; + case P9_SRR1_MC_IFETCH_SLB_PARITY: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_PARITY; + break; + case P9_SRR1_MC_IFETCH_SLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_SLB; + mce_err->u.slb_error_type = MCE_SLB_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_ERAT_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_ERAT; + mce_err->u.erat_error_type = MCE_ERAT_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_TLB_MULTIHIT: + mce_err->error_type = MCE_ERROR_TYPE_TLB; + mce_err->u.tlb_error_type = MCE_TLB_ERROR_MULTIHIT; + break; + case P9_SRR1_MC_IFETCH_UE_TLB_RELOAD: + mce_err->error_type = MCE_ERROR_TYPE_UE; + mce_err->u.ue_error_type = MCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + case P9_SRR1_MC_IFETCH_LINK_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_IFETCH_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_LINK_TABLEWALK_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_PAGE_TABLE_WALK_IFETCH_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_RA: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_IFETCH; + break; + case P9_SRR1_MC_IFETCH_RA_TABLEWALK: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH; + break; + case P9_SRR1_MC_IFETCH_RA_ASYNC_STORE: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_STORE; + break; + case P9_SRR1_MC_IFETCH_LINK_ASYNC_STORE_TIMEOUT: + mce_err->error_type = MCE_ERROR_TYPE_LINK; + mce_err->u.link_error_type = MCE_LINK_ERROR_STORE_TIMEOUT; + break; + case P9_SRR1_MC_IFETCH_RA_TABLEWALK_FOREIGN: + mce_err->error_type = MCE_ERROR_TYPE_RA; + mce_err->u.ra_error_type = MCE_RA_ERROR_PAGE_TABLE_WALK_IFETCH_FOREIGN; + break; + default: + break; + } +} + +long __machine_check_early_realmode_p9(struct pt_regs *regs) +{ + uint64_t nip, addr; + long handled; + struct mce_error_info mce_error_info = { 0 }; + + nip = regs->nip; + + if (P9_SRR1_MC_LOADSTORE(regs->msr)) { + handled = mce_handle_derror_p9(regs); + mce_get_derror_p9(regs, &mce_error_info, &addr); + } else { + handled = mce_handle_ierror_p9(regs); + mce_get_ierror_p9(regs, &mce_error_info, &addr); + } + + /* Handle UE error. */ + if (mce_error_info.error_type == MCE_ERROR_TYPE_UE) + handled = mce_handle_ue_error(regs); + + save_mce_event(regs, handled, &mce_error_info, nip, addr); + return handled; +} -- cgit v1.1 From a1016e94cce9fb6ea56d7602263783e2d95d6e92 Mon Sep 17 00:00:00 2001 From: Russell King Date: Thu, 9 Mar 2017 17:14:32 +0000 Subject: ARM: wire up statx syscall Wire up the new statx syscall for ARM. 
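A hedged user-space smoke test for the new entry (illustrative only: the raw number 397 matches the ARM table entry added below, and struct statx/STATX_BASIC_STATS assume 4.11+ uapi headers; no libc wrapper existed at the time):

#include <stdio.h>
#include <fcntl.h>		/* AT_FDCWD */
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/stat.h>		/* struct statx, STATX_BASIC_STATS */

int main(void)
{
	struct statx stx;
	long ret = syscall(397 /* __NR_statx on ARM */, AT_FDCWD, "/",
			   0, STATX_BASIC_STATS, &stx);
	printf("statx() = %ld\n", ret);	/* expect 0 on a patched kernel */
	return 0;
}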
Signed-off-by: Russell King --- arch/arm/tools/syscall.tbl | 1 + 1 file changed, 1 insertion(+) (limited to 'arch') diff --git a/arch/arm/tools/syscall.tbl b/arch/arm/tools/syscall.tbl index 3c2cb5d..0bb0e9c 100644 --- a/arch/arm/tools/syscall.tbl +++ b/arch/arm/tools/syscall.tbl @@ -411,3 +411,4 @@ 394 common pkey_mprotect sys_pkey_mprotect 395 common pkey_alloc sys_pkey_alloc 396 common pkey_free sys_pkey_free +397 common statx sys_statx -- cgit v1.1 From bba8376aea1dcbbe22bbda118c52abee317c7609 Mon Sep 17 00:00:00 2001 From: Matjaz Hegedic Date: Thu, 9 Mar 2017 14:00:17 +0100 Subject: x86/reboot/quirks: Fix typo in ASUS EeeBook X205TA reboot quirk The reboot quirk for ASUS EeeBook X205TA contains a typo in DMI_PRODUCT_NAME, improperly referring to X205TAW instead of X205TA, which prevents the quirk from being triggered. The model X205TAW already has a reboot quirk of its own. This fix simply removes the inappropriate final letter W. Fixes: 90b28ded88dd ("x86/reboot/quirks: Add ASUS EeeBook X205TA reboot quirk") Signed-off-by: Matjaz Hegedic Link: http://lkml.kernel.org/r/1489064417-7445-1-git-send-email-matjaz.hegedic@gmail.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/reboot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 4194d6f..067f981 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -228,7 +228,7 @@ static struct dmi_system_id __initdata reboot_dmi_table[] = { .ident = "ASUS EeeBook X205TA", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), - DMI_MATCH(DMI_PRODUCT_NAME, "X205TAW"), + DMI_MATCH(DMI_PRODUCT_NAME, "X205TA"), }, }, { /* Handle problems with rebooting on ASUS EeeBook X205TAW */ -- cgit v1.1 From cb6950b7152fb3760942f9cb16bd2a35e5a1bfd1 Mon Sep 17 00:00:00 2001 From: "Naveen N. Rao" Date: Tue, 7 Mar 2017 00:34:57 +0530 Subject: arm64: kprobes: remove kprobe_exceptions_notify Commit fc62d0207ae0 ("kprobes: Introduce weak variant of kprobe_exceptions_notify()") introduces a generic empty version of the function for architectures that don't need special handling, like arm64. As such, remove the arch/arm64/ specific handler. Signed-off-by: Naveen N. 
Rao Signed-off-by: Will Deacon --- arch/arm64/kernel/probes/kprobes.c | 6 ------ 1 file changed, 6 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c index 2a07aae..c5c4594 100644 --- a/arch/arm64/kernel/probes/kprobes.c +++ b/arch/arm64/kernel/probes/kprobes.c @@ -372,12 +372,6 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned int fsr) return 0; } -int __kprobes kprobe_exceptions_notify(struct notifier_block *self, - unsigned long val, void *data) -{ - return NOTIFY_DONE; -} - static void __kprobes kprobe_handler(struct pt_regs *regs) { struct kprobe *p, *cur_kprobe; -- cgit v1.1 From b0de0ccc8b9edd8846828e0ecdc35deacdf186b0 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Mon, 6 Mar 2017 19:06:40 +0000 Subject: arm64: kasan: avoid bad virt_to_pfn() Booting a v4.11-rc1 kernel with DEBUG_VIRTUAL and KASAN enabled produces the following splat (trimmed for brevity): [ 0.000000] virt_to_phys used for non-linear address: ffff200008080000 (0xffff200008080000) [ 0.000000] WARNING: CPU: 0 PID: 0 at arch/arm64/mm/physaddr.c:14 __virt_to_phys+0x48/0x70 [ 0.000000] PC is at __virt_to_phys+0x48/0x70 [ 0.000000] LR is at __virt_to_phys+0x48/0x70 [ 0.000000] Call trace: [ 0.000000] [] __virt_to_phys+0x48/0x70 [ 0.000000] [] kasan_init+0x1c0/0x498 [ 0.000000] [] setup_arch+0x2fc/0x948 [ 0.000000] [] start_kernel+0xb8/0x570 [ 0.000000] [] __primary_switched+0x6c/0x74 This is because we use virt_to_pfn() on a kernel image address when trying to figure out its nid, so that we can allocate its shadow from the same node. As with other recent changes, this patch uses lm_alias() to solve this. We could instead use NUMA_NO_NODE, as x86 does for all shadow allocations, though we'll likely want the "real" memory shadow to be backed from its corresponding nid anyway, so we may as well be consistent and find the nid for the image shadow. Cc: Catalin Marinas Cc: Will Deacon Acked-by: Laura Abbott Signed-off-by: Mark Rutland Signed-off-by: Will Deacon --- arch/arm64/mm/kasan_init.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c index 55d1e92..687a358 100644 --- a/arch/arm64/mm/kasan_init.c +++ b/arch/arm64/mm/kasan_init.c @@ -162,7 +162,7 @@ void __init kasan_init(void) clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); vmemmap_populate(kimg_shadow_start, kimg_shadow_end, - pfn_to_nid(virt_to_pfn(_text))); + pfn_to_nid(virt_to_pfn(lm_alias(_text)))); /* * vmemmap_populate() has populated the shadow region that covers the -- cgit v1.1 From 5c2a625937ba49bc691089370638223d310cda9a Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Wed, 8 Mar 2017 16:27:04 -0800 Subject: arm64: support keyctl() system call in 32-bit mode As is the case for a number of other architectures that have a 32-bit compat mode, enable KEYS_COMPAT if both COMPAT and KEYS are enabled. This allows AArch32 programs to use the keyctl() system call when running on an AArch64 kernel. 
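A hedged illustration (not part of the patch) of what starts working for an AArch32 binary on an AArch64 kernel once KEYS_COMPAT is enabled; without it the compat entry returns -1/ENOSYS:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>	/* KEYCTL_GET_KEYRING_ID, KEY_SPEC_* */

int main(void)
{
	/* Build with an AArch32 toolchain, run under an AArch64 kernel. */
	long id = syscall(__NR_keyctl, KEYCTL_GET_KEYRING_ID,
			  KEY_SPEC_SESSION_KEYRING, 1 /* create */);
	printf("session keyring: %ld\n", id);
	return 0;
}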
Signed-off-by: Eric Biggers Signed-off-by: Will Deacon --- arch/arm64/Kconfig | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a39029b..f21e9a7 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -1063,6 +1063,10 @@ config SYSVIPC_COMPAT def_bool y depends on COMPAT && SYSVIPC +config KEYS_COMPAT + def_bool y + depends on COMPAT && KEYS + endmenu menu "Power management options" -- cgit v1.1 From 14088540ad63c648e5cdf490412033f792d16b6b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Fri, 10 Mar 2017 17:44:18 +0000 Subject: arm64: use const cap for system_uses_ttbr0_pan() Since commit 4b65a5db362783ab ("arm64: Introduce uaccess_{disable,enable} functionality based on TTBR0_EL1"), system_uses_ttbr0_pan() has used cpus_have_cap() to determine whether PAN is present. Since commit a4023f682739439b ("arm64: Add hypervisor safe helper for checking constant capabilities"), which was introduced around the same time, cpus_have_cap() doesn't try to use a static key, and must always perform a load, test, and conditional branch (likely a tbnz for the latter two). Elsewhere, we moved to using cpus_have_const_cap(), which can use a static key (i.e. a non-conditional branch), which is patched at runtime when the feature is detected. This patch makes system_uses_ttbr0_pan() use cpus_have_const_cap(). The static key is likely a win for hot-paths like the uaccess primitives, and this makes our usage consistent regardless. Signed-off-by: Mark Rutland Reviewed-by: Suzuki K Poulose Cc: Catalin Marinas Cc: Will Deacon Signed-off-by: Will Deacon --- arch/arm64/include/asm/cpufeature.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h index 05310ad..f31c48d 100644 --- a/arch/arm64/include/asm/cpufeature.h +++ b/arch/arm64/include/asm/cpufeature.h @@ -251,7 +251,7 @@ static inline bool system_supports_fpsimd(void) static inline bool system_uses_ttbr0_pan(void) { return IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN) && - !cpus_have_cap(ARM64_HAS_PAN); + !cpus_have_const_cap(ARM64_HAS_PAN); } #endif /* __ASSEMBLY__ */ -- cgit v1.1 From 0e4c0e6ea7d4a988a5ae2791c7cb5769b5256dad Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Fri, 17 Feb 2017 15:25:08 +0100 Subject: arm64: kernel: Update kerneldoc for cpu_suspend() rename Commit af391b15f7b56ce1 ("arm64: kernel: rename __cpu_suspend to keep it aligned with arm") renamed cpu_suspend() to arm_cpuidle_suspend(), but forgot to update the kerneldoc header. 
Fixes: af391b15f7b56ce1 ("arm64: kernel: rename __cpu_suspend to keep it aligned with arm") Signed-off-by: Geert Uytterhoeven Signed-off-by: Will Deacon --- arch/arm64/kernel/cpuidle.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/kernel/cpuidle.c b/arch/arm64/kernel/cpuidle.c index 75a0f8a..fd69108 100644 --- a/arch/arm64/kernel/cpuidle.c +++ b/arch/arm64/kernel/cpuidle.c @@ -30,7 +30,7 @@ int arm_cpuidle_init(unsigned int cpu) } /** - * cpu_suspend() - function to enter a low-power idle state + * arm_cpuidle_suspend() - function to enter a low-power idle state * @arg: argument to pass to CPU suspend operations * * Return: 0 on success, -EOPNOTSUPP if CPU suspend hook not initialized, CPU -- cgit v1.1 From 40c50c1fecdf012a3bf055ec813f0ef2eda2749c Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Fri, 10 Mar 2017 13:17:18 +0100 Subject: kexec, x86/purgatory: Unbreak it and clean it up The purgatory code defines global variables which are referenced via a symbol lookup in the kexec code (core and arch). A recent commit addressing sparse warnings made these static and thereby broke kexec_file. Why did this happen? Simply because the whole machinery is undocumented and lacks any form of forward declarations. The variable names are unspecific and lack a prefix, so adding forward declarations creates shadow variables in the core code. Aside from that, the code relies on magic constants and duplicate struct definitions with no way to ensure that these things stay in sync. The section placement of the purgatory variables happened by chance and not by design. Unbreak kexec and clean up the mess: - Add proper forward declarations and document the usage - Use common struct definition - Use the proper common defines instead of magic constants - Add a purgatory_ prefix to have a proper name space - Use ARRAY_SIZE() instead of a home-brewed reimplementation - Add proper sections to the purgatory variables [ From Mike ] Fixes: 72042a8c7b01 ("x86/purgatory: Make functions and variables static") Reported-by: Mike Galbraith < Signed-off-by: Thomas Gleixner Cc: Nicholas Mc Guire Cc: Borislav Petkov Cc: Vivek Goyal Cc: "Tobin C. Harding" Link: http://lkml.kernel.org/r/alpine.DEB.2.20.1703101315140.3681@nanos Signed-off-by: Thomas Gleixner --- arch/powerpc/purgatory/trampoline.S | 12 ++++++------ arch/x86/include/asm/purgatory.h | 20 ++++++++++++++++++++ arch/x86/kernel/machine_kexec_64.c | 9 ++++++--- arch/x86/purgatory/purgatory.c | 35 +++++++++++++++++------------------ arch/x86/purgatory/purgatory.h | 8 -------- arch/x86/purgatory/setup-x86_64.S | 2 +- arch/x86/purgatory/sha256.h | 1 - 7 files changed, 50 insertions(+), 37 deletions(-) create mode 100644 arch/x86/include/asm/purgatory.h delete mode 100644 arch/x86/purgatory/purgatory.h (limited to 'arch') diff --git a/arch/powerpc/purgatory/trampoline.S b/arch/powerpc/purgatory/trampoline.S index f9760cc..3696ea6 100644 --- a/arch/powerpc/purgatory/trampoline.S +++ b/arch/powerpc/purgatory/trampoline.S @@ -116,13 +116,13 @@ dt_offset: .data .balign 8 -.globl sha256_digest -sha256_digest: +.globl purgatory_sha256_digest +purgatory_sha256_digest: .skip 32 - .size sha256_digest, . - sha256_digest + .size purgatory_sha256_digest, . - purgatory_sha256_digest .balign 8 -.globl sha_regions -sha_regions: +.globl purgatory_sha_regions +purgatory_sha_regions: .skip 8 * 2 * 16 - .size sha_regions, . - sha_regions + .size purgatory_sha_regions, . 
- purgatory_sha_regions diff --git a/arch/x86/include/asm/purgatory.h b/arch/x86/include/asm/purgatory.h new file mode 100644 index 0000000..d7da272 --- /dev/null +++ b/arch/x86/include/asm/purgatory.h @@ -0,0 +1,20 @@ +#ifndef _ASM_X86_PURGATORY_H +#define _ASM_X86_PURGATORY_H + +#ifndef __ASSEMBLY__ +#include + +extern void purgatory(void); +/* + * These forward declarations serve two purposes: + * + * 1) Make sparse happy when checking arch/purgatory + * 2) Document that these are required to be global so the symbol + * lookup in kexec works + */ +extern unsigned long purgatory_backup_dest; +extern unsigned long purgatory_backup_src; +extern unsigned long purgatory_backup_sz; +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_PURGATORY_H */ diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c index 307b1f4..857cdbd 100644 --- a/arch/x86/kernel/machine_kexec_64.c +++ b/arch/x86/kernel/machine_kexec_64.c @@ -194,19 +194,22 @@ static int arch_update_purgatory(struct kimage *image) /* Setup copying of backup region */ if (image->type == KEXEC_TYPE_CRASH) { - ret = kexec_purgatory_get_set_symbol(image, "backup_dest", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_dest", &image->arch.backup_load_addr, sizeof(image->arch.backup_load_addr), 0); if (ret) return ret; - ret = kexec_purgatory_get_set_symbol(image, "backup_src", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_src", &image->arch.backup_src_start, sizeof(image->arch.backup_src_start), 0); if (ret) return ret; - ret = kexec_purgatory_get_set_symbol(image, "backup_sz", + ret = kexec_purgatory_get_set_symbol(image, + "purgatory_backup_sz", &image->arch.backup_src_sz, sizeof(image->arch.backup_src_sz), 0); if (ret) diff --git a/arch/x86/purgatory/purgatory.c b/arch/x86/purgatory/purgatory.c index b6d5c89..470edad 100644 --- a/arch/x86/purgatory/purgatory.c +++ b/arch/x86/purgatory/purgatory.c @@ -10,22 +10,19 @@ * Version 2. See the file COPYING for more details. */ +#include +#include + #include "sha256.h" -#include "purgatory.h" #include "../boot/string.h" -struct sha_region { - unsigned long start; - unsigned long len; -}; - -static unsigned long backup_dest; -static unsigned long backup_src; -static unsigned long backup_sz; +unsigned long purgatory_backup_dest __section(.kexec-purgatory); +unsigned long purgatory_backup_src __section(.kexec-purgatory); +unsigned long purgatory_backup_sz __section(.kexec-purgatory); -static u8 sha256_digest[SHA256_DIGEST_SIZE] = { 0 }; +u8 purgatory_sha256_digest[SHA256_DIGEST_SIZE] __section(.kexec-purgatory); -struct sha_region sha_regions[16] = {}; +struct kexec_sha_region purgatory_sha_regions[KEXEC_SEGMENT_MAX] __section(.kexec-purgatory); /* * On x86, second kernel requries first 640K of memory to boot. 
Copy @@ -34,26 +31,28 @@ struct sha_region sha_regions[16] = {}; */ static int copy_backup_region(void) { - if (backup_dest) - memcpy((void *)backup_dest, (void *)backup_src, backup_sz); - + if (purgatory_backup_dest) { + memcpy((void *)purgatory_backup_dest, + (void *)purgatory_backup_src, purgatory_backup_sz); + } return 0; } static int verify_sha256_digest(void) { - struct sha_region *ptr, *end; + struct kexec_sha_region *ptr, *end; u8 digest[SHA256_DIGEST_SIZE]; struct sha256_state sctx; sha256_init(&sctx); - end = &sha_regions[sizeof(sha_regions)/sizeof(sha_regions[0])]; - for (ptr = sha_regions; ptr < end; ptr++) + end = purgatory_sha_regions + ARRAY_SIZE(purgatory_sha_regions); + + for (ptr = purgatory_sha_regions; ptr < end; ptr++) sha256_update(&sctx, (uint8_t *)(ptr->start), ptr->len); sha256_final(&sctx, digest); - if (memcmp(digest, sha256_digest, sizeof(digest))) + if (memcmp(digest, purgatory_sha256_digest, sizeof(digest))) return 1; return 0; diff --git a/arch/x86/purgatory/purgatory.h b/arch/x86/purgatory/purgatory.h deleted file mode 100644 index e2e365a..0000000 --- a/arch/x86/purgatory/purgatory.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef PURGATORY_H -#define PURGATORY_H - -#ifndef __ASSEMBLY__ -extern void purgatory(void); -#endif /* __ASSEMBLY__ */ - -#endif /* PURGATORY_H */ diff --git a/arch/x86/purgatory/setup-x86_64.S b/arch/x86/purgatory/setup-x86_64.S index f90e9df..dfae9b9 100644 --- a/arch/x86/purgatory/setup-x86_64.S +++ b/arch/x86/purgatory/setup-x86_64.S @@ -9,7 +9,7 @@ * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ -#include "purgatory.h" +#include .text .globl purgatory_start diff --git a/arch/x86/purgatory/sha256.h b/arch/x86/purgatory/sha256.h index bd15a41..2867d98 100644 --- a/arch/x86/purgatory/sha256.h +++ b/arch/x86/purgatory/sha256.h @@ -10,7 +10,6 @@ #ifndef SHA256_H #define SHA256_H - #include #include -- cgit v1.1 From c962cff17dfa11f4a8227ac16de2b28aea3312e4 Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:23 +0800 Subject: Revert "x86/acpi: Set persistent cpuid <-> nodeid mapping when booting" Revert: dc6db24d2476 ("x86/acpi: Set persistent cpuid <-> nodeid mapping when booting") The mapping of "cpuid <-> nodeid" is established at boot time via ACPI tables to keep associations of workqueues and other node related items consistent across cpu hotplug. But, ACPI tables are unreliable and failures with that boot time mapping have been reported on machines where the ACPI table and the physical information which is retrieved at actual hotplug are inconsistent. Revert the mapping implementation so it can be replaced with a less error-prone approach. 
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-2-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/acpi/boot.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index ae32838..f6b0e87 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -710,7 +710,7 @@ static void __init acpi_set_irq_model_ioapic(void) #ifdef CONFIG_ACPI_HOTPLUG_CPU #include -int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) +static int acpi_map_cpu2node(acpi_handle handle, int cpu, int physid) { #ifdef CONFIG_ACPI_NUMA int nid; -- cgit v1.1 From 2b85b3d22920db7473e5fed5719e7955c0ec323e Mon Sep 17 00:00:00 2001 From: Dou Liyang Date: Fri, 3 Mar 2017 16:02:25 +0800 Subject: x86/acpi: Restore the order of CPU IDs The following commits: f7c28833c2 ("x86/acpi: Enable acpi to register all possible cpus at boot time") and 8f54969dc8 ("x86/acpi: Introduce persistent storage for cpuid <-> apicid mapping") ... registered all the possible CPUs at boot time via ACPI tables to make the mapping of cpuid <-> apicid fixed. Both enabled and disabled CPUs could have a logical CPU ID after boot time. But ACPI tables are unreliable: the number and order of Local APIC entries, which depend on the firmware, are often inconsistent with the physical devices. Even if they are consistent, the disabled CPUs which take up some logical CPU IDs will also make the order discontinuous. Revert the registration of disabled CPUs; keep the allocation logic for logical CPU IDs and also keep some code location changes. 
Signed-off-by: Dou Liyang Tested-by: Xiaolong Ye Cc: rjw@rjwysocki.net Cc: linux-acpi@vger.kernel.org Cc: guzheng1@huawei.com Cc: izumi.taku@jp.fujitsu.com Cc: lenb@kernel.org Link: http://lkml.kernel.org/r/1488528147-2279-4-git-send-email-douly.fnst@cn.fujitsu.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/acpi/boot.c | 7 ++++++- arch/x86/kernel/apic/apic.c | 26 +++++++------------------- 2 files changed, 13 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index f6b0e87..b2879cc23 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -179,10 +179,15 @@ static int acpi_register_lapic(int id, u32 acpiid, u8 enabled) return -EINVAL; } + if (!enabled) { + ++disabled_cpus; + return -EINVAL; + } + if (boot_cpu_physical_apicid != -1U) ver = boot_cpu_apic_version; - cpu = __generic_processor_info(id, ver, enabled); + cpu = generic_processor_info(id, ver); if (cpu >= 0) early_per_cpu(x86_cpu_to_acpiid, cpu) = acpiid; diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c index aee7ded..8ccb7ef 100644 --- a/arch/x86/kernel/apic/apic.c +++ b/arch/x86/kernel/apic/apic.c @@ -2063,7 +2063,7 @@ static int allocate_logical_cpuid(int apicid) return nr_logical_cpuids++; } -int __generic_processor_info(int apicid, int version, bool enabled) +int generic_processor_info(int apicid, int version) { int cpu, max = nr_cpu_ids; bool boot_cpu_detected = physid_isset(boot_cpu_physical_apicid, @@ -2121,11 +2121,9 @@ int __generic_processor_info(int apicid, int version, bool enabled) if (num_processors >= nr_cpu_ids) { int thiscpu = max + disabled_cpus; - if (enabled) { - pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " - "reached. Processor %d/0x%x ignored.\n", - max, thiscpu, apicid); - } + pr_warning("APIC: NR_CPUS/possible_cpus limit of %i " + "reached. Processor %d/0x%x ignored.\n", + max, thiscpu, apicid); disabled_cpus++; return -EINVAL; @@ -2177,23 +2175,13 @@ int __generic_processor_info(int apicid, int version) apic->x86_32_early_logical_apicid(cpu); #endif set_cpu_possible(cpu, true); - - if (enabled) { - num_processors++; - physid_set(apicid, phys_cpu_present_map); - set_cpu_present(cpu, true); - } else { - disabled_cpus++; - } + physid_set(apicid, phys_cpu_present_map); + set_cpu_present(cpu, true); + num_processors++; return cpu; } -int generic_processor_info(int apicid, int version) -{ - return __generic_processor_info(apicid, version, true); -} - int hard_smp_processor_id(void) { return read_apic_id(); -- cgit v1.1 From 0acf611997d9d05dbfb559c3c6e379c861eb5957 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Wed, 22 Feb 2017 11:07:57 -0800 Subject: score: Fix implicit includes now failing build after extable change After changing from module.h to extable.h, score builds fail with: arch/score/kernel/traps.c: In function 'do_ri': arch/score/kernel/traps.c:248:4: error: implicit declaration of function 'user_disable_single_step' arch/score/mm/extable.c: In function 'fixup_exception': arch/score/mm/extable.c:32:38: error: dereferencing pointer to incomplete type arch/score/mm/extable.c:34:24: error: dereferencing pointer to incomplete type because extable.h doesn't drag in the same set of headers as module.h did. Add in the headers which were implicitly expected. Fixes: 90858794c960 ("module.h: remove extable.h include now users have migrated") Signed-off-by: Guenter Roeck [PG: tweak commit log; refresh for sched header refactoring.] 
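The include targets are elided in this dump; as a hedged sketch, the fix pattern is simply to include directly what the code uses instead of relying on what module.h pulled in transitively (the exact header names below are assumptions, not the verbatim patch):

/* arch/score/mm/extable.c -- illustrative */
#include <linux/extable.h>	/* fixup_exception() */
#include <linux/ptrace.h>	/* completes 'struct pt_regs' (assumed) */

/* arch/score/kernel/traps.c -- illustrative */
#include <linux/ptrace.h>	/* user_disable_single_step() (assumed) */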
Signed-off-by: Paul Gortmaker --- arch/score/kernel/traps.c | 1 + arch/score/mm/extable.c | 2 ++ 2 files changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/score/kernel/traps.c b/arch/score/kernel/traps.c index e359ec6..12daf45 100644 --- a/arch/score/kernel/traps.c +++ b/arch/score/kernel/traps.c @@ -24,6 +24,7 @@ */ #include +#include #include #include #include diff --git a/arch/score/mm/extable.c b/arch/score/mm/extable.c index ec87135..6736a3a 100644 --- a/arch/score/mm/extable.c +++ b/arch/score/mm/extable.c @@ -24,6 +24,8 @@ */ #include +#include +#include int fixup_exception(struct pt_regs *regs) { -- cgit v1.1 From 2c4ea6e28dbf15ab93632c5c189f3948366b8885 Mon Sep 17 00:00:00 2001 From: Daniel Borkmann Date: Sat, 11 Mar 2017 01:31:19 +0100 Subject: x86/tlb: Fix tlb flushing when lguest clears PGE Fengguang reported random corruptions from various locations on x86-32 after commits d2852a224050 ("arch: add ARCH_HAS_SET_MEMORY config") and 9d876e79df6a ("bpf: fix unlocking of jited image when module ronx not set") that uses the former. While x86-32 doesn't have a JIT like x86_64, the bpf_prog_lock_ro() and bpf_prog_unlock_ro() got enabled due to ARCH_HAS_SET_MEMORY, whereas Fengguang's test kernel doesn't have module support built in and therefore never had the DEBUG_SET_MODULE_RONX setting enabled. After investigating the crashes further, it turned out that using set_memory_ro() and set_memory_rw() didn't have the desired effect, for example, setting the pages as read-only on x86-32 would still let probe_kernel_write() succeed without error. This behavior would manifest itself in situations where the vmalloc'ed buffer was accessed prior to set_memory_*() such as in case of bpf_prog_alloc(). In cases where it wasn't, the page attribute changes seemed to have taken effect, leading to the conclusion that a TLB invalidate didn't happen. Moreover, it turned out that this issue reproduced with qemu in "-cpu kvm64" mode, but not for "-cpu host". When the issue occurs, change_page_attr_set_clr() did trigger a TLB flush as expected via __flush_tlb_all() through cpa_flush_range(), though. There are 3 variants for issuing a TLB flush: invpcid_flush_all() (depends on CPU feature bits X86_FEATURE_INVPCID, X86_FEATURE_PGE), cr4 based flush (depends on X86_FEATURE_PGE), and cr3 based flush. For "-cpu host" case in my setup, the flush used invpcid_flush_all() variant, whereas for "-cpu kvm64", the flush was cr4 based. Switching the kvm64 case to cr3 manually worked fine, and further investigating the cr4 one turned out that X86_CR4_PGE bit was not set in cr4 register, meaning the __native_flush_tlb_global_irq_disabled() wrote cr4 twice with the same value instead of clearing X86_CR4_PGE in the first write to trigger the flush. It turned out that X86_CR4_PGE was cleared from cr4 during init from lguest_arch_host_init() via adjust_pge(). The X86_FEATURE_PGE bit is also cleared from there due to concerns of using PGE in guest kernel that can lead to hard to trace bugs (see bff672e630a0 ("lguest: documentation V: Host") in init()). The CPU feature bits are cleared in dynamic boot_cpu_data, but they never propagated to __flush_tlb_all() as it uses static_cpu_has() instead of boot_cpu_has() for testing which variant of TLB flushing to use, meaning they still used the old setting of the host kernel. 
Clearing the bit via setup_clear_cpu_cap(X86_FEATURE_PGE), so that it would propagate to static_cpu_has() checks, is too late at this point, as sections have been patched already. So for now it seems reasonable to switch back to boot_cpu_has(X86_FEATURE_PGE) as it was prior to commit c109bf95992b ("x86/cpufeature: Remove cpu_has_pge"). This lets the TLB flush trigger via cr3 as originally intended, properly makes the new page attributes visible and thus fixes the crashes seen by Fengguang. Fixes: c109bf95992b ("x86/cpufeature: Remove cpu_has_pge") Reported-by: Fengguang Wu Signed-off-by: Daniel Borkmann Cc: bp@suse.de Cc: Kees Cook Cc: "David S. Miller" Cc: netdev@vger.kernel.org Cc: Rusty Russell Cc: Alexei Starovoitov Cc: Linus Torvalds Cc: lkp@01.org Cc: Laura Abbott Cc: stable@vger.kernel.org Link: http://lkml.kernrl.org/r/20170301125426.l4nf65rx4wahohyl@wfg-t540p.sh.intel.com Link: http://lkml.kernel.org/r/25c41ad9eca164be4db9ad84f768965b7eb19d9e.1489191673.git.daniel@iogearbox.net Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/tlbflush.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h index 6fa8594..fc5abff 100644 --- a/arch/x86/include/asm/tlbflush.h +++ b/arch/x86/include/asm/tlbflush.h @@ -188,7 +188,7 @@ static inline void __native_flush_tlb_single(unsigned long addr) static inline void __flush_tlb_all(void) { - if (static_cpu_has(X86_FEATURE_PGE)) + if (boot_cpu_has(X86_FEATURE_PGE)) __flush_tlb_global(); else __flush_tlb(); -- cgit v1.1 From 80354c29025833acd72ddac1ffa21c6cb50128cd Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Sun, 12 Mar 2017 17:07:44 +0200 Subject: x86/platform/intel-mid: Correct MSI IRQ line for watchdog device The interrupt line used for the watchdog is 12, according to the official Intel Edison BSP code. And indeed after fixing it we start getting an interrupt and thus the watchdog starts working again: [ 191.699951] Kernel panic - not syncing: Kernel Watchdog Signed-off-by: Andy Shevchenko Cc: Borislav Petkov Cc: David Cohen Cc: H. Peter Anvin Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Fixes: 78a3bb9e408b ("x86: intel-mid: add watchdog platform code for Merrifield") Link: http://lkml.kernel.org/r/20170312150744.45493-1-andriy.shevchenko@linux.intel.com Signed-off-by: Ingo Molnar --- arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c index 86edd1e9..9e304e2 100644 --- a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_wdt.c @@ -19,7 +19,7 @@ #include #include -#define TANGIER_EXT_TIMER0_MSI 15 +#define TANGIER_EXT_TIMER0_MSI 12 static struct platform_device wdt_dev = { .name = "intel_mid_wdt", -- cgit v1.1 From 44fee88cea43d3c2cac962e0439cb10a3cabff6d Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 13 Mar 2017 15:57:12 +0100 Subject: x86/tsc: Fix ART for TSC_KNOWN_FREQ Subhransu reported that convert_art_to_tsc() isn't working for him. The ART to TSC relation is only set up for systems which use the refined TSC calibration. Systems with known TSC frequency (available via CPUID 15) are not using the refined calibration and therefore the ART to TSC relation is never established. 
Add the setup to the known frequency init path which skips ART calibration. The init code needs to be duplicated because, for systems which use refined calibration, the ART setup must be delayed until calibration has been done. The problem has been there since the ART support was introduced, but was only detected now because Subhransu tested for the first time on hardware which has the TSC frequency enumerated via CPUID 15. Note for stable: The conditional has changed from TSC_RELIABLE to TSC_KNOWN_FREQUENCY. [ tglx: Rewrote changelog and identified the proper 'Fixes' commit ] Fixes: f9677e0f8308 ("x86/tsc: Always Running Timer (ART) correlated clocksource") Reported-by: "Prusty, Subhransu S" Signed-off-by: Peter Zijlstra (Intel) Cc: stable@vger.kernel.org Cc: christopher.s.hall@intel.com Cc: kevin.b.stanton@intel.com Cc: john.stultz@linaro.org Cc: akataria@vmware.com Link: http://lkml.kernel.org/r/20170313145712.GI3312@twins.programming.kicks-ass.net Signed-off-by: Thomas Gleixner --- arch/x86/kernel/tsc.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 4f7a983..c73a7f9 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c @@ -1333,6 +1333,8 @@ static int __init init_tsc_clocksource(void) * the refined calibration and directly register it as a clocksource. */ if (boot_cpu_has(X86_FEATURE_TSC_KNOWN_FREQ)) { + if (boot_cpu_has(X86_FEATURE_ART)) + art_related_clocksource = &clocksource_tsc; clocksource_register_khz(&clocksource_tsc, tsc_khz); return 0; } -- cgit v1.1 From 0d443b70cc92d741cbc1dcbf1079897b3d8bc3cc Mon Sep 17 00:00:00 2001 From: Mike Travis Date: Tue, 7 Mar 2017 15:08:42 -0600 Subject: x86/platform: Remove warning message for duplicate NMI handlers Remove the WARNING message associated with multiple NMI handlers as there are at least two that are legitimate. These are the KGDB and the UV handlers, and both want to be called if the NMI has not been claimed by any other NMI handler. Use of the UNKNOWN NMI call chain dramatically lowers the NMI call rate when high frequency NMI tools are in use, notably the perf tools. It is required on systems that cannot sustain a high NMI call rate without adversely affecting the system operation. Signed-off-by: Mike Travis Reviewed-by: Dimitri Sivanich Cc: Don Zickus Cc: Peter Zijlstra Cc: Russ Anderson Cc: Frank Ramsay Cc: Tony Ernst Link: http://lkml.kernel.org/r/20170307210841.730959611@asylum.americas.sgi.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/nmi.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c index f088ea4..a723ae94 100644 --- a/arch/x86/kernel/nmi.c +++ b/arch/x86/kernel/nmi.c @@ -166,11 +166,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action) spin_lock_irqsave(&desc->lock, flags); /* - * most handlers of type NMI_UNKNOWN never return because - * they just assume the NMI is theirs. Just a sanity check - * to manage expectations + * Indicate if there are multiple registrations on the + * internal NMI handler call chains (SERR and IO_CHECK).
*/ - WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); -- cgit v1.1 From c836e5cf0d0880d6668966476c4ffe727f29f69a Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 8 Mar 2017 13:24:21 +0200 Subject: x86/platform/intel-mid: Use common power off sequence Intel Medfield can use the power-off sequence common to Intel MID devices. Remove the unneeded custom power off stub. While here, remove the function forward declaration. Signed-off-by: Andy Shevchenko Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170308112422.67533-1-andriy.shevchenko@linux.intel.com Signed-off-by: Thomas Gleixner --- arch/x86/platform/intel-mid/mfld.c | 15 ++++----------- 1 file changed, 4 insertions(+), 11 deletions(-) (limited to 'arch') diff --git a/arch/x86/platform/intel-mid/mfld.c b/arch/x86/platform/intel-mid/mfld.c index e793fe5..e42978d 100644 --- a/arch/x86/platform/intel-mid/mfld.c +++ b/arch/x86/platform/intel-mid/mfld.c @@ -17,16 +17,6 @@ #include "intel_mid_weak_decls.h" -static void penwell_arch_setup(void); -/* penwell arch ops */ -static struct intel_mid_ops penwell_ops = { - .arch_setup = penwell_arch_setup, -}; - -static void mfld_power_off(void) -{ -} - static unsigned long __init mfld_calibrate_tsc(void) { unsigned long fast_calibrate; @@ -63,9 +53,12 @@ static unsigned long __init mfld_calibrate_tsc(void) static void __init penwell_arch_setup(void) { x86_platform.calibrate_tsc = mfld_calibrate_tsc; - pm_power_off = mfld_power_off; } +static struct intel_mid_ops penwell_ops = { + .arch_setup = penwell_arch_setup, +}; + void *get_penwell_ops(void) { return &penwell_ops; -- cgit v1.1 From 859bb6d59066b96341020e0991f191631cabe59d Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 8 Mar 2017 13:24:22 +0200 Subject: x86/platform/intel-mid: Add power button support for Merrifield The Intel Merrifield platform has a Basin Cove PMIC to handle, in particular, power button events. Add the necessary bits to enable it.
Signed-off-by: Andy Shevchenko Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/20170308112422.67533-2-andriy.shevchenko@linux.intel.com Signed-off-by: Thomas Gleixner --- arch/x86/platform/intel-mid/device_libs/Makefile | 1 + .../device_libs/platform_mrfld_power_btn.c | 82 ++++++++++++++++++++++ 2 files changed, 83 insertions(+) create mode 100644 arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c (limited to 'arch') diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile index a7dbec4..3dbde04 100644 --- a/arch/x86/platform/intel-mid/device_libs/Makefile +++ b/arch/x86/platform/intel-mid/device_libs/Makefile @@ -26,5 +26,6 @@ obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_pcal9555a.o obj-$(subst m,y,$(CONFIG_GPIO_PCA953X)) += platform_tca6416.o # MISC Devices obj-$(subst m,y,$(CONFIG_KEYBOARD_GPIO)) += platform_gpio_keys.o +obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_mrfld_power_btn.o obj-$(subst m,y,$(CONFIG_RTC_DRV_CMOS)) += platform_mrfld_rtc.o obj-$(subst m,y,$(CONFIG_INTEL_MID_WATCHDOG)) += platform_mrfld_wdt.o diff --git a/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c new file mode 100644 index 0000000..a6c3705 --- /dev/null +++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_power_btn.c @@ -0,0 +1,82 @@ +/* + * Intel Merrifield power button support + * + * (C) Copyright 2017 Intel Corporation + * + * Author: Andy Shevchenko + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; version 2 + * of the License. + */ + +#include +#include +#include +#include + +#include +#include + +static struct resource mrfld_power_btn_resources[] = { + { + .flags = IORESOURCE_IRQ, + }, +}; + +static struct platform_device mrfld_power_btn_dev = { + .name = "msic_power_btn", + .id = PLATFORM_DEVID_NONE, + .num_resources = ARRAY_SIZE(mrfld_power_btn_resources), + .resource = mrfld_power_btn_resources, +}; + +static int mrfld_power_btn_scu_status_change(struct notifier_block *nb, + unsigned long code, void *data) +{ + if (code == SCU_DOWN) { + platform_device_unregister(&mrfld_power_btn_dev); + return 0; + } + + return platform_device_register(&mrfld_power_btn_dev); +} + +static struct notifier_block mrfld_power_btn_scu_notifier = { + .notifier_call = mrfld_power_btn_scu_status_change, +}; + +static int __init register_mrfld_power_btn(void) +{ + if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER) + return -ENODEV; + + /* + * We need to be sure that the SCU IPC is ready before + * PMIC power button device can be registered: + */ + intel_scu_notifier_add(&mrfld_power_btn_scu_notifier); + + return 0; +} +arch_initcall(register_mrfld_power_btn); + +static void __init *mrfld_power_btn_platform_data(void *info) +{ + struct resource *res = mrfld_power_btn_resources; + struct sfi_device_table_entry *pentry = info; + + res->start = res->end = pentry->irq; + return NULL; +} + +static const struct devs_id mrfld_power_btn_dev_id __initconst = { + .name = "bcove_power_btn", + .type = SFI_DEV_TYPE_IPC, + .delay = 1, + .msic = 1, + .get_platform_data = &mrfld_power_btn_platform_data, +}; + +sfi_device(mrfld_power_btn_dev_id); -- cgit v1.1 From 9aea151f282df2b82a44fd7058a239334437c266 Mon Sep 17 00:00:00 2001 From: Linus Walleij Date: Sun, 26 Feb 2017 01:02:09 +0100 Subject: ARM: dts: add the 
AB8500 clocks to the device tree This adds the AB8500 clocks to the device tree using the new bindings from the clk subsystem, making audio work again. Cc: Lee Jones Cc: Ulf Hansson Signed-off-by: Linus Walleij Signed-off-by: Olof Johansson --- arch/arm/boot/dts/ste-dbx5x0.dtsi | 19 +++++++++++++++++++ arch/arm/boot/dts/ste-href.dtsi | 9 --------- arch/arm/boot/dts/ste-snowball.dts | 9 --------- 3 files changed, 19 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi index 82d8c47..162e1eb 100644 --- a/arch/arm/boot/dts/ste-dbx5x0.dtsi +++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi @@ -14,6 +14,7 @@ #include #include #include +#include #include "skeleton.dtsi" / { @@ -603,6 +604,11 @@ interrupt-controller; #interrupt-cells = <2>; + ab8500_clock: clock-controller { + compatible = "stericsson,ab8500-clk"; + #clock-cells = <1>; + }; + ab8500_gpio: ab8500-gpio { compatible = "stericsson,ab8500-gpio"; gpio-controller; @@ -686,6 +692,8 @@ ab8500-pwm { compatible = "stericsson,ab8500-pwm"; + clocks = <&ab8500_clock AB8500_SYSCLK_INT>; + clock-names = "intclk"; }; ab8500-debugfs { @@ -700,6 +708,9 @@ V-AMIC2-supply = <&ab8500_ldo_anamic2_reg>; V-DMIC-supply = <&ab8500_ldo_dmic_reg>; + clocks = <&ab8500_clock AB8500_SYSCLK_AUDIO>; + clock-names = "audioclk"; + stericsson,earpeice-cmv = <950>; /* Units in mV. */ }; @@ -1095,6 +1106,14 @@ status = "disabled"; }; + sound { + compatible = "stericsson,snd-soc-mop500"; + stericsson,cpu-dai = <&msp1 &msp3>; + stericsson,audio-codec = <&codec>; + clocks = <&prcmu_clk PRCMU_SYSCLK>, <&ab8500_clock AB8500_SYSCLK_ULP>, <&ab8500_clock AB8500_SYSCLK_INT>; + clock-names = "sysclk", "ulpclk", "intclk"; + }; + msp0: msp@80123000 { compatible = "stericsson,ux500-msp-i2s"; reg = <0x80123000 0x1000>; diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi index f37f9e1..9e359e4 100644 --- a/arch/arm/boot/dts/ste-href.dtsi +++ b/arch/arm/boot/dts/ste-href.dtsi @@ -186,15 +186,6 @@ status = "okay"; }; - sound { - compatible = "stericsson,snd-soc-mop500"; - - stericsson,cpu-dai = <&msp1 &msp3>; - stericsson,audio-codec = <&codec>; - clocks = <&prcmu_clk PRCMU_SYSCLK>; - clock-names = "sysclk"; - }; - msp0: msp@80123000 { pinctrl-names = "default"; pinctrl-0 = <&msp0_default_mode>; diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts index dd5514d..ade1d0d 100644 --- a/arch/arm/boot/dts/ste-snowball.dts +++ b/arch/arm/boot/dts/ste-snowball.dts @@ -159,15 +159,6 @@ "", "", "", "", "", "", "", ""; }; - sound { - compatible = "stericsson,snd-soc-mop500"; - - stericsson,cpu-dai = <&msp1 &msp3>; - stericsson,audio-codec = <&codec>; - clocks = <&prcmu_clk PRCMU_SYSCLK>; - clock-names = "sysclk"; - }; - msp0: msp@80123000 { pinctrl-names = "default"; pinctrl-0 = <&msp0_default_mode>; -- cgit v1.1 From be3606ff739d1c1be36389f8737c577ad87e1f57 Mon Sep 17 00:00:00 2001 From: Andrey Ryabinin Date: Mon, 13 Mar 2017 19:33:37 +0300 Subject: x86/kasan: Fix boot with KASAN=y and PROFILE_ANNOTATED_BRANCHES=y The kernel doesn't boot with both PROFILE_ANNOTATED_BRANCHES=y and KASAN=y options selected. With branch profiling enabled we end up calling ftrace_likely_update() before kasan_early_init(). ftrace_likely_update() is built with KASAN instrumentation, so calling it before kasan has been initialized leads to crash. Use DISABLE_BRANCH_PROFILING define to make sure that we don't call ftrace_likely_update() from early code before kasan_early_init(). 
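A minimal userspace model of the mechanism (the hook stands in for ftrace_likely_update(); this is an illustrative simplification, not the kernel's actual likely() macro):

#include <stdio.h>

static int profile_hits;

static void branch_profile_hook(int taken)  /* stand-in for ftrace_likely_update() */
{
	(void)taken;
	profile_hits++;  /* in the kernel this touches instrumented memory */
}

#ifdef DISABLE_BRANCH_PROFILING
# define likely(x)  __builtin_expect(!!(x), 1)
#else
# define likely(x)  ({ int __t = !!(x); branch_profile_hook(__t); __t; })
#endif

int main(void)
{
	if (likely(profile_hits == 0))
		printf("profiling hook ran %d time(s)\n", profile_hits);
	return 0;
}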
Fixes: ef7f0d6a6ca8 ("x86_64: add KASan support") Reported-by: Fengguang Wu Signed-off-by: Andrey Ryabinin Cc: kasan-dev@googlegroups.com Cc: Alexander Potapenko Cc: stable@vger.kernel.org Cc: Andrew Morton Cc: lkp@01.org Cc: Dmitry Vyukov Link: http://lkml.kernel.org/r/20170313163337.1704-1-aryabinin@virtuozzo.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/head64.c | 1 + arch/x86/mm/kasan_init_64.c | 1 + 2 files changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c index 54a2372..b5785c1 100644 --- a/arch/x86/kernel/head64.c +++ b/arch/x86/kernel/head64.c @@ -4,6 +4,7 @@ * Copyright (C) 2000 Andrea Arcangeli SuSE */ +#define DISABLE_BRANCH_PROFILING #include #include #include diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c index 8d63d7a..4c90cfd 100644 --- a/arch/x86/mm/kasan_init_64.c +++ b/arch/x86/mm/kasan_init_64.c @@ -1,3 +1,4 @@ +#define DISABLE_BRANCH_PROFILING #define pr_fmt(fmt) "kasan: " fmt #include #include -- cgit v1.1 From 60b89f1928af80b546b5c3fd8714a62f6f4b8844 Mon Sep 17 00:00:00 2001 From: Nicolas Ferre Date: Tue, 14 Mar 2017 09:38:04 +0100 Subject: ARM: at91: pm: cpu_idle: switch DDR to power-down mode On some DDR controllers, compatible with the sama5d3 one, the sequence to enter/exit/re-enter the self-refresh mode adds more constraints than what is currently written in the at91_idle driver. An actual access to the DDR chip is needed between exit and re-enter of this mode, which is somewhat difficult to implement. This sequence can completely hang the SoC. It is particularly experienced on parts which embed an L2 cache if the code run between IDLE calls fits in it... Moreover, as the intention is to enter and exit IDLE pretty rapidly, the power-down mode is a good candidate. So now we use power-down instead of self-refresh. As we can simplify the code for sama5d3-compatible DDR controllers, we instantiate a new sama5d3_ddr_standby() function. Signed-off-by: Nicolas Ferre Cc: # v4.1+ Fixes: 017b5522d5e3 ("ARM: at91: Add new binding for sama5d3-ddramc") Signed-off-by: Alexandre Belloni --- arch/arm/mach-at91/pm.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/mach-at91/pm.c b/arch/arm/mach-at91/pm.c index 3d89b79..a277981 100644 --- a/arch/arm/mach-at91/pm.c +++ b/arch/arm/mach-at91/pm.c @@ -289,6 +289,22 @@ static void at91_ddr_standby(void) at91_ramc_write(1, AT91_DDRSDRC_LPR, saved_lpr1); } +static void sama5d3_ddr_standby(void) +{ + u32 lpr0; + u32 saved_lpr0; + + saved_lpr0 = at91_ramc_read(0, AT91_DDRSDRC_LPR); + lpr0 = saved_lpr0 & ~AT91_DDRSDRC_LPCB; + lpr0 |= AT91_DDRSDRC_LPCB_POWER_DOWN; + + at91_ramc_write(0, AT91_DDRSDRC_LPR, lpr0); + + cpu_do_idle(); + + at91_ramc_write(0, AT91_DDRSDRC_LPR, saved_lpr0); +} + /* We manage both DDRAM/SDRAM controllers, we need more than one value to * remember.
*/ @@ -323,7 +339,7 @@ static const struct of_device_id const ramc_ids[] __initconst = { { .compatible = "atmel,at91rm9200-sdramc", .data = at91rm9200_standby }, { .compatible = "atmel,at91sam9260-sdramc", .data = at91sam9_sdram_standby }, { .compatible = "atmel,at91sam9g45-ddramc", .data = at91_ddr_standby }, - { .compatible = "atmel,sama5d3-ddramc", .data = at91_ddr_standby }, + { .compatible = "atmel,sama5d3-ddramc", .data = sama5d3_ddr_standby }, { /*sentinel*/ } }; -- cgit v1.1 From d434936e4cbb10181463622962f30b989d3e9e19 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Tue, 14 Mar 2017 13:12:53 +0100 Subject: mm, x86: Fix native_pud_clear build error We still get a build error in random configurations, after this has been modified a few times: In file included from include/linux/mm.h:68:0, from include/linux/suspend.h:8, from arch/x86/kernel/asm-offsets.c:12: arch/x86/include/asm/pgtable.h:66:26: error: redefinition of 'native_pud_clear' #define pud_clear(pud) native_pud_clear(pud) My interpretation is that the build error comes from a typo in __PAGETABLE_PUD_FOLDED, so fix that typo now, and remove the incorrect #ifdef around the native_pud_clear definition. Fixes: 3e761a42e19c ("mm, x86: fix HIGHMEM64 && PARAVIRT build config for native_pud_clear()") Fixes: a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages") Signed-off-by: Arnd Bergmann Acked-by: Dave Jiang Cc: Kees Cook Cc: Dave Hansen Cc: Hugh Dickins Cc: Andrew Morton Cc: Borislav Petkov Cc: Thomas Garnier Link: http://lkml.kernel.org/r/20170314121330.182155-1-arnd@arndb.de Signed-off-by: Thomas Gleixner --- arch/x86/include/asm/pgtable-3level.h | 3 --- arch/x86/include/asm/pgtable.h | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 72277b1..50d35e3 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ - defined(CONFIG_PARAVIRT)) static inline void native_pud_clear(pud_t *pudp) { } -#endif static inline void pud_clear(pud_t *pudp) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 1cfb36b..585ee0d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); # define set_pud(pudp, pud) native_set_pud(pudp, pud) #endif -#ifndef __PAGETABLE_PMD_FOLDED +#ifndef __PAGETABLE_PUD_FOLDED #define pud_clear(pud) native_pud_clear(pud) #endif -- cgit v1.1 From 87a6b2975f0d340c75b7488d22d61d2f98fb8abf Mon Sep 17 00:00:00 2001 From: Josh Poimboeuf Date: Mon, 13 Mar 2017 23:27:47 -0500 Subject: x86/unwind: Fix last frame check for aligned function stacks Pavel Machek reported the following warning on x86-32: WARNING: kernel stack frame pointer at f50cdf98 in swapper/2:0 has bad value (null) The warning is caused by the unwinder not realizing that it reached the end of the stack, due to an unusual prologue which gcc sometimes generates for aligned stacks. The prologue is based on a gcc feature called the Dynamic Realign Argument Pointer (DRAP). It's almost always enabled for aligned stacks when -maccumulate-outgoing-args isn't set. This issue is similar to the one fixed by the following commit: 8023e0e2a48d ("x86/unwind: Adjust last frame check for aligned function stacks") ... 
but that fix was specific to x86-64. Make the fix more generic to cover x86-32 as well, and also ensure that the return address referred to by the frame pointer is a copy of the original return address. Fixes: acb4608ad186 ("x86/unwind: Create stack frames for saved syscall registers") Reported-by: Pavel Machek Signed-off-by: Josh Poimboeuf Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/50d4924db716c264b14f1633037385ec80bf89d2.1489465609.git.jpoimboe@redhat.com Signed-off-by: Thomas Gleixner --- arch/x86/kernel/unwind_frame.c | 36 ++++++++++++++++++++++++++++++------ 1 file changed, 30 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c index 478d15d..0833926 100644 --- a/arch/x86/kernel/unwind_frame.c +++ b/arch/x86/kernel/unwind_frame.c @@ -82,19 +82,43 @@ static size_t regs_size(struct pt_regs *regs) return sizeof(*regs); } +#ifdef CONFIG_X86_32 +#define GCC_REALIGN_WORDS 3 +#else +#define GCC_REALIGN_WORDS 1 +#endif + static bool is_last_task_frame(struct unwind_state *state) { - unsigned long bp = (unsigned long)state->bp; - unsigned long regs = (unsigned long)task_pt_regs(state->task); + unsigned long *last_bp = (unsigned long *)task_pt_regs(state->task) - 2; + unsigned long *aligned_bp = last_bp - GCC_REALIGN_WORDS; /* * We have to check for the last task frame at two different locations * because gcc can occasionally decide to realign the stack pointer and - * change the offset of the stack frame by a word in the prologue of a - * function called by head/entry code. + * change the offset of the stack frame in the prologue of a function + * called by head/entry code. Examples: + * + * : + * push %edi + * lea 0x8(%esp),%edi + * and $0xfffffff8,%esp + * pushl -0x4(%edi) + * push %ebp + * mov %esp,%ebp + * + * : + * lea 0x8(%rsp),%r10 + * and $0xfffffffffffffff0,%rsp + * pushq -0x8(%r10) + * push %rbp + * mov %rsp,%rbp + * + * Note that after aligning the stack, it pushes a duplicate copy of + * the return address before pushing the frame pointer. */ - return bp == regs - FRAME_HEADER_SIZE || - bp == regs - FRAME_HEADER_SIZE - sizeof(long); + return (state->bp == last_bp || + (state->bp == aligned_bp && *(aligned_bp+1) == *(last_bp+1))); } /* -- cgit v1.1 From 49ec8f5b6ae3ab60385492cad900ffc8a523c895 Mon Sep 17 00:00:00 2001 From: Jiri Olsa Date: Tue, 14 Mar 2017 15:20:53 +0100 Subject: x86/intel_rdt: Put group node in rdtgroup_kn_unlock The rdtgroup_kn_unlock waits for the last user to release and put its node. But it's calling kernfs_put on the node which calls the rdtgroup_kn_unlock, which might not be the group's directory node, but another group's file node. This race could be easily reproduced by running 2 instances of following script: mount -t resctrl resctrl /sys/fs/resctrl/ pushd /sys/fs/resctrl/ mkdir krava echo "krava" > krava/schemata rmdir krava popd umount /sys/fs/resctrl It triggers the slub debug error message with following command line config: slub_debug=,kernfs_node_cache. Call kernfs_put on the group's node to fix it. 
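As a loose userspace analogy of the reference pairing at stake (simplified; real kernfs refcounting is more involved), the reference taken on the group's directory node at creation must be the one dropped in the deferred release, not a reference on whichever node the final syscall came through:

#include <stdio.h>

struct node { const char *name; int refs; };

static void node_put(struct node *n)
{
	if (--n->refs == 0)
		printf("freed %s\n", n->name);
	else if (n->refs < 0)
		printf("underflow on %s!\n", n->name);  /* slub-debug territory */
}

int main(void)
{
	struct node dir  = { "krava",          2 };  /* creation + group's ref */
	struct node file = { "krava/schemata", 1 };
	struct node *kn  = &file;  /* node the last syscall came through */

	node_put(kn);       /* buggy: kernfs_put(kn) drops the wrong node  */
	node_put(kn);       /* ...so a later legitimate put underflows it  */

	node_put(&dir);     /* fixed: kernfs_put(rdtgrp->kn)               */
	node_put(&dir);     /* dir drops to zero and is properly freed     */
	return 0;
}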
Fixes: 60cf5e101fd4 ("x86/intel_rdt: Add mkdir to resctrl file system") Signed-off-by: Jiri Olsa Cc: Fenghua Yu Cc: Peter Zijlstra Cc: Peter Zijlstra Cc: Mike Galbraith Cc: Shaohua Li Cc: stable@vger.kernel.org Link: http://lkml.kernel.org/r/1489501253-20248-1-git-send-email-jolsa@kernel.org Signed-off-by: Thomas Gleixner --- arch/x86/kernel/cpu/intel_rdt_rdtgroup.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c index c05509d..9ac2a5c 100644 --- a/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c +++ b/arch/x86/kernel/cpu/intel_rdt_rdtgroup.c @@ -727,7 +727,7 @@ void rdtgroup_kn_unlock(struct kernfs_node *kn) if (atomic_dec_and_test(&rdtgrp->waitcount) && (rdtgrp->flags & RDT_DELETED)) { kernfs_unbreak_active_protection(kn); - kernfs_put(kn); + kernfs_put(rdtgrp->kn); kfree(rdtgrp); } else { kernfs_unbreak_active_protection(kn); -- cgit v1.1 From 8af42949d1681379c1a97d230de9242c1f4f326a Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Mon, 13 Mar 2017 07:41:55 +0900 Subject: openrisc: xchg: fix `computed is not used` warning When building allmodconfig this warning appears: fs/ocfs2/file.c: In function 'ocfs2_file_write_iter': ./arch/openrisc/include/asm/cmpxchg.h:81:3: warning: value computed is not used [-Wunused-value] ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) ^ This applies the same fix that was used for the cmpxchg macro. Signed-off-by: Stafford Horne --- arch/openrisc/include/asm/cmpxchg.h | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h index 5fcb9ac..f0a5d8b 100644 --- a/arch/openrisc/include/asm/cmpxchg.h +++ b/arch/openrisc/include/asm/cmpxchg.h @@ -77,7 +77,11 @@ static inline unsigned long __xchg(unsigned long val, volatile void *ptr, return val; } -#define xchg(ptr, with) \ - ((typeof(*(ptr)))__xchg((unsigned long)(with), (ptr), sizeof(*(ptr)))) +#define xchg(ptr, with) \ + ({ \ + (__typeof__(*(ptr))) __xchg((unsigned long)(with), \ + (ptr), \ + sizeof(*(ptr))); \ + }) #endif /* __ASM_OPENRISC_CMPXCHG_H */ -- cgit v1.1 From 154e67cd8e8f964809d0e75e44bb121b169c75b3 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Mon, 13 Mar 2017 07:44:45 +0900 Subject: openrisc: fix issue handling 8 byte get_user calls Was getting the following error with allmodconfig: ERROR: "__get_user_bad" [lib/test_user_copy.ko] undefined! This was simply a missing break statement, causing an unwanted fall through.
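The bug class is easy to reproduce in isolation; a minimal standalone demo (hypothetical stand-in names, not the kernel macro):

#include <stdio.h>

/* A switch on the access size, as in __get_user(): without the break,
 * the 8-byte case falls through into the default (error) arm. */
static void get_user_demo(int size)
{
	switch (size) {
	case 4:
		printf("4-byte load\n");
		break;
	case 8:
		printf("8-byte load\n");  /* missing break: falls through */
	default:
		printf("size %d: __get_user_bad()\n", size);
	}
}

int main(void)
{
	get_user_demo(8);  /* performs the load, then hits the error arm */
	return 0;
}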
Signed-off-by: Stafford Horne --- arch/openrisc/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 140faa1..1311e6b 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -211,7 +211,7 @@ do { \ case 1: __get_user_asm(x, ptr, retval, "l.lbz"); break; \ case 2: __get_user_asm(x, ptr, retval, "l.lhz"); break; \ case 4: __get_user_asm(x, ptr, retval, "l.lwz"); break; \ - case 8: __get_user_asm2(x, ptr, retval); \ + case 8: __get_user_asm2(x, ptr, retval); break; \ default: (x) = __get_user_bad(); \ } \ } while (0) -- cgit v1.1 From 363dad58e4a0f72dce0bf12d361d617239a80317 Mon Sep 17 00:00:00 2001 From: Stafford Horne Date: Tue, 14 Mar 2017 22:52:49 +0900 Subject: openrisc: Export symbols needed by modules This was detected by allmodconfig, errors reported: ERROR: "empty_zero_page" [net/ceph/libceph.ko] undefined! ERROR: "__ucmpdi2" [lib/842/842_decompress.ko] undefined! ERROR: "empty_zero_page" [fs/nfs/objlayout/objlayoutdriver.ko] undefined! ERROR: "empty_zero_page" [fs/exofs/exofs.ko] undefined! ERROR: "empty_zero_page" [fs/crypto/fscrypto.ko] undefined! ERROR: "__ucmpdi2" [fs/btrfs/btrfs.ko] undefined! ERROR: "pm_power_off" [drivers/regulator/act8865-regulator.ko] undefined! ERROR: "__ucmpdi2" [drivers/media/i2c/adv7842.ko] undefined! ERROR: "__clear_user" [drivers/md/dm-mod.ko] undefined! ERROR: "__clear_user" [net/netfilter/x_tables.ko] undefined! Signed-off-by: Stafford Horne --- arch/openrisc/kernel/or32_ksyms.c | 4 ++++ arch/openrisc/kernel/process.c | 1 + 2 files changed, 5 insertions(+) (limited to 'arch') diff --git a/arch/openrisc/kernel/or32_ksyms.c b/arch/openrisc/kernel/or32_ksyms.c index 5c4695d..ee3e604 100644 --- a/arch/openrisc/kernel/or32_ksyms.c +++ b/arch/openrisc/kernel/or32_ksyms.c @@ -30,6 +30,7 @@ #include #include #include +#include #define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name) @@ -42,6 +43,9 @@ DECLARE_EXPORT(__muldi3); DECLARE_EXPORT(__ashrdi3); DECLARE_EXPORT(__ashldi3); DECLARE_EXPORT(__lshrdi3); +DECLARE_EXPORT(__ucmpdi2); +EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(__copy_tofrom_user); +EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(memset); diff --git a/arch/openrisc/kernel/process.c b/arch/openrisc/kernel/process.c index 828a291..f8da545 100644 --- a/arch/openrisc/kernel/process.c +++ b/arch/openrisc/kernel/process.c @@ -90,6 +90,7 @@ void arch_cpu_idle(void) } void (*pm_power_off) (void) = machine_power_off; +EXPORT_SYMBOL(pm_power_off); /* * When a process does an "exec", machine state like FPU and debug -- cgit v1.1 From 5f655322b1ba4bd46e26e307d04098f9c84df764 Mon Sep 17 00:00:00 2001 From: Mikulas Patocka Date: Tue, 14 Mar 2017 11:47:29 -0400 Subject: parisc: support R_PARISC_SECREL32 relocation in modules The parisc kernel doesn't work with CONFIG_MODVERSIONS since commit 71810db27c1c853b335675bee335d893bc3d324b. It can't load modules, failing with the error: "module unix: Unknown relocation: 41". That commit changed __kcrctab from 64-bit values to 32-bit values. The assembler generates R_PARISC_SECREL32 section-relative relocations for them, and the module loader doesn't support this relocation. This patch adds the R_PARISC_SECREL32 relocation to the module loader.
Signed-off-by: Mikulas Patocka Cc: stable@vger.kernel.org # v4.10+ Signed-off-by: Helge Deller --- arch/parisc/kernel/module.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'arch') diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c index a0ecdb4..c66c943 100644 --- a/arch/parisc/kernel/module.c +++ b/arch/parisc/kernel/module.c @@ -620,6 +620,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, */ *loc = fsel(val, addend); break; + case R_PARISC_SECREL32: + /* 32-bit section relative address. */ + *loc = fsel(val, addend); + break; case R_PARISC_DPREL21L: /* left 21 bit of relative address */ val = lrsel(val - dp, addend); @@ -807,6 +811,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, */ *loc = fsel(val, addend); break; + case R_PARISC_SECREL32: + /* 32-bit section relative address. */ + *loc = fsel(val, addend); + break; case R_PARISC_FPTR64: /* 64-bit function address */ if(in_local(me, (void *)(val + addend))) { -- cgit v1.1 From 316ec0624f951166daedbe446988ef92ae72b59f Mon Sep 17 00:00:00 2001 From: John David Anglin Date: Sat, 11 Mar 2017 18:03:34 -0500 Subject: parisc: Optimize flush_kernel_vmap_range and invalidate_kernel_vmap_range The previously submitted patch did not resolve the random segmentation faults observed on the phantom buildd system. There are still unresolved problems with the Debian 4.8 and 4.9 kernels on C8000. The attached patch removes the flush of the offset map pages and does a whole data cache flush for large ranges. No other arch flushes the offset map in these routines as far as I can tell. I have not observed any random segmentation faults on rp3440 in two weeks of testing with 4.10.0 and 4.10.1. Signed-off-by: John David Anglin Cc: stable@vger.kernel.org # v4.8+ Signed-off-by: Helge Deller --- arch/parisc/include/asm/cacheflush.h | 23 ++--------------------- arch/parisc/kernel/cache.c | 22 ++++++++++++++++++++++ 2 files changed, 24 insertions(+), 21 deletions(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index 19c9c3c..c7e15cc 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h @@ -43,28 +43,9 @@ static inline void flush_kernel_dcache_page(struct page *page) #define flush_kernel_dcache_range(start,size) \ flush_kernel_dcache_range_asm((start), (start)+(size)); -/* vmap range flushes and invalidates. 
Architecturally, we don't need - * the invalidate, because the CPU should refuse to speculate once an - * area has been flushed, so invalidate is left empty */ -static inline void flush_kernel_vmap_range(void *vaddr, int size) -{ - unsigned long start = (unsigned long)vaddr; - - flush_kernel_dcache_range_asm(start, start + size); -} -static inline void invalidate_kernel_vmap_range(void *vaddr, int size) -{ - unsigned long start = (unsigned long)vaddr; - void *cursor = vaddr; - for ( ; cursor < vaddr + size; cursor += PAGE_SIZE) { - struct page *page = vmalloc_to_page(cursor); - - if (test_and_clear_bit(PG_dcache_dirty, &page->flags)) - flush_kernel_dcache_page(page); - } - flush_kernel_dcache_range_asm(start, start + size); -} +void flush_kernel_vmap_range(void *vaddr, int size); +void invalidate_kernel_vmap_range(void *vaddr, int size); #define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all() diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c index 0dc72d5..c32a090 100644 --- a/arch/parisc/kernel/cache.c +++ b/arch/parisc/kernel/cache.c @@ -616,3 +616,25 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long __flush_cache_page(vma, vmaddr, PFN_PHYS(pfn)); } } + +void flush_kernel_vmap_range(void *vaddr, int size) +{ + unsigned long start = (unsigned long)vaddr; + + if ((unsigned long)size > parisc_cache_flush_threshold) + flush_data_cache(); + else + flush_kernel_dcache_range_asm(start, start + size); +} +EXPORT_SYMBOL(flush_kernel_vmap_range); + +void invalidate_kernel_vmap_range(void *vaddr, int size) +{ + unsigned long start = (unsigned long)vaddr; + + if ((unsigned long)size > parisc_cache_flush_threshold) + flush_data_cache(); + else + flush_kernel_dcache_range_asm(start, start + size); +} +EXPORT_SYMBOL(invalidate_kernel_vmap_range); -- cgit v1.1 From 63d32d1e09cb2fc65b084b261976c06b40d19115 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 15 Mar 2017 21:10:17 +0100 Subject: parisc: Wire up statx system call Signed-off-by: Helge Deller --- arch/parisc/include/uapi/asm/unistd.h | 3 ++- arch/parisc/kernel/syscall_table.S | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h index 6b0741e..667c994 100644 --- a/arch/parisc/include/uapi/asm/unistd.h +++ b/arch/parisc/include/uapi/asm/unistd.h @@ -362,8 +362,9 @@ #define __NR_copy_file_range (__NR_Linux + 346) #define __NR_preadv2 (__NR_Linux + 347) #define __NR_pwritev2 (__NR_Linux + 348) +#define __NR_statx (__NR_Linux + 349) -#define __NR_Linux_syscalls (__NR_pwritev2 + 1) +#define __NR_Linux_syscalls (__NR_statx + 1) #define __IGNORE_select /* newselect */ diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S index 3cfef1d..44aeaa9 100644 --- a/arch/parisc/kernel/syscall_table.S +++ b/arch/parisc/kernel/syscall_table.S @@ -444,6 +444,7 @@ ENTRY_SAME(copy_file_range) ENTRY_COMP(preadv2) ENTRY_COMP(pwritev2) + ENTRY_SAME(statx) .ifne (. - 90b) - (__NR_Linux_syscalls * (91b - 90b)) -- cgit v1.1 From 186ecf14e58befba434f0774eea89e35f64d3c6a Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 15 Mar 2017 21:48:42 +0100 Subject: parisc: Avoid compiler warnings with access_ok() Commit 09b871ffd4d8 (parisc: Define access_ok() as macro) failed to mark uaddr as used, which then gives compiler warnings about unused variables.
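A minimal standalone illustration of this warning class (hypothetical macro names, not the parisc headers; the second macro anticipates the fix described next):

/* Build with: gcc -Wunused-variable -c demo.c */
#define ACCESS_OK_OLD(uaddr, size)  (1)                   /* discards uaddr */
#define ACCESS_OK_NEW(uaddr, size)  ((uaddr) == (uaddr))  /* "uses" uaddr   */

int range_ok(void)
{
	char *uaddr = (char *)0x1000;   /* only ever passed to the macro */

	return ACCESS_OK_OLD(uaddr, 4); /* warning: unused variable 'uaddr' */
}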
Fix it by comparing uaddr to uaddr, which the compiler then optimizes away. Signed-off-by: Helge Deller Fixes: 09b871ffd4d8 ("parisc: Define access_ok() as macro") --- arch/parisc/include/asm/uaccess.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index fb4382c..edfbf9d 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -32,7 +32,8 @@ * that put_user is the same as __put_user, etc. */ -#define access_ok(type, uaddr, size) (1) +#define access_ok(type, uaddr, size) \ + ( (uaddr) == (uaddr) ) #define put_user __put_user #define get_user __get_user -- cgit v1.1 From 6bce725a78de1b171928ce66dec2bae4b569e5d1 Mon Sep 17 00:00:00 2001 From: Tobias Klauser Date: Wed, 8 Mar 2017 14:30:34 +0100 Subject: x86/mpx: Make unnecessarily global function static Make the function get_user_bd_entry() static as it is not used outside of arch/x86/mm/mpx.c. This fixes a sparse warning. Signed-off-by: Tobias Klauser Cc: Dave Hansen Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Thomas Gleixner Cc: linux-kernel@vger.kernel.org Signed-off-by: Ingo Molnar --- arch/x86/mm/mpx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c index 5126dfd..cd44ae7 100644 --- a/arch/x86/mm/mpx.c +++ b/arch/x86/mm/mpx.c @@ -590,7 +590,7 @@ static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm, * we might run off the end of the bounds table if we are on * a 64-bit kernel and try to get 8 bytes. */ -int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, +static int get_user_bd_entry(struct mm_struct *mm, unsigned long *bd_entry_ret, long __user *bd_entry_ptr) { u32 bd_entry_32; -- cgit v1.1 From f717629c7f834ab2efa05c7dbf0826f1d7c32ade Mon Sep 17 00:00:00 2001 From: Chandan Rajendra Date: Thu, 16 Mar 2017 14:37:11 +0530 Subject: powerpc: Wire up statx() syscall Test runs on a ppc64 BE guest succeeded. The linux/samples/statx/test-statx program was executed on the following file types: 1. Regular file 2. Directory 3. Device file 4. Symlink 5.
Named pipe The test run also included invoking test-statx with the runtime options provided in the main() function of test-statx.c Signed-off-by: Chandan Rajendra Signed-off-by: Michael Ellerman --- arch/powerpc/include/asm/systbl.h | 1 + arch/powerpc/include/asm/unistd.h | 2 +- arch/powerpc/include/uapi/asm/unistd.h | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 4b369d8..1c94708 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h @@ -387,3 +387,4 @@ SYSCALL(copy_file_range) COMPAT_SYS_SPU(preadv2) COMPAT_SYS_SPU(pwritev2) SYSCALL(kexec_file_load) +SYSCALL(statx) diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index eb1acee..9ba11db 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h @@ -12,7 +12,7 @@ #include -#define NR_syscalls 383 +#define NR_syscalls 384 #define __NR__exit __NR_exit diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h index 2f26335..b85f142 100644 --- a/arch/powerpc/include/uapi/asm/unistd.h +++ b/arch/powerpc/include/uapi/asm/unistd.h @@ -393,5 +393,6 @@ #define __NR_preadv2 380 #define __NR_pwritev2 381 #define __NR_kexec_file_load 382 +#define __NR_statx 383 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ -- cgit v1.1 From acfa28b3649ec07775efaac0c00de2db39d71634 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Wed, 1 Mar 2017 18:02:28 -0500 Subject: ARM: dts: NSP: GPIO reboot open-source The libgpio code pre-sets the GPIO values for the gpio-reset in the device tree. This results in the device being reset during bringup. To prevent this pre-setting, use the "open-source" flag in the device tree. 
Signed-off-by: Jon Mason Fixes: b1aaf88 ("ARM: dts: NSP: Add GPIO reboot method to bcm958625hr DTS file") Fixes: 10baed1 ("ARM: dts: NSP: Add GPIO reboot method to bcm958625xmc DTS file") Fixes: 088e3148 ("ARM: dts: NSP: Add new DT file for bcm958522er") Fixes: e3227c1 ("ARM: dts: NSP: Add new DT file for bcm958525er") Fixes: 2f8bc00 ("ARM: dts: NSP: Add new DT file for bcm958622hr") Fixes: d454c37 ("ARM: dts: NSP: Add new DT file for bcm958623hr") Fixes: f27eacf ("ARM: dts: NSP: Add new DT file for bcm988312hr") Signed-off-by: Florian Fainelli --- arch/arm/boot/dts/bcm958522er.dts | 1 + arch/arm/boot/dts/bcm958525er.dts | 1 + arch/arm/boot/dts/bcm958525xmc.dts | 1 + arch/arm/boot/dts/bcm958622hr.dts | 1 + arch/arm/boot/dts/bcm958623hr.dts | 1 + arch/arm/boot/dts/bcm958625hr.dts | 1 + arch/arm/boot/dts/bcm988312hr.dts | 1 + 7 files changed, 7 insertions(+) (limited to 'arch') diff --git a/arch/arm/boot/dts/bcm958522er.dts b/arch/arm/boot/dts/bcm958522er.dts index 3f04a40..df05e7f 100644 --- a/arch/arm/boot/dts/bcm958522er.dts +++ b/arch/arm/boot/dts/bcm958522er.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958525er.dts b/arch/arm/boot/dts/bcm958525er.dts index 9fd5422..4a3ab19 100644 --- a/arch/arm/boot/dts/bcm958525er.dts +++ b/arch/arm/boot/dts/bcm958525er.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958525xmc.dts b/arch/arm/boot/dts/bcm958525xmc.dts index 41e7fd3..81f7843 100644 --- a/arch/arm/boot/dts/bcm958525xmc.dts +++ b/arch/arm/boot/dts/bcm958525xmc.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 31 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958622hr.dts b/arch/arm/boot/dts/bcm958622hr.dts index 477c486..c88b8fe 100644 --- a/arch/arm/boot/dts/bcm958622hr.dts +++ b/arch/arm/boot/dts/bcm958622hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958623hr.dts b/arch/arm/boot/dts/bcm958623hr.dts index c0a499d..d503fa0 100644 --- a/arch/arm/boot/dts/bcm958623hr.dts +++ b/arch/arm/boot/dts/bcm958623hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm958625hr.dts b/arch/arm/boot/dts/bcm958625hr.dts index f7eb585..cc0363b 100644 --- a/arch/arm/boot/dts/bcm958625hr.dts +++ b/arch/arm/boot/dts/bcm958625hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; diff --git a/arch/arm/boot/dts/bcm988312hr.dts b/arch/arm/boot/dts/bcm988312hr.dts index 16666324..74e15a3 100644 --- a/arch/arm/boot/dts/bcm988312hr.dts +++ b/arch/arm/boot/dts/bcm988312hr.dts @@ -55,6 +55,7 @@ gpio-restart { compatible = "gpio-restart"; gpios = <&gpioa 15 GPIO_ACTIVE_LOW>; + open-source; priority = <200>; }; }; -- cgit v1.1 From d0f33ac9ae7b2a727fb678235ae37baf1d0608d5 Mon Sep 17 00:00:00 2001 From: Arnd Bergmann Date: Thu, 16 Mar 2017 16:40:24 -0700 Subject: mm, x86: fix native_pud_clear build error We still get a build error in random configurations, after this has been modified a few times: In file included from include/linux/mm.h:68:0, from 
include/linux/suspend.h:8, from arch/x86/kernel/asm-offsets.c:12: arch/x86/include/asm/pgtable.h:66:26: error: redefinition of 'native_pud_clear' #define pud_clear(pud) native_pud_clear(pud) My interpretation is that the build error comes from a typo in __PAGETABLE_PUD_FOLDED, so fix that typo now, and remove the incorrect #ifdef around the native_pud_clear definition. Fixes: 3e761a42e19c ("mm, x86: fix HIGHMEM64 && PARAVIRT build config for native_pud_clear()") Fixes: a00cc7d9dd93 ("mm, x86: add support for PUD-sized transparent hugepages") Link: http://lkml.kernel.org/r/20170314121330.182155-1-arnd@arndb.de Signed-off-by: Arnd Bergmann Acked-by: Dave Jiang Cc: Matthew Wilcox Cc: Thomas Gleixner Cc: Ingo Molnar Cc: "H. Peter Anvin" Cc: Thomas Garnier Cc: Kees Cook Cc: Dave Hansen Cc: Hugh Dickins Cc: Borislav Petkov Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/include/asm/pgtable-3level.h | 3 --- arch/x86/include/asm/pgtable.h | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h index 72277b1..50d35e3 100644 --- a/arch/x86/include/asm/pgtable-3level.h +++ b/arch/x86/include/asm/pgtable-3level.h @@ -121,12 +121,9 @@ static inline void native_pmd_clear(pmd_t *pmd) *(tmp + 1) = 0; } -#if !defined(CONFIG_SMP) || (defined(CONFIG_HIGHMEM64G) && \ - defined(CONFIG_PARAVIRT)) static inline void native_pud_clear(pud_t *pudp) { } -#endif static inline void pud_clear(pud_t *pudp) { diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h index 1cfb36b..585ee0d 100644 --- a/arch/x86/include/asm/pgtable.h +++ b/arch/x86/include/asm/pgtable.h @@ -62,7 +62,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page); # define set_pud(pudp, pud) native_set_pud(pudp, pud) #endif -#ifndef __PAGETABLE_PMD_FOLDED +#ifndef __PAGETABLE_PUD_FOLDED #define pud_clear(pud) native_pud_clear(pud) #endif -- cgit v1.1 From 8971e1c79d3f6c9a5e6f7a65c50c41f434a4dae6 Mon Sep 17 00:00:00 2001 From: Michael Ellerman Date: Fri, 17 Mar 2017 16:02:35 +1100 Subject: powerpc/pseries: Don't give a warning when HPT resizing isn't available As of commit 438cc81a41e8 ("powerpc/pseries: Automatically resize HPT for memory hot add/remove"), when running on the pseries platform, we always attempt to use the PAPR extension to resize the hashed page table (HPT) when we add or remove memory. This is fine, but when the extension is not available we'll give a harmless but scary warning. Instead, check if the firmware supports HPT resizing before populating the mmu_hash_ops.resize_hpt pointer.
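The shape of the fix, as a self-contained sketch (assuming, as the commit message implies, that generic callers treat a NULL resize_hpt hook as "feature not available"):

#include <stdio.h>

struct mmu_ops { int (*resize_hpt)(int shift); };

static int fw_has_hpt_resize;  /* set from the firmware feature probe */
static int lpar_resize_hpt(int shift) { (void)shift; return 0; }

static struct mmu_ops mmu_hash_ops;

static void hpte_init(void)
{
	if (fw_has_hpt_resize)      /* the added guard */
		mmu_hash_ops.resize_hpt = lpar_resize_hpt;
}

static void memory_hotplug(void)
{
	if (mmu_hash_ops.resize_hpt)  /* optional hook: NULL means skip */
		mmu_hash_ops.resize_hpt(27);
	else
		printf("HPT resize unavailable, skipping quietly\n");
}

int main(void)
{
	hpte_init();
	memory_hotplug();
	return 0;
}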
Signed-off-by: David Gibson Signed-off-by: Michael Ellerman --- arch/powerpc/platforms/pseries/lpar.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 251060c..8b1fe89 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -751,7 +751,9 @@ void __init hpte_init_pseries(void) mmu_hash_ops.flush_hash_range = pSeries_lpar_flush_hash_range; mmu_hash_ops.hpte_clear_all = pseries_hpte_clear_all; mmu_hash_ops.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; - mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; + + if (firmware_has_feature(FW_FEATURE_HPT_RESIZE)) + mmu_hash_ops.resize_hpt = pseries_lpar_resize_hpt; } void radix_init_pseries(void) -- cgit v1.1 From 5dc855d44c2ad960a86f593c60461f1ae1566b6d Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 16 Mar 2017 12:59:39 -0700 Subject: x86/perf: Fix CR4.PCE propagation to use active_mm instead of mm If one thread mmaps a perf event while another thread in the same mm is in some context where active_mm != mm (which can happen in the scheduler, for example), refresh_pce() would write the wrong value to CR4.PCE. This broke some PAPI tests. Reported-and-tested-by: Vince Weaver Signed-off-by: Andy Lutomirski Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: stable@vger.kernel.org Fixes: 7911d3f7af14 ("perf/x86: Only allow rdpmc if a perf_event is mapped") Link: http://lkml.kernel.org/r/0c5b38a76ea50e405f9abe07a13dfaef87c173a1.1489694270.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index 1635c0c..e07b36c 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2100,8 +2100,8 @@ static int x86_pmu_event_init(struct perf_event *event) static void refresh_pce(void *ignored) { - if (current->mm) - load_mm_cr4(current->mm); + if (current->active_mm) + load_mm_cr4(current->active_mm); } static void x86_pmu_event_mapped(struct perf_event *event) -- cgit v1.1 From 4b07372a32c0c1505a7634ad7e607d83340ef645 Mon Sep 17 00:00:00 2001 From: Andy Lutomirski Date: Thu, 16 Mar 2017 12:59:40 -0700 Subject: x86/perf: Clarify why x86_pmu_event_mapped() isn't racy Naively, it looks racy, but ->mmap_sem saves it. Add a comment and a lockdep assertion. Signed-off-by: Andy Lutomirski Cc: Alexander Shishkin Cc: Arnaldo Carvalho de Melo Cc: Borislav Petkov Cc: H. Peter Anvin Cc: Jiri Olsa Cc: Linus Torvalds Cc: Peter Zijlstra Cc: Stephane Eranian Cc: Thomas Gleixner Cc: Vince Weaver Link: http://lkml.kernel.org/r/03a1e629063899168dfc4707f3bb6e581e21f5c6.1489694270.git.luto@kernel.org Signed-off-by: Ingo Molnar --- arch/x86/events/core.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) (limited to 'arch') diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c index e07b36c..183a972 100644 --- a/arch/x86/events/core.c +++ b/arch/x86/events/core.c @@ -2109,6 +2109,18 @@ static void x86_pmu_event_mapped(struct perf_event *event) if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED)) return; + /* + * This function relies on not being called concurrently in two + * tasks in the same mm. 
Otherwise one task could observe + * perf_rdpmc_allowed > 1 and return all the way back to + * userspace with CR4.PCE clear while another task is still + * doing on_each_cpu_mask() to propagate CR4.PCE. + * + * For now, this can't happen because all callers hold mmap_sem + * for write. If this changes, we'll need a different solution. + */ + lockdep_assert_held_exclusive(¤t->mm->mmap_sem); + if (atomic_inc_return(¤t->mm->context.perf_rdpmc_allowed) == 1) on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1); } -- cgit v1.1 From 74e3f6e63da6c8e8246fba1689e040bc926b4a1a Mon Sep 17 00:00:00 2001 From: Arvind Yadav Date: Tue, 14 Mar 2017 15:24:51 +0530 Subject: parisc: perf: Fix potential NULL pointer dereference Fix potential NULL pointer dereference and clean up coding style errors (code indent, trailing whitespaces). Signed-off-by: Arvind Yadav Signed-off-by: Helge Deller --- arch/parisc/kernel/perf.c | 94 ++++++++++++++++++++++++----------------------- 1 file changed, 49 insertions(+), 45 deletions(-) (limited to 'arch') diff --git a/arch/parisc/kernel/perf.c b/arch/parisc/kernel/perf.c index e282a51..6017a5a 100644 --- a/arch/parisc/kernel/perf.c +++ b/arch/parisc/kernel/perf.c @@ -39,7 +39,7 @@ * the PDC INTRIGUE calls. This is done to eliminate bugs introduced * in various PDC revisions. The code is much more maintainable * and reliable this way vs having to debug on every version of PDC - * on every box. + * on every box. */ #include @@ -195,8 +195,8 @@ static int perf_config(uint32_t *image_ptr); static int perf_release(struct inode *inode, struct file *file); static int perf_open(struct inode *inode, struct file *file); static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t *ppos); -static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos); +static ssize_t perf_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos); static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg); static void perf_start_counters(void); static int perf_stop_counters(uint32_t *raddr); @@ -222,7 +222,7 @@ extern void perf_intrigue_disable_perf_counters (void); /* * configure: * - * Configure the cpu with a given data image. First turn off the counters, + * Configure the cpu with a given data image. First turn off the counters, * then download the image, then turn the counters back on. */ static int perf_config(uint32_t *image_ptr) @@ -234,7 +234,7 @@ static int perf_config(uint32_t *image_ptr) error = perf_stop_counters(raddr); if (error != 0) { printk("perf_config: perf_stop_counters = %ld\n", error); - return -EINVAL; + return -EINVAL; } printk("Preparing to write image\n"); @@ -242,7 +242,7 @@ printk("Preparing to write image\n"); error = perf_write_image((uint64_t *)image_ptr); if (error != 0) { printk("perf_config: DOWNLOAD = %ld\n", error); - return -EINVAL; + return -EINVAL; } printk("Preparing to start counters\n"); @@ -254,7 +254,7 @@ printk("Preparing to start counters\n"); } /* - * Open the device and initialize all of its memory. The device is only + * Open the device and initialize all of its memory. The device is only * opened once, but can be "queried" by multiple processes that know its * file descriptor. */ @@ -298,19 +298,19 @@ static ssize_t perf_read(struct file *file, char __user *buf, size_t cnt, loff_t * called on the processor that the download should happen * on. 
*/ -static ssize_t perf_write(struct file *file, const char __user *buf, size_t count, - loff_t *ppos) +static ssize_t perf_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) { size_t image_size; uint32_t image_type; uint32_t interface_type; uint32_t test; - if (perf_processor_interface == ONYX_INTF) + if (perf_processor_interface == ONYX_INTF) image_size = PCXU_IMAGE_SIZE; - else if (perf_processor_interface == CUDA_INTF) + else if (perf_processor_interface == CUDA_INTF) image_size = PCXW_IMAGE_SIZE; - else + else return -EFAULT; if (!capable(CAP_SYS_ADMIN)) @@ -330,22 +330,22 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun /* First check the machine type is correct for the requested image */ - if (((perf_processor_interface == CUDA_INTF) && - (interface_type != CUDA_INTF)) || - ((perf_processor_interface == ONYX_INTF) && - (interface_type != ONYX_INTF))) + if (((perf_processor_interface == CUDA_INTF) && + (interface_type != CUDA_INTF)) || + ((perf_processor_interface == ONYX_INTF) && + (interface_type != ONYX_INTF))) return -EINVAL; /* Next check to make sure the requested image is valid */ - if (((interface_type == CUDA_INTF) && + if (((interface_type == CUDA_INTF) && (test >= MAX_CUDA_IMAGES)) || - ((interface_type == ONYX_INTF) && - (test >= MAX_ONYX_IMAGES))) + ((interface_type == ONYX_INTF) && + (test >= MAX_ONYX_IMAGES))) return -EINVAL; /* Copy the image into the processor */ - if (interface_type == CUDA_INTF) + if (interface_type == CUDA_INTF) return perf_config(cuda_images[test]); else return perf_config(onyx_images[test]); @@ -359,7 +359,7 @@ static ssize_t perf_write(struct file *file, const char __user *buf, size_t coun static void perf_patch_images(void) { #if 0 /* FIXME!! */ -/* +/* * NOTE: this routine is VERY specific to the current TLB image. * If the image is changed, this routine might also need to be changed. 
*/ @@ -367,9 +367,9 @@ static void perf_patch_images(void) extern void $i_dtlb_miss_2_0(); extern void PA2_0_iva(); - /* + /* * We can only use the lower 32-bits, the upper 32-bits should be 0 - * anyway given this is in the kernel + * anyway given this is in the kernel */ uint32_t itlb_addr = (uint32_t)&($i_itlb_miss_2_0); uint32_t dtlb_addr = (uint32_t)&($i_dtlb_miss_2_0); @@ -377,21 +377,21 @@ static void perf_patch_images(void) if (perf_processor_interface == ONYX_INTF) { /* clear last 2 bytes */ - onyx_images[TLBMISS][15] &= 0xffffff00; + onyx_images[TLBMISS][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[TLBMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[TLBMISS][16] = (dtlb_addr << 8)&0xffffff00; onyx_images[TLBMISS][17] = itlb_addr; /* clear last 2 bytes */ - onyx_images[TLBHANDMISS][15] &= 0xffffff00; + onyx_images[TLBHANDMISS][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[TLBHANDMISS][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[TLBHANDMISS][16] = (dtlb_addr << 8)&0xffffff00; onyx_images[TLBHANDMISS][17] = itlb_addr; /* clear last 2 bytes */ - onyx_images[BIG_CPI][15] &= 0xffffff00; + onyx_images[BIG_CPI][15] &= 0xffffff00; /* set 2 bytes */ onyx_images[BIG_CPI][15] |= (0x000000ff&((dtlb_addr) >> 24)); onyx_images[BIG_CPI][16] = (dtlb_addr << 8)&0xffffff00; @@ -404,24 +404,24 @@ static void perf_patch_images(void) } else if (perf_processor_interface == CUDA_INTF) { /* Cuda interface */ - cuda_images[TLBMISS][16] = + cuda_images[TLBMISS][16] = (cuda_images[TLBMISS][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[TLBMISS][17] = + cuda_images[TLBMISS][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[TLBMISS][18] = (itlb_addr << 16)&0xffff0000; - cuda_images[TLBHANDMISS][16] = + cuda_images[TLBHANDMISS][16] = (cuda_images[TLBHANDMISS][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[TLBHANDMISS][17] = + cuda_images[TLBHANDMISS][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[TLBHANDMISS][18] = (itlb_addr << 16)&0xffff0000; - cuda_images[BIG_CPI][16] = + cuda_images[BIG_CPI][16] = (cuda_images[BIG_CPI][16]&0xffff0000) | ((dtlb_addr >> 8)&0x0000ffff); - cuda_images[BIG_CPI][17] = + cuda_images[BIG_CPI][17] = ((dtlb_addr << 24)&0xff000000) | ((itlb_addr >> 16)&0x000000ff); cuda_images[BIG_CPI][18] = (itlb_addr << 16)&0xffff0000; } else { @@ -433,7 +433,7 @@ static void perf_patch_images(void) /* * ioctl routine - * All routines effect the processor that they are executed on. Thus you + * All routines effect the processor that they are executed on. Thus you * must be running on the processor that you wish to change. 
*/ @@ -459,7 +459,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg) } /* copy out the Counters */ - if (copy_to_user((void __user *)arg, raddr, + if (copy_to_user((void __user *)arg, raddr, sizeof (raddr)) != 0) { error = -EFAULT; break; @@ -487,7 +487,7 @@ static const struct file_operations perf_fops = { .open = perf_open, .release = perf_release }; - + static struct miscdevice perf_dev = { MISC_DYNAMIC_MINOR, PA_PERF_DEV, @@ -595,7 +595,7 @@ static int perf_stop_counters(uint32_t *raddr) /* OR sticky2 (bit 1496) to counter2 bit 32 */ tmp64 |= (userbuf[23] >> 8) & 0x0000000080000000; raddr[2] = (uint32_t)tmp64; - + /* Counter3 is bits 1497 to 1528 */ tmp64 = (userbuf[23] >> 7) & 0x00000000ffffffff; /* OR sticky3 (bit 1529) to counter3 bit 32 */ @@ -617,7 +617,7 @@ static int perf_stop_counters(uint32_t *raddr) userbuf[22] = 0; userbuf[23] = 0; - /* + /* * Write back the zeroed bytes + the image given * the read was destructive. */ } else { /* - * Read RDR-15 which contains the counters and sticky bits + * Read RDR-15 which contains the counters and sticky bits */ if (!perf_rdr_read_ubuf(15, userbuf)) { return -13; } - /* + /* * Clear out the counters */ perf_rdr_clear(15); @@ -644,7 +644,7 @@ static int perf_stop_counters(uint32_t *raddr) raddr[2] = (uint32_t)((userbuf[1] >> 32) & 0x00000000ffffffffUL); raddr[3] = (uint32_t)(userbuf[1] & 0x00000000ffffffffUL); } - + return 0; } @@ -682,7 +682,7 @@ static int perf_rdr_read_ubuf(uint32_t rdr_num, uint64_t *buffer) i = tentry->num_words; while (i--) { buffer[i] = 0; - } + } /* Check for bits an even number of 64 */ if ((xbits = width & 0x03f) != 0) { @@ -808,18 +808,22 @@ static int perf_write_image(uint64_t *memaddr) } runway = ioremap_nocache(cpu_device->hpa.start, 4096); + if (!runway) { + pr_err("perf_write_image: ioremap failed!\n"); + return -ENOMEM; + } /* Merge intrigue bits into Runway STATUS 0 */ tmp64 = __raw_readq(runway + RUNWAY_STATUS) & 0xffecfffffffffffful; - __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), + __raw_writeq(tmp64 | (*memaddr++ & 0x0013000000000000ul), runway + RUNWAY_STATUS); - + /* Write RUNWAY DEBUG registers */ for (i = 0; i < 8; i++) { __raw_writeq(*memaddr++, runway + RUNWAY_DEBUG); } - return 0; + return 0; } /* @@ -843,7 +847,7 @@ printk("perf_rdr_write\n"); perf_rdr_shift_out_U(rdr_num, buffer[i]); } else { perf_rdr_shift_out_W(rdr_num, buffer[i]); - } + } } printk("perf_rdr_write done\n"); } -- cgit v1.1 From 73580dac7618e4bcd21679f553cf3c97323fec46 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 18 Mar 2017 17:13:27 +0100 Subject: parisc: Fix system shutdown halt On those parisc machines which don't provide a software power-off function, the system currently kills the init process at the end of a shutdown and unexpectedly restarts instead of halting. Fix it by adding a loop which will not return.
Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # 4.9+ --- arch/parisc/kernel/process.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index 06f7ca7..b76f503 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -142,6 +142,8 @@ void machine_power_off(void) printk(KERN_EMERG "System shut down completed.\n" "Please power this system off now."); + + for (;;); } void (*pm_power_off)(void) = machine_power_off; -- cgit v1.1 From 0cdefd5b5485ee6eb3512a75739d09a4090176ed Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Sat, 18 Mar 2017 21:53:20 -0700 Subject: ARM: dts: sun7i: lamobo-r1: Fix CPU port RGMII settings The CPU port of the BCM53125 is configured with RGMII (no delays) but this should actually be RGMII with transmit delay (rgmii-txid) because STMMAC takes care of inserting the transmitter delay. This fixes occasional packet loss encountered. Fixes: d7b9eaff5f0c ("ARM: dts: sun7i: Add BCM53125 switch nodes to the lamobo-r1 board") Reported-by: Hartmut Knaack Signed-off-by: Florian Fainelli Signed-off-by: Maxime Ripard --- arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts index 72ec0d5..bbf1c8c 100644 --- a/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts +++ b/arch/arm/boot/dts/sun7i-a20-lamobo-r1.dts @@ -167,7 +167,7 @@ reg = <8>; label = "cpu"; ethernet = <&gmac>; - phy-mode = "rgmii"; + phy-mode = "rgmii-txid"; fixed-link { speed = <1000>; full-duplex; -- cgit v1.1 From 9693219aa61dc7a75ac015e5c011e889cb821eec Mon Sep 17 00:00:00 2001 From: Icenowy Zheng Date: Sat, 18 Mar 2017 05:23:15 +0800 Subject: ARM: sun8i: a23/a33: drop bl_en_pin GPIO pinmux in reference design DTSI The bl_en_pin GPIO pinmux is configured as "gpio_in", which makes it conflict with the real GPIO usage (out), and makes the backlight unusable. Drop the GPIO pinmux for it, so that this GPIO can be used correctly. Signed-off-by: Icenowy Zheng Signed-off-by: Maxime Ripard --- arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi | 7 ------- 1 file changed, 7 deletions(-) (limited to 'arch') diff --git a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi index 7097c18..d6bd158 100644 --- a/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi +++ b/arch/arm/boot/dts/sun8i-reference-design-tablet.dtsi @@ -50,8 +50,6 @@ backlight: backlight { compatible = "pwm-backlight"; - pinctrl-names = "default"; - pinctrl-0 = <&bl_en_pin>; pwms = <&pwm 0 50000 PWM_POLARITY_INVERTED>; brightness-levels = <0 10 20 30 40 50 60 70 80 90 100>; default-brightness-level = <8>; @@ -93,11 +91,6 @@ }; &pio { - bl_en_pin: bl_en_pin@0 { - pins = "PH6"; - function = "gpio_in"; - }; - mmc0_cd_pin: mmc0_cd_pin@0 { pins = "PB4"; function = "gpio_in"; -- cgit v1.1 From 6d98ce0be541d4a3cfbb52cd75072c0339ebb500 Mon Sep 17 00:00:00 2001 From: Nicholas Piggin Date: Fri, 17 Mar 2017 15:13:20 +1000 Subject: powerpc/64s: Fix idle wakeup potential to clobber registers We concluded there may be a window where the idle wakeup code could get to pnv_wakeup_tb_loss() (which clobbers non-volatile GPRs), but the hardware may set SRR1[46:47] to 01b (no state loss) which would result in the wakeup code failing to restore non-volatile GPRs.
I was not able to trigger this condition with trivial tests on real hardware or simulator, but the ISA (at least 2.07) seems to allow for it, and Gautham says that it can happen if there is an exception pending when the sleep/winkle instruction is executed. Fixes: 1706567117ba ("powerpc/kvm: make hypervisor state restore a function") Cc: stable@vger.kernel.org # v4.8+ Signed-off-by: Nicholas Piggin Acked-by: Gautham R. Shenoy Signed-off-by: Michael Ellerman --- arch/powerpc/kernel/idle_book3s.S | 20 +++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/kernel/idle_book3s.S b/arch/powerpc/kernel/idle_book3s.S index 9957287..6fd0821 100644 --- a/arch/powerpc/kernel/idle_book3s.S +++ b/arch/powerpc/kernel/idle_book3s.S @@ -449,9 +449,23 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300) _GLOBAL(pnv_wakeup_tb_loss) ld r1,PACAR1(r13) /* - * Before entering any idle state, the NVGPRs are saved in the stack - * and they are restored before switching to the process context. Hence - * until they are restored, they are free to be used. + * Before entering any idle state, the NVGPRs are saved in the stack. + * If there was a state loss, or PACA_NAPSTATELOST was set, then the + * NVGPRs are restored. If we are here, it is likely that state is lost, + * but not guaranteed -- neither ISA207 nor ISA300 tests to reach + * here are the same as the test to restore NVGPRS: + * PACA_THREAD_IDLE_STATE test for ISA207, PSSCR test for ISA300, + * and SRR1 test for restoring NVGPRs. + * + * We are about to clobber NVGPRs now, so set NAPSTATELOST to + * guarantee they will always be restored. This might be tightened + * with careful reading of specs (particularly for ISA300) but this + * is already a slow wakeup path and it's simpler to be safe. + */ + li r0,1 + stb r0,PACA_NAPSTATELOST(r13) + + /* * * Save SRR1 and LR in NVGPRs as they might be clobbered in * opal_call() (called in CHECK_HMI_INTERRUPT). SRR1 is required -- cgit v1.1 From 066def56dc712329561abadcea15be9cad7393b6 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 2 Jan 2017 13:51:43 +0100 Subject: m68k/bitops: Correct signature of test_bit() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit mm/filemap.c: In function ‘clear_bit_unlock_is_negative_byte’: mm/filemap.c:933: warning: passing argument 2 of ‘test_bit’ discards qualifiers from pointer target type Make the bitmask pointed to by the "vaddr" parameter volatile to fix this, like is done on other architectures. 
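To see why the qualifier matters, here is a minimal sketch (hypothetical names, assuming 32-bit longs as on m68k): passing a pointer to a volatile bitmap into a parameter that lacks the volatile qualifier is what triggers the warning, while the qualified parameter accepts both:

/* Sketch: 'const volatile' in the parameter type accepts both plain
 * and volatile-qualified bitmap pointers without discarding qualifiers. */
static inline int test_bit_sketch(int nr, const volatile unsigned long *vaddr)
{
        return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0;
}

static volatile unsigned long bitmap_word_sketch;

static int caller_sketch(void)
{
        return test_bit_sketch(0, &bitmap_word_sketch); /* no warning */
}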
Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/bitops.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/bitops.h b/arch/m68k/include/asm/bitops.h index b4a9b0d..dda58cf 100644 --- a/arch/m68k/include/asm/bitops.h +++ b/arch/m68k/include/asm/bitops.h @@ -148,7 +148,7 @@ static inline void bfchg_mem_change_bit(int nr, volatile unsigned long *vaddr) #define __change_bit(nr, vaddr) change_bit(nr, vaddr) -static inline int test_bit(int nr, const unsigned long *vaddr) +static inline int test_bit(int nr, const volatile unsigned long *vaddr) { return (vaddr[nr >> 5] & (1UL << (nr & 31))) != 0; } -- cgit v1.1 From 3820ed470e71d7aa3d355495f7fd1cd8457e7c96 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 6 Mar 2017 10:40:02 +0100 Subject: m68k/defconfig: Update defconfigs for v4.11-rc1 Signed-off-by: Geert Uytterhoeven --- arch/m68k/configs/amiga_defconfig | 14 +++++++++++++- arch/m68k/configs/apollo_defconfig | 14 +++++++++++++- arch/m68k/configs/atari_defconfig | 14 +++++++++++++- arch/m68k/configs/bvme6000_defconfig | 14 +++++++++++++- arch/m68k/configs/hp300_defconfig | 14 +++++++++++++- arch/m68k/configs/mac_defconfig | 14 +++++++++++++- arch/m68k/configs/multi_defconfig | 14 +++++++++++++- arch/m68k/configs/mvme147_defconfig | 14 +++++++++++++- arch/m68k/configs/mvme16x_defconfig | 14 +++++++++++++- arch/m68k/configs/q40_defconfig | 14 +++++++++++++- arch/m68k/configs/sun3_defconfig | 14 +++++++++++++- arch/m68k/configs/sun3x_defconfig | 14 +++++++++++++- 12 files changed, 156 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig index 048bf07..531cb9e 100644 --- a/arch/m68k/configs/amiga_defconfig +++ b/arch/m68k/configs/amiga_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -60,6 +61,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -71,6 +73,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -101,6 +104,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -371,6 +377,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -383,6 +390,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_A2065=y CONFIG_ARIADNE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -404,7 +412,6 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -564,6 
+571,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -594,6 +603,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -605,6 +615,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -629,4 +640,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig index d4de249..ca91d39 100644 --- a/arch/m68k/configs/apollo_defconfig +++ b/arch/m68k/configs/apollo_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -362,6 +369,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -378,7 +386,6 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -523,6 +530,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -553,6 +562,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -564,6 +574,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -588,4 +599,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m 
CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig index fc0fd3f..23a3d8a 100644 --- a/arch/m68k/configs/atari_defconfig +++ b/arch/m68k/configs/atari_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -362,6 +368,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -372,6 +379,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_ATARILANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -389,7 +397,6 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -544,6 +551,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -574,6 +583,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -585,6 +595,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -609,4 +620,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig index 52e984a..95deb95 100644 --- a/arch/m68k/configs/bvme6000_defconfig +++ b/arch/m68k/configs/bvme6000_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m 
CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_BVME6000_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig index aaeed44..afae695 100644 --- a/arch/m68k/configs/hp300_defconfig +++ b/arch/m68k/configs/hp300_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -58,6 +59,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -69,6 +71,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -99,6 +102,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -296,6 +300,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -353,6 +359,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -363,6 +370,7 @@ 
CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_HPLANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -379,7 +387,6 @@ CONFIG_HPLANCE=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -525,6 +532,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -555,6 +564,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -566,6 +576,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -590,4 +601,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig index 3bbc9b2..b010734 100644 --- a/arch/m68k/configs/mac_defconfig +++ b/arch/m68k/configs/mac_defconfig @@ -25,6 +25,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -57,6 +58,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -68,6 +70,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -98,6 +101,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -298,6 +302,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -369,6 +375,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -379,6 +386,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -398,7 +406,6 @@ CONFIG_MAC8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -547,6 +554,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m 
CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -577,6 +586,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -588,6 +598,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -612,4 +623,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig index 8f2c0de..0e41454 100644 --- a/arch/m68k/configs/multi_defconfig +++ b/arch/m68k/configs/multi_defconfig @@ -21,6 +21,7 @@ CONFIG_SOLARIS_X86_PARTITION=y CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68020=y @@ -67,6 +68,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -78,6 +80,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -108,6 +111,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -308,6 +312,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -402,6 +408,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -419,6 +426,7 @@ CONFIG_HPLANCE=y CONFIG_MVME147_NET=y CONFIG_SUN3LANCE=y CONFIG_MACMACE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -444,7 +452,6 @@ CONFIG_ZORRO8390=y # CONFIG_NET_VENDOR_SOLARFLARE is not set CONFIG_SMC91X=y # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -627,6 +634,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -657,6 +666,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -668,6 +678,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -692,4 +703,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig index c743dd2..b2e687a 100644 --- 
a/arch/m68k/configs/mvme147_defconfig +++ b/arch/m68k/configs/mvme147_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68030=y @@ -55,6 +56,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -66,6 +68,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -96,6 +99,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -293,6 +297,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -351,6 +357,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_MVME147_NET=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_MVME147_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig index 2ccaca8..cbd8ee2 100644 --- a/arch/m68k/configs/mvme16x_defconfig +++ b/arch/m68k/configs/mvme16x_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m 
CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -352,6 +358,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -361,6 +368,7 @@ CONFIG_NETCONSOLE_DYNAMIC=y CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -377,7 +385,6 @@ CONFIG_MVME16x_NET=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -515,6 +522,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -545,6 +554,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -556,6 +566,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -580,4 +591,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig index 5599f3f..1e82cc9 100644 --- a/arch/m68k/configs/q40_defconfig +++ b/arch/m68k/configs/q40_defconfig @@ -26,6 +26,7 @@ CONFIG_SUN_PARTITION=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_M68040=y @@ -56,6 +57,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -67,6 +69,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -97,6 +100,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -294,6 +298,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -358,6 +364,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -369,6 +376,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set # CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # 
CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -388,7 +396,6 @@ CONFIG_NE2000=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_SMSC is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PLIP=m @@ -538,6 +545,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -568,6 +577,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -579,6 +589,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -603,4 +614,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig index 313bf0a..f9e77f5 100644 --- a/arch/m68k/configs/sun3_defconfig +++ b/arch/m68k/configs/sun3_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_SUN3=y @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -64,6 +66,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -359,6 +366,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_EZCHIP is not set @@ -375,7 +383,6 @@ CONFIG_SUN3_82586=y # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set # CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -546,6 +555,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m 
CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -557,6 +567,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -581,4 +592,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig index 38b6136..3c394fc 100644 --- a/arch/m68k/configs/sun3x_defconfig +++ b/arch/m68k/configs/sun3x_defconfig @@ -25,6 +25,7 @@ CONFIG_UNIXWARE_DISKLABEL=y # CONFIG_EFI_PARTITION is not set CONFIG_SYSV68_PARTITION=y CONFIG_IOSCHED_DEADLINE=m +CONFIG_MQ_IOSCHED_DEADLINE=m CONFIG_KEXEC=y CONFIG_BOOTINFO_PROC=y CONFIG_SUN3X=y @@ -53,6 +54,7 @@ CONFIG_NET_IPVTI=m CONFIG_NET_FOU_IP_TUNNELS=y CONFIG_INET_AH=m CONFIG_INET_ESP=m +CONFIG_INET_ESP_OFFLOAD=m CONFIG_INET_IPCOMP=m CONFIG_INET_XFRM_MODE_TRANSPORT=m CONFIG_INET_XFRM_MODE_TUNNEL=m @@ -64,6 +66,7 @@ CONFIG_IPV6=m CONFIG_IPV6_ROUTER_PREF=y CONFIG_INET6_AH=m CONFIG_INET6_ESP=m +CONFIG_INET6_ESP_OFFLOAD=m CONFIG_INET6_IPCOMP=m CONFIG_IPV6_ILA=m CONFIG_IPV6_VTI=m @@ -94,6 +97,7 @@ CONFIG_NFT_NUMGEN=m CONFIG_NFT_CT=m CONFIG_NFT_SET_RBTREE=m CONFIG_NFT_SET_HASH=m +CONFIG_NFT_SET_BITMAP=m CONFIG_NFT_COUNTER=m CONFIG_NFT_LOG=m CONFIG_NFT_LIMIT=m @@ -291,6 +295,8 @@ CONFIG_MPLS_IPTUNNEL=m CONFIG_NET_L3_MASTER_DEV=y CONFIG_AF_KCM=m # CONFIG_WIRELESS is not set +CONFIG_PSAMPLE=m +CONFIG_NET_IFE=m CONFIG_NET_DEVLINK=m # CONFIG_UEVENT_HELPER is not set CONFIG_DEVTMPFS=y @@ -349,6 +355,7 @@ CONFIG_NET_TEAM_MODE_LOADBALANCE=m CONFIG_MACVLAN=m CONFIG_MACVTAP=m CONFIG_IPVLAN=m +CONFIG_IPVTAP=m CONFIG_VXLAN=m CONFIG_GENEVE=m CONFIG_GTP=m @@ -359,6 +366,7 @@ CONFIG_VETH=m # CONFIG_NET_VENDOR_ALACRITECH is not set # CONFIG_NET_VENDOR_AMAZON is not set CONFIG_SUN3LANCE=y +# CONFIG_NET_VENDOR_AQUANTIA is not set # CONFIG_NET_VENDOR_ARC is not set # CONFIG_NET_CADENCE is not set # CONFIG_NET_VENDOR_BROADCOM is not set @@ -375,7 +383,6 @@ CONFIG_SUN3LANCE=y # CONFIG_NET_VENDOR_SEEQ is not set # CONFIG_NET_VENDOR_SOLARFLARE is not set # CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SYNOPSYS is not set # CONFIG_NET_VENDOR_VIA is not set # CONFIG_NET_VENDOR_WIZNET is not set CONFIG_PPP=m @@ -517,6 +524,8 @@ CONFIG_NLS_MAC_TURKISH=m CONFIG_DLM=m # CONFIG_SECTION_MISMATCH_WARN_ONLY is not set CONFIG_MAGIC_SYSRQ=y +CONFIG_WW_MUTEX_SELFTEST=m +CONFIG_ATOMIC64_SELFTEST=m CONFIG_ASYNC_RAID6_TEST=m CONFIG_TEST_HEXDUMP=m CONFIG_TEST_STRING_HELPERS=m @@ -547,6 +556,7 @@ CONFIG_CRYPTO_CHACHA20POLY1305=m CONFIG_CRYPTO_LRW=m CONFIG_CRYPTO_PCBC=m CONFIG_CRYPTO_KEYWRAP=m +CONFIG_CRYPTO_CMAC=m CONFIG_CRYPTO_XCBC=m CONFIG_CRYPTO_VMAC=m CONFIG_CRYPTO_MICHAEL_MIC=m @@ -558,6 +568,7 @@ CONFIG_CRYPTO_SHA512=m CONFIG_CRYPTO_SHA3=m CONFIG_CRYPTO_TGR192=m CONFIG_CRYPTO_WP512=m +CONFIG_CRYPTO_AES_TI=m CONFIG_CRYPTO_ANUBIS=m CONFIG_CRYPTO_BLOWFISH=m CONFIG_CRYPTO_CAMELLIA=m @@ -582,4 +593,5 @@ CONFIG_CRYPTO_USER_API_SKCIPHER=m CONFIG_CRYPTO_USER_API_RNG=m CONFIG_CRYPTO_USER_API_AEAD=m # CONFIG_CRYPTO_HW is not set +CONFIG_CRC32_SELFTEST=m CONFIG_XZ_DEC_TEST=m -- cgit v1.1 From e3b1ebd673876f2cdb0b84205e52a33b94a9860f Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 6 Mar 2017 11:04:22 +0100 Subject: m68k: Wire up statx Signed-off-by: Geert Uytterhoeven --- arch/m68k/include/asm/unistd.h | 2 +- arch/m68k/include/uapi/asm/unistd.h 
| 1 + arch/m68k/kernel/syscalltable.S | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h index a857d82..aab1edd 100644 --- a/arch/m68k/include/asm/unistd.h +++ b/arch/m68k/include/asm/unistd.h @@ -4,7 +4,7 @@ #include -#define NR_syscalls 379 +#define NR_syscalls 380 #define __ARCH_WANT_OLD_READDIR #define __ARCH_WANT_OLD_STAT diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h index 9fe674bf..25589f5 100644 --- a/arch/m68k/include/uapi/asm/unistd.h +++ b/arch/m68k/include/uapi/asm/unistd.h @@ -384,5 +384,6 @@ #define __NR_copy_file_range 376 #define __NR_preadv2 377 #define __NR_pwritev2 378 +#define __NR_statx 379 #endif /* _UAPI_ASM_M68K_UNISTD_H_ */ diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S index d6fd6d9..8c9fcfa 100644 --- a/arch/m68k/kernel/syscalltable.S +++ b/arch/m68k/kernel/syscalltable.S @@ -399,3 +399,4 @@ ENTRY(sys_call_table) .long sys_copy_file_range .long sys_preadv2 .long sys_pwritev2 + .long sys_statx -- cgit v1.1 From 6c6c5e0311c83ffe75e14260fb83e05e21e1d488 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radim=20Kr=C4=8Dm=C3=A1=C5=99?= Date: Fri, 13 Jan 2017 18:59:04 +0100 Subject: KVM: VMX: downgrade warning on unexpected exit code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We never needed the call trace, and we had better rate-limit the message since it can be triggered by a guest. Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 98e82ee..e7ec8896 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -8501,7 +8501,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) && kvm_vmx_exit_handlers[exit_reason]) return kvm_vmx_exit_handlers[exit_reason](vcpu); else { - WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason); + vcpu_unimpl(vcpu, "vmx: unexpected exit reason 0x%x\n", + exit_reason); kvm_queue_exception(vcpu, UD_VECTOR); return 1; } -- cgit v1.1 From 3863dff0c3dd72984395c93b12383b393c5c3989 Mon Sep 17 00:00:00 2001 From: Dmitry Vyukov Date: Tue, 24 Jan 2017 14:06:48 +0100 Subject: kvm: fix usage of uninit spinlock in avic_vm_destroy() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If avic is not enabled, avic_vm_init() does nothing and returns early. However, avic_vm_destroy() still tries to destroy what hasn't been created. The only bad consequence of this now is that avic_vm_destroy() uses svm_vm_data_hash_lock that hasn't been initialized (and is not meant to be used at all if avic is not enabled). Return early from avic_vm_destroy() if avic is not enabled. It has nothing to destroy.
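The shape of the fix is the usual init/destroy symmetry guard; a minimal sketch under assumed names (not the real SVM code):

#include <stdbool.h>
#include <stdlib.h>

static bool feature_enabled_sketch;     /* stands in for the 'avic' flag */

struct vm_ctx_sketch { void *id_table; };

/* Sketch: when init returned early without creating anything, destroy
 * must return early too, before touching locks or tables that were
 * never initialized. */
static void feature_destroy_sketch(struct vm_ctx_sketch *vm)
{
        if (!feature_enabled_sketch)
                return;
        free(vm->id_table);
        vm->id_table = NULL;
}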
Signed-off-by: Dmitry Vyukov Cc: Joerg Roedel Cc: Paolo Bonzini Cc: "Radim Krčmář" Cc: David Hildenbrand Cc: kvm@vger.kernel.org Cc: syzkaller@googlegroups.com Reviewed-by: David Hildenbrand Signed-off-by: Radim Krčmář --- arch/x86/kvm/svm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index d1efe2c..5fba706 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c @@ -1379,6 +1379,9 @@ static void avic_vm_destroy(struct kvm *kvm) unsigned long flags; struct kvm_arch *vm_data = &kvm->arch; + if (!avic) + return; + avic_free_vm_id(vm_data->avic_vm_id); if (vm_data->avic_logical_id_table_page) -- cgit v1.1 From 6d1b3ad2cd87150fc89bad2331beab173a8ad24d Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Sun, 12 Mar 2017 00:53:52 -0800 Subject: KVM: nVMX: don't reset kvm mmu twice MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The KVM MMU is already reset once CR3 is successfully loaded as part of emulating vmentry in nested_vmx_load_cr3(). We should not reset the KVM MMU twice. Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 2 -- 1 file changed, 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e7ec8896..c664365 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -10287,8 +10287,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, entry_failure_code)) return 1; - kvm_mmu_reset_context(vcpu); - if (!enable_ept) vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested; -- cgit v1.1 From fc36a903265c18d124cefaba364a7fa71b21be61 Mon Sep 17 00:00:00 2001 From: Paul Mackerras Date: Tue, 21 Mar 2017 12:38:02 +1100 Subject: Revert "powerpc/64: Disable use of radix under a hypervisor" This reverts commit 3f91a89d424a79f8082525db5a375e438887bb3e. Now that we do have the machinery for using the radix MMU under a hypervisor, the extra check and comment introduced in 3f91a89d424a are no longer correct. The result is that when booted under a hypervisor that only allows use of radix, we clear the MMU_FTR_TYPE_RADIX and then set it again, and print a warning about ignoring the disable_radix command line option, even though the command line does not include "disable_radix". Signed-off-by: Paul Mackerras Signed-off-by: Michael Ellerman --- arch/powerpc/mm/init_64.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index 9be9920..c22f207 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -397,8 +397,7 @@ static void early_check_vec5(void) void __init mmu_early_init_devtree(void) { /* Disable radix mode based on kernel command line. */ - /* We don't yet have the machinery to do radix as a guest. */ - if (disable_radix || !(mfmsr() & MSR_HV)) + if (disable_radix) cur_cpu_spec->mmu_features &= ~MMU_FTR_TYPE_RADIX; /* -- cgit v1.1 From 713cc9df6473f0cc8d699987d990482d432c0679 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 21 Mar 2017 18:04:26 +0000 Subject: arm64: compat: Update compat syscalls Hook up three pkey syscalls (which we don't implement) and the new statx syscall, as has been done for arch/arm/.
Signed-off-by: Will Deacon --- arch/arm64/include/asm/unistd.h | 2 +- arch/arm64/include/asm/unistd32.h | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/arm64/include/asm/unistd.h b/arch/arm64/include/asm/unistd.h index e78ac26..bdbeb06 100644 --- a/arch/arm64/include/asm/unistd.h +++ b/arch/arm64/include/asm/unistd.h @@ -44,7 +44,7 @@ #define __ARM_NR_compat_cacheflush (__ARM_NR_COMPAT_BASE+2) #define __ARM_NR_compat_set_tls (__ARM_NR_COMPAT_BASE+5) -#define __NR_compat_syscalls 394 +#define __NR_compat_syscalls 398 #endif #define __ARCH_WANT_SYS_CLONE diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h index b7e8ef1..c66b51a 100644 --- a/arch/arm64/include/asm/unistd32.h +++ b/arch/arm64/include/asm/unistd32.h @@ -809,6 +809,14 @@ __SYSCALL(__NR_copy_file_range, sys_copy_file_range) __SYSCALL(__NR_preadv2, compat_sys_preadv2) #define __NR_pwritev2 393 __SYSCALL(__NR_pwritev2, compat_sys_pwritev2) +#define __NR_pkey_mprotect 394 +__SYSCALL(__NR_pkey_mprotect, sys_pkey_mprotect) +#define __NR_pkey_alloc 395 +__SYSCALL(__NR_pkey_alloc, sys_pkey_alloc) +#define __NR_pkey_free 396 +__SYSCALL(__NR_pkey_free, sys_pkey_free) +#define __NR_statx 397 +__SYSCALL(__NR_statx, sys_statx) /* * Please add new compat syscalls above this comment and update -- cgit v1.1 From afd0e5a876703accb95894f23317a13e2c49b523 Mon Sep 17 00:00:00 2001 From: Neeraj Upadhyay Date: Wed, 22 Mar 2017 17:08:25 +0530 Subject: arm64: kaslr: Fix up the kernel image alignment If the kernel image extends across an alignment boundary, the existing code increases the KASLR offset by the size of the kernel image. The offset is masked after resizing. There are cases where, after masking, the kernel image still extends across the boundary. This eventually results in only a 2MB block getting mapped while creating the page tables, which leads to data aborts while accessing unmapped regions during the second relocation (with the KASLR offset) in __primary_switch. To fix this problem, round up the kernel image size by the swapper block size before adding it for correction. For example, consider the case below, where the kernel image still crosses a 1GB alignment boundary after the offset is masked, and which is fixed by rounding up the kernel image size. SWAPPER_TABLE_SHIFT = 30 The swapper uses section maps with a section size of 2MB.
CONFIG_PGTABLE_LEVELS = 3 VA_BITS = 39 _text : 0xffffff8008080000 _end : 0xffffff800aa1b000 offset : 0x1f35600000 mask = ((1UL << (VA_BITS - 2)) - 1) & ~(SZ_2M - 1) (_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c (_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d offset after existing correction (before mask) = 0x1f37f9b000 (_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d (_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d offset (after mask) = 0x1f37e00000 (_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7c (_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d new offset w/ rounding up = 0x1f38000000 (_text + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d (_end + offset) >> SWAPPER_TABLE_SHIFT = 0x3fffffe7d Fixes: f80fb3a3d508 ("arm64: add support for kernel ASLR") Cc: Reviewed-by: Ard Biesheuvel Signed-off-by: Neeraj Upadhyay Signed-off-by: Srinivas Ramana Signed-off-by: Will Deacon --- arch/arm64/kernel/kaslr.c | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c index 769f24e..d7e90d9 100644 --- a/arch/arm64/kernel/kaslr.c +++ b/arch/arm64/kernel/kaslr.c @@ -131,11 +131,15 @@ u64 __init kaslr_early_init(u64 dt_phys, u64 modulo_offset) /* * The kernel Image should not extend across a 1GB/32MB/512MB alignment * boundary (for 4KB/16KB/64KB granule kernels, respectively). If this - * happens, increase the KASLR offset by the size of the kernel image. + * happens, increase the KASLR offset by the size of the kernel image + * rounded up by SWAPPER_BLOCK_SIZE. */ if ((((u64)_text + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT) != - (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) - offset = (offset + (u64)(_end - _text)) & mask; + (((u64)_end + offset + modulo_offset) >> SWAPPER_TABLE_SHIFT)) { + u64 kimg_sz = _end - _text; + offset = (offset + round_up(kimg_sz, SWAPPER_BLOCK_SIZE)) + & mask; + } if (IS_ENABLED(CONFIG_KASAN)) /* -- cgit v1.1 From f0c0cb99f74c03e2407ea553f6d46eb611e262b5 Mon Sep 17 00:00:00 2001 From: Jon Mason Date: Tue, 21 Mar 2017 16:51:19 -0400 Subject: arm64: dts: NS2: Add dma-coherent to relevant DT entries Cache related issues with DMA rings and performance issues related to caching are being caused by not properly setting the "dma-coherent" flag in the device tree entries. Adding it here to correct the issue. 
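For background on how the property is consumed (a hedged sketch; of_dma_is_coherent() is the generic OF helper, nothing added by this patch): bus code checks the node so the DMA API can skip cache maintenance on these devices:

#include <linux/of.h>
#include <linux/of_address.h>

/* Sketch: of_dma_is_coherent() searches the node and its parents for
 * the "dma-coherent" property that this patch adds to the NS2 nodes. */
static bool ns2_node_coherent_sketch(struct device_node *np)
{
        return of_dma_is_coherent(np);
}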
Signed-off-by: Jon Mason Fixes: fd5e5dd56 ("arm64: dts: Add PCIe0 and PCIe4 DT nodes for NS2") Fixes: dddc3c9d7 ("arm64: dts: NS2: add AMAC ethernet support") Fixes: e79249143 ("arm64: dts: Add Broadcom Northstar2 device tree entries for PDC driver") Fixes: ac9aae00f ("arm64: dts: Add SATA3 AHCI and SATA3 PHY DT nodes for NS2") Fixes: efc877676 ("arm64: dts: Add SDHCI DT node for NS2") Signed-off-by: Florian Fainelli --- arch/arm64/boot/dts/broadcom/ns2.dtsi | 11 +++++++++++ 1 file changed, 11 insertions(+) (limited to 'arch') diff --git a/arch/arm64/boot/dts/broadcom/ns2.dtsi b/arch/arm64/boot/dts/broadcom/ns2.dtsi index 9f9e203..bcb03fc 100644 --- a/arch/arm64/boot/dts/broadcom/ns2.dtsi +++ b/arch/arm64/boot/dts/broadcom/ns2.dtsi @@ -114,6 +114,7 @@ pcie0: pcie@20020000 { compatible = "brcm,iproc-pcie"; reg = <0 0x20020000 0 0x1000>; + dma-coherent; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; @@ -144,6 +145,7 @@ pcie4: pcie@50020000 { compatible = "brcm,iproc-pcie"; reg = <0 0x50020000 0 0x1000>; + dma-coherent; #interrupt-cells = <1>; interrupt-map-mask = <0 0 0 0>; @@ -174,6 +176,7 @@ pcie8: pcie@60c00000 { compatible = "brcm,iproc-pcie-paxc"; reg = <0 0x60c00000 0 0x1000>; + dma-coherent; linux,pci-domain = <8>; bus-range = <0x0 0x1>; @@ -203,6 +206,7 @@ <0x61030000 0x100>; reg-names = "amac_base", "idm_base", "nicpm_base"; interrupts = ; + dma-coherent; phy-handle = <&gphy0>; phy-mode = "rgmii"; status = "disabled"; @@ -213,6 +217,7 @@ reg = <0x612c0000 0x445>; /* PDC FS0 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -222,6 +227,7 @@ reg = <0x612e0000 0x445>; /* PDC FS1 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -231,6 +237,7 @@ reg = <0x61300000 0x445>; /* PDC FS2 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -240,6 +247,7 @@ reg = <0x61320000 0x445>; /* PDC FS3 regs */ interrupts = ; #mbox-cells = <1>; + dma-coherent; brcm,rx-status-len = <32>; brcm,use-bcm-hdr; }; @@ -644,6 +652,7 @@ sata: ahci@663f2000 { compatible = "brcm,iproc-ahci", "generic-ahci"; reg = <0x663f2000 0x1000>; + dma-coherent; reg-names = "ahci"; interrupts = ; #address-cells = <1>; @@ -667,6 +676,7 @@ compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66420000 0x100>; interrupts = ; + dma-coherent; bus-width = <8>; clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; status = "disabled"; @@ -676,6 +686,7 @@ compatible = "brcm,sdhci-iproc-cygnus"; reg = <0x66430000 0x100>; interrupts = ; + dma-coherent; bus-width = <8>; clocks = <&genpll_sw BCM_NS2_GENPLL_SW_SDIO_CLK>; status = "disabled"; -- cgit v1.1 From 950712eb8ef03e4a62ac5b3067efde7ec2f8a91e Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 15 Mar 2017 16:01:18 +0800 Subject: KVM: x86: check existence before destroy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is mostly relevant for split irqchip mode. In that case, these two things are not initialized at all, so there is no need to release them.
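Context for the guard (a hedged sketch with invented names): with a split irqchip the vpic/vioapic pointers stay NULL, and although kfree(NULL) would be harmless, these destructors dereference the pointer to unregister I/O devices first, so they must bail out up front:

struct pic_sketch { int io_dev; };

/* Sketch: destructors for optionally-created objects check for NULL
 * before dereferencing, so teardown is safe for split-irqchip VMs. */
static void pic_destroy_sketch(struct pic_sketch *pic)
{
        if (!pic)
                return;
        /* ... unregister pic->io_dev from the bus, then free pic ... */
}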
Signed-off-by: Peter Xu Signed-off-by: Radim Krčmář --- arch/x86/kvm/i8259.c | 3 +++ arch/x86/kvm/ioapic.c | 3 +++ 2 files changed, 6 insertions(+) (limited to 'arch') diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c index 73ea24d..047b17a 100644 --- a/arch/x86/kvm/i8259.c +++ b/arch/x86/kvm/i8259.c @@ -657,6 +657,9 @@ void kvm_pic_destroy(struct kvm *kvm) { struct kvm_pic *vpic = kvm->arch.vpic; + if (!vpic) + return; + kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_master); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_slave); kvm_io_bus_unregister_dev(vpic->kvm, KVM_PIO_BUS, &vpic->dev_eclr); diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c index 6e219e5..289270a 100644 --- a/arch/x86/kvm/ioapic.c +++ b/arch/x86/kvm/ioapic.c @@ -635,6 +635,9 @@ void kvm_ioapic_destroy(struct kvm *kvm) { struct kvm_ioapic *ioapic = kvm->arch.vioapic; + if (!ioapic) + return; + cancel_delayed_work_sync(&ioapic->eoi_inject); kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev); kvm->arch.vioapic = NULL; -- cgit v1.1 From c761159cf81b8641290f7559a8d8e30f6ab92669 Mon Sep 17 00:00:00 2001 From: Peter Xu Date: Wed, 15 Mar 2017 16:01:19 +0800 Subject: KVM: x86: use pic/ioapic destructors when destroying the VM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We have specific destructors for the PIC and IOAPIC, so use them when destroying the VM as well. Signed-off-by: Peter Xu Signed-off-by: Radim Krčmář --- arch/x86/kvm/x86.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1faf620..d30ff49 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8153,8 +8153,8 @@ void kvm_arch_destroy_vm(struct kvm *kvm) if (kvm_x86_ops->vm_destroy) kvm_x86_ops->vm_destroy(kvm); kvm_iommu_unmap_guest(kvm); - kfree(kvm->arch.vpic); - kfree(kvm->arch.vioapic); + kvm_pic_destroy(kvm); + kvm_ioapic_destroy(kvm); kvm_free_vcpus(kvm); kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kvm_mmu_uninit_vm(kvm); -- cgit v1.1 From fb6c8198431311027c3434d4e94ab8bc040f7aea Mon Sep 17 00:00:00 2001 From: Jim Mattson Date: Thu, 16 Mar 2017 13:53:59 -0700 Subject: kvm: vmx: Flush TLB when the APIC-access address changes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Quoting from the Intel SDM, volume 3, section 28.3.3.4: Guidelines for Use of the INVEPT Instruction: If EPT was in use on a logical processor at one time with EPTP X, it is recommended that software use the INVEPT instruction with the "single-context" INVEPT type and with EPTP X in the INVEPT descriptor before a VM entry on the same logical processor that enables EPT with EPTP X and either (a) the "virtualize APIC accesses" VM-execution control was changed from 0 to 1; or (b) the value of the APIC-access address was changed. In the nested case, the burden falls on L1, unless L0 enables EPT in vmcs02 when L1 doesn't enable EPT in vmcs12.
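Restated as code, the SDM rule reads roughly as follows (an illustrative sketch, not the patch itself; both function names are assumptions):

static void invept_single_context_sketch(void)
{
        /* would issue INVEPT with the "single-context" type */
}

/* Sketch: per SDM 28.3.3.4, invalidate EPT-derived translations when
 * the "virtualize APIC accesses" control flips on or the APIC-access
 * address changes, and only when EPT is actually in use. */
static void maybe_flush_sketch(int ept_enabled, int apic_access_changed)
{
        if (ept_enabled && apic_access_changed)
                invept_single_context_sketch();
}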
Signed-off-by: Jim Mattson Signed-off-by: Radim Krčmář --- arch/x86/kvm/vmx.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c664365..e2f6082 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -4024,6 +4024,12 @@ static void vmx_flush_tlb(struct kvm_vcpu *vcpu) __vmx_flush_tlb(vcpu, to_vmx(vcpu)->vpid); } +static void vmx_flush_tlb_ept_only(struct kvm_vcpu *vcpu) +{ + if (enable_ept) + vmx_flush_tlb(vcpu); +} + static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu) { ulong cr0_guest_owned_bits = vcpu->arch.cr0_guest_owned_bits; @@ -8548,6 +8554,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set) } else { sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE; sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES; + vmx_flush_tlb_ept_only(vcpu); } vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control); @@ -8573,8 +8580,10 @@ static void vmx_set_apic_access_page_addr(struct kvm_vcpu *vcpu, hpa_t hpa) */ if (!is_guest_mode(vcpu) || !nested_cpu_has2(get_vmcs12(&vmx->vcpu), - SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { vmcs_write64(APIC_ACCESS_ADDR, hpa); + vmx_flush_tlb_ept_only(vcpu); + } } static void vmx_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr) @@ -10256,6 +10265,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, if (nested_cpu_has_ept(vmcs12)) { kvm_mmu_unload(vcpu); nested_ept_init_mmu_context(vcpu); + } else if (nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + vmx_flush_tlb_ept_only(vcpu); } /* @@ -11055,6 +11067,10 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason, vmx->nested.change_vmcs01_virtual_x2apic_mode = false; vmx_set_virtual_x2apic_mode(vcpu, vcpu->arch.apic_base & X2APIC_ENABLE); + } else if (!nested_cpu_has_ept(vmcs12) && + nested_cpu_has2(vmcs12, + SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) { + vmx_flush_tlb_ept_only(vcpu); } /* This is needed for same reason as it was needed in prepare_vmcs02 */ -- cgit v1.1 From 24dccf83a121b8a4ad5c2ad383a8184ef6c266ee Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 20 Mar 2017 21:18:55 -0700 Subject: KVM: x86: correct async page present tracepoint MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit After async page fault setup succeeds, there is a broadcast wakeup with the special token 0xffffffff, which tells the vCPU to wake up all processes waiting for APFs even though no process is actually waiting at the moment. The async page present tracepoint fires prematurely and fails to capture the special token setup. This patch fixes it by moving the async page present tracepoint after the special token setup.
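In code form the fix is purely an ordering change; a sketch mirroring the diff below, with the before/after traces showing the observable effect:

	if (work->wakeup_all)
		work->arch.token = ~0; /* broadcast wakeup */
	else
		kvm_del_async_pf_gfn(vcpu, work->arch.gfn);
	/* Trace only after the special token has been set up. */
	trace_kvm_async_pf_ready(work->arch.token, work->gva);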
Before patch: qemu-system-x86-8499 [006] ...1 5973.473292: kvm_async_pf_ready: token 0x0 gva 0x0 After patch: qemu-system-x86-8499 [006] ...1 5973.473292: kvm_async_pf_ready: token 0xffffffff gva 0x0 Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/x86.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d30ff49..64697fe 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8566,11 +8566,11 @@ void kvm_arch_async_page_present(struct kvm_vcpu *vcpu, { struct x86_exception fault; - trace_kvm_async_pf_ready(work->arch.token, work->gva); if (work->wakeup_all) work->arch.token = ~0; /* broadcast wakeup */ else kvm_del_async_pf_gfn(vcpu, work->arch.gfn); + trace_kvm_async_pf_ready(work->arch.token, work->gva); if ((vcpu->arch.apf.msr_val & KVM_ASYNC_PF_ENABLED) && !apf_put_user(vcpu, KVM_PV_REASON_PAGE_READY)) { -- cgit v1.1 From 63cb6d5f004ca44f9b8e562b6dd191f717a4960e Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Mon, 20 Mar 2017 21:18:53 -0700 Subject: KVM: nVMX: Fix nested VPID vmx exec control MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can be reproduced by running kvm-unit-tests/vmx.flat on L0 with VPID disabled. Test suite: VPID Unhandled exception 6 #UD at ip 00000000004051a6 error_code=0000 rflags=00010047 cs=00000008 rax=0000000000000000 rcx=0000000000000001 rdx=0000000000000047 rbx=0000000000402f79 rbp=0000000000456240 rsi=0000000000000001 rdi=0000000000000000 r8=000000000000000a r9=00000000000003f8 r10=0000000080010011 r11=0000000000000000 r12=0000000000000003 r13=0000000000000708 r14=0000000000000000 r15=0000000000000000 cr0=0000000080010031 cr2=0000000000000000 cr3=0000000007fff000 cr4=0000000000002020 cr8=0000000000000000 STACK: @4051a6 40523e 400f7f 402059 40028f We should hide and forbid VPID in L1 if it is disabled on L0. However, the nested VPID enable bit is set unconditionally when the nested VMX exec controls are set up, even though VPID is not exposed through the nested VMX capability MSRs. This patch fixes it by not setting the nested VPID enable bit if VPID is disabled on L0. Cc: Paolo Bonzini Cc: Radim Krčmář Cc: stable@vger.kernel.org Fixes: 5c614b3583e (KVM: nVMX: nested VPID emulation) Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index e2f6082..5a82766 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -2753,7 +2753,6 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) SECONDARY_EXEC_RDTSCP | SECONDARY_EXEC_DESC | SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE | - SECONDARY_EXEC_ENABLE_VPID | SECONDARY_EXEC_APIC_REGISTER_VIRT | SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY | SECONDARY_EXEC_WBINVD_EXITING | @@ -2781,10 +2780,12 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx) * though it is treated as global context. The alternative is * not failing the single-context invvpid, and it is worse.
*/ - if (enable_vpid) { + vmx->nested.nested_vmx_secondary_ctls_high |= + SECONDARY_EXEC_ENABLE_VPID; vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT | VMX_VPID_EXTENT_SUPPORTED_MASK; - else + } else vmx->nested.nested_vmx_vpid_caps = 0; if (enable_unrestricted_guest) -- cgit v1.1 From 08d839c4b134b8328ec42f2157a9ca4b93227c03 Mon Sep 17 00:00:00 2001 From: Wanpeng Li Date: Thu, 23 Mar 2017 05:30:08 -0700 Subject: KVM: VMX: Fix enable VPID conditions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This can be reproduced by running L2 on L1 with VPID disabled on L0: without the commit "KVM: nVMX: Fix nested VPID vmx exec control", L2 crashes as below: KVM: entry failed, hardware error 0x7 EAX=00000000 EBX=00000000 ECX=00000000 EDX=000306c3 ESI=00000000 EDI=00000000 EBP=00000000 ESP=00000000 EIP=0000fff0 EFL=00000002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0 ES =0000 00000000 0000ffff 00009300 CS =f000 ffff0000 0000ffff 00009b00 SS =0000 00000000 0000ffff 00009300 DS =0000 00000000 0000ffff 00009300 FS =0000 00000000 0000ffff 00009300 GS =0000 00000000 0000ffff 00009300 LDT=0000 00000000 0000ffff 00008200 TR =0000 00000000 0000ffff 00008b00 GDT= 00000000 0000ffff IDT= 00000000 0000ffff CR0=60000010 CR2=00000000 CR3=00000000 CR4=00000000 DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000 DR6=00000000ffff0ff0 DR7=0000000000000400 EFER=0000000000000000 Reference SDM 30.3 INVVPID: Protected Mode Exceptions - #UD - If not in VMX operation. - If the logical processor does not support VPIDs (IA32_VMX_PROCBASED_CTLS2[37]=0). - If the logical processor supports VPIDs (IA32_VMX_PROCBASED_CTLS2[37]=1) but does not support the INVVPID instruction (IA32_VMX_EPT_VPID_CAP[32]=0). So we should check both the VPID enable bit in the VMX exec controls and the INVVPID support bit in the VMX capability MSRs before enabling VPID. This patch guarantees that VPID is not enabled if INVVPID is unsupported, or if neither single-context nor all-context invalidation is exposed in the VMX capability MSRs. Reviewed-by: David Hildenbrand Reviewed-by: Jim Mattson Cc: Paolo Bonzini Cc: Radim Krčmář Signed-off-by: Wanpeng Li Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 5a82766..cd1ba62 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -1239,6 +1239,11 @@ static inline bool cpu_has_vmx_invvpid_global(void) return vmx_capability.vpid & VMX_VPID_EXTENT_GLOBAL_CONTEXT_BIT; } +static inline bool cpu_has_vmx_invvpid(void) +{ + return vmx_capability.vpid & VMX_VPID_INVVPID_BIT; +} + static inline bool cpu_has_vmx_ept(void) { return vmcs_config.cpu_based_2nd_exec_ctrl & @@ -6524,8 +6529,10 @@ static __init int hardware_setup(void) if (boot_cpu_has(X86_FEATURE_NX)) kvm_enable_efer_bits(EFER_NX); - if (!cpu_has_vmx_vpid()) + if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || + !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) enable_vpid = 0; + if (!cpu_has_vmx_shadow_vmcs()) enable_shadow_vmcs = 0; if (enable_shadow_vmcs) -- cgit v1.1 From aaa2e7ac80f679230faf28a8e12e8d68dbe977eb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 01:22:09 -0500 Subject: add asm-generic/extable.h ... and make the users of generic uaccess.h use that.
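For context, the generic exception-table header being added is roughly of this shape (an approximate sketch, not the verbatim file):

	/* include/asm-generic/extable.h, approximately */
	struct exception_table_entry {
		unsigned long insn, fixup;
	};

	struct pt_regs;
	extern int fixup_exception(struct pt_regs *regs);

Architectures that can use this layout simply add "generic-y += extable.h" to their Kbuild, as the diff below does, and drop their private fixup_exception() declarations.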
Signed-off-by: Al Viro --- arch/arc/include/asm/Kbuild | 1 + arch/arc/include/asm/uaccess.h | 2 -- arch/c6x/include/asm/Kbuild | 1 + arch/h8300/include/asm/Kbuild | 1 + arch/hexagon/include/asm/Kbuild | 1 + arch/um/include/asm/Kbuild | 1 + arch/unicore32/include/asm/Kbuild | 1 + arch/unicore32/include/asm/uaccess.h | 2 -- 8 files changed, 6 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/Kbuild b/arch/arc/include/asm/Kbuild index 63a0401..7bee4e4 100644 --- a/arch/arc/include/asm/Kbuild +++ b/arch/arc/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 0431f56..d837a53 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -750,6 +750,4 @@ extern long arc_strnlen_user_noinline(const char __user *src, long n); #include -extern int fixup_exception(struct pt_regs *regs); - #endif diff --git a/arch/c6x/include/asm/Kbuild b/arch/c6x/include/asm/Kbuild index 82619c3..f0eaf047 100644 --- a/arch/c6x/include/asm/Kbuild +++ b/arch/c6x/include/asm/Kbuild @@ -12,6 +12,7 @@ generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += futex.h diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index 341740c..f14e4e6 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -13,6 +13,7 @@ generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/hexagon/include/asm/Kbuild b/arch/hexagon/include/asm/Kbuild index 797b64a..a2036bf 100644 --- a/arch/hexagon/include/asm/Kbuild +++ b/arch/hexagon/include/asm/Kbuild @@ -11,6 +11,7 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/um/include/asm/Kbuild b/arch/um/include/asm/Kbuild index e9d42aa..50a32c3 100644 --- a/arch/um/include/asm/Kbuild +++ b/arch/um/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += delay.h generic-y += device.h generic-y += emergency-restart.h generic-y += exec.h +generic-y += extable.h generic-y += ftrace.h generic-y += futex.h generic-y += hardirq.h diff --git a/arch/unicore32/include/asm/Kbuild b/arch/unicore32/include/asm/Kbuild index 84205fe..e9ad511 100644 --- a/arch/unicore32/include/asm/Kbuild +++ b/arch/unicore32/include/asm/Kbuild @@ -10,6 +10,7 @@ generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index 1622f37..f60fab7 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -38,6 +38,4 @@ __strnlen_user(const char __user *s, long n); #include -extern int fixup_exception(struct pt_regs *regs); - #endif /* __UNICORE_UACCESS_H__ */ -- cgit v1.1 From 7ad658b693536741c37b16aeb07840a2ce75f5b9 Mon Sep 17 00:00:00 2001 From: Ladi Prosek Date: Thu, 23 Mar 2017 07:18:08 +0100 Subject: KVM: nVMX: fix nested EPT detection The 
nested_ept_enabled flag introduced in commit 7ca29de2136 was not computed correctly. We are interested only in L1's EPT state, not the combined L0+L1 value. In particular, if L0 uses EPT but L1 does not, nested_ept_enabled must be false to make sure that PDPTRs are loaded based on CR3 as usual, because the special case described in 26.3.2.4 Loading Page-Directory-Pointer-Table Entries does not apply. Fixes: 7ca29de21362 ("KVM: nVMX: fix CR3 load if L2 uses PAE paging and EPT") Cc: qemu-stable@nongnu.org Reported-by: Wanpeng Li Reviewed-by: David Hildenbrand Signed-off-by: Ladi Prosek Signed-off-by: Paolo Bonzini --- arch/x86/kvm/vmx.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index cd1ba62..2ee00db 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -9992,7 +9992,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, { struct vcpu_vmx *vmx = to_vmx(vcpu); u32 exec_control; - bool nested_ept_enabled = false; vmcs_write16(GUEST_ES_SELECTOR, vmcs12->guest_es_selector); vmcs_write16(GUEST_CS_SELECTOR, vmcs12->guest_cs_selector); @@ -10139,8 +10138,6 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmcs12->guest_intr_status); } - nested_ept_enabled = (exec_control & SECONDARY_EXEC_ENABLE_EPT) != 0; - /* * Write an illegal value to APIC_ACCESS_ADDR. Later, * nested_get_vmcs12_pages will either fix it up or @@ -10303,7 +10300,7 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12, vmx_set_efer(vcpu, vcpu->arch.efer); /* Shadow page tables on either EPT or shadow page tables. */ - if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_ept_enabled, + if (nested_vmx_load_cr3(vcpu, vmcs12->guest_cr3, nested_cpu_has_ept(vmcs12), entry_failure_code)) return 1; -- cgit v1.1 From 2beb6dad2e8f95d710159d5befb390e4f62ab5cf Mon Sep 17 00:00:00 2001 From: Paolo Bonzini Date: Mon, 27 Mar 2017 17:53:50 +0200 Subject: KVM: x86: cleanup the page tracking SRCU instance SRCU uses a delayed work item; if the SRCU instance is not cleaned up before the VM is freed, the result is a use-after-free in the work item callbacks.
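The fix pairs the existing SRCU init with an explicit cleanup before the kvm structure is freed; roughly:

	/* Sketch of the pairing; kvm_page_track_init() provides the init half. */
	init_srcu_struct(&head->track_srcu);     /* at VM creation */
	/* ... VM lifetime ... */
	cleanup_srcu_struct(&head->track_srcu);  /* at VM destruction, so SRCU's
	                                            deferred work cannot touch
	                                            freed memory */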
Reported-by: Dmitry Vyukov Suggested-by: Dmitry Vyukov Cc: stable@vger.kernel.org Fixes: 0eb05bf290cfe8610d9680b49abef37febd1c38a Reviewed-by: Xiao Guangrong Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm_page_track.h | 1 + arch/x86/kvm/page_track.c | 8 ++++++++ arch/x86/kvm/x86.c | 1 + 3 files changed, 10 insertions(+) (limited to 'arch') diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h index d74747b..c4eda79 100644 --- a/arch/x86/include/asm/kvm_page_track.h +++ b/arch/x86/include/asm/kvm_page_track.h @@ -46,6 +46,7 @@ struct kvm_page_track_notifier_node { }; void kvm_page_track_init(struct kvm *kvm); +void kvm_page_track_cleanup(struct kvm *kvm); void kvm_page_track_free_memslot(struct kvm_memory_slot *free, struct kvm_memory_slot *dont); diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c index 37942e4..60168cd 100644 --- a/arch/x86/kvm/page_track.c +++ b/arch/x86/kvm/page_track.c @@ -160,6 +160,14 @@ bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn, return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]); } +void kvm_page_track_cleanup(struct kvm *kvm) +{ + struct kvm_page_track_notifier_head *head; + + head = &kvm->arch.track_notifier_head; + cleanup_srcu_struct(&head->track_srcu); +} + void kvm_page_track_init(struct kvm *kvm) { struct kvm_page_track_notifier_head *head; diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 64697fe..ccbd45e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -8158,6 +8158,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm) kvm_free_vcpus(kvm); kvfree(rcu_dereference_check(kvm->arch.apic_map, 1)); kvm_mmu_uninit_vm(kvm); + kvm_page_track_cleanup(kvm); } void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, -- cgit v1.1 From db68ce10c4f0a27c1ff9fa0e789e5c41f8c4ea63 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 21:08:07 -0400 Subject: new helper: uaccess_kernel() Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 2 +- arch/arc/include/asm/uaccess.h | 2 +- arch/arm/include/asm/uaccess.h | 2 +- arch/arm/lib/uaccess_with_memcpy.c | 4 ++-- arch/blackfin/kernel/process.c | 2 +- arch/c6x/kernel/sys_c6x.c | 2 +- arch/cris/include/asm/uaccess.h | 2 +- arch/m68k/include/asm/uaccess_mm.h | 2 +- arch/metag/include/asm/uaccess.h | 2 +- arch/mips/include/asm/checksum.h | 4 ++-- arch/mips/include/asm/r4kcache.h | 4 ++-- arch/mips/include/asm/uaccess.h | 2 +- arch/mips/kernel/unaligned.c | 10 +++++----- arch/openrisc/include/asm/uaccess.h | 2 +- arch/parisc/include/asm/futex.h | 2 +- arch/parisc/lib/memcpy.c | 2 +- arch/s390/include/asm/uaccess.h | 2 +- arch/sparc/include/asm/uaccess.h | 2 +- arch/sparc/include/asm/uaccess_32.h | 2 +- arch/um/include/asm/uaccess.h | 2 +- arch/um/kernel/skas/uaccess.c | 10 +++++----- arch/unicore32/include/asm/uaccess.h | 2 +- arch/unicore32/kernel/process.c | 2 +- arch/xtensa/include/asm/uaccess.h | 2 +- 24 files changed, 35 insertions(+), 35 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 77c55ce..b2f62b9 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -425,7 +425,7 @@ clear_user(void __user *to, long len) #undef __module_call #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? 
~0UL : TASK_SIZE) extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index d837a53..ffd14e6 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -27,7 +27,7 @@ #include /* for generic string functions */ -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) /* * Algorithmically, for __user_ok() we want do: diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 9677a7c..b635273 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -266,7 +266,7 @@ static inline void set_fs(mm_segment_t fs) #define access_ok(type, addr, size) (__range_ok(addr, size) == 0) #define user_addr_max() \ - (segment_eq(get_fs(), KERNEL_DS) ? ~0UL : get_fs()) + (uaccess_kernel() ? ~0UL : get_fs()) /* * The "__xxx" versions of the user access functions do not verify the diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c index 6bd1089..9b4ed17 100644 --- a/arch/arm/lib/uaccess_with_memcpy.c +++ b/arch/arm/lib/uaccess_with_memcpy.c @@ -90,7 +90,7 @@ __copy_to_user_memcpy(void __user *to, const void *from, unsigned long n) unsigned long ua_flags; int atomic; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + if (uaccess_kernel()) { memcpy((void *)to, from, n); return 0; } @@ -162,7 +162,7 @@ __clear_user_memset(void __user *addr, unsigned long n) { unsigned long ua_flags; - if (unlikely(segment_eq(get_fs(), KERNEL_DS))) { + if (uaccess_kernel()) { memset((void *)addr, 0, n); return 0; } diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c index 89d5162..8981485 100644 --- a/arch/blackfin/kernel/process.c +++ b/arch/blackfin/kernel/process.c @@ -370,7 +370,7 @@ int _access_ok(unsigned long addr, unsigned long size) /* Check that things do not wrap around */ if (addr > ULONG_MAX - size) return 0; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return 1; #ifdef CONFIG_MTD_UCLINUX if (1) diff --git a/arch/c6x/kernel/sys_c6x.c b/arch/c6x/kernel/sys_c6x.c index 3e9bdfb..a742ae25 100644 --- a/arch/c6x/kernel/sys_c6x.c +++ b/arch/c6x/kernel/sys_c6x.c @@ -23,7 +23,7 @@ int _access_ok(unsigned long addr, unsigned long size) if (!addr || addr > (0xffffffffUL - (size - 1))) goto _bad_access; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return 1; if (memory_start <= addr && (addr + size - 1) < memory_end) diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index c2462ef..5f5b8f5 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -43,7 +43,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ (((size) <= TASK_SIZE) && ((addr) <= TASK_SIZE-(size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index fb72b71..14054a4 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -375,7 +375,7 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) #define copy_to_user(to, from, n) __copy_to_user(to, from, n) #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? 
~0UL : TASK_SIZE) extern long strncpy_from_user(char *dst, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 7fc5277..83de755 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -24,7 +24,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) /* * Explicitly allow NULL pointers here. Parts of the kernel such * as readv/writev use access_ok to validate pointers, but want diff --git a/arch/mips/include/asm/checksum.h b/arch/mips/include/asm/checksum.h index c8b574f..77cad23 100644 --- a/arch/mips/include/asm/checksum.h +++ b/arch/mips/include/asm/checksum.h @@ -50,7 +50,7 @@ __wsum csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr) { might_fault(); - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) return __csum_partial_copy_kernel((__force void *)src, dst, len, sum, err_ptr); else @@ -82,7 +82,7 @@ __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, { might_fault(); if (access_ok(VERIFY_WRITE, dst, len)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) return __csum_partial_copy_kernel(src, (__force void *)dst, len, sum, err_ptr); diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h index 55fd94e..7f12d7e 100644 --- a/arch/mips/include/asm/r4kcache.h +++ b/arch/mips/include/asm/r4kcache.h @@ -20,7 +20,7 @@ #include #include #include -#include /* for segment_eq() */ +#include /* for uaccess_kernel() */ extern void (*r4k_blast_dcache)(void); extern void (*r4k_blast_icache)(void); @@ -714,7 +714,7 @@ static inline void protected_blast_##pfx##cache##_range(unsigned long start,\ \ __##pfx##flush_prologue \ \ - if (segment_eq(get_fs(), USER_DS)) { \ + if (!uaccess_kernel()) { \ while (1) { \ protected_cachee_op(hitop, addr); \ if (addr == aend) \ diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index dd25b31..70ca8ee 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -88,7 +88,7 @@ static inline bool eva_kernel_access(void) if (!IS_ENABLED(CONFIG_EVA)) return false; - return segment_eq(get_fs(), get_ds()); + return uaccess_kernel(); } /* diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c index 7ed9835..f806ee5 100644 --- a/arch/mips/kernel/unaligned.c +++ b/arch/mips/kernel/unaligned.c @@ -1026,7 +1026,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadHW(addr, value, res); else LoadHWE(addr, value, res); @@ -1045,7 +1045,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadW(addr, value, res); else LoadWE(addr, value, res); @@ -1064,7 +1064,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, goto sigbus; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) LoadHWU(addr, value, res); else LoadHWUE(addr, value, res); @@ -1132,7 +1132,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, value = regs->regs[insn.i_format.rt]; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) StoreHW(addr, value, res); 
else StoreHWE(addr, value, res); @@ -1152,7 +1152,7 @@ static void emulate_load_store_insn(struct pt_regs *regs, value = regs->regs[insn.i_format.rt]; if (IS_ENABLED(CONFIG_EVA)) { - if (segment_eq(get_fs(), get_ds())) + if (uaccess_kernel()) StoreW(addr, value, res); else StoreWE(addr, value, res); diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 0b0f604..227af1a 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -292,7 +292,7 @@ clear_user(void *addr, unsigned long size) } #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? ~0UL : TASK_SIZE) extern long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/parisc/include/asm/futex.h b/arch/parisc/include/asm/futex.h index ac8bd58..0ba1430 100644 --- a/arch/parisc/include/asm/futex.h +++ b/arch/parisc/include/asm/futex.h @@ -109,7 +109,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... */ - if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) + if (uaccess_kernel() && !uaddr) return -EFAULT; if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index f82ff10..66f5160 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -76,7 +76,7 @@ DECLARE_PER_CPU(struct exception_data, exception_data); goto label; \ } while (0) -#define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) +#define get_user_space() (uaccess_kernel() ? 0 : mfsp(3)) #define get_kernel_space() (0) #define MERGE(w0, sh_1, w1, sh_2) ({ \ diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 9e9a5e8..7228ed8 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -37,7 +37,7 @@ static inline void set_fs(mm_segment_t fs) { current->thread.mm_segment = fs; - if (segment_eq(fs, KERNEL_DS)) { + if (uaccess_kernel()) { set_cpu_flag(CIF_ASCE_SECONDARY); __ctl_load(S390_lowcore.kernel_asce, 7, 7); } else { diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h index bd56c28..9e068bf 100644 --- a/arch/sparc/include/asm/uaccess.h +++ b/arch/sparc/include/asm/uaccess.h @@ -7,7 +7,7 @@ #endif #define user_addr_max() \ - (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL) + (uaccess_kernel() ? ~0UL : TASK_SIZE) long strncpy_from_user(char *dest, const char __user *src, long count); diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 952d512..a59a1e8 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -36,7 +36,7 @@ * large size and address near to PAGE_OFFSET - a fault will break his intentions. 
*/ #define __user_ok(addr, size) ({ (void)(size); (addr) < STACK_TOP; }) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __access_ok(addr, size) (__user_ok((addr) & get_fs().seg, (size))) #define access_ok(type, addr, size) \ ({ (void)(type); __access_ok((unsigned long)(addr), size); }) diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 076bdcb..e992bb5 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -45,7 +45,7 @@ static inline int __access_ok(unsigned long addr, unsigned long size) return __addr_range_nowrap(addr, size) && (__under_task_size(addr, size) || __access_ok_vsyscall(addr, size) || - segment_eq(get_fs(), KERNEL_DS)); + uaccess_kernel()); } #endif diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 85ac8ad..22c9f79 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c @@ -141,7 +141,7 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg) long __copy_from_user(void *to, const void __user *from, unsigned long n) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memcpy(to, (__force void*)from, n); return 0; } @@ -161,7 +161,7 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg) long __copy_to_user(void __user *to, const void *from, unsigned long n) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memcpy((__force void *) to, from, n); return 0; } @@ -189,7 +189,7 @@ long __strncpy_from_user(char *dst, const char __user *src, long count) long n; char *ptr = dst; - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { strncpy(dst, (__force void *) src, count); return strnlen(dst, count); } @@ -210,7 +210,7 @@ static int clear_chunk(unsigned long addr, int len, void *unused) unsigned long __clear_user(void __user *mem, unsigned long len) { - if (segment_eq(get_fs(), KERNEL_DS)) { + if (uaccess_kernel()) { memset((__force void*)mem, 0, len); return 0; } @@ -235,7 +235,7 @@ long __strnlen_user(const void __user *str, long len) { int count = 0, n; - if (segment_eq(get_fs(), KERNEL_DS)) + if (uaccess_kernel()) return strnlen((__force char*)str, len) + 1; n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index f60fab7..1196c88 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -20,7 +20,7 @@ #define __strnlen_user __strnlen_user #define __clear_user __clear_user -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) (((size) <= TASK_SIZE) \ && ((addr) <= TASK_SIZE - (size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) diff --git a/arch/unicore32/kernel/process.c b/arch/unicore32/kernel/process.c index d22c1dc..ddaf78a 100644 --- a/arch/unicore32/kernel/process.c +++ b/arch/unicore32/kernel/process.c @@ -178,7 +178,7 @@ void __show_regs(struct pt_regs *regs) buf, interrupts_enabled(regs) ? "n" : "ff", fast_interrupts_enabled(regs) ? "n" : "ff", processor_modes[processor_mode(regs)], - segment_eq(get_fs(), get_ds()) ? "kernel" : "user"); + uaccess_kernel() ? 
"kernel" : "user"); { unsigned int ctrl; diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index bd8861c..2651269 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -37,7 +37,7 @@ #define segment_eq(a, b) ((a).seg == (b).seg) -#define __kernel_ok (segment_eq(get_fs(), KERNEL_DS)) +#define __kernel_ok (uaccess_kernel()) #define __user_ok(addr, size) \ (((size) <= TASK_SIZE)&&((addr) <= TASK_SIZE-(size))) #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) -- cgit v1.1 From 3fb50075284433093eba7c43e75b16eaa36eeabd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 00:51:43 -0400 Subject: asm-generic/uaccess.h: don't mess with __copy_{to,from}_user only h8300 actually used those; might as well define them there. Signed-off-by: Al Viro --- arch/c6x/include/asm/uaccess.h | 3 --- arch/h8300/include/asm/Kbuild | 1 - arch/h8300/include/asm/uaccess.h | 52 ++++++++++++++++++++++++++++++++++++ arch/um/include/asm/uaccess.h | 2 -- arch/unicore32/include/asm/uaccess.h | 2 -- 5 files changed, 52 insertions(+), 8 deletions(-) create mode 100644 arch/h8300/include/asm/uaccess.h (limited to 'arch') diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h index 453dd26..79fb6f4 100644 --- a/arch/c6x/include/asm/uaccess.h +++ b/arch/c6x/include/asm/uaccess.h @@ -94,9 +94,6 @@ static inline __must_check long __copy_to_user(void __user *to, return 0; } -#define __copy_to_user __copy_to_user -#define __copy_from_user __copy_from_user - extern int _access_ok(unsigned long addr, unsigned long size); #ifdef CONFIG_ACCESS_CHECK #define __access_ok _access_ok diff --git a/arch/h8300/include/asm/Kbuild b/arch/h8300/include/asm/Kbuild index f14e4e6..757cdeb 100644 --- a/arch/h8300/include/asm/Kbuild +++ b/arch/h8300/include/asm/Kbuild @@ -69,7 +69,6 @@ generic-y += tlbflush.h generic-y += trace_clock.h generic-y += topology.h generic-y += types.h -generic-y += uaccess.h generic-y += ucontext.h generic-y += unaligned.h generic-y += vga.h diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h new file mode 100644 index 0000000..c156a41 --- /dev/null +++ b/arch/h8300/include/asm/uaccess.h @@ -0,0 +1,52 @@ +#ifndef _ASM_UACCESS_H +#define _ASM_UACCESS_H + +#include + +static inline __must_check long __copy_from_user(void *to, + const void __user * from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 *)to = *(u8 __force *)from; + return 0; + case 2: + *(u16 *)to = *(u16 __force *)from; + return 0; + case 4: + *(u32 *)to = *(u32 __force *)from; + return 0; + } + } + + memcpy(to, (const void __force *)from, n); + return 0; +} + +static inline __must_check long __copy_to_user(void __user *to, + const void *from, unsigned long n) +{ + if (__builtin_constant_p(n)) { + switch(n) { + case 1: + *(u8 __force *)to = *(u8 *)from; + return 0; + case 2: + *(u16 __force *)to = *(u16 *)from; + return 0; + case 4: + *(u32 __force *)to = *(u32 *)from; + return 0; + default: + break; + } + } + + memcpy((void __force *)to, from, n); + return 0; +} + +#include + +#endif diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index e992bb5..9396c8a 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -31,8 +31,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size); /* Teach asm-generic/uaccess.h that we have C functions for these. 
*/ #define __access_ok __access_ok #define __clear_user __clear_user -#define __copy_to_user __copy_to_user -#define __copy_from_user __copy_from_user #define __strnlen_user __strnlen_user #define __strncpy_from_user __strncpy_from_user #define __copy_to_user_inatomic __copy_to_user diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index 1196c88..b2bcab6 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -14,8 +14,6 @@ #include -#define __copy_from_user __copy_from_user -#define __copy_to_user __copy_to_user #define __strncpy_from_user __strncpy_from_user #define __strnlen_user __strnlen_user #define __clear_user __clear_user -- cgit v1.1 From d597580d373774b1bdab84b3d26ff0b55162b916 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 21:56:06 -0400 Subject: generic ...copy_..._user primitives provide raw_copy_..._user() and select ARCH_HAS_RAW_COPY_USER to use those. Signed-off-by: Al Viro --- arch/Kconfig | 3 +++ 1 file changed, 3 insertions(+) (limited to 'arch') diff --git a/arch/Kconfig b/arch/Kconfig index cd211a1..315d376 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -847,4 +847,7 @@ config STRICT_MODULE_RWX config ARCH_WANT_RELAX_ORDER bool +config ARCH_HAS_RAW_COPY_USER + bool + source "kernel/gcov/Kconfig" -- cgit v1.1 From 8525023121de4848b5f0a7d867ffeadbc477774d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 24 Dec 2016 20:26:18 -0500 Subject: alpha: switch __copy_user() and __do_clean_user() to normal calling conventions They used to need odd calling conventions due to old exception handling mechanism, the last remnants of which had disappeared back in 2002. Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 67 +++++-------------------- arch/alpha/lib/clear_user.S | 66 ++++++++++--------------- arch/alpha/lib/copy_user.S | 82 +++++++++++++----------------- arch/alpha/lib/ev6-clear_user.S | 84 +++++++++++++------------------ arch/alpha/lib/ev6-copy_user.S | 104 +++++++++++++++++---------------------- 5 files changed, 151 insertions(+), 252 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index b2f62b9..0135199 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -334,45 +334,17 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ * Complex access routines */ -/* This little bit of silliness is to get the GP loaded for a function - that ordinarily wouldn't. Otherwise we could have it done by the macro - directly, which can be optimized the linker. 
*/ -#ifdef MODULE -#define __module_address(sym) "r"(sym), -#define __module_call(ra, arg, sym) "jsr $" #ra ",(%" #arg ")," #sym -#else -#define __module_address(sym) -#define __module_call(ra, arg, sym) "bsr $" #ra "," #sym " !samegp" -#endif - -extern void __copy_user(void); - -extern inline long -__copy_tofrom_user_nocheck(void *to, const void *from, long len) -{ - register void * __cu_to __asm__("$6") = to; - register const void * __cu_from __asm__("$7") = from; - register long __cu_len __asm__("$0") = len; - - __asm__ __volatile__( - __module_call(28, 3, __copy_user) - : "=r" (__cu_len), "=r" (__cu_from), "=r" (__cu_to) - : __module_address(__copy_user) - "0" (__cu_len), "1" (__cu_from), "2" (__cu_to) - : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); - - return __cu_len; -} +extern long __copy_user(void *to, const void *from, long len); -#define __copy_to_user(to, from, n) \ -({ \ - __chk_user_ptr(to); \ - __copy_tofrom_user_nocheck((__force void *)(to), (from), (n)); \ +#define __copy_to_user(to, from, n) \ +({ \ + __chk_user_ptr(to); \ + __copy_user((__force void *)(to), (from), (n)); \ }) -#define __copy_from_user(to, from, n) \ -({ \ - __chk_user_ptr(from); \ - __copy_tofrom_user_nocheck((to), (__force void *)(from), (n)); \ +#define __copy_from_user(to, from, n) \ +({ \ + __chk_user_ptr(from); \ + __copy_user((to), (__force void *)(from), (n)); \ }) #define __copy_to_user_inatomic __copy_to_user @@ -382,7 +354,7 @@ extern inline long copy_to_user(void __user *to, const void *from, long n) { if (likely(__access_ok((unsigned long)to, n, get_fs()))) - n = __copy_tofrom_user_nocheck((__force void *)to, from, n); + n = __copy_user((__force void *)to, from, n); return n; } @@ -397,21 +369,7 @@ copy_from_user(void *to, const void __user *from, long n) return res; } -extern void __do_clear_user(void); - -extern inline long -__clear_user(void __user *to, long len) -{ - register void __user * __cl_to __asm__("$6") = to; - register long __cl_len __asm__("$0") = len; - __asm__ __volatile__( - __module_call(28, 2, __do_clear_user) - : "=r"(__cl_len), "=r"(__cl_to) - : __module_address(__do_clear_user) - "0"(__cl_len), "1"(__cl_to) - : "$1", "$2", "$3", "$4", "$5", "$28", "memory"); - return __cl_len; -} +extern long __clear_user(void __user *to, long len); extern inline long clear_user(void __user *to, long len) @@ -421,9 +379,6 @@ clear_user(void __user *to, long len) return len; } -#undef __module_address -#undef __module_call - #define user_addr_max() \ (uaccess_kernel() ? ~0UL : TASK_SIZE) diff --git a/arch/alpha/lib/clear_user.S b/arch/alpha/lib/clear_user.S index bf5b931..006f469 100644 --- a/arch/alpha/lib/clear_user.S +++ b/arch/alpha/lib/clear_user.S @@ -8,21 +8,6 @@ * right "bytes left to zero" value (and that it is updated only _after_ * a successful copy). There is also some rather minor exception setup * stuff. - * - * NOTE! This is not directly C-callable, because the calling semantics - * are different: - * - * Inputs: - * length in $0 - * destination address in $6 - * exception pointer in $7 - * return address in $28 (exceptions expect it there) - * - * Outputs: - * bytes left to copy in $0 - * - * Clobbers: - * $1,$2,$3,$4,$5,$6 */ #include @@ -38,62 +23,63 @@ .set noreorder .align 4 - .globl __do_clear_user - .ent __do_clear_user - .frame $30, 0, $28 + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 .prologue 0 $loop: and $1, 3, $4 # e0 : beq $4, 1f # .. 
e1 : -0: EX( stq_u $31, 0($6) ) # e0 : zero one word +0: EX( stq_u $31, 0($16) ) # e0 : zero one word subq $0, 8, $0 # .. e1 : subq $4, 1, $4 # e0 : - addq $6, 8, $6 # .. e1 : + addq $16, 8, $16 # .. e1 : bne $4, 0b # e1 : unop # : 1: bic $1, 3, $1 # e0 : beq $1, $tail # .. e1 : -2: EX( stq_u $31, 0($6) ) # e0 : zero four words +2: EX( stq_u $31, 0($16) ) # e0 : zero four words subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 8($6) ) # e0 : + EX( stq_u $31, 8($16) ) # e0 : subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 16($6) ) # e0 : + EX( stq_u $31, 16($16) ) # e0 : subq $0, 8, $0 # .. e1 : - EX( stq_u $31, 24($6) ) # e0 : + EX( stq_u $31, 24($16) ) # e0 : subq $0, 8, $0 # .. e1 : subq $1, 4, $1 # e0 : - addq $6, 32, $6 # .. e1 : + addq $16, 32, $16 # .. e1 : bne $1, 2b # e1 : $tail: bne $2, 1f # e1 : is there a tail to do? - ret $31, ($28), 1 # .. e1 : + ret $31, ($26), 1 # .. e1 : -1: EX( ldq_u $5, 0($6) ) # e0 : +1: EX( ldq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : nop # e1 : mskqh $5, $0, $5 # e0 : - EX( stq_u $5, 0($6) ) # e0 : - ret $31, ($28), 1 # .. e1 : + EX( stq_u $5, 0($16) ) # e0 : + ret $31, ($26), 1 # .. e1 : -__do_clear_user: - and $6, 7, $4 # e0 : find dest misalignment +__clear_user: + and $17, $17, $0 + and $16, 7, $4 # e0 : find dest misalignment beq $0, $zerolength # .. e1 : addq $0, $4, $1 # e0 : bias counter and $1, 7, $2 # e1 : number of bytes in tail srl $1, 3, $1 # e0 : beq $4, $loop # .. e1 : - EX( ldq_u $5, 0($6) ) # e0 : load dst word to mask back in + EX( ldq_u $5, 0($16) ) # e0 : load dst word to mask back in beq $1, $oneword # .. e1 : sub-word store? - mskql $5, $6, $5 # e0 : take care of misaligned head - addq $6, 8, $6 # .. e1 : - EX( stq_u $5, -8($6) ) # e0 : + mskql $5, $16, $5 # e0 : take care of misaligned head + addq $16, 8, $16 # .. e1 : + EX( stq_u $5, -8($16) ) # e0 : addq $0, $4, $0 # .. e1 : bytes left -= 8 - misalignment subq $1, 1, $1 # e0 : subq $0, 8, $0 # .. e1 : @@ -101,15 +87,15 @@ __do_clear_user: unop # : $oneword: - mskql $5, $6, $4 # e0 : + mskql $5, $16, $4 # e0 : mskqh $5, $2, $5 # e0 : or $5, $4, $5 # e1 : - EX( stq_u $5, 0($6) ) # e0 : + EX( stq_u $5, 0($16) ) # e0 : clr $0 # .. e1 : $zerolength: $exception: - ret $31, ($28), 1 # .. e1 : + ret $31, ($26), 1 # .. e1 : - .end __do_clear_user - EXPORT_SYMBOL(__do_clear_user) + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/alpha/lib/copy_user.S b/arch/alpha/lib/copy_user.S index 509f62b..159f1b7 100644 --- a/arch/alpha/lib/copy_user.S +++ b/arch/alpha/lib/copy_user.S @@ -9,21 +9,6 @@ * contains the right "bytes left to copy" value (and that it is updated * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. - * - * NOTE! 
This is not directly C-callable, because the calling semantics are - * different: - * - * Inputs: - * length in $0 - * destination address in $6 - * source address in $7 - * return address in $28 - * - * Outputs: - * bytes left to copy in $0 - * - * Clobbers: - * $1,$2,$3,$4,$5,$6,$7 */ #include @@ -49,58 +34,59 @@ .ent __copy_user __copy_user: .prologue 0 - and $6,7,$3 + and $18,$18,$0 + and $16,7,$3 beq $0,$35 beq $3,$36 subq $3,8,$3 .align 4 $37: - EXI( ldq_u $1,0($7) ) - EXO( ldq_u $2,0($6) ) - extbl $1,$7,$1 - mskbl $2,$6,$2 - insbl $1,$6,$1 + EXI( ldq_u $1,0($17) ) + EXO( ldq_u $2,0($16) ) + extbl $1,$17,$1 + mskbl $2,$16,$2 + insbl $1,$16,$1 addq $3,1,$3 bis $1,$2,$1 - EXO( stq_u $1,0($6) ) + EXO( stq_u $1,0($16) ) subq $0,1,$0 - addq $6,1,$6 - addq $7,1,$7 + addq $16,1,$16 + addq $17,1,$17 beq $0,$41 bne $3,$37 $36: - and $7,7,$1 + and $17,7,$1 bic $0,7,$4 beq $1,$43 beq $4,$48 - EXI( ldq_u $3,0($7) ) + EXI( ldq_u $3,0($17) ) .align 4 $50: - EXI( ldq_u $2,8($7) ) + EXI( ldq_u $2,8($17) ) subq $4,8,$4 - extql $3,$7,$3 - extqh $2,$7,$1 + extql $3,$17,$3 + extqh $2,$17,$1 bis $3,$1,$1 - EXO( stq $1,0($6) ) - addq $7,8,$7 + EXO( stq $1,0($16) ) + addq $17,8,$17 subq $0,8,$0 - addq $6,8,$6 + addq $16,8,$16 bis $2,$2,$3 bne $4,$50 $48: beq $0,$41 .align 4 $57: - EXI( ldq_u $1,0($7) ) - EXO( ldq_u $2,0($6) ) - extbl $1,$7,$1 - mskbl $2,$6,$2 - insbl $1,$6,$1 + EXI( ldq_u $1,0($17) ) + EXO( ldq_u $2,0($16) ) + extbl $1,$17,$1 + mskbl $2,$16,$2 + insbl $1,$16,$1 bis $1,$2,$1 - EXO( stq_u $1,0($6) ) + EXO( stq_u $1,0($16) ) subq $0,1,$0 - addq $6,1,$6 - addq $7,1,$7 + addq $16,1,$16 + addq $17,1,$17 bne $0,$57 br $31,$41 .align 4 @@ -108,27 +94,27 @@ $43: beq $4,$65 .align 4 $66: - EXI( ldq $1,0($7) ) + EXI( ldq $1,0($17) ) subq $4,8,$4 - EXO( stq $1,0($6) ) - addq $7,8,$7 + EXO( stq $1,0($16) ) + addq $17,8,$17 subq $0,8,$0 - addq $6,8,$6 + addq $16,8,$16 bne $4,$66 $65: beq $0,$41 - EXI( ldq $2,0($7) ) - EXO( ldq $1,0($6) ) + EXI( ldq $2,0($17) ) + EXO( ldq $1,0($16) ) mskql $2,$0,$2 mskqh $1,$0,$1 bis $2,$1,$2 - EXO( stq $2,0($6) ) + EXO( stq $2,0($16) ) bis $31,$31,$0 $41: $35: $exitin: $exitout: - ret $31,($28),1 + ret $31,($26),1 .end __copy_user EXPORT_SYMBOL(__copy_user) diff --git a/arch/alpha/lib/ev6-clear_user.S b/arch/alpha/lib/ev6-clear_user.S index 05bef6b..e179e47 100644 --- a/arch/alpha/lib/ev6-clear_user.S +++ b/arch/alpha/lib/ev6-clear_user.S @@ -9,21 +9,6 @@ * a successful copy). There is also some rather minor exception setup * stuff. * - * NOTE! This is not directly C-callable, because the calling semantics - * are different: - * - * Inputs: - * length in $0 - * destination address in $6 - * exception pointer in $7 - * return address in $28 (exceptions expect it there) - * - * Outputs: - * bytes left to copy in $0 - * - * Clobbers: - * $1,$2,$3,$4,$5,$6 - * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here @@ -56,14 +41,15 @@ .set noreorder .align 4 - .globl __do_clear_user - .ent __do_clear_user - .frame $30, 0, $28 + .globl __clear_user + .ent __clear_user + .frame $30, 0, $26 .prologue 0 # Pipeline info : Slotting & Comments -__do_clear_user: - and $6, 7, $4 # .. E .. .. : find dest head misalignment +__clear_user: + and $17, $17, $0 + and $16, 7, $4 # .. E .. .. : find dest head misalignment beq $0, $zerolength # U .. .. .. : U L U L addq $0, $4, $1 # .. .. .. E : bias counter @@ -75,14 +61,14 @@ __do_clear_user: /* * Head is not aligned. 
Write (8 - $4) bytes to head of destination - * This means $6 is known to be misaligned + * This means $16 is known to be misaligned */ - EX( ldq_u $5, 0($6) ) # .. .. .. L : load dst word to mask back in + EX( ldq_u $5, 0($16) ) # .. .. .. L : load dst word to mask back in beq $1, $onebyte # .. .. U .. : sub-word store? - mskql $5, $6, $5 # .. U .. .. : take care of misaligned head - addq $6, 8, $6 # E .. .. .. : L U U L + mskql $5, $16, $5 # .. U .. .. : take care of misaligned head + addq $16, 8, $16 # E .. .. .. : L U U L - EX( stq_u $5, -8($6) ) # .. .. .. L : + EX( stq_u $5, -8($16) ) # .. .. .. L : subq $1, 1, $1 # .. .. E .. : addq $0, $4, $0 # .. E .. .. : bytes left -= 8 - misalignment subq $0, 8, $0 # E .. .. .. : U L U L @@ -93,11 +79,11 @@ __do_clear_user: * values upon initial entry to the loop * $1 is number of quadwords to clear (zero is a valid value) * $2 is number of trailing bytes (0..7) ($2 never used...) - * $6 is known to be aligned 0mod8 + * $16 is known to be aligned 0mod8 */ $headalign: subq $1, 16, $4 # .. .. .. E : If < 16, we can not use the huge loop - and $6, 0x3f, $2 # .. .. E .. : Forward work for huge loop + and $16, 0x3f, $2 # .. .. E .. : Forward work for huge loop subq $2, 0x40, $3 # .. E .. .. : bias counter (huge loop) blt $4, $trailquad # U .. .. .. : U L U L @@ -114,21 +100,21 @@ $headalign: beq $3, $bigalign # U .. .. .. : U L U L : Aligned 0mod64 $alignmod64: - EX( stq_u $31, 0($6) ) # .. .. .. L + EX( stq_u $31, 0($16) ) # .. .. .. L addq $3, 8, $3 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E subq $1, 1, $1 # .. .. E .. - addq $6, 8, $6 # .. E .. .. + addq $16, 8, $16 # .. E .. .. blt $3, $alignmod64 # U .. .. .. : U L U L $bigalign: /* * $0 is the number of bytes left * $1 is the number of quads left - * $6 is aligned 0mod64 + * $16 is aligned 0mod64 * we know that we'll be taking a minimum of one trip through * CWG Section 3.7.6: do not expect a sustained store rate of > 1/cycle * We are _not_ going to update $0 after every single store. That @@ -145,39 +131,39 @@ $bigalign: nop # E : nop # E : nop # E : - bis $6,$6,$3 # E : U L U L : Initial wh64 address is dest + bis $16,$16,$3 # E : U L U L : Initial wh64 address is dest /* This might actually help for the current trip... */ $do_wh64: wh64 ($3) # .. .. .. L1 : memory subsystem hint subq $1, 16, $4 # .. .. E .. : Forward calculation - repeat the loop? - EX( stq_u $31, 0($6) ) # .. L .. .. + EX( stq_u $31, 0($16) ) # .. L .. .. subq $0, 8, $0 # E .. .. .. : U L U L - addq $6, 128, $3 # E : Target address of wh64 - EX( stq_u $31, 8($6) ) # L : - EX( stq_u $31, 16($6) ) # L : + addq $16, 128, $3 # E : Target address of wh64 + EX( stq_u $31, 8($16) ) # L : + EX( stq_u $31, 16($16) ) # L : subq $0, 16, $0 # E : U L L U nop # E : - EX( stq_u $31, 24($6) ) # L : - EX( stq_u $31, 32($6) ) # L : + EX( stq_u $31, 24($16) ) # L : + EX( stq_u $31, 32($16) ) # L : subq $0, 168, $5 # E : U L L U : two trips through the loop left? 
/* 168 = 192 - 24, since we've already completed some stores */ subq $0, 16, $0 # E : - EX( stq_u $31, 40($6) ) # L : - EX( stq_u $31, 48($6) ) # L : - cmovlt $5, $6, $3 # E : U L L U : Latency 2, extra mapping cycle + EX( stq_u $31, 40($16) ) # L : + EX( stq_u $31, 48($16) ) # L : + cmovlt $5, $16, $3 # E : U L L U : Latency 2, extra mapping cycle subq $1, 8, $1 # E : subq $0, 16, $0 # E : - EX( stq_u $31, 56($6) ) # L : + EX( stq_u $31, 56($16) ) # L : nop # E : U L U L nop # E : subq $0, 8, $0 # E : - addq $6, 64, $6 # E : + addq $16, 64, $16 # E : bge $4, $do_wh64 # U : U L U L $trailquad: @@ -190,14 +176,14 @@ $trailquad: beq $1, $trailbytes # U .. .. .. : U L U L : Only 0..7 bytes to go $onequad: - EX( stq_u $31, 0($6) ) # .. .. .. L + EX( stq_u $31, 0($16) ) # .. .. .. L subq $1, 1, $1 # .. .. E .. subq $0, 8, $0 # .. E .. .. nop # E .. .. .. : U L U L nop # .. .. .. E nop # .. .. E .. - addq $6, 8, $6 # .. E .. .. + addq $16, 8, $16 # .. E .. .. bgt $1, $onequad # U .. .. .. : U L U L # We have an unknown number of bytes left to go. @@ -211,9 +197,9 @@ $trailbytes: # so we will use $0 as the loop counter # We know for a fact that $0 > 0 zero due to previous context $onebyte: - EX( stb $31, 0($6) ) # .. .. .. L + EX( stb $31, 0($16) ) # .. .. .. L subq $0, 1, $0 # .. .. E .. : - addq $6, 1, $6 # .. E .. .. : + addq $16, 1, $16 # .. E .. .. : bgt $0, $onebyte # U .. .. .. : U L U L $zerolength: @@ -221,6 +207,6 @@ $exception: # Destination for exception recovery(?) nop # .. .. .. E : nop # .. .. E .. : nop # .. E .. .. : - ret $31, ($28), 1 # L0 .. .. .. : L U L U - .end __do_clear_user - EXPORT_SYMBOL(__do_clear_user) + ret $31, ($26), 1 # L0 .. .. .. : L U L U + .end __clear_user + EXPORT_SYMBOL(__clear_user) diff --git a/arch/alpha/lib/ev6-copy_user.S b/arch/alpha/lib/ev6-copy_user.S index be720b5..35e6710 100644 --- a/arch/alpha/lib/ev6-copy_user.S +++ b/arch/alpha/lib/ev6-copy_user.S @@ -12,21 +12,6 @@ * only _after_ a successful copy). There is also some rather minor * exception setup stuff.. * - * NOTE! This is not directly C-callable, because the calling semantics are - * different: - * - * Inputs: - * length in $0 - * destination address in $6 - * source address in $7 - * return address in $28 - * - * Outputs: - * bytes left to copy in $0 - * - * Clobbers: - * $1,$2,$3,$4,$5,$6,$7 - * * Much of the information about 21264 scheduling/coding comes from: * Compiler Writer's Guide for the Alpha 21264 * abbreviated as 'CWG' in other comments here @@ -60,10 +45,11 @@ # Pipeline info: Slotting & Comments __copy_user: .prologue 0 - subq $0, 32, $1 # .. E .. .. : Is this going to be a small copy? + andq $18, $18, $0 + subq $18, 32, $1 # .. E .. .. : Is this going to be a small copy? beq $0, $zerolength # U .. .. .. : U L U L - and $6,7,$3 # .. .. .. E : is leading dest misalignment + and $16,7,$3 # .. .. .. E : is leading dest misalignment ble $1, $onebyteloop # .. .. U .. : 1st branch : small amount of data beq $3, $destaligned # .. U .. .. : 2nd (one cycle fetcher stall) subq $3, 8, $3 # E .. .. .. : L U U L : trip counter @@ -73,17 +59,17 @@ __copy_user: * We know we have at least one trip through this loop */ $aligndest: - EXI( ldbu $1,0($7) ) # .. .. .. L : Keep loads separate from stores - addq $6,1,$6 # .. .. E .. : Section 3.8 in the CWG + EXI( ldbu $1,0($17) ) # .. .. .. L : Keep loads separate from stores + addq $16,1,$16 # .. .. E .. : Section 3.8 in the CWG addq $3,1,$3 # .. E .. .. : nop # E .. .. .. 
: U L U L /* - * the -1 is to compensate for the inc($6) done in a previous quadpack + * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ - EXO( stb $1,-1($6) ) # .. .. .. L : - addq $7,1,$7 # .. .. E .. : Section 3.8 in the CWG + EXO( stb $1,-1($16) ) # .. .. .. L : + addq $17,1,$17 # .. .. E .. : Section 3.8 in the CWG subq $0,1,$0 # .. E .. .. : bne $3, $aligndest # U .. .. .. : U L U L @@ -92,29 +78,29 @@ $aligndest: * If we arrived via branch, we have a minimum of 32 bytes */ $destaligned: - and $7,7,$1 # .. .. .. E : Check _current_ source alignment + and $17,7,$1 # .. .. .. E : Check _current_ source alignment bic $0,7,$4 # .. .. E .. : number bytes as a quadword loop - EXI( ldq_u $3,0($7) ) # .. L .. .. : Forward fetch for fallthrough code + EXI( ldq_u $3,0($17) ) # .. L .. .. : Forward fetch for fallthrough code beq $1,$quadaligned # U .. .. .. : U L U L /* - * In the worst case, we've just executed an ldq_u here from 0($7) + * In the worst case, we've just executed an ldq_u here from 0($17) * and we'll repeat it once if we take the branch */ /* Misaligned quadword loop - not unrolled. Leave it that way. */ $misquad: - EXI( ldq_u $2,8($7) ) # .. .. .. L : + EXI( ldq_u $2,8($17) ) # .. .. .. L : subq $4,8,$4 # .. .. E .. : - extql $3,$7,$3 # .. U .. .. : - extqh $2,$7,$1 # U .. .. .. : U U L L + extql $3,$17,$3 # .. U .. .. : + extqh $2,$17,$1 # U .. .. .. : U U L L bis $3,$1,$1 # .. .. .. E : - EXO( stq $1,0($6) ) # .. .. L .. : - addq $7,8,$7 # .. E .. .. : + EXO( stq $1,0($16) ) # .. .. L .. : + addq $17,8,$17 # .. E .. .. : subq $0,8,$0 # E .. .. .. : U L L U - addq $6,8,$6 # .. .. .. E : + addq $16,8,$16 # .. .. .. E : bis $2,$2,$3 # .. .. E .. : nop # .. E .. .. : bne $4,$misquad # U .. .. .. : U L U L @@ -125,8 +111,8 @@ $misquad: beq $0,$zerolength # U .. .. .. : U L U L /* We know we have at least one trip through the byte loop */ - EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad - addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) + EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad + addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : br $31, $dirtyentry # L0 .. .. .. : L U U L /* Do the trailing byte loop load, then hop into the store part of the loop */ @@ -136,8 +122,8 @@ $misquad: * Based upon the usage context, it's worth the effort to unroll this loop * $0 - number of bytes to be moved * $4 - number of bytes to move as quadwords - * $6 is current destination address - * $7 is current source address + * $16 is current destination address + * $17 is current source address */ $quadaligned: subq $4, 32, $2 # .. .. .. E : do not unroll for small stuff @@ -155,29 +141,29 @@ $quadaligned: * instruction memory hint instruction). */ $unroll4: - EXI( ldq $1,0($7) ) # .. .. .. L - EXI( ldq $2,8($7) ) # .. .. L .. + EXI( ldq $1,0($17) ) # .. .. .. L + EXI( ldq $2,8($17) ) # .. .. L .. subq $4,32,$4 # .. E .. .. nop # E .. .. .. : U U L L - addq $7,16,$7 # .. .. .. E - EXO( stq $1,0($6) ) # .. .. L .. - EXO( stq $2,8($6) ) # .. L .. .. + addq $17,16,$17 # .. .. .. E + EXO( stq $1,0($16) ) # .. .. L .. + EXO( stq $2,8($16) ) # .. L .. .. subq $0,16,$0 # E .. .. .. : U L L U - addq $6,16,$6 # .. .. .. E - EXI( ldq $1,0($7) ) # .. .. L .. - EXI( ldq $2,8($7) ) # .. L .. .. + addq $16,16,$16 # .. .. .. E + EXI( ldq $1,0($17) ) # .. .. L .. + EXI( ldq $2,8($17) ) # .. L .. .. subq $4, 32, $3 # E .. .. .. 
: U U L L : is there enough for another trip? - EXO( stq $1,0($6) ) # .. .. .. L - EXO( stq $2,8($6) ) # .. .. L .. + EXO( stq $1,0($16) ) # .. .. .. L + EXO( stq $2,8($16) ) # .. .. L .. subq $0,16,$0 # .. E .. .. - addq $7,16,$7 # E .. .. .. : U L L U + addq $17,16,$17 # E .. .. .. : U L L U nop # .. .. .. E nop # .. .. E .. - addq $6,16,$6 # .. E .. .. + addq $16,16,$16 # .. E .. .. bgt $3,$unroll4 # U .. .. .. : U L U L nop @@ -186,14 +172,14 @@ $unroll4: beq $4, $noquads $onequad: - EXI( ldq $1,0($7) ) + EXI( ldq $1,0($17) ) subq $4,8,$4 - addq $7,8,$7 + addq $17,8,$17 nop - EXO( stq $1,0($6) ) + EXO( stq $1,0($16) ) subq $0,8,$0 - addq $6,8,$6 + addq $16,8,$16 bne $4,$onequad $noquads: @@ -207,23 +193,23 @@ $noquads: * There's no point in doing a lot of complex alignment calculations to try to * to quadword stuff for a small amount of data. * $0 - remaining number of bytes left to copy - * $6 - current dest addr - * $7 - current source addr + * $16 - current dest addr + * $17 - current source addr */ $onebyteloop: - EXI ( ldbu $2,0($7) ) # .. .. .. L : No loads in the same quad - addq $6,1,$6 # .. .. E .. : as the store (Section 3.8 in CWG) + EXI ( ldbu $2,0($17) ) # .. .. .. L : No loads in the same quad + addq $16,1,$16 # .. .. E .. : as the store (Section 3.8 in CWG) nop # .. E .. .. : nop # E .. .. .. : U L U L $dirtyentry: /* - * the -1 is to compensate for the inc($6) done in a previous quadpack + * the -1 is to compensate for the inc($16) done in a previous quadpack * which allows us zero dependencies within either quadpack in the loop */ - EXO ( stb $2,-1($6) ) # .. .. .. L : - addq $7,1,$7 # .. .. E .. : quadpack as the load + EXO ( stb $2,-1($16) ) # .. .. .. L : + addq $17,1,$17 # .. .. E .. : quadpack as the load subq $0,1,$0 # .. E .. .. : change count _after_ copy bgt $0,$onebyteloop # U .. .. .. : U L U L @@ -233,7 +219,7 @@ $exitout: # Destination for exception recovery(?) nop # .. .. .. E nop # .. .. E .. nop # .. E .. .. - ret $31,($28),1 # L0 .. .. .. : L U L U + ret $31,($26),1 # L0 .. .. .. : L U L U .end __copy_user EXPORT_SYMBOL(__copy_user) -- cgit v1.1 From b5478c1b67bcd52d3b7fb276090b985b4b38a7ea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 24 Dec 2016 21:18:40 -0500 Subject: alpha: add asm/extable.h Signed-off-by: Al Viro --- arch/alpha/include/asm/extable.h | 55 ++++++++++++++++++++++++++++++++++++++++ arch/alpha/include/asm/uaccess.h | 52 +------------------------------------ 2 files changed, 56 insertions(+), 51 deletions(-) create mode 100644 arch/alpha/include/asm/extable.h (limited to 'arch') diff --git a/arch/alpha/include/asm/extable.h b/arch/alpha/include/asm/extable.h new file mode 100644 index 0000000..048e209 --- /dev/null +++ b/arch/alpha/include/asm/extable.h @@ -0,0 +1,55 @@ +#ifndef _ASM_EXTABLE_H +#define _ASM_EXTABLE_H + +/* + * About the exception table: + * + * - insn is a 32-bit pc-relative offset from the faulting insn. + * - nextinsn is a 16-bit offset off of the faulting instruction + * (not off of the *next* instruction as branches are). + * - errreg is the register in which to place -EFAULT. + * - valreg is the final target register for the load sequence + * and will be zeroed. + * + * Either errreg or valreg may be $31, in which case nothing happens. + * + * The exception fixup information "just so happens" to be arranged + * as in a MEM format instruction. 
This lets us emit our three + * values like so: + * + * lda valreg, nextinsn(errreg) + * + */ + +struct exception_table_entry +{ + signed int insn; + union exception_fixup { + unsigned unit; + struct { + signed int nextinsn : 16; + unsigned int errreg : 5; + unsigned int valreg : 5; + } bits; + } fixup; +}; + +/* Returns the new pc */ +#define fixup_exception(map_reg, _fixup, pc) \ +({ \ + if ((_fixup)->fixup.bits.valreg != 31) \ + map_reg((_fixup)->fixup.bits.valreg) = 0; \ + if ((_fixup)->fixup.bits.errreg != 31) \ + map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ + (pc) + (_fixup)->fixup.bits.nextinsn; \ +}) + +#define ARCH_HAS_RELATIVE_EXTABLE + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ + do { \ + (a)->fixup.unit = (b)->fixup.unit; \ + (b)->fixup.unit = (tmp).fixup.unit; \ + } while (0) + +#endif diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 0135199..b7270a6 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -386,56 +386,6 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count); extern __must_check long strlen_user(const char __user *str); extern __must_check long strnlen_user(const char __user *str, long n); -/* - * About the exception table: - * - * - insn is a 32-bit pc-relative offset from the faulting insn. - * - nextinsn is a 16-bit offset off of the faulting instruction - * (not off of the *next* instruction as branches are). - * - errreg is the register in which to place -EFAULT. - * - valreg is the final target register for the load sequence - * and will be zeroed. - * - * Either errreg or valreg may be $31, in which case nothing happens. - * - * The exception fixup information "just so happens" to be arranged - * as in a MEM format instruction. 
This lets us emit our three - * values like so: - * - * lda valreg, nextinsn(errreg) - * - */ - -struct exception_table_entry -{ - signed int insn; - union exception_fixup { - unsigned unit; - struct { - signed int nextinsn : 16; - unsigned int errreg : 5; - unsigned int valreg : 5; - } bits; - } fixup; -}; - -/* Returns the new pc */ -#define fixup_exception(map_reg, _fixup, pc) \ -({ \ - if ((_fixup)->fixup.bits.valreg != 31) \ - map_reg((_fixup)->fixup.bits.valreg) = 0; \ - if ((_fixup)->fixup.bits.errreg != 31) \ - map_reg((_fixup)->fixup.bits.errreg) = -EFAULT; \ - (pc) + (_fixup)->fixup.bits.nextinsn; \ -}) - -#define ARCH_HAS_RELATIVE_EXTABLE - -#define swap_ex_entry_fixup(a, b, tmp, delta) \ - do { \ - (a)->fixup.unit = (b)->fixup.unit; \ - (b)->fixup.unit = (tmp).fixup.unit; \ - } while (0) - +#include #endif /* __ALPHA_UACCESS_H */ -- cgit v1.1 From d78d834bed81bf838281270c7ae3790c88fc95aa Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 02:04:09 -0500 Subject: alpha: get rid of 'segment' argument of __{get,put}_user_check() always equal to get_fs() Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index b7270a6..30fa884 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -54,9 +54,9 @@ * (b) require any knowledge of processes at this stage */ #define put_user(x, ptr) \ - __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)), get_fs()) + __put_user_check((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr))) #define get_user(x, ptr) \ - __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) + __get_user_check((x), (ptr), sizeof(*(ptr))) /* * The "__xxx" versions do not do address space checking, useful when @@ -93,12 +93,12 @@ extern void __get_user_unknown(void); __gu_err; \ }) -#define __get_user_check(x, ptr, size, segment) \ +#define __get_user_check(x, ptr, size) \ ({ \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr, size, segment)) { \ + if (__access_ok((unsigned long)__gu_addr, size, get_fs())) { \ __gu_err = 0; \ switch (size) { \ case 1: __get_user_8(__gu_addr); break; \ @@ -208,11 +208,11 @@ extern void __put_user_unknown(void); __pu_err; \ }) -#define __put_user_check(x, ptr, size, segment) \ +#define __put_user_check(x, ptr, size) \ ({ \ long __pu_err = -EFAULT; \ __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr, size, segment)) { \ + if (__access_ok((unsigned long)__pu_addr, size, get_fs())) { \ __pu_err = 0; \ switch (size) { \ case 1: __put_user_8(x, __pu_addr); break; \ -- cgit v1.1 From f14d6b4f52616465fead8c4b6508a6687032660d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 02:23:46 -0500 Subject: alpha: don't bother with __access_ok() in traps.c we want to check that address is below TASK_SIZE; sure, __access_ok(addr, 0, USER_DS) will do that, but it's more straightforward to just spell it out and that way we can get rid of the damn 'segment' argument of __access_ok(). 
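For the curious, here is a rough C sketch of why the two checks agree. It is illustrative only, not part of the patch; it assumes alpha's usual definitions (TASK_SIZE = 0x40000000000UL, a power of two, and USER_DS.seg = -TASK_SIZE), and the names old_check/new_check are invented for the example:

	/*
	 * Illustrative sketch only, not part of the patch. Assumes alpha's
	 * definitions: TASK_SIZE = 0x40000000000UL (a power of two) and
	 * USER_DS.seg = -TASK_SIZE, i.e. a mask of every bit at or above
	 * the user-space limit.
	 */
	#define TASK_SIZE	0x40000000000UL
	#define USER_SEG	(-TASK_SIZE)	/* == ~(TASK_SIZE - 1) */

	/* the old form, __access_ok(va, 0, USER_DS):
	 * (seg & (addr | size | (addr + size))) == 0, with size == 0 */
	int old_check(unsigned long va)
	{
		return (USER_SEG & va) == 0;
	}

	/* the spelled-out form this patch switches traps.c to */
	int new_check(unsigned long va)
	{
		return va < TASK_SIZE;
	}

	/*
	 * For a power-of-two TASK_SIZE the two are identical: va has no
	 * bits set in -TASK_SIZE exactly when va < TASK_SIZE, so the
	 * 'segment' argument buys nothing here.
	 */
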
Signed-off-by: Al Viro --- arch/alpha/kernel/traps.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'arch') diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index b137390..b724ad8 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -802,7 +802,7 @@ do_entUnaUser(void __user * va, unsigned long opcode, /* Don't bother reading ds in the access check since we already know that this came from the user. Also rely on the fact that the page at TASK_SIZE is unmapped and so can't be touched anyway. */ - if (!__access_ok((unsigned long)va, 0, USER_DS)) + if ((unsigned long)va >= TASK_SIZE) goto give_sigsegv; ++unaligned[1].count; @@ -1047,7 +1047,7 @@ give_sigsegv: /* We need to replicate some of the logic in mm/fault.c, since we don't have access to the fault code in the exception handling return path. */ - if (!__access_ok((unsigned long)va, 0, USER_DS)) + if ((unsigned long)va >= TASK_SIZE) info.si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; -- cgit v1.1 From c9df6025a6ecc5b4d243d1469a4697e03c371a88 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 02:26:32 -0500 Subject: alpha: kill the 'segment' argument of __access_ok() always equal to get_fs(). Signed-off-by: Al Viro --- arch/alpha/include/asm/uaccess.h | 82 ++++++++++++++++++++-------------------- 1 file changed, 41 insertions(+), 41 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 30fa884..54c1d19 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -32,13 +32,13 @@ * - AND "addr+size" doesn't have any high-bits set * - OR we are in kernel mode. */ -#define __access_ok(addr, size, segment) \ - (((segment).seg & (addr | size | (addr+size))) == 0) +#define __access_ok(addr, size) \ + ((get_fs().seg & (addr | size | (addr+size))) == 0) -#define access_ok(type, addr, size) \ -({ \ - __chk_user_ptr(addr); \ - __access_ok(((unsigned long)(addr)), (size), get_fs()); \ +#define access_ok(type, addr, size) \ +({ \ + __chk_user_ptr(addr); \ + __access_ok(((unsigned long)(addr)), (size)); \ }) /* @@ -93,23 +93,23 @@ extern void __get_user_unknown(void); __gu_err; \ }) -#define __get_user_check(x, ptr, size) \ -({ \ - long __gu_err = -EFAULT; \ - unsigned long __gu_val = 0; \ - const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ - if (__access_ok((unsigned long)__gu_addr, size, get_fs())) { \ - __gu_err = 0; \ - switch (size) { \ - case 1: __get_user_8(__gu_addr); break; \ - case 2: __get_user_16(__gu_addr); break; \ - case 4: __get_user_32(__gu_addr); break; \ - case 8: __get_user_64(__gu_addr); break; \ - default: __get_user_unknown(); break; \ - } \ - } \ - (x) = (__force __typeof__(*(ptr))) __gu_val; \ - __gu_err; \ +#define __get_user_check(x, ptr, size) \ +({ \ + long __gu_err = -EFAULT; \ + unsigned long __gu_val = 0; \ + const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \ + if (__access_ok((unsigned long)__gu_addr, size)) { \ + __gu_err = 0; \ + switch (size) { \ + case 1: __get_user_8(__gu_addr); break; \ + case 2: __get_user_16(__gu_addr); break; \ + case 4: __get_user_32(__gu_addr); break; \ + case 8: __get_user_64(__gu_addr); break; \ + default: __get_user_unknown(); break; \ + } \ + } \ + (x) = (__force __typeof__(*(ptr))) __gu_val; \ + __gu_err; \ }) struct __large_struct { unsigned long buf[100]; }; @@ -208,21 +208,21 @@ extern void __put_user_unknown(void); __pu_err; \ }) -#define __put_user_check(x, ptr, size) \ -({ \ - long __pu_err = 
-EFAULT; \ - __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ - if (__access_ok((unsigned long)__pu_addr, size, get_fs())) { \ - __pu_err = 0; \ - switch (size) { \ - case 1: __put_user_8(x, __pu_addr); break; \ - case 2: __put_user_16(x, __pu_addr); break; \ - case 4: __put_user_32(x, __pu_addr); break; \ - case 8: __put_user_64(x, __pu_addr); break; \ - default: __put_user_unknown(); break; \ - } \ - } \ - __pu_err; \ +#define __put_user_check(x, ptr, size) \ +({ \ + long __pu_err = -EFAULT; \ + __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ + if (__access_ok((unsigned long)__pu_addr, size)) { \ + __pu_err = 0; \ + switch (size) { \ + case 1: __put_user_8(x, __pu_addr); break; \ + case 2: __put_user_16(x, __pu_addr); break; \ + case 4: __put_user_32(x, __pu_addr); break; \ + case 8: __put_user_64(x, __pu_addr); break; \ + default: __put_user_unknown(); break; \ + } \ + } \ + __pu_err; \ }) /* @@ -353,7 +353,7 @@ extern long __copy_user(void *to, const void *from, long len); extern inline long copy_to_user(void __user *to, const void *from, long n) { - if (likely(__access_ok((unsigned long)to, n, get_fs()))) + if (likely(__access_ok((unsigned long)to, n))) n = __copy_user((__force void *)to, from, n); return n; } @@ -362,7 +362,7 @@ extern inline long copy_from_user(void *to, const void __user *from, long n) { long res = n; - if (likely(__access_ok((unsigned long)from, n, get_fs()))) + if (likely(__access_ok((unsigned long)from, n))) res = __copy_from_user_inatomic(to, from, n); if (unlikely(res)) memset(to + (n - res), 0, res); @@ -374,7 +374,7 @@ extern long __clear_user(void __user *to, long len); extern inline long clear_user(void __user *to, long len) { - if (__access_ok((unsigned long)to, len, get_fs())) + if (__access_ok((unsigned long)to, len)) len = __clear_user(to, len); return len; } -- cgit v1.1 From ca282f69738161ca1ac2e13b2c1b283be82ffb4b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 7 Mar 2017 04:08:46 -0500 Subject: alpha: add a helper for emitting exception table entries Signed-off-by: Al Viro --- arch/alpha/include/asm/futex.h | 16 +--- arch/alpha/include/asm/uaccess.h | 80 ++++++-------------- arch/alpha/kernel/traps.c | 148 ++++++++++--------------------------- arch/alpha/lib/csum_partial_copy.c | 10 +-- 4 files changed, 68 insertions(+), 186 deletions(-) (limited to 'arch') diff --git a/arch/alpha/include/asm/futex.h b/arch/alpha/include/asm/futex.h index f939794..fb01dfb 100644 --- a/arch/alpha/include/asm/futex.h +++ b/arch/alpha/include/asm/futex.h @@ -19,12 +19,8 @@ "3: .subsection 2\n" \ "4: br 1b\n" \ " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .long 1b-.\n" \ - " lda $31,3b-1b(%1)\n" \ - " .long 2b-.\n" \ - " lda $31,3b-2b(%1)\n" \ - " .previous\n" \ + EXC(1b,3b,%1,$31) \ + EXC(2b,3b,%1,$31) \ : "=&r" (oldval), "=&r"(ret) \ : "r" (uaddr), "r"(oparg) \ : "memory") @@ -101,12 +97,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, "3: .subsection 2\n" "4: br 1b\n" " .previous\n" - " .section __ex_table,\"a\"\n" - " .long 1b-.\n" - " lda $31,3b-1b(%0)\n" - " .long 2b-.\n" - " lda $31,3b-2b(%0)\n" - " .previous\n" + EXC(1b,3b,%0,$31) + EXC(2b,3b,%0,$31) : "+r"(ret), "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)(int)oldval), "r"(newval) : "memory"); diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 54c1d19..9557c12 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -74,6 +74,11 @@ * more extensive comments with fixup_inline_exception below for * more 
information. */ +#define EXC(label,cont,res,err) \ + ".section __ex_table,\"a\"\n" \ + " .long "#label"-.\n" \ + " lda "#res","#cont"-"#label"("#err")\n" \ + ".previous\n" extern void __get_user_unknown(void); @@ -118,20 +123,14 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_64(addr) \ __asm__("1: ldq %0,%2\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=r"(__gu_val), "=r"(__gu_err) \ : "m"(__m(addr)), "1"(__gu_err)) #define __get_user_32(addr) \ __asm__("1: ldl %0,%2\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=r"(__gu_val), "=r"(__gu_err) \ : "m"(__m(addr)), "1"(__gu_err)) @@ -141,20 +140,14 @@ struct __large_struct { unsigned long buf[100]; }; #define __get_user_16(addr) \ __asm__("1: ldwu %0,%2\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=r"(__gu_val), "=r"(__gu_err) \ : "m"(__m(addr)), "1"(__gu_err)) #define __get_user_8(addr) \ __asm__("1: ldbu %0,%2\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=r"(__gu_val), "=r"(__gu_err) \ : "m"(__m(addr)), "1"(__gu_err)) #else @@ -170,12 +163,8 @@ struct __large_struct { unsigned long buf[100]; }; " extwh %1,%3,%1\n" \ " or %0,%1,%0\n" \ "3:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 3b-1b(%2)\n" \ - " .long 2b - .\n" \ - " lda %0, 3b-2b(%2)\n" \ - ".previous" \ + EXC(1b,3b,%0,%2) \ + EXC(2b,3b,%0,%2) \ : "=&r"(__gu_val), "=&r"(__gu_tmp), "=r"(__gu_err) \ : "r"(addr), "2"(__gu_err)); \ } @@ -184,10 +173,7 @@ struct __large_struct { unsigned long buf[100]; }; __asm__("1: ldq_u %0,0(%2)\n" \ " extbl %0,%2,%0\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0, 2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=&r"(__gu_val), "=r"(__gu_err) \ : "r"(addr), "1"(__gu_err)) #endif @@ -233,20 +219,14 @@ extern void __put_user_unknown(void); #define __put_user_64(x, addr) \ __asm__ __volatile__("1: stq %r2,%1\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ + EXC(1b,2b,$31,%0) \ : "=r"(__pu_err) \ : "m" (__m(addr)), "rJ" (x), "0"(__pu_err)) #define __put_user_32(x, addr) \ __asm__ __volatile__("1: stl %r2,%1\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ + EXC(1b,2b,$31,%0) \ : "=r"(__pu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) @@ -256,20 +236,14 @@ __asm__ __volatile__("1: stl %r2,%1\n" \ #define __put_user_16(x, addr) \ __asm__ __volatile__("1: stw %r2,%1\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ + EXC(1b,2b,$31,%0) \ : "=r"(__pu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) #define __put_user_8(x, addr) \ __asm__ __volatile__("1: stb %r2,%1\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ + EXC(1b,2b,$31,%0) \ : "=r"(__pu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(__pu_err)) #else @@ -291,16 +265,10 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ "3: stq_u %2,1(%5)\n" \ "4: stq_u %1,0(%5)\n" \ "5:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31, 5b-1b(%0)\n" \ - " .long 2b - .\n" \ - " lda $31, 5b-2b(%0)\n" \ - " 
.long 3b - .\n" \ - " lda $31, 5b-3b(%0)\n" \ - " .long 4b - .\n" \ - " lda $31, 5b-4b(%0)\n" \ - ".previous" \ + EXC(1b,5b,$31,%0) \ + EXC(2b,5b,$31,%0) \ + EXC(3b,5b,$31,%0) \ + EXC(4b,5b,$31,%0) \ : "=r"(__pu_err), "=&r"(__pu_tmp1), \ "=&r"(__pu_tmp2), "=&r"(__pu_tmp3), \ "=&r"(__pu_tmp4) \ @@ -317,12 +285,8 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ " or %1,%2,%1\n" \ "2: stq_u %1,0(%4)\n" \ "3:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda $31, 3b-1b(%0)\n" \ - " .long 2b - .\n" \ - " lda $31, 3b-2b(%0)\n" \ - ".previous" \ + EXC(1b,3b,$31,%0) \ + EXC(2b,3b,$31,%0) \ : "=r"(__pu_err), \ "=&r"(__pu_tmp1), "=&r"(__pu_tmp2) \ : "r"((unsigned long)(x)), "r"(addr), "0"(__pu_err)); \ diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c index b724ad8..65bb102 100644 --- a/arch/alpha/kernel/traps.c +++ b/arch/alpha/kernel/traps.c @@ -482,12 +482,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -502,12 +498,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -522,12 +514,8 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -551,16 +539,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); @@ -581,16 +563,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); @@ -611,16 +587,10 @@ do_entUna(void * va, unsigned long opcode, unsigned long reg, "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n\t" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), 
"=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); @@ -835,12 +805,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -855,12 +821,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -875,12 +837,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -895,12 +853,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -915,12 +869,8 @@ do_entUnaUser(void __user * va, unsigned long opcode, " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %1,3b-1b(%0)\n" - " .long 2b - .\n" - " lda %2,3b-2b(%0)\n" - ".previous" + EXC(1b,3b,%1,%0) + EXC(2b,3b,%2,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) @@ -944,16 +894,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); @@ -978,16 +922,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); @@ -1012,16 +950,10 @@ do_entUnaUser(void __user * va, unsigned long opcode, "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" - ".section __ex_table,\"a\"\n\t" - " .long 1b - .\n" - " lda %2,5b-1b(%0)\n" - " .long 2b - .\n" - " lda %1,5b-2b(%0)\n" - " .long 3b - .\n" - " lda $31,5b-3b(%0)\n" - " .long 4b - .\n" - " lda $31,5b-4b(%0)\n" - ".previous" + EXC(1b,5b,%2,%0) + EXC(2b,5b,%1,%0) + EXC(3b,5b,$31,%0) + EXC(4b,5b,$31,%0) : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); diff --git a/arch/alpha/lib/csum_partial_copy.c 
b/arch/alpha/lib/csum_partial_copy.c index 5dfb797..ab42afb 100644 --- a/arch/alpha/lib/csum_partial_copy.c +++ b/arch/alpha/lib/csum_partial_copy.c @@ -45,10 +45,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) __asm__ __volatile__( \ "1: ldq_u %0,%2\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - .\n" \ - " lda %0,2b-1b(%1)\n" \ - ".previous" \ + EXC(1b,2b,%0,%1) \ : "=r"(x), "=r"(__guu_err) \ : "m"(__m(ptr)), "1"(0)); \ __guu_err; \ @@ -60,10 +57,7 @@ __asm__ __volatile__("insqh %1,%2,%0":"=r" (z):"r" (x),"r" (y)) __asm__ __volatile__( \ "1: stq_u %2,%1\n" \ "2:\n" \ - ".section __ex_table,\"a\"\n" \ - " .long 1b - ." \ - " lda $31,2b-1b(%0)\n" \ - ".previous" \ + EXC(1b,2b,$31,%0) \ : "=r"(__puu_err) \ : "m"(__m(addr)), "rJ"(x), "0"(0)); \ __puu_err; \ -- cgit v1.1 From ec022681a4201006a5bcb1a01401591a326b659c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:13:02 -0400 Subject: alpha: switch to RAW_COPY_USER copy_{to,from}_user() uninlined. We can go back to inlined variants, if we want to... Signed-off-by: Al Viro --- arch/alpha/Kconfig | 1 + arch/alpha/include/asm/uaccess.h | 33 ++++++--------------------------- 2 files changed, 7 insertions(+), 27 deletions(-) (limited to 'arch') diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index 0e49d39..1be5f61 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig @@ -26,6 +26,7 @@ config ALPHA select ODD_RT_SIGACTION select OLD_SIGSUSPEND select CPU_NO_EFFICIENT_FFS if !ALPHA_EV67 + select ARCH_HAS_RAW_COPY_USER help The Alpha is a 64-bit general-purpose processor designed and marketed by the Digital Equipment Corporation of blessed memory, diff --git a/arch/alpha/include/asm/uaccess.h b/arch/alpha/include/asm/uaccess.h index 9557c12..7b82dc9 100644 --- a/arch/alpha/include/asm/uaccess.h +++ b/arch/alpha/include/asm/uaccess.h @@ -300,37 +300,16 @@ __asm__ __volatile__("1: stb %r2,%1\n" \ extern long __copy_user(void *to, const void *from, long len); -#define __copy_to_user(to, from, n) \ -({ \ - __chk_user_ptr(to); \ - __copy_user((__force void *)(to), (from), (n)); \ -}) -#define __copy_from_user(to, from, n) \ -({ \ - __chk_user_ptr(from); \ - __copy_user((to), (__force void *)(from), (n)); \ -}) - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -extern inline long -copy_to_user(void __user *to, const void *from, long n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long len) { - if (likely(__access_ok((unsigned long)to, n))) - n = __copy_user((__force void *)to, from, n); - return n; + return __copy_user(to, (__force const void *)from, len); } -extern inline long -copy_from_user(void *to, const void __user *from, long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long len) { - long res = n; - if (likely(__access_ok((unsigned long)from, n))) - res = __copy_from_user_inatomic(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; + return __copy_user((__force void *)to, from, len); } extern long __clear_user(void __user *to, long len); -- cgit v1.1 From 3a1e37ae007e9d629617a0d5bc07661078defa32 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 01:01:53 -0400 Subject: arc: get rid of unused declaration Signed-off-by: Al Viro --- arch/arc/include/asm/uaccess.h | 3 --- 1 file changed, 3 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 
ffd14e6..657401f 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -394,9 +394,6 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n) return res; } -extern unsigned long slowpath_copy_to_user(void __user *to, const void *from, - unsigned long n); - static inline unsigned long __arc_copy_to_user(void __user *to, const void *from, unsigned long n) { -- cgit v1.1 From 0f9b38cd79d528c1e19693899d989521778cc245 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: arm: switch to generic extable.h Signed-off-by: Al Viro --- arch/arm/include/asm/Kbuild | 1 + arch/arm/include/asm/uaccess.h | 20 +------------------- 2 files changed, 2 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/arm/include/asm/Kbuild b/arch/arm/include/asm/Kbuild index b14e8c7..3a36d99 100644 --- a/arch/arm/include/asm/Kbuild +++ b/arch/arm/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += early_ioremap.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += ioctl.h generic-y += ipcbuf.h generic-y += irq_regs.h diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index b635273..3a9190b 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -24,25 +24,7 @@ #define __put_user_unaligned __put_user #endif -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); +#include /* * These two functions allow hooking accesses to userspace to increase -- cgit v1.1 From 4de5b63e76b2e672478e49622dabe2666b7f727f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:23:33 -0400 Subject: arm: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/arm/Kconfig | 1 + arch/arm/include/asm/uaccess.h | 60 ++++++++++-------------------------------- 2 files changed, 15 insertions(+), 46 deletions(-) (limited to 'arch') diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 0d4e71b..6fab7f3 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -96,6 +96,7 @@ config ARM select PERF_USE_VMALLOC select RTC_LIB select SYS_SUPPORTS_APM_EMULATION + select ARCH_HAS_RAW_COPY_USER # Above selects are sorted alphabetically; please add new ones # according to that. Thanks. 
help diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h index 3a9190b..2577405 100644 --- a/arch/arm/include/asm/uaccess.h +++ b/arch/arm/include/asm/uaccess.h @@ -455,7 +455,7 @@ extern unsigned long __must_check arm_copy_from_user(void *to, const void __user *from, unsigned long n); static inline unsigned long __must_check -__arch_copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { unsigned int __ua_flags; @@ -471,7 +471,7 @@ extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n); static inline unsigned long __must_check -__arch_copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { #ifndef CONFIG_UACCESS_WITH_MEMCPY unsigned int __ua_flags; @@ -499,54 +499,22 @@ __clear_user(void __user *addr, unsigned long n) } #else -#define __arch_copy_from_user(to, from, n) \ - (memcpy(to, (void __force *)from, n), 0) -#define __arch_copy_to_user(to, from, n) \ - (memcpy((void __force *)to, from, n), 0) -#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) -#endif - -static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - check_object_size(to, n, false); - return __arch_copy_from_user(to, from, n); -} - -static inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long res = n; - - check_object_size(to, n, false); - - if (likely(access_ok(VERIFY_READ, from, n))) - res = __arch_copy_from_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} - -static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - check_object_size(from, n, true); - - return __arch_copy_to_user(to, from, n); + memcpy(to, (const void __force *)from, n); + return 0; } - -static inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - check_object_size(from, n, true); - - if (access_ok(VERIFY_WRITE, to, n)) - n = __arch_copy_to_user(to, from, n); - return n; + memcpy((void __force *)to, from, n); + return 0; } - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define __clear_user(addr, n) (memset((void __force *)addr, 0, n), 0) +#endif +#define INLINE_COPY_TO_USER +#define INLINE_COPY_FROM_USER static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { -- cgit v1.1 From 46583939b98d5a3f13ac71eed9518c6b7a80ae7e Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:00:03 -0500 Subject: arm64: add extable.h Signed-off-by: Al Viro --- arch/arm64/include/asm/extable.h | 25 +++++++++++++++++++++++++ arch/arm64/include/asm/uaccess.h | 23 +---------------------- 2 files changed, 26 insertions(+), 22 deletions(-) create mode 100644 arch/arm64/include/asm/extable.h (limited to 'arch') diff --git a/arch/arm64/include/asm/extable.h b/arch/arm64/include/asm/extable.h new file mode 100644 index 0000000..42f50f1 --- /dev/null +++ b/arch/arm64/include/asm/extable.h @@ -0,0 +1,25 @@ +#ifndef __ASM_EXTABLE_H +#define __ASM_EXTABLE_H + +/* + * The 
exception table consists of pairs of relative offsets: the first + * is the relative offset to an instruction that is allowed to fault, + * and the second is the relative offset at which the program should + * continue. No registers are modified, so it is entirely up to the + * continuation code to figure out what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry +{ + int insn, fixup; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +extern int fixup_exception(struct pt_regs *regs); +#endif diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 7c514e1..5a9d49c 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -33,28 +33,7 @@ #include #include #include - -/* - * The exception table consists of pairs of relative offsets: the first - * is the relative offset to an instruction that is allowed to fault, - * and the second is the relative offset at which the program should - * continue. No registers are modified, so it is entirely up to the - * continuation code to figure out what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - int insn, fixup; -}; - -#define ARCH_HAS_RELATIVE_EXTABLE - -extern int fixup_exception(struct pt_regs *regs); +#include #define KERNEL_DS (-1UL) #define get_ds() (KERNEL_DS) -- cgit v1.1 From 5cbe865b0e03982c7886f36716fc149415ee8381 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: avr32: switch to generic extable.h Signed-off-by: Al Viro --- arch/avr32/include/asm/Kbuild | 1 + arch/avr32/include/asm/uaccess.h | 5 +---- 2 files changed, 2 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/avr32/include/asm/Kbuild b/arch/avr32/include/asm/Kbuild index 3d7ef2c..ff7d739 100644 --- a/arch/avr32/include/asm/Kbuild +++ b/arch/avr32/include/asm/Kbuild @@ -5,6 +5,7 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += exec.h +generic-y += extable.h generic-y += futex.h generic-y += irq_regs.h generic-y += irq_work.h diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 7ca5cb3..5f3ce85 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -323,9 +323,6 @@ extern long __strnlen_user(const char __user *__s, long __n); #define strlen_user(s) strnlen_user(s, ~0UL >> 1) -struct exception_table_entry -{ - unsigned long insn, fixup; -}; +#include #endif /* __ASM_AVR32_UACCESS_H */ -- cgit v1.1 From 92430dab364019babd4122fef22441ef20bdb39b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:40:57 -0400 Subject: arm64: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/arm64/Kconfig | 1 + arch/arm64/include/asm/uaccess.h | 55 ++++------------------------------------ arch/arm64/kernel/arm64ksyms.c | 2 +- arch/arm64/lib/copy_in_user.S | 4 +-- 4 files changed, 9 insertions(+), 53 deletions(-) (limited to 'arch') diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index a39029b..b72160e 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -115,6 +115,7 @@ 
config ARM64 select SPARSE_IRQ select SYSCTL_EXCEPTION_TRACE select THREAD_INFO_IN_TASK + select ARCH_HAS_RAW_COPY_USER help ARM 64-bit (AArch64) Linux support. diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h index 5a9d49c..ba49717 100644 --- a/arch/arm64/include/asm/uaccess.h +++ b/arch/arm64/include/asm/uaccess.h @@ -331,58 +331,13 @@ do { \ }) extern unsigned long __must_check __arch_copy_from_user(void *to, const void __user *from, unsigned long n); +#define raw_copy_from_user __arch_copy_from_user extern unsigned long __must_check __arch_copy_to_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __must_check __copy_in_user(void __user *to, const void __user *from, unsigned long n); +#define raw_copy_to_user __arch_copy_to_user +extern unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); - -static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n) -{ - kasan_check_write(to, n); - check_object_size(to, n, false); - return __arch_copy_from_user(to, from, n); -} - -static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n) -{ - kasan_check_read(from, n); - check_object_size(from, n, true); - return __arch_copy_to_user(to, from, n); -} - -static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long res = n; - kasan_check_write(to, n); - check_object_size(to, n, false); - - if (access_ok(VERIFY_READ, from, n)) { - res = __arch_copy_from_user(to, from, n); - } - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} - -static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) -{ - kasan_check_read(from, n); - check_object_size(from, n, true); - - if (access_ok(VERIFY_WRITE, to, n)) { - n = __arch_copy_to_user(to, from, n); - } - return n; -} - -static inline unsigned long __must_check copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - if (access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n)) - n = __copy_in_user(to, from, n); - return n; -} - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define INLINE_COPY_TO_USER +#define INLINE_COPY_FROM_USER static inline unsigned long __must_check clear_user(void __user *to, unsigned long n) { diff --git a/arch/arm64/kernel/arm64ksyms.c b/arch/arm64/kernel/arm64ksyms.c index e9c4dc9..67368c7 100644 --- a/arch/arm64/kernel/arm64ksyms.c +++ b/arch/arm64/kernel/arm64ksyms.c @@ -38,7 +38,7 @@ EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(__arch_copy_from_user); EXPORT_SYMBOL(__arch_copy_to_user); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__copy_in_user); +EXPORT_SYMBOL(raw_copy_in_user); /* physical memory */ EXPORT_SYMBOL(memstart_addr); diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 47184c3..b24a830 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -64,14 +64,14 @@ .endm end .req x5 -ENTRY(__copy_in_user) +ENTRY(raw_copy_in_user) uaccess_enable_not_uao x3, x4 add end, x0, x2 #include "copy_template.S" uaccess_disable_not_uao x3 mov x0, #0 ret -ENDPROC(__copy_in_user) +ENDPROC(raw_copy_in_user) .section .fixup,"ax" .align 2 -- cgit v1.1 From e5c1540030a0e2da0c46b3da91ed3ceafd0e4368 
Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 16:03:51 -0400 Subject: avr32: switch to RAW_COPY_USER This one needs profiling; use of asm variant of access_ok() might have been performance-critical. Now copy_{to,from}_user() are using the C variant. I doubt that it's going to have visible effects, but that needs to be experimentally verified. Signed-off-by: Al Viro --- arch/avr32/Kconfig | 1 + arch/avr32/include/asm/uaccess.h | 28 ++++++---------------------- arch/avr32/kernel/avr32_ksyms.c | 2 -- arch/avr32/lib/copy_user.S | 15 --------------- 4 files changed, 7 insertions(+), 39 deletions(-) (limited to 'arch') diff --git a/arch/avr32/Kconfig b/arch/avr32/Kconfig index 7e75d45..8c349f2 100644 --- a/arch/avr32/Kconfig +++ b/arch/avr32/Kconfig @@ -19,6 +19,7 @@ config AVR32 select HAVE_MOD_ARCH_SPECIFIC select MODULES_USE_ELF_RELA select HAVE_NMI + select ARCH_HAS_RAW_COPY_USER help AVR32 is a high-performance 32-bit RISC microprocessor core, designed for cost-sensitive embedded applications, with particular diff --git a/arch/avr32/include/asm/uaccess.h b/arch/avr32/include/asm/uaccess.h index 5f3ce85..619d50e 100644 --- a/arch/avr32/include/asm/uaccess.h +++ b/arch/avr32/include/asm/uaccess.h @@ -66,34 +66,18 @@ static inline void set_fs(mm_segment_t s) extern __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); -extern __kernel_size_t copy_to_user(void __user *to, const void *from, - __kernel_size_t n); -extern __kernel_size_t ___copy_from_user(void *to, const void __user *from, - __kernel_size_t n); - -static inline __kernel_size_t __copy_to_user(void __user *to, const void *from, - __kernel_size_t n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { return __copy_user((void __force *)to, from, n); } -static inline __kernel_size_t __copy_from_user(void *to, - const void __user *from, - __kernel_size_t n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { return __copy_user(to, (const void __force *)from, n); } -static inline __kernel_size_t copy_from_user(void *to, - const void __user *from, - __kernel_size_t n) -{ - size_t res = ___copy_from_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * put_user: - Write a simple value into user space. diff --git a/arch/avr32/kernel/avr32_ksyms.c b/arch/avr32/kernel/avr32_ksyms.c index 0d05fd0..0cdae8e 100644 --- a/arch/avr32/kernel/avr32_ksyms.c +++ b/arch/avr32/kernel/avr32_ksyms.c @@ -36,8 +36,6 @@ EXPORT_SYMBOL(copy_page); /* * Userspace access stuff. */ -EXPORT_SYMBOL(___copy_from_user); -EXPORT_SYMBOL(copy_to_user); EXPORT_SYMBOL(__copy_user); EXPORT_SYMBOL(strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user); diff --git a/arch/avr32/lib/copy_user.S b/arch/avr32/lib/copy_user.S index 0753734..f432757 100644 --- a/arch/avr32/lib/copy_user.S +++ b/arch/avr32/lib/copy_user.S @@ -23,21 +23,6 @@ */ .text .align 1 - .global ___copy_from_user - .type ___copy_from_user, @function -___copy_from_user: - branch_if_kernel r8, __copy_user - ret_if_privileged r8, r11, r10, r10 - rjmp __copy_user - .size ___copy_from_user, . - ___copy_from_user - - .global copy_to_user - .type copy_to_user, @function -copy_to_user: - branch_if_kernel r8, __copy_user - ret_if_privileged r8, r12, r10, r10 - .size copy_to_user, . 
- copy_to_user - .global __copy_user .type __copy_user, @function __copy_user: -- cgit v1.1 From 72d6f65dd7895276f929b48c8397575e272efcca Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: blackfin: switch to generic extable.h Signed-off-by: Al Viro --- arch/blackfin/include/asm/Kbuild | 1 + arch/blackfin/include/asm/uaccess.h | 17 +---------------- 2 files changed, 2 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/include/asm/Kbuild b/arch/blackfin/include/asm/Kbuild index 625db8a..dc4ef9a 100644 --- a/arch/blackfin/include/asm/Kbuild +++ b/arch/blackfin/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += device.h generic-y += div64.h generic-y += emergency-restart.h generic-y += errno.h +generic-y += extable.h generic-y += fb.h generic-y += futex.h generic-y += hw_irq.h diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index c9fedc3..9019ccc 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -42,22 +42,7 @@ static inline int _access_ok(unsigned long addr, unsigned long size) { return 1; extern int _access_ok(unsigned long addr, unsigned long size); #endif -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry { - unsigned long insn, fixup; -}; +#include /* * These are the main single-value transfer routines. 
They automatically -- cgit v1.1 From 50e9ab915a79b3f51f6de8ef9bbdae8279e8fa46 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 00:36:07 -0400 Subject: bfin: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/blackfin/Kconfig | 1 + arch/blackfin/include/asm/uaccess.h | 26 ++++---------------------- 2 files changed, 5 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index 3c1bd64..919dad1 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig @@ -41,6 +41,7 @@ config BLACKFIN select MODULES_USE_ELF_RELA select HAVE_DEBUG_STACKOVERFLOW select HAVE_NMI + select ARCH_HAS_RAW_COPY_USER config GENERIC_CSUM def_bool y diff --git a/arch/blackfin/include/asm/uaccess.h b/arch/blackfin/include/asm/uaccess.h index 9019ccc..f54a34f 100644 --- a/arch/blackfin/include/asm/uaccess.h +++ b/arch/blackfin/include/asm/uaccess.h @@ -144,41 +144,23 @@ static inline int bad_user_access_length(void) : "a" (__ptr(ptr))); \ }) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { memcpy(to, (const void __force *)from, n); return 0; } static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { memcpy((void __force *)to, from, n); SSYNC(); return 0; } -static inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) - return __copy_from_user(to, from, n); - memset(to, 0, n); - return n; -} - -static inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_WRITE, to, n))) - return __copy_to_user(to, from, n); - return n; -} - +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * Copy a null terminated string from userspace. 
*/ -- cgit v1.1 From dcecd9369e3dcbb5da3ecdc9e0b5021d6ee6f879 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 14:16:16 -0500 Subject: c6x: remove duplicate definition of __access_ok Signed-off-by: Al Viro --- arch/c6x/include/asm/uaccess.h | 4 ---- 1 file changed, 4 deletions(-) (limited to 'arch') diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h index 79fb6f4..174f3bd 100644 --- a/arch/c6x/include/asm/uaccess.h +++ b/arch/c6x/include/asm/uaccess.h @@ -13,10 +13,6 @@ #include #include -#ifdef CONFIG_ACCESS_CHECK -#define __access_ok _access_ok -#endif - /* * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h * -- cgit v1.1 From 86944ee1581f4be6af0126e80877b8069e177739 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:11:14 -0400 Subject: c6x: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/c6x/Kconfig | 1 + arch/c6x/include/asm/uaccess.h | 12 ++++++------ 2 files changed, 7 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/c6x/Kconfig b/arch/c6x/Kconfig index 5aa8ea8..3c7bd9a 100644 --- a/arch/c6x/Kconfig +++ b/arch/c6x/Kconfig @@ -18,6 +18,7 @@ config C6X select GENERIC_CLOCKEVENTS select MODULES_USE_ELF_RELA select ARCH_NO_COHERENT_DMA_MMAP + select ARCH_HAS_RAW_COPY_USER config MMU def_bool n diff --git a/arch/c6x/include/asm/uaccess.h b/arch/c6x/include/asm/uaccess.h index 174f3bd..ba67568 100644 --- a/arch/c6x/include/asm/uaccess.h +++ b/arch/c6x/include/asm/uaccess.h @@ -14,12 +14,10 @@ #include /* - * __copy_from_user/copy_to_user are based on ones in asm-generic/uaccess.h - * * C6X supports unaligned 32 and 64 bit loads and stores. */ -static inline __must_check long __copy_from_user(void *to, - const void __user *from, unsigned long n) +static inline __must_check unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { u32 tmp32; u64 tmp64; @@ -54,8 +52,8 @@ static inline __must_check long __copy_from_user(void *to, return 0; } -static inline __must_check long __copy_to_user(void __user *to, - const void *from, unsigned long n) +static inline __must_check unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { u32 tmp32; u64 tmp64; @@ -89,6 +87,8 @@ static inline __must_check long __copy_to_user(void __user *to, memcpy((void __force *)to, from, n); return 0; } +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER extern int _access_ok(unsigned long addr, unsigned long size); #ifdef CONFIG_ACCESS_CHECK -- cgit v1.1 From a8be34459ca92d4f1034921bffd09df5441938be Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: cris: switch to generic extable.h Signed-off-by: Al Viro --- arch/cris/include/asm/Kbuild | 1 + arch/cris/include/asm/uaccess.h | 18 +----------------- 2 files changed, 2 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild index 0f5132b..2890099 100644 --- a/arch/cris/include/asm/Kbuild +++ b/arch/cris/include/asm/Kbuild @@ -9,6 +9,7 @@ generic-y += device.h generic-y += div64.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += emergency-restart.h generic-y += fcntl.h generic-y += futex.h diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index 5f5b8f5..f62f546 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -50,23 +50,7 @@ #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), (size)) #include 
- -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry { - unsigned long insn, fixup; -}; +#include /* * These are the main single-value transfer routines. They automatically -- cgit v1.1 From 07f78b308984042dde424191ab8f3237bcf6a0d1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 15:07:21 -0400 Subject: cris: don't rely upon __copy_user_zeroing() zeroing the tail we want to get rid of it; unfortunately, it's tangled as hell, so it'll take many steps, more's the pity. Signed-off-by: Al Viro --- arch/cris/include/asm/uaccess.h | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index f62f546..c8858e1 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -338,14 +338,16 @@ static inline size_t clear_user(void __user *to, size_t n) static inline size_t copy_from_user(void *to, const void __user *from, size_t n) { - if (unlikely(!access_ok(VERIFY_READ, from, n))) { - memset(to, 0, n); - return n; + size_t res = n; + if (likely(access_ok(VERIFY_READ, from, n))) { + if (__builtin_constant_p(n)) + res = __constant_copy_from_user(to, from, n); + else + res = __copy_user_zeroing(to, from, n); } - if (__builtin_constant_p(n)) - return __constant_copy_from_user(to, from, n); - else - return __copy_user_zeroing(to, from, n); + if (unlikely(res)) + memset(to + n - res , 0, res); + return res; } static inline size_t copy_to_user(void __user *to, const void *from, size_t n) -- cgit v1.1 From c8313947af458d9081c3adca2a9a9c962cf014f1 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 15:17:25 -0400 Subject: cris: get rid of zeroing in __asm_copy_from_user_N for N > 4 only one user for those Signed-off-by: Al Viro --- arch/cris/include/arch-v10/arch/uaccess.h | 34 +++++++++++-------------- arch/cris/include/arch-v32/arch/uaccess.h | 42 +++++++++++-------------------- 2 files changed, 28 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/cris/include/arch-v10/arch/uaccess.h b/arch/cris/include/arch-v10/arch/uaccess.h index 65b02d9..f68d3d19 100644 --- a/arch/cris/include/arch-v10/arch/uaccess.h +++ b/arch/cris/include/arch-v10/arch/uaccess.h @@ -210,8 +210,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_4x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "4: move.b $r9,[%0+]\n", \ - "5: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "5: addq 1,%2\n", \ " .dword 4b,5b\n") #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -219,7 +218,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.w [%1+],$r9\n" \ "4: move.w $r9,[%0+]\n" COPY, \ "5: addq 2,%2\n" \ - " clear.w [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 4b,5b\n" TENTRY) #define __asm_copy_from_user_6(to, from, ret) \ @@ -229,8 +228,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_6x_cont(to, from, ret, \ " move.b 
[%1+],$r9\n" \ "6: move.b $r9,[%0+]\n", \ - "7: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "7: addq 1,%2\n", \ " .dword 6b,7b\n") #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -238,7 +236,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d [%1+],$r9\n" \ "4: move.d $r9,[%0+]\n" COPY, \ "5: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 4b,5b\n" TENTRY) #define __asm_copy_from_user_8(to, from, ret) \ @@ -248,8 +246,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_8x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "6: move.b $r9,[%0+]\n", \ - "7: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "7: addq 1,%2\n", \ " .dword 6b,7b\n") #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -257,7 +254,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.w [%1+],$r9\n" \ "6: move.w $r9,[%0+]\n" COPY, \ "7: addq 2,%2\n" \ - " clear.w [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 6b,7b\n" TENTRY) #define __asm_copy_from_user_10(to, from, ret) \ @@ -267,8 +264,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_10x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "8: move.b $r9,[%0+]\n", \ - "9: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "9: addq 1,%2\n", \ " .dword 8b,9b\n") #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -276,7 +272,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d [%1+],$r9\n" \ "6: move.d $r9,[%0+]\n" COPY, \ "7: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 6b,7b\n" TENTRY) #define __asm_copy_from_user_12(to, from, ret) \ @@ -286,8 +282,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_12x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "8: move.b $r9,[%0+]\n", \ - "9: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "9: addq 1,%2\n", \ " .dword 8b,9b\n") #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -295,7 +290,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.w [%1+],$r9\n" \ "8: move.w $r9,[%0+]\n" COPY, \ "9: addq 2,%2\n" \ - " clear.w [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 8b,9b\n" TENTRY) #define __asm_copy_from_user_14(to, from, ret) \ @@ -305,8 +300,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_14x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "10: move.b $r9,[%0+]\n", \ - "11: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "11: addq 1,%2\n", \ " .dword 10b,11b\n") #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -314,7 +308,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d [%1+],$r9\n" \ "8: move.d $r9,[%0+]\n" COPY, \ "9: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 8b,9b\n" TENTRY) #define __asm_copy_from_user_16(to, from, ret) \ @@ -325,7 +319,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d [%1+],$r9\n" \ "10: move.d $r9,[%0+]\n" COPY, \ "11: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 10b,11b\n" TENTRY) #define __asm_copy_from_user_20(to, from, ret) \ @@ -336,7 +330,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d [%1+],$r9\n" \ "12: move.d $r9,[%0+]\n" COPY, \ "13: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + FIXUP, \ " .dword 12b,13b\n" TENTRY) #define __asm_copy_from_user_24(to, from, ret) \ diff --git 
a/arch/cris/include/arch-v32/arch/uaccess.h b/arch/cris/include/arch-v32/arch/uaccess.h index 3196019..7a0032a 100644 --- a/arch/cris/include/arch-v32/arch/uaccess.h +++ b/arch/cris/include/arch-v32/arch/uaccess.h @@ -224,8 +224,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_4x_cont(to, from, ret, \ "4: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "5: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "5: addq 1,%2\n", \ " .dword 4b,5b\n") #define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -234,8 +233,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "4: move.w [%1+],$acr\n" \ " move.w $acr,[%0+]\n", \ FIXUP \ - "5: addq 2,%2\n" \ - " clear.w [%0+]\n", \ + "5: addq 2,%2\n", \ TENTRY \ " .dword 4b,5b\n") @@ -246,8 +244,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_6x_cont(to, from, ret, \ "6: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "7: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "7: addq 1,%2\n", \ " .dword 6b,7b\n") #define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -256,8 +253,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "4: move.d [%1+],$acr\n" \ " move.d $acr,[%0+]\n", \ FIXUP \ - "5: addq 4,%2\n" \ - " clear.d [%0+]\n", \ + "5: addq 4,%2\n", \ TENTRY \ " .dword 4b,5b\n") @@ -268,8 +264,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_8x_cont(to, from, ret, \ "6: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "7: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "7: addq 1,%2\n", \ " .dword 6b,7b\n") #define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -278,8 +273,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "6: move.w [%1+],$acr\n" \ " move.w $acr,[%0+]\n", \ FIXUP \ - "7: addq 2,%2\n" \ - " clear.w [%0+]\n", \ + "7: addq 2,%2\n", \ TENTRY \ " .dword 6b,7b\n") @@ -290,8 +284,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_10x_cont(to, from, ret, \ "8: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "9: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "9: addq 1,%2\n", \ " .dword 8b,9b\n") #define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -300,8 +293,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "6: move.d [%1+],$acr\n" \ " move.d $acr,[%0+]\n", \ FIXUP \ - "7: addq 4,%2\n" \ - " clear.d [%0+]\n", \ + "7: addq 4,%2\n", \ TENTRY \ " .dword 6b,7b\n") @@ -312,8 +304,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_12x_cont(to, from, ret, \ "8: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "9: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "9: addq 1,%2\n", \ " .dword 8b,9b\n") #define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -322,8 +313,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "8: move.w [%1+],$acr\n" \ " move.w $acr,[%0+]\n", \ FIXUP \ - "9: addq 2,%2\n" \ - " clear.w [%0+]\n", \ + "9: addq 2,%2\n", \ TENTRY \ " .dword 8b,9b\n") @@ -334,8 +324,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_14x_cont(to, from, ret, \ "10: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "11: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "11: addq 1,%2\n", \ " .dword 10b,11b\n") #define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -344,8 +333,7 @@ __do_strncpy_from_user(char *dst, const char *src, long 
count) "8: move.d [%1+],$acr\n" \ " move.d $acr,[%0+]\n", \ FIXUP \ - "9: addq 4,%2\n" \ - " clear.d [%0+]\n", \ + "9: addq 4,%2\n", \ TENTRY \ " .dword 8b,9b\n") @@ -358,8 +346,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "10: move.d [%1+],$acr\n" \ " move.d $acr,[%0+]\n", \ FIXUP \ - "11: addq 4,%2\n" \ - " clear.d [%0+]\n", \ + "11: addq 4,%2\n", \ TENTRY \ " .dword 10b,11b\n") @@ -372,8 +359,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "12: move.d [%1+],$acr\n" \ " move.d $acr,[%0+]\n", \ FIXUP \ - "13: addq 4,%2\n" \ - " clear.d [%0+]\n", \ + "13: addq 4,%2\n", \ TENTRY \ " .dword 12b,13b\n") -- cgit v1.1 From de09be340d3ef4568b27282f2f6fc44b1cb559ab Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 15:25:35 -0400 Subject: cris: get rid of zeroing ... the rest of it Signed-off-by: Al Viro --- arch/cris/arch-v10/lib/usercopy.c | 24 ++++++------------------ arch/cris/arch-v32/lib/usercopy.c | 23 ++++++----------------- arch/cris/include/arch-v10/arch/uaccess.h | 12 ++++-------- arch/cris/include/arch-v32/arch/uaccess.h | 12 ++++-------- 4 files changed, 20 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c index 1ba7cc0..80b7219 100644 --- a/arch/cris/arch-v10/lib/usercopy.c +++ b/arch/cris/arch-v10/lib/usercopy.c @@ -217,19 +217,17 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, { __asm_copy_from_user_1 (dst, src, retn); n--; + if (retn) + goto exception; } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2 (dst, src, retn); n -= 2; + if (retn) + goto exception; } - - /* We only need one check after the unalignment-adjustments, because - if both adjustments were done, either both or neither reference - had an exception. */ - if (retn != 0) - goto copy_exception_bytes; } /* Decide which copying method to use. */ @@ -328,7 +326,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + goto exception; } /* If we get here, there were no memory read faults. */ @@ -356,17 +354,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, bytes. */ return retn; -copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. */ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - +exception: return retn + n; } EXPORT_SYMBOL(__copy_user_zeroing); diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c index 05e58da..25f421f 100644 --- a/arch/cris/arch-v32/lib/usercopy.c +++ b/arch/cris/arch-v32/lib/usercopy.c @@ -184,19 +184,18 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, { __asm_copy_from_user_1 (dst, src, retn); n--; + if (retn != 0) + goto exception; } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2 (dst, src, retn); n -= 2; + if (retn != 0) + goto exception; } - /* We only need one check after the unalignment-adjustments, because - if both adjustments were done, either both or neither reference - had an exception. */ - if (retn != 0) - goto copy_exception_bytes; } /* Movem is dirt cheap. 
The overheap is low enough to always use the @@ -279,7 +278,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + goto exception; } /* If we get here, there were no memory read faults. */ @@ -307,17 +306,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, bytes. */ return retn; -copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. */ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - +exception: return retn + n; } EXPORT_SYMBOL(__copy_user_zeroing); diff --git a/arch/cris/include/arch-v10/arch/uaccess.h b/arch/cris/include/arch-v10/arch/uaccess.h index f68d3d19..5477c98 100644 --- a/arch/cris/include/arch-v10/arch/uaccess.h +++ b/arch/cris/include/arch-v10/arch/uaccess.h @@ -172,16 +172,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_user_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "2: move.b $r9,[%0+]\n", \ - "3: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "3: addq 1,%2\n", \ " .dword 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " move.w [%1+],$r9\n" \ "2: move.w $r9,[%0+]\n" COPY, \ - "3: addq 2,%2\n" \ - " clear.w [%0+]\n" FIXUP, \ + "3: addq 2,%2\n" FIXUP, \ " .dword 2b,3b\n" TENTRY) #define __asm_copy_from_user_2(to, from, ret) \ @@ -191,16 +189,14 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_2x_cont(to, from, ret, \ " move.b [%1+],$r9\n" \ "4: move.b $r9,[%0+]\n", \ - "5: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "5: addq 1,%2\n", \ " .dword 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " move.d [%1+],$r9\n" \ "2: move.d $r9,[%0+]\n" COPY, \ - "3: addq 4,%2\n" \ - " clear.d [%0+]\n" FIXUP, \ + "3: addq 4,%2\n" FIXUP, \ " .dword 2b,3b\n" TENTRY) #define __asm_copy_from_user_4(to, from, ret) \ diff --git a/arch/cris/include/arch-v32/arch/uaccess.h b/arch/cris/include/arch-v32/arch/uaccess.h index 7a0032a..dc2ce09 100644 --- a/arch/cris/include/arch-v32/arch/uaccess.h +++ b/arch/cris/include/arch-v32/arch/uaccess.h @@ -178,8 +178,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) "2: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ "3: addq 1,%2\n" \ - " jump 1b\n" \ - " clear.b [%0+]\n", \ + " jump 1b\n", \ " .dword 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -189,8 +188,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.w $acr,[%0+]\n", \ FIXUP \ "3: addq 2,%2\n" \ - " jump 1b\n" \ - " clear.w [%0+]\n", \ + " jump 1b\n", \ TENTRY \ " .dword 2b,3b\n") @@ -201,8 +199,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) __asm_copy_from_user_2x_cont(to, from, ret, \ "4: move.b [%1+],$acr\n" \ " move.b $acr,[%0+]\n", \ - "5: addq 1,%2\n" \ - " clear.b [%0+]\n", \ + "5: addq 1,%2\n", \ " .dword 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ @@ -212,8 +209,7 @@ __do_strncpy_from_user(char *dst, const char *src, long count) " move.d $acr,[%0+]\n", \ FIXUP \ "3: addq 4,%2\n" \ - " jump 1b\n" \ - " clear.d [%0+]\n", \ + " jump 1b\n", \ TENTRY \ " .dword 2b,3b\n") -- cgit v1.1 From 
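
/*
 * Sketch of the contract the cris fixup code above now honours (toy
 * model, hypothetical names): on a fault the copy stops early and the
 * primitive returns the number of bytes left uncopied, so the caller,
 * not the asm fixup, decides whether to zero the tail.
 */
#include <stddef.h>

static size_t toy_copy_in(char *dst, const char *src, size_t n,
			  size_t fault_at)
{
	size_t done = 0;

	while (done < n && done < fault_at) {	/* fault_at simulates a trap */
		dst[done] = src[done];
		done++;
	}
	return n - done;	/* bytes NOT copied (the "retn + n" above) */
}
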
b71f1bf57f995ae4ce0fe2f907604c841ca8493f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 15:28:30 -0400 Subject: cris: rename __copy_user_zeroing to __copy_user_in ... now that it doesn't zero anymore Signed-off-by: Al Viro --- arch/cris/arch-v10/lib/usercopy.c | 7 +++---- arch/cris/arch-v32/lib/usercopy.c | 7 +++---- arch/cris/include/asm/uaccess.h | 8 ++++---- 3 files changed, 10 insertions(+), 12 deletions(-) (limited to 'arch') diff --git a/arch/cris/arch-v10/lib/usercopy.c b/arch/cris/arch-v10/lib/usercopy.c index 80b7219..48fa37f 100644 --- a/arch/cris/arch-v10/lib/usercopy.c +++ b/arch/cris/arch-v10/lib/usercopy.c @@ -188,11 +188,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) } EXPORT_SYMBOL(__copy_user); -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. The return-value is the number of bytes that were +/* Copy from user to kernel. The return-value is the number of bytes that were inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, +unsigned long __copy_user_in(void *pdst, const void __user *psrc, unsigned long pn) { /* We want the parameters put in special registers. @@ -357,7 +356,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, exception: return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(__copy_user_in); /* Zero userspace. */ unsigned long __do_clear_user(void __user *pto, unsigned long pn) diff --git a/arch/cris/arch-v32/lib/usercopy.c b/arch/cris/arch-v32/lib/usercopy.c index 25f421f..20b6080 100644 --- a/arch/cris/arch-v32/lib/usercopy.c +++ b/arch/cris/arch-v32/lib/usercopy.c @@ -156,10 +156,9 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, unsigned long pn) } EXPORT_SYMBOL(__copy_user); -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. The return-value is the number of bytes that were +/* Copy from user to kernel. The return-value is the number of bytes that were inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, +unsigned long __copy_user_in(void *pdst, const void __user *psrc, unsigned long pn) { /* We want the parameters put in special registers. @@ -309,7 +308,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, exception: return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(__copy_user_in); /* Zero userspace. 
*/ unsigned long __do_clear_user(void __user *pto, unsigned long pn) diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index c8858e1..fc30fda 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -169,7 +169,7 @@ extern long __get_user_bad(void); live in lib/usercopy.c */ extern unsigned long __copy_user(void __user *to, const void *from, unsigned long n); -extern unsigned long __copy_user_zeroing(void *to, const void __user *from, unsigned long n); +extern unsigned long __copy_user_in(void *to, const void __user *from, unsigned long n); extern unsigned long __do_clear_user(void __user *to, unsigned long n); static inline long @@ -236,7 +236,7 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) else if (n == 24) __asm_copy_from_user_24(to, from, ret); else - ret = __copy_user_zeroing(to, from, n); + ret = __copy_user_in(to, from, n); return ret; } @@ -343,7 +343,7 @@ static inline size_t copy_from_user(void *to, const void __user *from, size_t n) if (__builtin_constant_p(n)) res = __constant_copy_from_user(to, from, n); else - res = __copy_user_zeroing(to, from, n); + res = __copy_user_in(to, from, n); } if (unlikely(res)) memset(to + n - res , 0, res); @@ -368,7 +368,7 @@ static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void __user *from, unsigned long n) { - return __copy_user_zeroing(to, from, n); + return __copy_user_in(to, from, n); } static inline unsigned long -- cgit v1.1 From 0c7e9a870eb2a5db03780d7d3f83d0df7623a9c7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:18:37 -0400 Subject: cris: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/cris/Kconfig | 1 + arch/cris/include/asm/uaccess.h | 53 +++++++++-------------------------------- 2 files changed, 12 insertions(+), 42 deletions(-) (limited to 'arch') diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index 71b758d..36f94c4 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig @@ -71,6 +71,7 @@ config CRIS select GENERIC_SCHED_CLOCK if ETRAX_ARCH_V32 select HAVE_DEBUG_BUGVERBOSE if ETRAX_ARCH_V32 select HAVE_NMI + select ARCH_HAS_RAW_COPY_USER config HZ int diff --git a/arch/cris/include/asm/uaccess.h b/arch/cris/include/asm/uaccess.h index fc30fda..0d473ae 100644 --- a/arch/cris/include/asm/uaccess.h +++ b/arch/cris/include/asm/uaccess.h @@ -336,64 +336,33 @@ static inline size_t clear_user(void __user *to, size_t n) return __do_clear_user(to, n); } -static inline size_t copy_from_user(void *to, const void __user *from, size_t n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - size_t res = n; - if (likely(access_ok(VERIFY_READ, from, n))) { - if (__builtin_constant_p(n)) - res = __constant_copy_from_user(to, from, n); - else - res = __copy_user_in(to, from, n); - } - if (unlikely(res)) - memset(to + n - res , 0, res); - return res; + if (__builtin_constant_p(n)) + return __constant_copy_from_user(to, from, n); + else + return __copy_user_in(to, from, n); } -static inline size_t copy_to_user(void __user *to, const void *from, size_t n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (unlikely(!access_ok(VERIFY_WRITE, to, n))) - return n; if (__builtin_constant_p(n)) return __constant_copy_to_user(to, from, n); else return __copy_user(to, from, n); } -/* We let the __ versions of copy_from/to_user inline, because they're often - * used in fast paths and have only a small space 
overhead. - */ - -static inline unsigned long -__generic_copy_from_user_nocheck(void *to, const void __user *from, - unsigned long n) -{ - return __copy_user_in(to, from, n); -} - -static inline unsigned long -__generic_copy_to_user_nocheck(void __user *to, const void *from, - unsigned long n) -{ - return __copy_user(to, from, n); -} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER static inline unsigned long -__generic_clear_user_nocheck(void __user *to, unsigned long n) +__clear_user(void __user *to, unsigned long n) { return __do_clear_user(to, n); } -/* without checking */ - -#define __copy_to_user(to, from, n) \ - __generic_copy_to_user_nocheck((to), (from), (n)) -#define __copy_from_user(to, from, n) \ - __generic_copy_from_user_nocheck((to), (from), (n)) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user -#define __clear_user(to, n) __generic_clear_user_nocheck((to), (n)) - #define strlen_user(str) strnlen_user((str), 0x7ffffffe) #endif /* _CRIS_UACCESS_H */ -- cgit v1.1 From 9a7513cfa26f5fcce15de6130ce3c27d77c0ce55 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:12:18 -0500 Subject: frv: switch to use of fixup_exception() Massage frv search_exception_table() to a) taking pt_regs pointer as explicit argument b) updating ->pc on success Simplifies callers a bit and allows to convert to generic extable.h, while we are at it. Signed-off-by: Al Viro --- arch/frv/include/asm/Kbuild | 1 + arch/frv/include/asm/uaccess.h | 23 +---------------------- arch/frv/kernel/traps.c | 7 +------ arch/frv/mm/extable.c | 27 +++++++++++++-------------- arch/frv/mm/fault.c | 6 ++---- 5 files changed, 18 insertions(+), 46 deletions(-) (limited to 'arch') diff --git a/arch/frv/include/asm/Kbuild b/arch/frv/include/asm/Kbuild index c33b467..cce3bc3 100644 --- a/arch/frv/include/asm/Kbuild +++ b/arch/frv/include/asm/Kbuild @@ -1,6 +1,7 @@ generic-y += clkdev.h generic-y += exec.h +generic-y += extable.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 55b3a69..5bcc57d 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -18,6 +18,7 @@ #include #include #include +#include #define __ptr(x) ((unsigned long __force *)(x)) @@ -59,26 +60,6 @@ static inline int ___range_ok(unsigned long addr, unsigned long size) #define access_ok(type,addr,size) (__range_ok((void __user *)(addr), (size)) == 0) #define __access_ok(addr,size) (__range_ok((addr), (size)) == 0) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -/* Returns 0 if exception not found and fixup otherwise. */ -extern unsigned long search_exception_table(unsigned long); - /* * These are the main single-value transfer routines. 
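
/*
 * The frv rework in this patch follows a pattern several architectures
 * adopt in this series: instead of handing back a fixup address for
 * every caller to install, fixup_exception() takes the register frame
 * and patches the PC itself.  A hypothetical model:
 */
struct toy_regs {
	unsigned long pc;
};

/* stand-in for search_exception_tables(); 0 means "no entry found" */
static unsigned long toy_find_fixup(unsigned long pc)
{
	return pc ? pc + 4 : 0;	/* pretend the fixup follows the insn */
}

static int toy_fixup_exception(struct toy_regs *regs)
{
	unsigned long fixup = toy_find_fixup(regs->pc);

	if (!fixup)
		return 0;	/* caller falls through to die()/oops */
	regs->pc = fixup;	/* resume at the fixup code */
	return 1;
}
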
They automatically @@ -314,6 +295,4 @@ extern long strnlen_user(const char __user *src, long count); #define strlen_user(str) strnlen_user(str, 32767) -extern unsigned long search_exception_table(unsigned long addr); - #endif /* _ASM_UACCESS_H */ diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c index ce29991..fb08ebe 100644 --- a/arch/frv/kernel/traps.c +++ b/arch/frv/kernel/traps.c @@ -360,13 +360,8 @@ asmlinkage void memory_access_exception(unsigned long esr0, siginfo_t info; #ifdef CONFIG_MMU - unsigned long fixup; - - fixup = search_exception_table(__frame->pc); - if (fixup) { - __frame->pc = fixup; + if (fixup_exception(__frame)) return; - } #endif die_if_kernel("-- Memory Access Exception --\n" diff --git a/arch/frv/mm/extable.c b/arch/frv/mm/extable.c index a0e8b3e..9198ddd 100644 --- a/arch/frv/mm/extable.c +++ b/arch/frv/mm/extable.c @@ -10,40 +10,39 @@ extern const void __memset_end, __memset_user_error_lr, __memset_user_error_hand extern const void __memcpy_end, __memcpy_user_error_lr, __memcpy_user_error_handler; extern spinlock_t modlist_lock; - -/*****************************************************************************/ -/* - * see if there's a fixup handler available to deal with a kernel fault - */ -unsigned long search_exception_table(unsigned long pc) +int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *extab; + unsigned long pc = regs->pc; /* determine if the fault lay during a memcpy_user or a memset_user */ - if (__frame->lr == (unsigned long) &__memset_user_error_lr && + if (regs->lr == (unsigned long) &__memset_user_error_lr && (unsigned long) &memset <= pc && pc < (unsigned long) &__memset_end ) { /* the fault occurred in a protected memset * - we search for the return address (in LR) instead of the program counter * - it was probably during a clear_user() */ - return (unsigned long) &__memset_user_error_handler; + regs->pc = (unsigned long) &__memset_user_error_handler; + return 1; } - if (__frame->lr == (unsigned long) &__memcpy_user_error_lr && + if (regs->lr == (unsigned long) &__memcpy_user_error_lr && (unsigned long) &memcpy <= pc && pc < (unsigned long) &__memcpy_end ) { /* the fault occurred in a protected memset * - we search for the return address (in LR) instead of the program counter * - it was probably during a copy_to/from_user() */ - return (unsigned long) &__memcpy_user_error_handler; + regs->pc = (unsigned long) &__memcpy_user_error_handler; + return 1; } extab = search_exception_tables(pc); - if (extab) - return extab->fixup; + if (extab) { + regs->pc = extab->fixup; + return 1; + } return 0; - -} /* end search_exception_table() */ +} diff --git a/arch/frv/mm/fault.c b/arch/frv/mm/fault.c index 614a46c..179e79e 100644 --- a/arch/frv/mm/fault.c +++ b/arch/frv/mm/fault.c @@ -33,7 +33,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear { struct vm_area_struct *vma; struct mm_struct *mm; - unsigned long _pme, lrai, lrad, fixup; + unsigned long _pme, lrai, lrad; unsigned long flags = 0; siginfo_t info; pgd_t *pge; @@ -201,10 +201,8 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear no_context: /* are we prepared to handle this kernel fault? */ - if ((fixup = search_exception_table(__frame->pc)) != 0) { - __frame->pc = fixup; + if (fixup_exception(__frame)) return; - } /* * Oops. The kernel tried to access some bad page. 
We'll have to -- cgit v1.1 From 48f666c986671ea2021971bfbe4d74a7ebcf0a44 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:46:17 -0400 Subject: frv: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/frv/Kconfig | 1 + arch/frv/include/asm/uaccess.h | 57 +++++++++++++++++------------------------- 2 files changed, 24 insertions(+), 34 deletions(-) (limited to 'arch') diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index eefd9a4..e489fef 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig @@ -16,6 +16,7 @@ config FRV select OLD_SIGACTION select HAVE_DEBUG_STACKOVERFLOW select ARCH_NO_COHERENT_DMA_MMAP + select ARCH_HAS_RAW_COPY_USER config ZONE_DMA bool diff --git a/arch/frv/include/asm/uaccess.h b/arch/frv/include/asm/uaccess.h index 5bcc57d..e4e33b4 100644 --- a/arch/frv/include/asm/uaccess.h +++ b/arch/frv/include/asm/uaccess.h @@ -233,61 +233,50 @@ do { \ /* * */ + #define ____force(x) (__force void *)(void __user *)(x) #ifdef CONFIG_MMU extern long __memset_user(void *dst, unsigned long count); extern long __memcpy_user(void *dst, const void *src, unsigned long count); #define __clear_user(dst,count) __memset_user(____force(dst), (count)) -#define __copy_from_user_inatomic(to, from, n) __memcpy_user((to), ____force(from), (n)) -#define __copy_to_user_inatomic(to, from, n) __memcpy_user(____force(to), (from), (n)) #else #define __clear_user(dst,count) (memset(____force(dst), 0, (count)), 0) -#define __copy_from_user_inatomic(to, from, n) (memcpy((to), ____force(from), (n)), 0) -#define __copy_to_user_inatomic(to, from, n) (memcpy(____force(to), (from), (n)), 0) #endif -static inline unsigned long __must_check -clear_user(void __user *to, unsigned long n) -{ - if (likely(__access_ok(to, n))) - n = __clear_user(to, n); - return n; -} - -static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - return __copy_to_user_inatomic(to, from, n); -} - static inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - might_fault(); - return __copy_from_user_inatomic(to, from, n); +#ifdef CONFIG_MMU + return __memcpy_user(to, (__force const void *)from, n); +#else + memcpy(to, (__force const void *)from, n); + return 0; +#endif } -static inline long copy_from_user(void *to, const void __user *from, unsigned long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - unsigned long ret = n; - - if (likely(__access_ok(from, n))) - ret = __copy_from_user(to, from, n); - - if (unlikely(ret != 0)) - memset(to + (n - ret), 0, ret); - - return ret; +#ifdef CONFIG_MMU + return __memcpy_user((__force void *)to, from, n); +#else + memcpy((__force void *)to, from, n); + return 0; +#endif } +#define INLINE_COPY_TO_USER +#define INLINE_COPY_FROM_USER -static inline long copy_to_user(void __user *to, const void *from, unsigned long n) +static inline unsigned long __must_check +clear_user(void __user *to, unsigned long n) { - return likely(__access_ok(to, n)) ? 
__copy_to_user(to, from, n) : n; + if (likely(__access_ok(to, n))) + n = __clear_user(to, n); + return n; } extern long strncpy_from_user(char *dst, const char __user *src, long count); -- cgit v1.1 From 33ab2da801debd947bf0f36848d2b9ea20fad835 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:22:03 -0400 Subject: 8300: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/h8300/Kconfig | 1 + arch/h8300/include/asm/uaccess.h | 10 ++++++---- 2 files changed, 7 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index 3ae8525..4738834 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig @@ -22,6 +22,7 @@ config H8300 select HAVE_ARCH_KGDB select HAVE_ARCH_HASH select CPU_NO_EFFICIENT_FFS + select ARCH_HAS_RAW_COPY_USER config RWSEM_GENERIC_SPINLOCK def_bool y diff --git a/arch/h8300/include/asm/uaccess.h b/arch/h8300/include/asm/uaccess.h index c156a41..6f6144a 100644 --- a/arch/h8300/include/asm/uaccess.h +++ b/arch/h8300/include/asm/uaccess.h @@ -3,8 +3,8 @@ #include -static inline __must_check long __copy_from_user(void *to, - const void __user * from, unsigned long n) +static inline __must_check unsigned long +raw_copy_from_user(void *to, const void __user * from, unsigned long n) { if (__builtin_constant_p(n)) { switch(n) { @@ -24,8 +24,8 @@ static inline __must_check long __copy_from_user(void *to, return 0; } -static inline __must_check long __copy_to_user(void __user *to, - const void *from, unsigned long n) +static inline __must_check unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n)) { switch(n) { @@ -46,6 +46,8 @@ static inline __must_check long __copy_to_user(void __user *to, memcpy((void __force *)to, from, n); return 0; } +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #include -- cgit v1.1 From 35f8acd5c660eb9dbf7a8379724821abb823a3d8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: m32r: switch to generic extable.h Signed-off-by: Al Viro --- arch/m32r/include/asm/Kbuild | 1 + arch/m32r/include/asm/uaccess.h | 20 +------------------- 2 files changed, 2 insertions(+), 19 deletions(-) (limited to 'arch') diff --git a/arch/m32r/include/asm/Kbuild b/arch/m32r/include/asm/Kbuild index deb2987..c000ffa 100644 --- a/arch/m32r/include/asm/Kbuild +++ b/arch/m32r/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += clkdev.h generic-y += current.h generic-y += exec.h +generic-y += extable.h generic-y += irq_work.h generic-y += kvm_para.h generic-y += mcs_spinlock.h diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index 96b0efd..e47dcc0 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -109,25 +109,7 @@ static inline int access_ok(int type, const void *addr, unsigned long size) } #endif /* CONFIG_MMU */ -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. 
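
/*
 * Shape of what selecting ARCH_HAS_RAW_COPY_USER buys, sketched loosely
 * after the generic wrapper (toy names, access_ok() check elided): the
 * arch only provides raw_copy_from_user(), returning how many bytes it
 * could not copy, and the generic layer adds the range check and the
 * tail zeroing exactly once.
 */
#include <string.h>

/* arch hook; a nommu-style arch can make this a plain memcpy */
static unsigned long toy_raw_copy_from_user(void *to, const void *from,
					    unsigned long n)
{
	memcpy(to, from, n);
	return 0;	/* 0 bytes left uncopied */
}

static unsigned long toy_generic_copy_from_user(void *to, const void *from,
						unsigned long n)
{
	unsigned long res = toy_raw_copy_from_user(to, from, n);

	if (res)	/* short copy: clear what userspace never supplied */
		memset((char *)to + (n - res), 0, res);
	return res;
}
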
- */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); +#include /* * These are the main single-value transfer routines. They automatically -- cgit v1.1 From 8cd920f26785ce42ec6fb807d40de67fc2eb41ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 19 Mar 2017 16:11:13 -0400 Subject: m32r: get rid of zeroing Signed-off-by: Al Viro --- arch/m32r/include/asm/uaccess.h | 66 +---------------------------------------- arch/m32r/lib/usercopy.c | 9 +++--- 2 files changed, 6 insertions(+), 69 deletions(-) (limited to 'arch') diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index e47dcc0..d5c5e68 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -460,77 +460,13 @@ do { \ : "r14", "memory"); \ } while (0) -#define __copy_user_zeroing(to, from, size) \ -do { \ - unsigned long __dst, __src, __c; \ - __asm__ __volatile__ ( \ - " mv r14, %0\n" \ - " or r14, %1\n" \ - " beq %0, %1, 9f\n" \ - " beqz %2, 9f\n" \ - " and3 r14, r14, #3\n" \ - " bnez r14, 2f\n" \ - " and3 %2, %2, #3\n" \ - " beqz %3, 2f\n" \ - " addi %0, #-4 ; word_copy \n" \ - " .fillinsn\n" \ - "0: ld r14, @%1+\n" \ - " addi %3, #-1\n" \ - " .fillinsn\n" \ - "1: st r14, @+%0\n" \ - " bnez %3, 0b\n" \ - " beqz %2, 9f\n" \ - " addi %0, #4\n" \ - " .fillinsn\n" \ - "2: ldb r14, @%1 ; byte_copy \n" \ - " .fillinsn\n" \ - "3: stb r14, @%0\n" \ - " addi %1, #1\n" \ - " addi %2, #-1\n" \ - " addi %0, #1\n" \ - " bnez %2, 2b\n" \ - " .fillinsn\n" \ - "9:\n" \ - ".section .fixup,\"ax\"\n" \ - " .balign 4\n" \ - "5: addi %3, #1\n" \ - " addi %1, #-4\n" \ - " .fillinsn\n" \ - "6: slli %3, #2\n" \ - " add %2, %3\n" \ - " addi %0, #4\n" \ - " .fillinsn\n" \ - "7: ldi r14, #0 ; store zero \n" \ - " .fillinsn\n" \ - "8: addi %2, #-1\n" \ - " stb r14, @%0 ; ACE? \n" \ - " addi %0, #1\n" \ - " bnez %2, 8b\n" \ - " seth r14, #high(9b)\n" \ - " or3 r14, r14, #low(9b)\n" \ - " jmp r14\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .balign 4\n" \ - " .long 0b,6b\n" \ - " .long 1b,5b\n" \ - " .long 2b,7b\n" \ - " .long 3b,7b\n" \ - ".previous\n" \ - : "=&r" (__dst), "=&r" (__src), "=&r" (size), \ - "=&r" (__c) \ - : "0" (to), "1" (from), "2" (size), "3" (size / 4) \ - : "r14", "memory"); \ -} while (0) - - /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. 
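
/*
 * prefetchw() in the m32r copy routine below is a write-intent prefetch
 * hint.  The portable GCC spelling of the same idea (sketch only; the
 * kernel wraps this builtin per architecture):
 */
static inline void toy_prefetchw(const void *p)
{
	/* rw = 1: prefetch for write; locality = 3: keep in all caches */
	__builtin_prefetch(p, 1, 3);
}
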
*/ static inline unsigned long __generic_copy_from_user_nocheck(void *to, const void __user *from, unsigned long n) { - __copy_user_zeroing(to, from, n); + __copy_user(to, from, n); return n; } diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c index fd03f27..6aacf5b 100644 --- a/arch/m32r/lib/usercopy.c +++ b/arch/m32r/lib/usercopy.c @@ -23,12 +23,13 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n) unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long ret = n; prefetchw(to); if (access_ok(VERIFY_READ, from, n)) - __copy_user_zeroing(to,from,n); - else - memset(to, 0, n); - return n; + ret = __copy_user(to,from,n); + if (unlikely(ret)) + memset(to + n - ret, 0, ret); + return ret; } -- cgit v1.1 From 68acfdcb477abdbf875e33e4a950094c8de08f41 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:33:17 -0500 Subject: m68k: switch to generic extable.h Signed-off-by: Al Viro --- arch/m68k/include/asm/Kbuild | 1 + arch/m68k/include/asm/processor.h | 10 ---------- arch/m68k/include/asm/uaccess.h | 1 + arch/m68k/include/asm/uaccess_mm.h | 18 ------------------ arch/m68k/include/asm/uaccess_no.h | 19 ------------------- arch/m68k/kernel/signal.c | 2 +- arch/m68k/kernel/traps.c | 9 +++++++-- arch/m68k/mm/fault.c | 2 +- 8 files changed, 11 insertions(+), 51 deletions(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild index d4f9ccb..82005d2 100644 --- a/arch/m68k/include/asm/Kbuild +++ b/arch/m68k/include/asm/Kbuild @@ -5,6 +5,7 @@ generic-y += device.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += futex.h generic-y += hw_irq.h generic-y += ioctl.h diff --git a/arch/m68k/include/asm/processor.h b/arch/m68k/include/asm/processor.h index f5f790c..77239e8 100644 --- a/arch/m68k/include/asm/processor.h +++ b/arch/m68k/include/asm/processor.h @@ -122,16 +122,6 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc, wrusp(usp); } -#ifdef CONFIG_MMU -extern int handle_kernel_fault(struct pt_regs *regs); -#else -static inline int handle_kernel_fault(struct pt_regs *regs) -{ - /* Any fault in kernel is fatal on non-mmu */ - return 0; -} -#endif - /* Forward declaration, a strange C thing */ struct task_struct; diff --git a/arch/m68k/include/asm/uaccess.h b/arch/m68k/include/asm/uaccess.h index 3fadc4a..67b3481 100644 --- a/arch/m68k/include/asm/uaccess.h +++ b/arch/m68k/include/asm/uaccess.h @@ -4,6 +4,7 @@ #include #endif +#include #ifdef CONFIG_CPU_HAS_NO_UNALIGNED #include #else diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 14054a4..23be968 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -31,24 +31,6 @@ static inline int access_ok(int type, const void __user *addr, #define MOVES "move" #endif -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. 
- */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - extern int __put_user_bad(void); extern int __get_user_bad(void); diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index e77ce66..3f7f54d 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -23,25 +23,6 @@ static inline int _access_ok(unsigned long addr, unsigned long size) } /* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - - -/* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. */ diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c index 093b7c4..6f945bb 100644 --- a/arch/m68k/kernel/signal.c +++ b/arch/m68k/kernel/signal.c @@ -88,7 +88,7 @@ static inline int frame_extra_sizes(int f) return frame_size_change[f]; } -int handle_kernel_fault(struct pt_regs *regs) +int fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *fixup; struct pt_regs *tregs; diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c index a926d2c..c1cc4e9 100644 --- a/arch/m68k/kernel/traps.c +++ b/arch/m68k/kernel/traps.c @@ -1016,8 +1016,13 @@ asmlinkage void trap_c(struct frame *fp) /* traced a trapping instruction on a 68020/30, * real exception will be executed afterwards. 
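
/*
 * The __suffix##n pasting in the m68k patch below needs the standard
 * two-level macro expansion so the size argument is expanded before it
 * is pasted.  A compilable illustration of the same idiom, using
 * strings where the kernel macro produces asm operand-size suffixes:
 */
#include <stdio.h>

#define TOY_SUFFIX0 ""
#define TOY_SUFFIX1 "b"		/* byte */
#define TOY_SUFFIX2 "w"		/* word */
#define TOY_SUFFIX4 "l"		/* long */

#define TOY_PASTE(n)	TOY_SUFFIX##n	/* n is NOT expanded next to ## */
#define TOY_LOOKUP(n)	TOY_PASTE(n)	/* extra level expands n first */

#define CHUNK 4

int main(void)
{
	/* CHUNK -> 4 -> TOY_SUFFIX4 -> "l"; prints "l" */
	printf("%s\n", TOY_LOOKUP(CHUNK));
	return 0;
}
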
*/ - } else if (!handle_kernel_fault(&fp->ptregs)) - bad_super_trap(fp); + return; + } +#ifdef CONFIG_MMU + if (fixup_exception(&fp->ptregs)) + return; +#endif + bad_super_trap(fp); return; } diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c index bd66a0b..2795e4c 100644 --- a/arch/m68k/mm/fault.c +++ b/arch/m68k/mm/fault.c @@ -32,7 +32,7 @@ int send_fault_sig(struct pt_regs *regs) force_sig_info(siginfo.si_signo, &siginfo, current); } else { - if (handle_kernel_fault(regs)) + if (fixup_exception(regs)) return -1; //if (siginfo.si_signo == SIGBUS) -- cgit v1.1 From 7cefa5a05dbda1f0bbbd98e9d2861b09a35cc6ea Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 00:55:24 -0400 Subject: m68k: get rid of zeroing Signed-off-by: Al Viro --- arch/m68k/include/asm/uaccess_mm.h | 57 ++++++++++++++++++++++++++------------ arch/m68k/lib/uaccess.c | 12 ++------ 2 files changed, 43 insertions(+), 26 deletions(-) (limited to 'arch') diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 23be968..931bbba 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -179,39 +179,55 @@ asm volatile ("\n" \ unsigned long __generic_copy_from_user(void *to, const void __user *from, unsigned long n); unsigned long __generic_copy_to_user(void __user *to, const void *from, unsigned long n); -#define __constant_copy_from_user_asm(res, to, from, tmp, n, s1, s2, s3)\ +#define __suffix0 +#define __suffix1 b +#define __suffix2 w +#define __suffix4 l + +#define ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ asm volatile ("\n" \ "1: "MOVES"."#s1" (%2)+,%3\n" \ " move."#s1" %3,(%1)+\n" \ + " .ifnc \""#s2"\",\"\"\n" \ "2: "MOVES"."#s2" (%2)+,%3\n" \ " move."#s2" %3,(%1)+\n" \ " .ifnc \""#s3"\",\"\"\n" \ "3: "MOVES"."#s3" (%2)+,%3\n" \ " move."#s3" %3,(%1)+\n" \ " .endif\n" \ + " .endif\n" \ "4:\n" \ " .section __ex_table,\"a\"\n" \ " .align 4\n" \ " .long 1b,10f\n" \ + " .ifnc \""#s2"\",\"\"\n" \ " .long 2b,20f\n" \ " .ifnc \""#s3"\",\"\"\n" \ " .long 3b,30f\n" \ " .endif\n" \ + " .endif\n" \ " .previous\n" \ "\n" \ " .section .fixup,\"ax\"\n" \ " .even\n" \ - "10: clr."#s1" (%1)+\n" \ - "20: clr."#s2" (%1)+\n" \ + "10: addq.l #"#n1",%0\n" \ + " .ifnc \""#s2"\",\"\"\n" \ + "20: addq.l #"#n2",%0\n" \ " .ifnc \""#s3"\",\"\"\n" \ - "30: clr."#s3" (%1)+\n" \ + "30: addq.l #"#n3",%0\n" \ + " .endif\n" \ " .endif\n" \ - " moveq.l #"#n",%0\n" \ " jra 4b\n" \ " .previous\n" \ : "+d" (res), "+&a" (to), "+a" (from), "=&d" (tmp) \ : : "memory") +#define ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3)\ + ____constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, s1, s2, s3) +#define __constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3) \ + ___constant_copy_from_user_asm(res, to, from, tmp, n1, n2, n3, \ + __suffix##n1, __suffix##n2, __suffix##n3) + static __always_inline unsigned long __constant_copy_from_user(void *to, const void __user *from, unsigned long n) { @@ -219,37 +235,37 @@ __constant_copy_from_user(void *to, const void __user *from, unsigned long n) switch (n) { case 1: - __get_user_asm(res, *(u8 *)to, (u8 __user *)from, u8, b, d, 1); + __constant_copy_from_user_asm(res, to, from, tmp, 1, 0, 0); break; case 2: - __get_user_asm(res, *(u16 *)to, (u16 __user *)from, u16, w, r, 2); + __constant_copy_from_user_asm(res, to, from, tmp, 2, 0, 0); break; case 3: - __constant_copy_from_user_asm(res, to, from, tmp, 3, w, b,); + __constant_copy_from_user_asm(res, to, from, tmp, 2, 1, 
0); break; case 4: - __get_user_asm(res, *(u32 *)to, (u32 __user *)from, u32, l, r, 4); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 0, 0); break; case 5: - __constant_copy_from_user_asm(res, to, from, tmp, 5, l, b,); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 1, 0); break; case 6: - __constant_copy_from_user_asm(res, to, from, tmp, 6, l, w,); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 0); break; case 7: - __constant_copy_from_user_asm(res, to, from, tmp, 7, l, w, b); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 2, 1); break; case 8: - __constant_copy_from_user_asm(res, to, from, tmp, 8, l, l,); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 0); break; case 9: - __constant_copy_from_user_asm(res, to, from, tmp, 9, l, l, b); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 1); break; case 10: - __constant_copy_from_user_asm(res, to, from, tmp, 10, l, l, w); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 2); break; case 12: - __constant_copy_from_user_asm(res, to, from, tmp, 12, l, l, l); + __constant_copy_from_user_asm(res, to, from, tmp, 4, 4, 4); break; default: /* we limit the inlined version to 3 moves */ @@ -353,7 +369,14 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) #define __copy_to_user_inatomic __copy_to_user #define __copy_from_user_inatomic __copy_from_user -#define copy_from_user(to, from, n) __copy_from_user(to, from, n) +static inline unsigned long +copy_from_user(void *to, const void __user *from, unsigned long n) +{ + unsigned long res = __copy_from_user_inatomic(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; +} #define copy_to_user(to, from, n) __copy_to_user(to, from, n) #define user_addr_max() \ diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c index a76b73a..7646e46 100644 --- a/arch/m68k/lib/uaccess.c +++ b/arch/m68k/lib/uaccess.c @@ -30,19 +30,13 @@ unsigned long __generic_copy_from_user(void *to, const void __user *from, "6:\n" " .section .fixup,\"ax\"\n" " .even\n" - "10: move.l %0,%3\n" - "7: clr.l (%2)+\n" - " subq.l #1,%3\n" - " jne 7b\n" - " lsl.l #2,%0\n" + "10: lsl.l #2,%0\n" " btst #1,%5\n" " jeq 8f\n" - "30: clr.w (%2)+\n" - " addq.l #2,%0\n" + "30: addq.l #2,%0\n" "8: btst #0,%5\n" " jeq 6b\n" - "50: clr.b (%2)+\n" - " addq.l #1,%0\n" + "50: addq.l #1,%0\n" " jra 6b\n" " .previous\n" "\n" -- cgit v1.1 From 29be02eb6fef5f26332eb3e13ead868cff0910af Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:50:56 -0400 Subject: m68k: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/m68k/Kconfig | 1 + arch/m68k/include/asm/uaccess_mm.h | 31 +++++++++++++------------------ arch/m68k/include/asm/uaccess_no.h | 20 ++++++++++++++------ 3 files changed, 28 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig index d140206..7d34575 100644 --- a/arch/m68k/Kconfig +++ b/arch/m68k/Kconfig @@ -22,6 +22,7 @@ config M68K select MODULES_USE_ELF_RELA select OLD_SIGSUSPEND3 select OLD_SIGACTION + select ARCH_HAS_RAW_COPY_USER config RWSEM_GENERIC_SPINLOCK bool diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h index 931bbba..ef856ff 100644 --- a/arch/m68k/include/asm/uaccess_mm.h +++ b/arch/m68k/include/asm/uaccess_mm.h @@ -356,28 +356,23 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n) return res; } -#define __copy_from_user(to, from, n) \ -(__builtin_constant_p(n) ? 
\ - __constant_copy_from_user(to, from, n) : \ - __generic_copy_from_user(to, from, n)) - -#define __copy_to_user(to, from, n) \ -(__builtin_constant_p(n) ? \ - __constant_copy_to_user(to, from, n) : \ - __generic_copy_to_user(to, from, n)) - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (__builtin_constant_p(n)) + return __constant_copy_from_user(to, from, n); + return __generic_copy_from_user(to, from, n); +} static inline unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - unsigned long res = __copy_from_user_inatomic(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; + if (__builtin_constant_p(n)) + return __constant_copy_to_user(to, from, n); + return __generic_copy_to_user(to, from, n); } -#define copy_to_user(to, from, n) __copy_to_user(to, from, n) +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #define user_addr_max() \ (uaccess_kernel() ? ~0UL : TASK_SIZE) diff --git a/arch/m68k/include/asm/uaccess_no.h b/arch/m68k/include/asm/uaccess_no.h index 3f7f54d..e482c38 100644 --- a/arch/m68k/include/asm/uaccess_no.h +++ b/arch/m68k/include/asm/uaccess_no.h @@ -101,13 +101,21 @@ extern int __get_user_bad(void); : "=d" (x) \ : "m" (*__ptr(ptr))) -#define copy_from_user(to, from, n) (memcpy(to, from, n), 0) -#define copy_to_user(to, from, n) (memcpy(to, from, n), 0) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + memcpy(to, (__force const void *)from, n); + return 0; +} -#define __copy_from_user(to, from, n) copy_from_user(to, from, n) -#define __copy_to_user(to, from, n) copy_to_user(to, from, n) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + memcpy((__force void *)to, from, n); + return 0; +} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * Copy a null terminated string from userspace. -- cgit v1.1 From 8b9a7e569569f28d21e5628d5e1f8efe089474f7 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: metag: switch to generic extable.h Signed-off-by: Al Viro --- arch/metag/include/asm/Kbuild | 1 + arch/metag/include/asm/uaccess.h | 18 +----------------- arch/mn10300/include/asm/Kbuild | 1 + 3 files changed, 3 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/Kbuild b/arch/metag/include/asm/Kbuild index f9b9df5..8f94055 100644 --- a/arch/metag/include/asm/Kbuild +++ b/arch/metag/include/asm/Kbuild @@ -8,6 +8,7 @@ generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += futex.h diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 83de755..92ba79f 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -52,23 +52,7 @@ static inline int verify_area(int type, const void *addr, unsigned long size) return access_ok(type, addr, size) ? 
0 : -EFAULT; } -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ -struct exception_table_entry { - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); +#include /* * These are the main single-value transfer routines. They automatically diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 97f64c7..ed810e7 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h +generic-y += extable.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h -- cgit v1.1 From 2c3054176e2ef3f49be9ec8b6fd06d6a47156ff5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 28 Dec 2016 01:26:29 -0500 Subject: metag: kill verify_area() Deprecated in 2.6.12, killed in 2.6.13. Time to end that depravity, let's bury the body... Signed-off-by: Al Viro --- arch/metag/include/asm/uaccess.h | 5 ----- 1 file changed, 5 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 92ba79f..2613d91 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -47,11 +47,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size) #define access_ok(type, addr, size) __access_ok((unsigned long)(addr), \ (unsigned long)(size)) -static inline int verify_area(int type, const void *addr, unsigned long size) -{ - return access_ok(type, addr, size) ? 0 : -EFAULT; -} - #include /* -- cgit v1.1 From 730132e7e52210cbbfa95af67f5e23a63199c13d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:34:44 -0500 Subject: microblaze: switch to generic extable.h Signed-off-by: Al Viro --- arch/microblaze/include/asm/Kbuild | 1 + arch/microblaze/include/asm/uaccess.h | 17 +---------------- 2 files changed, 2 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/include/asm/Kbuild b/arch/microblaze/include/asm/Kbuild index 1732ec1..56830ff 100644 --- a/arch/microblaze/include/asm/Kbuild +++ b/arch/microblaze/include/asm/Kbuild @@ -3,6 +3,7 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += device.h generic-y += exec.h +generic-y += extable.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index b132cd3..2c39375 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -17,6 +17,7 @@ #include #include #include +#include #include /* @@ -47,22 +48,6 @@ # define segment_eq(a, b) ((a).seg == (b).seg) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. 
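
/*
 * verify_area() was only ever an error-code wrapper around access_ok(),
 * which is why the metag patch above can delete it outright.  The two
 * calling conventions in a toy model (the real access_ok() also takes a
 * VERIFY_READ/VERIFY_WRITE type argument):
 */
#define TOY_EFAULT 14	/* numeric value of EFAULT, for illustration */

static int toy_access_ok(unsigned long addr, unsigned long size)
{
	(void)addr;
	(void)size;
	return 1;	/* model: accept every range */
}

/* the deprecated spelling: same check, 0 / -EFAULT return convention */
static int toy_verify_area(unsigned long addr, unsigned long size)
{
	return toy_access_ok(addr, size) ? 0 : -TOY_EFAULT;
}
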
- * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ -struct exception_table_entry { - unsigned long insn, fixup; -}; - #ifndef CONFIG_MMU /* Check against bounds of physical memory */ -- cgit v1.1 From 6c03905a63165921411fbe91c167186b13879c8d Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: mn10300: switch to generic extable.h Signed-off-by: Al Viro --- arch/mn10300/include/asm/Kbuild | 1 + arch/mn10300/include/asm/uaccess.h | 21 +-------------------- 2 files changed, 2 insertions(+), 20 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/include/asm/Kbuild b/arch/mn10300/include/asm/Kbuild index 97f64c7..ed810e7 100644 --- a/arch/mn10300/include/asm/Kbuild +++ b/arch/mn10300/include/asm/Kbuild @@ -2,6 +2,7 @@ generic-y += barrier.h generic-y += clkdev.h generic-y += exec.h +generic-y += extable.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 2da7b0f..2eeaa8c 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -66,26 +66,7 @@ static inline int ___range_ok(unsigned long addr, unsigned int size) #define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0) #define __access_ok(addr, size) (__range_ok((addr), (size)) == 0) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - -/* Returns 0 if exception not found and fixup otherwise. 
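
/*
 * Hypothetical model of the ___range_ok() check visible in the mn10300
 * hunk above: a user-range test must reject addr + size overflow as
 * well as ranges beyond the limit, or a huge size could wrap back
 * "into" range.
 */
static int toy_range_ok(unsigned long addr, unsigned long size,
			unsigned long limit)
{
	if (addr > limit)
		return 0;
	if (size > limit - addr)	/* overflow-safe addr + size > limit */
		return 0;
	return 1;
}
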
*/ -extern int fixup_exception(struct pt_regs *regs); +#include #define put_user(x, ptr) __put_user_check((x), (ptr), sizeof(*(ptr))) #define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) -- cgit v1.1 From 4df3715efe5ed1e77344c6d5dcbfd75017a2f8b5 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 20:45:41 -0400 Subject: mn10300: get rid of zeroing Signed-off-by: Al Viro --- arch/mn10300/include/asm/uaccess.h | 150 ++++--------------------------------- arch/mn10300/lib/usercopy.c | 11 +-- 2 files changed, 20 insertions(+), 141 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 2eeaa8c..1c35be3 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -275,55 +275,19 @@ do { \ } \ } while (0) -#define __copy_user_zeroing(to, from, size) \ -do { \ - if (size) { \ - void *__to = to; \ - const void *__from = from; \ - int w; \ - asm volatile( \ - "0: movbu (%0),%3;\n" \ - "1: movbu %3,(%1);\n" \ - " inc %0;\n" \ - " inc %1;\n" \ - " add -1,%2;\n" \ - " bne 0b;\n" \ - "2:\n" \ - " .section .fixup,\"ax\"\n" \ - "3:\n" \ - " mov %2,%0\n" \ - " clr %3\n" \ - "4: movbu %3,(%1);\n" \ - " inc %1;\n" \ - " add -1,%2;\n" \ - " bne 4b;\n" \ - " mov %0,%2\n" \ - " jmp 2b\n" \ - " .previous\n" \ - " .section __ex_table,\"a\"\n" \ - " .balign 4\n" \ - " .long 0b,3b\n" \ - " .long 1b,3b\n" \ - " .previous\n" \ - : "=a"(__from), "=a"(__to), "=r"(size), "=&r"(w)\ - : "0"(__from), "1"(__to), "2"(size) \ - : "cc", "memory"); \ - } \ -} while (0) - /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. */ static inline -unsigned long __generic_copy_from_user_nocheck(void *to, const void *from, +unsigned long __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) { - __copy_user_zeroing(to, from, n); + __copy_user(to, from, n); return n; } static inline -unsigned long __generic_copy_to_user_nocheck(void *to, const void *from, +unsigned long __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) { __copy_user(to, from, n); @@ -331,110 +295,24 @@ unsigned long __generic_copy_to_user_nocheck(void *to, const void *from, } -#if 0 -#error "don't use - these macros don't increment to & from pointers" -/* Optimize just a little bit when we know the size of the move. */ -#define __constant_copy_user(to, from, size) \ -do { \ - asm volatile( \ - " mov %0,a0;\n" \ - "0: movbu (%1),d3;\n" \ - "1: movbu d3,(%2);\n" \ - " add -1,a0;\n" \ - " bne 0b;\n" \ - "2:;" \ - ".section .fixup,\"ax\"\n" \ - "3: jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .balign 4\n" \ - " .long 0b,3b\n" \ - " .long 1b,3b\n" \ - ".previous" \ - : \ - : "d"(size), "d"(to), "d"(from) \ - : "d3", "a0"); \ -} while (0) - -/* Optimize just a little bit when we know the size of the move. 
*/ -#define __constant_copy_user_zeroing(to, from, size) \ -do { \ - asm volatile( \ - " mov %0,a0;\n" \ - "0: movbu (%1),d3;\n" \ - "1: movbu d3,(%2);\n" \ - " add -1,a0;\n" \ - " bne 0b;\n" \ - "2:;" \ - ".section .fixup,\"ax\"\n" \ - "3: jmp 2b\n" \ - ".previous\n" \ - ".section __ex_table,\"a\"\n" \ - " .balign 4\n" \ - " .long 0b,3b\n" \ - " .long 1b,3b\n" \ - ".previous" \ - : \ - : "d"(size), "d"(to), "d"(from) \ - : "d3", "a0"); \ -} while (0) - -static inline -unsigned long __constant_copy_to_user(void *to, const void *from, - unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - __constant_copy_user(to, from, n); - return n; -} - -static inline -unsigned long __constant_copy_from_user(void *to, const void *from, - unsigned long n) -{ - if (access_ok(VERIFY_READ, from, n)) - __constant_copy_user_zeroing(to, from, n); - return n; -} +extern unsigned long __generic_copy_to_user(void __user *, const void *, + unsigned long); +extern unsigned long __generic_copy_from_user(void *, const void __user *, + unsigned long); -static inline -unsigned long __constant_copy_to_user_nocheck(void *to, const void *from, - unsigned long n) +static inline unsigned long __copy_to_user(void __user *to, const void *from, + unsigned long n) { - __constant_copy_user(to, from, n); - return n; + might_fault(); + return __copy_to_user_inatomic(to, from, n); } -static inline -unsigned long __constant_copy_from_user_nocheck(void *to, const void *from, +static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) { - __constant_copy_user_zeroing(to, from, n); - return n; + might_fault(); + return __copy_from_user_inatomic(to, from, n); } -#endif - -extern unsigned long __generic_copy_to_user(void __user *, const void *, - unsigned long); -extern unsigned long __generic_copy_from_user(void *, const void __user *, - unsigned long); - -#define __copy_to_user_inatomic(to, from, n) \ - __generic_copy_to_user_nocheck((to), (from), (n)) -#define __copy_from_user_inatomic(to, from, n) \ - __generic_copy_from_user_nocheck((to), (from), (n)) - -#define __copy_to_user(to, from, n) \ -({ \ - might_fault(); \ - __copy_to_user_inatomic((to), (from), (n)); \ -}) - -#define __copy_from_user(to, from, n) \ -({ \ - might_fault(); \ - __copy_from_user_inatomic((to), (from), (n)); \ -}) #define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c index ce8899e..b48af8d 100644 --- a/arch/mn10300/lib/usercopy.c +++ b/arch/mn10300/lib/usercopy.c @@ -22,11 +22,12 @@ __generic_copy_to_user(void *to, const void *from, unsigned long n) unsigned long __generic_copy_from_user(void *to, const void *from, unsigned long n) { - if (access_ok(VERIFY_READ, from, n)) - __copy_user_zeroing(to, from, n); - else - memset(to, 0, n); - return n; + unsigned long res = n; + if (access_ok(VERIFY_READ, from, res)) + __copy_user(to, from, res); + if (unlikely(res)) + memset(to + n - res, 0, res); + return res; } /* -- cgit v1.1 From 19dbf70c5acce91c6cacfd0983c2b062644fa7a4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 12:06:57 -0400 Subject: mn10300: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/mn10300/Kconfig | 1 + arch/mn10300/include/asm/uaccess.h | 37 ++++--------------------------------- arch/mn10300/kernel/mn10300_ksyms.c | 2 -- arch/mn10300/lib/usercopy.c | 19 ------------------- 4 files changed, 5 insertions(+), 54 deletions(-) (limited to 'arch') diff --git a/arch/mn10300/Kconfig 
b/arch/mn10300/Kconfig index 38e3494..a96f3dc 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig @@ -16,6 +16,7 @@ config MN10300 select OLD_SIGACTION select HAVE_DEBUG_STACKOVERFLOW select ARCH_NO_COHERENT_DMA_MMAP + select ARCH_HAS_RAW_COPY_USER config AM33_2 def_bool n diff --git a/arch/mn10300/include/asm/uaccess.h b/arch/mn10300/include/asm/uaccess.h index 1c35be3..c696647 100644 --- a/arch/mn10300/include/asm/uaccess.h +++ b/arch/mn10300/include/asm/uaccess.h @@ -275,49 +275,20 @@ do { \ } \ } while (0) -/* We let the __ versions of copy_from/to_user inline, because they're often - * used in fast paths and have only a small space overhead. - */ -static inline -unsigned long __copy_from_user_inatomic(void *to, const void __user *from, - unsigned long n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { __copy_user(to, from, n); return n; } -static inline -unsigned long __copy_to_user_inatomic(void __user *to, const void *from, - unsigned long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { __copy_user(to, from, n); return n; } - -extern unsigned long __generic_copy_to_user(void __user *, const void *, - unsigned long); -extern unsigned long __generic_copy_from_user(void *, const void __user *, - unsigned long); - -static inline unsigned long __copy_to_user(void __user *to, const void *from, - unsigned long n) -{ - might_fault(); - return __copy_to_user_inatomic(to, from, n); -} - -static inline unsigned long __copy_from_user(void *to, const void __user *from, - unsigned long n) -{ - might_fault(); - return __copy_from_user_inatomic(to, from, n); -} - - -#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) -#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) - extern long strncpy_from_user(char *dst, const char __user *src, long count); extern long __strncpy_from_user(char *dst, const char __user *src, long count); extern long strnlen_user(const char __user *str, long n); diff --git a/arch/mn10300/kernel/mn10300_ksyms.c b/arch/mn10300/kernel/mn10300_ksyms.c index ec6c4f8..5e9f919 100644 --- a/arch/mn10300/kernel/mn10300_ksyms.c +++ b/arch/mn10300/kernel/mn10300_ksyms.c @@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__generic_copy_from_user); -EXPORT_SYMBOL(__generic_copy_to_user); EXPORT_SYMBOL(strnlen_user); extern u64 __ashrdi3(u64, unsigned); diff --git a/arch/mn10300/lib/usercopy.c b/arch/mn10300/lib/usercopy.c index b48af8d..cece179 100644 --- a/arch/mn10300/lib/usercopy.c +++ b/arch/mn10300/lib/usercopy.c @@ -11,25 +11,6 @@ */ #include -unsigned long -__generic_copy_to_user(void *to, const void *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - __copy_user(to, from, n); - return n; -} - -unsigned long -__generic_copy_from_user(void *to, const void *from, unsigned long n) -{ - unsigned long res = n; - if (access_ok(VERIFY_READ, from, res)) - __copy_user(to, from, res); - if (unlikely(res)) - memset(to + n - res, 0, res); - return res; -} - /* * Copy a null terminated string from userspace. 
*/ -- cgit v1.1 From 981db65b5229edb5ab336e3e23758791a1aa0b03 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: nios2: switch to generic extable.h Signed-off-by: Al Viro --- arch/nios2/include/asm/Kbuild | 1 + arch/nios2/include/asm/uaccess.h | 19 +------------------ 2 files changed, 2 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/nios2/include/asm/Kbuild b/arch/nios2/include/asm/Kbuild index aaa3c21..87e70f2 100644 --- a/arch/nios2/include/asm/Kbuild +++ b/arch/nios2/include/asm/Kbuild @@ -13,6 +13,7 @@ generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index 198bbf1..ed06750 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -17,24 +17,7 @@ #include -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ -struct exception_table_entry { - unsigned long insn; - unsigned long fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); +#include /* * Segment stuff -- cgit v1.1 From de51d6cc2cf9daa9b5155faefb714f5b477cf160 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Mar 2017 13:08:32 -0400 Subject: nios2: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/nios2/Kconfig | 1 + arch/nios2/include/asm/uaccess.h | 31 ++++++------------------------- arch/nios2/mm/uaccess.c | 16 ++++++++-------- 3 files changed, 15 insertions(+), 33 deletions(-) (limited to 'arch') diff --git a/arch/nios2/Kconfig b/arch/nios2/Kconfig index 51a56c8..45b4727 100644 --- a/arch/nios2/Kconfig +++ b/arch/nios2/Kconfig @@ -16,6 +16,7 @@ config NIOS2 select SPARSE_IRQ select USB_ARCH_HAS_HCD if USB_SUPPORT select CPU_NO_EFFICIENT_FFS + select ARCH_HAS_RAW_COPY_USER config GENERIC_CSUM def_bool y diff --git a/arch/nios2/include/asm/uaccess.h b/arch/nios2/include/asm/uaccess.h index ed06750..727bd95 100644 --- a/arch/nios2/include/asm/uaccess.h +++ b/arch/nios2/include/asm/uaccess.h @@ -73,36 +73,17 @@ static inline unsigned long __must_check clear_user(void __user *to, return __clear_user(to, n); } -extern long __copy_from_user(void *to, const void __user *from, - unsigned long n); -extern long __copy_to_user(void __user *to, const void *from, unsigned long n); - -static inline long copy_from_user(void *to, const void __user *from, - unsigned long n) -{ - unsigned long res = n; - if (access_ok(VERIFY_READ, from, n)) - res = __copy_from_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} - -static inline long copy_to_user(void __user *to, const void *from, - unsigned long n) -{ - if (!access_ok(VERIFY_WRITE, to, n)) - return n; - return __copy_to_user(to, from, n); -} +extern unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n); +extern unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n); +#define 
INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER extern long strncpy_from_user(char *__to, const char __user *__from, long __len); extern long strnlen_user(const char __user *s, long n); -#define __copy_from_user_inatomic __copy_from_user -#define __copy_to_user_inatomic __copy_to_user - /* Optimized macros */ #define __get_user_asm(val, insn, addr, err) \ { \ diff --git a/arch/nios2/mm/uaccess.c b/arch/nios2/mm/uaccess.c index 7663e15..8049833 100644 --- a/arch/nios2/mm/uaccess.c +++ b/arch/nios2/mm/uaccess.c @@ -10,9 +10,9 @@ #include #include -asm(".global __copy_from_user\n" - " .type __copy_from_user, @function\n" - "__copy_from_user:\n" +asm(".global raw_copy_from_user\n" + " .type raw_copy_from_user, @function\n" + "raw_copy_from_user:\n" " movi r2,7\n" " mov r3,r4\n" " bge r2,r6,1f\n" @@ -65,12 +65,12 @@ asm(".global __copy_from_user\n" ".word 7b,13b\n" ".previous\n" ); -EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(raw_copy_from_user); asm( - " .global __copy_to_user\n" - " .type __copy_to_user, @function\n" - "__copy_to_user:\n" + " .global raw_copy_to_user\n" + " .type raw_copy_to_user, @function\n" + "raw_copy_to_user:\n" " movi r2,7\n" " mov r3,r4\n" " bge r2,r6,1f\n" @@ -127,7 +127,7 @@ asm( ".word 11b,13b\n" ".word 12b,13b\n" ".previous\n"); -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(raw_copy_to_user); long strncpy_from_user(char *__to, const char __user *__from, long __len) { -- cgit v1.1 From 6c36a4eb9aea4081a4286afaf23cf8cce30f3e6f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:34:44 -0500 Subject: openrisc: switch to generic extable.h Signed-off-by: Al Viro --- arch/openrisc/include/asm/Kbuild | 1 + arch/openrisc/include/asm/uaccess.h | 18 +----------------- 2 files changed, 2 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild index fb01873..df8e2f7 100644 --- a/arch/openrisc/include/asm/Kbuild +++ b/arch/openrisc/include/asm/Kbuild @@ -16,6 +16,7 @@ generic-y += dma.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += ftrace.h diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 227af1a..92236c9 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -25,6 +25,7 @@ #include #include #include +#include /* * The fs value determines whether argument validity checking should be @@ -61,23 +62,6 @@ __range_ok((unsigned long)addr, (unsigned long)size) /* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry { - unsigned long insn, fixup; -}; - -/* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. 
* -- cgit v1.1 From ab89866be3a02008880719f8928ff4fd960f99b6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Mar 2017 13:13:15 -0400 Subject: openrisc: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/openrisc/Kconfig | 1 + arch/openrisc/include/asm/uaccess.h | 28 ++++++---------------------- 2 files changed, 7 insertions(+), 22 deletions(-) (limited to 'arch') diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig index 1e95920..3895418 100644 --- a/arch/openrisc/Kconfig +++ b/arch/openrisc/Kconfig @@ -28,6 +28,7 @@ config OPENRISC select OR1K_PIC select CPU_NO_EFFICIENT_FFS if !OPENRISC_HAVE_INST_FF1 select NO_BOOTMEM + select ARCH_HAS_RAW_COPY_USER config MMU def_bool y diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h index 92236c9..96fc425 100644 --- a/arch/openrisc/include/asm/uaccess.h +++ b/arch/openrisc/include/asm/uaccess.h @@ -236,34 +236,18 @@ do { \ extern unsigned long __must_check __copy_tofrom_user(void *to, const void *from, unsigned long size); - -#define __copy_from_user(to, from, size) \ - __copy_tofrom_user(to, from, size) -#define __copy_to_user(to, from, size) \ - __copy_tofrom_user(to, from, size) - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - static inline unsigned long -copy_from_user(void *to, const void *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long size) { - unsigned long res = n; - - if (likely(access_ok(VERIFY_READ, from, n))) - res = __copy_tofrom_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; + return __copy_tofrom_user(to, (__force const void *)from, size); } - static inline unsigned long -copy_to_user(void *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long size) { - if (likely(access_ok(VERIFY_WRITE, to, n))) - n = __copy_tofrom_user(to, from, n); - return n; + return __copy_tofrom_user((__force void *)to, from, size); } +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER extern unsigned long __clear_user(void *addr, unsigned long size); -- cgit v1.1 From 527b5baeada26f9660722b4a498d3855d90f276c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 26 Dec 2016 00:50:06 -0500 Subject: powerpc: switch to extable.h Signed-off-by: Al Viro --- arch/powerpc/include/asm/extable.h | 29 +++++++++++++++++++++++++++ arch/powerpc/include/asm/uaccess.h | 26 +------------------------- 2 files changed, 30 insertions(+), 25 deletions(-) create mode 100644 arch/powerpc/include/asm/extable.h (limited to 'arch') diff --git a/arch/powerpc/include/asm/extable.h b/arch/powerpc/include/asm/extable.h new file mode 100644 index 0000000..07cc45c --- /dev/null +++ b/arch/powerpc/include/asm/extable.h @@ -0,0 +1,29 @@ +#ifndef _ARCH_POWERPC_EXTABLE_H +#define _ARCH_POWERPC_EXTABLE_H + +/* + * The exception table consists of pairs of relative addresses: the first is + * the address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out what + * to do. + * + * All the routines below use bits of fixup code that are out of line with the + * main instruction path. This means when everything is well, we don't even + * have to jump over them. Further, they do not intrude on our cache or tlb + * entries.
+ */ + +#define ARCH_HAS_RELATIVE_EXTABLE + +struct exception_table_entry { + int insn; + int fixup; +}; + +static inline unsigned long extable_fixup(const struct exception_table_entry *x) +{ + return (unsigned long)&x->fixup + x->fixup; +} + +#endif diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index 3904040..b19883f 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -5,6 +5,7 @@ #include #include #include +#include /* * The fs value determines whether argument validity checking should be @@ -56,31 +57,6 @@ __access_ok((__force unsigned long)(addr), (size), get_fs())) /* - * The exception table consists of pairs of relative addresses: the first is - * the address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out what - * to do. - * - * All the routines below use bits of fixup code that are out of line with the - * main instruction path. This means when everything is well, we don't even - * have to jump over them. Further, they do not intrude on our cache or tlb - * entries. - */ - -#define ARCH_HAS_RELATIVE_EXTABLE - -struct exception_table_entry { - int insn; - int fixup; -}; - -static inline unsigned long extable_fixup(const struct exception_table_entry *x) -{ - return (unsigned long)&x->fixup + x->fixup; -} - -/* * These are the main single-value transfer routines. They automatically * use the right size if we just have the right pointer type. * -- cgit v1.1 From e70f1d59be747a959c240bf2fe2ea9489b629496 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 26 Dec 2016 00:48:37 -0500 Subject: s390: switch to extable.h Signed-off-by: Al Viro --- arch/s390/include/asm/extable.h | 28 ++++++++++++++++++++++++++++ arch/s390/include/asm/uaccess.h | 26 +------------------------- 2 files changed, 29 insertions(+), 25 deletions(-) create mode 100644 arch/s390/include/asm/extable.h (limited to 'arch') diff --git a/arch/s390/include/asm/extable.h b/arch/s390/include/asm/extable.h new file mode 100644 index 0000000..16cfe2d --- /dev/null +++ b/arch/s390/include/asm/extable.h @@ -0,0 +1,28 @@ +#ifndef __S390_EXTABLE_H +#define __S390_EXTABLE_H +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. 
+ */ + +struct exception_table_entry +{ + int insn, fixup; +}; + +static inline unsigned long extable_fixup(const struct exception_table_entry *x) +{ + return (unsigned long)&x->fixup + x->fixup; +} + +#define ARCH_HAS_RELATIVE_EXTABLE + +#endif diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 7228ed8..29f5bf2 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -14,6 +14,7 @@ */ #include #include +#include /* @@ -59,31 +60,6 @@ static inline int __range_ok(unsigned long addr, unsigned long size) #define access_ok(type, addr, size) __access_ok(addr, size) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. - */ - -struct exception_table_entry -{ - int insn, fixup; -}; - -static inline unsigned long extable_fixup(const struct exception_table_entry *x) -{ - return (unsigned long)&x->fixup + x->fixup; -} - -#define ARCH_HAS_RELATIVE_EXTABLE - /** * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. -- cgit v1.1 From 271abdc367e204821e19d71ae2bbc444059c8496 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: score: switch to generic extable.h Signed-off-by: Al Viro --- arch/score/include/asm/Kbuild | 1 + arch/score/include/asm/extable.h | 11 ----------- 2 files changed, 1 insertion(+), 11 deletions(-) delete mode 100644 arch/score/include/asm/extable.h (limited to 'arch') diff --git a/arch/score/include/asm/Kbuild b/arch/score/include/asm/Kbuild index 926943a..e3a8d0f 100644 --- a/arch/score/include/asm/Kbuild +++ b/arch/score/include/asm/Kbuild @@ -4,6 +4,7 @@ header-y += generic-y += barrier.h generic-y += clkdev.h generic-y += current.h +generic-y += extable.h generic-y += irq_work.h generic-y += mcs_spinlock.h generic-y += mm-arch-hooks.h diff --git a/arch/score/include/asm/extable.h b/arch/score/include/asm/extable.h deleted file mode 100644 index c4423ccf..0000000 --- a/arch/score/include/asm/extable.h +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef _ASM_SCORE_EXTABLE_H -#define _ASM_SCORE_EXTABLE_H - -struct exception_table_entry { - unsigned long insn; - unsigned long fixup; -}; - -struct pt_regs; -extern int fixup_exception(struct pt_regs *regs); -#endif -- cgit v1.1 From 4b4554f6d562c15159b053fad4146ced2201bc4c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 14:50:40 -0500 Subject: score: it's "VERIFY_WRITE", not "VERFITY_WRITE"... ... and the only reason it worked is that access_ok() discards the first argument before parser even gets a chance of looking at it. Still, no point keeping that typo. 
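To see why the misspelled constant never broke the build: the first argument of access_ok() is discarded during macro expansion, so the undefined identifier never reaches the compiler proper. A minimal user-space sketch of the effect (SEG_LIMIT and the macro body are invented for illustration; score's real access_ok() likewise never expands 'type'):

	#define SEG_LIMIT 0x80000000UL
	#define access_ok(type, addr, size) \
		(((unsigned long)(addr) + (size)) <= SEG_LIMIT)	/* 'type' unused */

	static inline int demo(void *p, unsigned long n)
	{
		/* VERFITY_WRITE is nowhere defined, yet this compiles: the
		 * preprocessor drops the unused macro argument before the
		 * parser ever sees it. */
		return access_ok(VERFITY_WRITE, p, n);
	}
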
Signed-off-by: Al Viro --- arch/score/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 7a6c698..79125fa 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -347,7 +347,7 @@ static inline unsigned long copy_in_user(void *to, const void *from, unsigned long len) { if (access_ok(VERIFY_READ, from, len) && - access_ok(VERFITY_WRITE, to, len)) + access_ok(VERIFY_WRITE, to, len)) return __copy_tofrom_user(to, from, len); } -- cgit v1.1 From dc1425352325bf8a56f5d8ecd1258671808bcadd Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Mar 2017 13:26:57 -0400 Subject: score: switch to RAW_COPY_USER ... and kill copy_in_user() - it's not a biarch architecture, to start with Signed-off-by: Al Viro --- arch/score/Kconfig | 1 + arch/score/include/asm/uaccess.h | 54 +++++----------------------------------- 2 files changed, 7 insertions(+), 48 deletions(-) (limited to 'arch') diff --git a/arch/score/Kconfig b/arch/score/Kconfig index 507d631..4d241cf 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig @@ -15,6 +15,7 @@ config SCORE select MODULES_USE_ELF_REL select CLONE_BACKWARDS select CPU_NO_EFFICIENT_FFS + select ARCH_HAS_RAW_COPY_USER choice prompt "System type" diff --git a/arch/score/include/asm/uaccess.h b/arch/score/include/asm/uaccess.h index 79125fa..916e5db 100644 --- a/arch/score/include/asm/uaccess.h +++ b/arch/score/include/asm/uaccess.h @@ -295,61 +295,19 @@ extern void __put_user_unknown(void); extern int __copy_tofrom_user(void *to, const void *from, unsigned long len); static inline unsigned long -copy_from_user(void *to, const void *from, unsigned long len) +raw_copy_from_user(void *to, const void __user *from, unsigned long len) { - unsigned long res = len; - - if (likely(access_ok(VERIFY_READ, from, len))) - res = __copy_tofrom_user(to, from, len); - - if (unlikely(res)) - memset(to + (len - res), 0, res); - - return res; + return __copy_tofrom_user(to, (__force const void *)from, len); } static inline unsigned long -copy_to_user(void *to, const void *from, unsigned long len) +raw_copy_to_user(void __user *to, const void *from, unsigned long len) { - if (likely(access_ok(VERIFY_WRITE, to, len))) - len = __copy_tofrom_user(to, from, len); - - return len; + return __copy_tofrom_user((__force void *)to, from, len); } -static inline unsigned long -__copy_from_user(void *to, const void *from, unsigned long len) -{ - unsigned long left = __copy_tofrom_user(to, from, len); - if (unlikely(left)) - memset(to + (len - left), 0, left); - return left; -} - -#define __copy_to_user(to, from, len) \ - __copy_tofrom_user((to), (from), (len)) - -static inline unsigned long -__copy_to_user_inatomic(void *to, const void *from, unsigned long len) -{ - return __copy_to_user(to, from, len); -} - -static inline unsigned long -__copy_from_user_inatomic(void *to, const void *from, unsigned long len) -{ - return __copy_tofrom_user(to, from, len); -} - -#define __copy_in_user(to, from, len) __copy_tofrom_user(to, from, len) - -static inline unsigned long -copy_in_user(void *to, const void *from, unsigned long len) -{ - if (access_ok(VERIFY_READ, from, len) && - access_ok(VERIFY_WRITE, to, len)) - return __copy_tofrom_user(to, from, len); -} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * __clear_user: - Zero a block of memory in user space, with less checking. 
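For context, the contract behind ARCH_HAS_RAW_COPY_USER (a sketch modeled on the generic wrappers in linux/uaccess.h, not code from this patch): raw_copy_{from,to}_user() return the number of bytes left uncopied, and the generic copy_from_user() supplies the access_ok() check and zeroes the uncopied tail, which is why the arch helpers above no longer do either themselves:

	/* copy_from_user_sketch is a hypothetical name for illustration */
	static inline unsigned long
	copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;
		if (access_ok(VERIFY_READ, from, n))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero the uncopied tail */
		return res;
	}

Defining INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER, as done above, asks the generic code to provide these wrappers as inlines rather than as out-of-line functions.
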
-- cgit v1.1 From bcd541d9a2f43fbf08d2e909fb8247bed3dac750 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:49:42 -0500 Subject: sh: switch to extable.h Signed-off-by: Al Viro --- arch/sh/include/asm/extable.h | 10 ++++++++++ arch/sh/include/asm/uaccess.h | 23 +---------------------- 2 files changed, 11 insertions(+), 22 deletions(-) create mode 100644 arch/sh/include/asm/extable.h (limited to 'arch') diff --git a/arch/sh/include/asm/extable.h b/arch/sh/include/asm/extable.h new file mode 100644 index 0000000..df2ee2f --- /dev/null +++ b/arch/sh/include/asm/extable.h @@ -0,0 +1,10 @@ +#ifndef __ASM_SH_EXTABLE_H +#define __ASM_SH_EXTABLE_H + +#include + +#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU) +#define ARCH_HAS_SEARCH_EXTABLE +#endif + +#endif diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 89a28df..5cb0c54 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -2,6 +2,7 @@ #define __ASM_SH_UACCESS_H #include +#include #define __addr_ok(addr) \ ((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg) @@ -166,28 +167,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n) return __copy_size; } -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. 
- */ -struct exception_table_entry { - unsigned long insn, fixup; -}; - -#if defined(CONFIG_SUPERH64) && defined(CONFIG_MMU) -#define ARCH_HAS_SEARCH_EXTABLE -#endif - -int fixup_exception(struct pt_regs *regs); - extern void *set_exception_table_vec(unsigned int vec, void *handler); static inline void *set_exception_table_evt(unsigned int evt, void *handler) -- cgit v1.1 From f98f48ee7cd473a077f8500dbf3d0a1f9af73003 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 14:43:13 -0400 Subject: sh: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/sh/Kconfig | 1 + arch/sh/include/asm/uaccess.h | 36 ++++-------------------------------- 2 files changed, 5 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index ee08695..c689645 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig @@ -48,6 +48,7 @@ config SUPERH select HAVE_ARCH_AUDITSYSCALL select HAVE_FUTEX_CMPXCHG if FUTEX select HAVE_NMI + select ARCH_HAS_RAW_COPY_USER help The SuperH is a RISC processor targeted for use in embedded systems and consumer electronics; it was also used in the Sega Dreamcast diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h index 5cb0c54..2722b61 100644 --- a/arch/sh/include/asm/uaccess.h +++ b/arch/sh/include/asm/uaccess.h @@ -108,19 +108,18 @@ extern __must_check long strnlen_user(const char __user *str, long n); __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); static __always_inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { return __copy_user(to, (__force void *)from, n); } static __always_inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { return __copy_user((__force void *)to, from, n); } - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * Clear the area and return remaining number of bytes @@ -140,33 +139,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size); __cl_size; \ }) -static inline unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long __copy_from = (unsigned long) from; - __kernel_size_t __copy_size = (__kernel_size_t) n; - - if (__copy_size && __access_ok(__copy_from, __copy_size)) - __copy_size = __copy_user(to, from, __copy_size); - - if (unlikely(__copy_size)) - memset(to + (n - __copy_size), 0, __copy_size); - - return __copy_size; -} - -static inline unsigned long -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - unsigned long __copy_to = (unsigned long) to; - __kernel_size_t __copy_size = (__kernel_size_t) n; - - if (__copy_size && __access_ok(__copy_to, __copy_size)) - return __copy_user(to, from, __copy_size); - - return __copy_size; -} - extern void *set_exception_table_vec(unsigned int vec, void *handler); static inline void *set_exception_table_evt(unsigned int evt, void *handler) -- cgit v1.1 From 1333eb785614779b2000e547bd32aef85c22ca7a Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 20:27:33 -0500 Subject: sparc32: kill __ret_efault() it's unused on 32bit one as well... 
Signed-off-by: Al Viro --- arch/sparc/include/asm/uaccess_32.h | 2 -- arch/sparc/kernel/head_32.S | 7 ------- 2 files changed, 9 deletions(-) (limited to 'arch') diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index a59a1e8..9391084 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -71,8 +71,6 @@ struct exception_table_entry /* Returns 0 if exception not found and fixup otherwise. */ unsigned long search_extables_range(unsigned long addr, unsigned long *g2); -void __ret_efault(void); - /* Uh, these should become the main single-value transfer routines.. * They automatically use the right size if we just have the right * pointer type.. diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S index 7bb317b..7274e43 100644 --- a/arch/sparc/kernel/head_32.S +++ b/arch/sparc/kernel/head_32.S @@ -809,10 +809,3 @@ lvl14_save: .word 0 .word 0 .word t_irq14 - - .section ".fixup",#alloc,#execinstr - .globl __ret_efault -__ret_efault: - ret - restore %g0, -EFAULT, %o0 -EXPORT_SYMBOL(__ret_efault) -- cgit v1.1 From c0ea73f18c057b41801106cfd2ffaf9794681444 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 03:33:03 -0500 Subject: tile: switch to generic extable.h Signed-off-by: Al Viro --- arch/tile/include/asm/Kbuild | 1 + arch/tile/include/asm/uaccess.h | 19 +------------------ 2 files changed, 2 insertions(+), 18 deletions(-) (limited to 'arch') diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild index aa48b6e..24c44e9 100644 --- a/arch/tile/include/asm/Kbuild +++ b/arch/tile/include/asm/Kbuild @@ -7,6 +7,7 @@ generic-y += clkdev.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fb.h generic-y += fcntl.h generic-y += hw_irq.h diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index 14ea3d1..ef3a1fd 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -98,24 +98,7 @@ int __range_ok(unsigned long addr, unsigned long size); likely(__range_ok((unsigned long)(addr), (size)) == 0); \ }) -/* - * The exception table consists of pairs of addresses: the first is the - * address of an instruction that is allowed to fault, and the second is - * the address at which the program should continue. No registers are - * modified, so it is entirely up to the continuation code to figure out - * what to do. - * - * All the routines below use bits of fixup code that are out of line - * with the main instruction path. This means when everything is well, - * we don't even have to jump over them. Further, they do not intrude - * on our cache or tlb entries. 
- */ - -struct exception_table_entry { - unsigned long insn, fixup; -}; - -extern int fixup_exception(struct pt_regs *regs); +#include /* * This is a type: either unsigned long, if the argument fits into -- cgit v1.1 From 23504bae7f3edd1484c4d470362f2b12bcd298f9 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 14:27:36 -0400 Subject: tile: get rid of zeroing, switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/tile/Kconfig | 1 + arch/tile/include/asm/uaccess.h | 143 ++-------------------------------------- arch/tile/lib/exports.c | 7 +- arch/tile/lib/memcpy_32.S | 41 ++++-------- arch/tile/lib/memcpy_user_64.c | 15 +---- 5 files changed, 27 insertions(+), 180 deletions(-) (limited to 'arch') diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 4583c03..27808d8 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig @@ -33,6 +33,7 @@ config TILE select USER_STACKTRACE_SUPPORT select USE_PMC if PERF_EVENTS select VIRT_TO_BUS + select ARCH_HAS_RAW_COPY_USER config MMU def_bool y diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h index ef3a1fd..a803f6b 100644 --- a/arch/tile/include/asm/uaccess.h +++ b/arch/tile/include/asm/uaccess.h @@ -313,145 +313,16 @@ extern int __put_user_bad(void) ((x) = 0, -EFAULT); \ }) -/** - * __copy_to_user() - copy data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * An alternate version - __copy_to_user_inatomic() - is designed - * to be called from atomic context, typically bracketed by calls - * to pagefault_disable() and pagefault_enable(). - */ -extern unsigned long __must_check __copy_to_user_inatomic( - void __user *to, const void *from, unsigned long n); - -static inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - return __copy_to_user_inatomic(to, from, n); -} - -static inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; -} - -/** - * __copy_from_user() - copy data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - * - * An alternate version - __copy_from_user_inatomic() - is designed - * to be called from atomic context, typically bracketed by calls - * to pagefault_disable() and pagefault_enable(). This version - * does *NOT* pad with zeros. 
- */ -extern unsigned long __must_check __copy_from_user_inatomic( - void *to, const void __user *from, unsigned long n); -extern unsigned long __must_check __copy_from_user_zeroing( - void *to, const void __user *from, unsigned long n); - -static inline unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n) -{ - might_fault(); - return __copy_from_user_zeroing(to, from, n); -} - -static inline unsigned long __must_check -_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; -} - -extern void __compiletime_error("usercopy buffer size is too small") -__bad_copy_user(void); - -static inline void copy_user_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - -static inline unsigned long __must_check copy_from_user(void *to, - const void __user *from, - unsigned long n) -{ - int sz = __compiletime_object_size(to); - - if (likely(sz == -1 || sz >= n)) - n = _copy_from_user(to, from, n); - else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - - return n; -} +extern unsigned long __must_check +raw_copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long __must_check +raw_copy_from_user(void *to, const void __user *from, unsigned long n); +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #ifdef __tilegx__ -/** - * __copy_in_user() - copy data within user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to user space. Caller must check - * the specified blocks with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. 
- */ -extern unsigned long __copy_in_user_inatomic( +extern unsigned long raw_copy_in_user( void __user *to, const void __user *from, unsigned long n); - -static inline unsigned long __must_check -__copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - might_fault(); - return __copy_in_user_inatomic(to, from, n); -} - -static inline unsigned long __must_check -copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n)) - n = __copy_in_user(to, from, n); - return n; -} #endif diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index c5369fe..ecce8e1 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -38,11 +38,10 @@ EXPORT_SYMBOL(__mcount); /* arch/tile/lib/, various memcpy files */ EXPORT_SYMBOL(memcpy); -EXPORT_SYMBOL(__copy_to_user_inatomic); -EXPORT_SYMBOL(__copy_from_user_inatomic); -EXPORT_SYMBOL(__copy_from_user_zeroing); +EXPORT_SYMBOL(raw_copy_to_user); +EXPORT_SYMBOL(raw_copy_from_user); #ifdef __tilegx__ -EXPORT_SYMBOL(__copy_in_user_inatomic); +EXPORT_SYMBOL(raw_copy_in_user); #endif /* hypervisor glue */ diff --git a/arch/tile/lib/memcpy_32.S b/arch/tile/lib/memcpy_32.S index a2771ae..270f126 100644 --- a/arch/tile/lib/memcpy_32.S +++ b/arch/tile/lib/memcpy_32.S @@ -24,7 +24,6 @@ #define IS_MEMCPY 0 #define IS_COPY_FROM_USER 1 -#define IS_COPY_FROM_USER_ZEROING 2 #define IS_COPY_TO_USER -1 .section .text.memcpy_common, "ax" @@ -42,40 +41,31 @@ 9 -/* __copy_from_user_inatomic takes the kernel target address in r0, +/* raw_copy_from_user takes the kernel target address in r0, * the user source in r1, and the bytes to copy in r2. * It returns the number of uncopiable bytes (hopefully zero) in r0. */ -ENTRY(__copy_from_user_inatomic) -.type __copy_from_user_inatomic, @function - FEEDBACK_ENTER_EXPLICIT(__copy_from_user_inatomic, \ +ENTRY(raw_copy_from_user) +.type raw_copy_from_user, @function + FEEDBACK_ENTER_EXPLICIT(raw_copy_from_user, \ .text.memcpy_common, \ - .Lend_memcpy_common - __copy_from_user_inatomic) + .Lend_memcpy_common - raw_copy_from_user) { movei r29, IS_COPY_FROM_USER; j memcpy_common } - .size __copy_from_user_inatomic, . - __copy_from_user_inatomic + .size raw_copy_from_user, . - raw_copy_from_user -/* __copy_from_user_zeroing is like __copy_from_user_inatomic, but - * any uncopiable bytes are zeroed in the target. - */ -ENTRY(__copy_from_user_zeroing) -.type __copy_from_user_zeroing, @function - FEEDBACK_REENTER(__copy_from_user_inatomic) - { movei r29, IS_COPY_FROM_USER_ZEROING; j memcpy_common } - .size __copy_from_user_zeroing, . - __copy_from_user_zeroing - -/* __copy_to_user_inatomic takes the user target address in r0, +/* raw_copy_to_user takes the user target address in r0, * the kernel source in r1, and the bytes to copy in r2. * It returns the number of uncopiable bytes (hopefully zero) in r0. */ -ENTRY(__copy_to_user_inatomic) -.type __copy_to_user_inatomic, @function - FEEDBACK_REENTER(__copy_from_user_inatomic) +ENTRY(raw_copy_to_user) +.type raw_copy_to_user, @function + FEEDBACK_REENTER(raw_copy_from_user) { movei r29, IS_COPY_TO_USER; j memcpy_common } - .size __copy_to_user_inatomic, . - __copy_to_user_inatomic + .size raw_copy_to_user, . - raw_copy_to_user ENTRY(memcpy) .type memcpy, @function - FEEDBACK_REENTER(__copy_from_user_inatomic) + FEEDBACK_REENTER(raw_copy_from_user) { movei r29, IS_MEMCPY } .size memcpy, . 
- memcpy /* Fall through */ @@ -520,12 +510,7 @@ copy_from_user_fixup_loop: { bnzt r2, copy_from_user_fixup_loop } .Lcopy_from_user_fixup_zero_remainder: - { bbs r29, 2f } /* low bit set means IS_COPY_FROM_USER */ - /* byte-at-a-time loop faulted, so zero the rest. */ - { move r3, r2; bz r2, 2f /* should be impossible, but handle it. */ } -1: { sb r0, zero; addi r0, r0, 1; addi r3, r3, -1 } - { bnzt r3, 1b } -2: move lr, r27 + move lr, r27 { move r0, r2; jrp lr } copy_to_user_fixup_loop: diff --git a/arch/tile/lib/memcpy_user_64.c b/arch/tile/lib/memcpy_user_64.c index 97bbb60..a3fea9f 100644 --- a/arch/tile/lib/memcpy_user_64.c +++ b/arch/tile/lib/memcpy_user_64.c @@ -51,7 +51,7 @@ __v; \ }) -#define USERCOPY_FUNC __copy_to_user_inatomic +#define USERCOPY_FUNC raw_copy_to_user #define ST1(p, v) _ST((p), st1, (v)) #define ST2(p, v) _ST((p), st2, (v)) #define ST4(p, v) _ST((p), st4, (v)) @@ -62,7 +62,7 @@ #define LD8 LD #include "memcpy_64.c" -#define USERCOPY_FUNC __copy_from_user_inatomic +#define USERCOPY_FUNC raw_copy_from_user #define ST1 ST #define ST2 ST #define ST4 ST @@ -73,7 +73,7 @@ #define LD8(p) _LD((p), ld) #include "memcpy_64.c" -#define USERCOPY_FUNC __copy_in_user_inatomic +#define USERCOPY_FUNC raw_copy_in_user #define ST1(p, v) _ST((p), st1, (v)) #define ST2(p, v) _ST((p), st2, (v)) #define ST4(p, v) _ST((p), st4, (v)) @@ -83,12 +83,3 @@ #define LD4(p) _LD((p), ld4u) #define LD8(p) _LD((p), ld) #include "memcpy_64.c" - -unsigned long __copy_from_user_zeroing(void *to, const void __user *from, - unsigned long n) -{ - unsigned long rc = __copy_from_user_inatomic(to, from, n); - if (unlikely(rc)) - memset(to + n - rc, 0, rc); - return rc; -} -- cgit v1.1 From a668ce3a001fe0564deb6357129bfd81f757743b Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:33:09 -0400 Subject: um: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/um/Kconfig.common | 1 + arch/um/include/asm/uaccess.h | 8 ++++---- arch/um/kernel/skas/uaccess.c | 8 ++++---- 3 files changed, 9 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/um/Kconfig.common b/arch/um/Kconfig.common index fd44385..e13f763 100644 --- a/arch/um/Kconfig.common +++ b/arch/um/Kconfig.common @@ -13,6 +13,7 @@ config UML select GENERIC_CLOCKEVENTS select HAVE_GCC_PLUGINS select TTY # Needed for line.c + select ARCH_HAS_RAW_COPY_USER config MMU bool diff --git a/arch/um/include/asm/uaccess.h b/arch/um/include/asm/uaccess.h index 9396c8a..cc00fc5 100644 --- a/arch/um/include/asm/uaccess.h +++ b/arch/um/include/asm/uaccess.h @@ -21,8 +21,8 @@ #define __addr_range_nowrap(addr, size) \ ((unsigned long) (addr) <= ((unsigned long) (addr) + (size))) -extern long __copy_from_user(void *to, const void __user *from, unsigned long n); -extern long __copy_to_user(void __user *to, const void *from, unsigned long n); +extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); +extern unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern long __strncpy_from_user(char *dst, const char __user *src, long count); extern long __strnlen_user(const void __user *str, long len); extern unsigned long __clear_user(void __user *mem, unsigned long len); @@ -33,8 +33,8 @@ static inline int __access_ok(unsigned long addr, unsigned long size); #define __clear_user __clear_user #define __strnlen_user __strnlen_user #define __strncpy_from_user __strncpy_from_user -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user 
+#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #include diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 22c9f79..d450797 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c @@ -139,7 +139,7 @@ static int copy_chunk_from_user(unsigned long from, int len, void *arg) return 0; } -long __copy_from_user(void *to, const void __user *from, unsigned long n) +unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { if (uaccess_kernel()) { memcpy(to, (__force void*)from, n); @@ -148,7 +148,7 @@ long __copy_from_user(void *to, const void __user *from, unsigned long n) return buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to); } -EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(raw_copy_from_user); static int copy_chunk_to_user(unsigned long to, int len, void *arg) { @@ -159,7 +159,7 @@ static int copy_chunk_to_user(unsigned long to, int len, void *arg) return 0; } -long __copy_to_user(void __user *to, const void *from, unsigned long n) +unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { if (uaccess_kernel()) { memcpy((__force void *) to, from, n); @@ -168,7 +168,7 @@ long __copy_to_user(void __user *to, const void *from, unsigned long n) return buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from); } -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(raw_copy_to_user); static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) { -- cgit v1.1 From 122b05ddf506e637336dcf64b5a129825f7bf6d4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2017 18:36:22 -0400 Subject: amd64: get rid of zeroing Signed-off-by: Al Viro --- arch/x86/include/asm/uaccess.h | 12 ++++++++++++ arch/x86/include/asm/uaccess_64.h | 16 ++++++++-------- arch/x86/lib/usercopy.c | 9 +++++---- arch/x86/lib/usercopy_64.c | 4 ---- 4 files changed, 25 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 0522d88..26410af 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -379,6 +379,18 @@ do { \ : "=r" (err), ltype(x) \ : "m" (__m(addr)), "i" (errret), "0" (err)) +#define __get_user_asm_nozero(x, addr, err, itype, rtype, ltype, errret) \ + asm volatile("\n" \ + "1: mov"itype" %2,%"rtype"1\n" \ + "2:\n" \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r" (err), ltype(x) \ + : "m" (__m(addr)), "i" (errret), "0" (err)) + /* * This doesn't do __uaccess_begin/end - the exception handling * around it must do that. 
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 8ddadd93..142f0f1 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -59,44 +59,44 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) switch (size) { case 1: __uaccess_begin(); - __get_user_asm(*(u8 *)dst, (u8 __user *)src, + __get_user_asm_nozero(*(u8 *)dst, (u8 __user *)src, ret, "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: __uaccess_begin(); - __get_user_asm(*(u16 *)dst, (u16 __user *)src, + __get_user_asm_nozero(*(u16 *)dst, (u16 __user *)src, ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: __uaccess_begin(); - __get_user_asm(*(u32 *)dst, (u32 __user *)src, + __get_user_asm_nozero(*(u32 *)dst, (u32 __user *)src, ret, "l", "k", "=r", 4); __uaccess_end(); return ret; case 8: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 8); __uaccess_end(); return ret; case 10: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 10); if (likely(!ret)) - __get_user_asm(*(u16 *)(8 + (char *)dst), + __get_user_asm_nozero(*(u16 *)(8 + (char *)dst), (u16 __user *)(8 + (char __user *)src), ret, "w", "w", "=r", 2); __uaccess_end(); return ret; case 16: __uaccess_begin(); - __get_user_asm(*(u64 *)dst, (u64 __user *)src, + __get_user_asm_nozero(*(u64 *)dst, (u64 __user *)src, ret, "q", "", "=r", 16); if (likely(!ret)) - __get_user_asm(*(u64 *)(8 + (char *)dst), + __get_user_asm_nozero(*(u64 *)(8 + (char *)dst), (u64 __user *)(8 + (char __user *)src), ret, "q", "", "=r", 8); __uaccess_end(); diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index c074799..a851f3d 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -76,10 +76,11 @@ EXPORT_SYMBOL(_copy_to_user); */ unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) { + unsigned long res = n; if (access_ok(VERIFY_READ, from, n)) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; + res = __copy_from_user_inatomic(to, from, n); + if (unlikely(res)) + memset(to + n - res, 0, res); + return res; } EXPORT_SYMBOL(_copy_from_user); diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 6987358..6c8b6a6 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -80,9 +80,5 @@ copy_user_handle_tail(char *to, char *from, unsigned len) break; } clac(); - - /* If the destination is a kernel buffer, we always clear the end */ - if (!__addr_ok(to)) - memset(to, 0, len); return len; } -- cgit v1.1 From 2ef59f28567f8a5d66dccc0e6a99a1bd655fd605 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:37:07 -0400 Subject: unicore32: get rid of zeroing and switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/unicore32/Kconfig | 1 + arch/unicore32/include/asm/uaccess.h | 6 ++++-- arch/unicore32/kernel/ksyms.c | 4 ++-- arch/unicore32/lib/copy_from_user.S | 16 ++++++---------- arch/unicore32/lib/copy_to_user.S | 6 +++--- 5 files changed, 16 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/unicore32/Kconfig b/arch/unicore32/Kconfig index 0769066..319519e 100644 --- a/arch/unicore32/Kconfig +++ b/arch/unicore32/Kconfig @@ -18,6 +18,7 @@ config UNICORE32 select ARCH_WANT_FRAME_POINTERS select GENERIC_IOMAP select MODULES_USE_ELF_REL + select ARCH_HAS_RAW_COPY_USER help 
UniCore-32 is 32-bit Instruction Set Architecture, including a series of low-power-consumption RISC chip diff --git a/arch/unicore32/include/asm/uaccess.h b/arch/unicore32/include/asm/uaccess.h index b2bcab6..1d55f2f 100644 --- a/arch/unicore32/include/asm/uaccess.h +++ b/arch/unicore32/include/asm/uaccess.h @@ -24,15 +24,17 @@ #define __access_ok(addr, size) (__kernel_ok || __user_ok((addr), (size))) extern unsigned long __must_check -__copy_from_user(void *to, const void __user *from, unsigned long n); +raw_copy_from_user(void *to, const void __user *from, unsigned long n); extern unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n); +raw_copy_to_user(void __user *to, const void *from, unsigned long n); extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n); extern unsigned long __must_check __strncpy_from_user(char *to, const char __user *from, unsigned long count); extern unsigned long __strnlen_user(const char __user *s, long n); +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #include diff --git a/arch/unicore32/kernel/ksyms.c b/arch/unicore32/kernel/ksyms.c index 0323528..dcc72ee 100644 --- a/arch/unicore32/kernel/ksyms.c +++ b/arch/unicore32/kernel/ksyms.c @@ -46,8 +46,8 @@ EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(copy_page); -EXPORT_SYMBOL(__copy_from_user); -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(raw_copy_from_user); +EXPORT_SYMBOL(raw_copy_to_user); EXPORT_SYMBOL(__clear_user); EXPORT_SYMBOL(__ashldi3); diff --git a/arch/unicore32/lib/copy_from_user.S b/arch/unicore32/lib/copy_from_user.S index ab0767ea..5f80fcb 100644 --- a/arch/unicore32/lib/copy_from_user.S +++ b/arch/unicore32/lib/copy_from_user.S @@ -16,7 +16,7 @@ /* * Prototype: * - * size_t __copy_from_user(void *to, const void *from, size_t n) + * size_t raw_copy_from_user(void *to, const void *from, size_t n) * * Purpose: * @@ -87,22 +87,18 @@ .text -ENTRY(__copy_from_user) +ENTRY(raw_copy_from_user) #include "copy_template.S" -ENDPROC(__copy_from_user) +ENDPROC(raw_copy_from_user) .pushsection .fixup,"ax" .align 0 copy_abort_preamble - ldm.w (r1, r2), [sp]+ - sub r3, r0, r1 - rsub r2, r3, r2 - stw r2, [sp] - mov r1, #0 - b.l memset - ldw.w r0, [sp]+, #4 + ldm.w (r1, r2, r3), [sp]+ + sub r0, r0, r1 + rsub r0, r0, r2 copy_abort_end .popsection diff --git a/arch/unicore32/lib/copy_to_user.S b/arch/unicore32/lib/copy_to_user.S index 6e22151..857c681 100644 --- a/arch/unicore32/lib/copy_to_user.S +++ b/arch/unicore32/lib/copy_to_user.S @@ -16,7 +16,7 @@ /* * Prototype: * - * size_t __copy_to_user(void *to, const void *from, size_t n) + * size_t raw_copy_to_user(void *to, const void *from, size_t n) * * Purpose: * @@ -79,11 +79,11 @@ .text -WEAK(__copy_to_user) +WEAK(raw_copy_to_user) #include "copy_template.S" -ENDPROC(__copy_to_user) +ENDPROC(raw_copy_to_user) .pushsection .fixup,"ax" .align 0 -- cgit v1.1 From 3f763453e6f27d82fa0ac58f8e1ac4094c1fb1f8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2017 18:47:28 -0400 Subject: kill __copy_from_user_nocache() Signed-off-by: Al Viro --- arch/x86/include/asm/uaccess_32.h | 30 ---------- arch/x86/include/asm/uaccess_64.h | 8 --- arch/x86/lib/usercopy_32.c | 118 -------------------------------------- 3 files changed, 156 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 5268ecc..19e6c05 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -14,8 +14,6 @@ unsigned 
long __must_check __copy_from_user_ll (void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nozero (void *to, const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nocache - (void *to, const void __user *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nocache_nozero (void *to, const void __user *from, unsigned long n); @@ -119,34 +117,6 @@ __copy_from_user(void *to, const void __user *from, unsigned long n) return __copy_from_user_ll(to, from, n); } -static __always_inline unsigned long __copy_from_user_nocache(void *to, - const void __user *from, unsigned long n) -{ - might_fault(); - if (__builtin_constant_p(n)) { - unsigned long ret; - - switch (n) { - case 1: - __uaccess_begin(); - __get_user_size(*(u8 *)to, from, 1, ret, 1); - __uaccess_end(); - return ret; - case 2: - __uaccess_begin(); - __get_user_size(*(u16 *)to, from, 2, ret, 2); - __uaccess_end(); - return ret; - case 4: - __uaccess_begin(); - __get_user_size(*(u32 *)to, from, 4, ret, 4); - __uaccess_end(); - return ret; - } - } - return __copy_from_user_ll_nocache(to, from, n); -} - static __always_inline unsigned long __copy_from_user_inatomic_nocache(void *to, const void __user *from, unsigned long n) diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 142f0f1..242936b 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -261,14 +261,6 @@ extern long __copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); static inline int -__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) -{ - might_fault(); - kasan_check_write(dst, size); - return __copy_user_nocache(dst, src, size, 1); -} - -static inline int __copy_from_user_inatomic_nocache(void *dst, const void __user *src, unsigned size) { diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 1f65ff65..02aa7aa 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -293,105 +293,6 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) return size; } -/* - * Non Temporal Hint version of __copy_user_zeroing_intel. It is cache aware. 
- * hyoshiok@miraclelinux.com - */ - -static unsigned long __copy_user_zeroing_intel_nocache(void *to, - const void __user *from, unsigned long size) -{ - int d0, d1; - - __asm__ __volatile__( - " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" - "1: movl 64(%4), %%eax\n" - " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" - " movnti %%eax, 0(%3)\n" - " movnti %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" - " movnti %%eax, 8(%3)\n" - " movnti %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" - " movnti %%eax, 16(%3)\n" - " movnti %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" - " movnti %%eax, 24(%3)\n" - " movnti %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" - " movnti %%eax, 32(%3)\n" - " movnti %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" - " movnti %%eax, 40(%3)\n" - " movnti %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" - " movnti %%eax, 48(%3)\n" - " movnti %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" - " movnti %%eax, 56(%3)\n" - " movnti %%edx, 60(%3)\n" - " addl $-64, %0\n" - " addl $64, %4\n" - " addl $64, %3\n" - " cmpl $63, %0\n" - " ja 0b\n" - " sfence \n" - "5: movl %0, %%eax\n" - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" - "6: rep; movsl\n" - " movl %%eax,%0\n" - "7: rep; movsb\n" - "8:\n" - ".section .fixup,\"ax\"\n" - "9: lea 0(%%eax,%0,4),%0\n" - "16: pushl %0\n" - " pushl %%eax\n" - " xorl %%eax,%%eax\n" - " rep; stosb\n" - " popl %%eax\n" - " popl %0\n" - " jmp 8b\n" - ".previous\n" - _ASM_EXTABLE(0b,16b) - _ASM_EXTABLE(1b,16b) - _ASM_EXTABLE(2b,16b) - _ASM_EXTABLE(21b,16b) - _ASM_EXTABLE(3b,16b) - _ASM_EXTABLE(31b,16b) - _ASM_EXTABLE(4b,16b) - _ASM_EXTABLE(41b,16b) - _ASM_EXTABLE(10b,16b) - _ASM_EXTABLE(51b,16b) - _ASM_EXTABLE(11b,16b) - _ASM_EXTABLE(61b,16b) - _ASM_EXTABLE(12b,16b) - _ASM_EXTABLE(71b,16b) - _ASM_EXTABLE(13b,16b) - _ASM_EXTABLE(81b,16b) - _ASM_EXTABLE(14b,16b) - _ASM_EXTABLE(91b,16b) - _ASM_EXTABLE(6b,9b) - _ASM_EXTABLE(7b,16b) - : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) - : "eax", "edx", "memory"); - return size; -} - static unsigned long __copy_user_intel_nocache(void *to, const void __user *from, unsigned long size) { @@ -490,8 +391,6 @@ unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size); unsigned long __copy_user_intel(void __user *to, const void *from, unsigned long size); -unsigned long __copy_user_zeroing_intel_nocache(void *to, - const void __user *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ /* Generic arbitrary sized copy. 
*/ @@ -607,23 +506,6 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, } EXPORT_SYMBOL(__copy_from_user_ll_nozero); -unsigned long __copy_from_user_ll_nocache(void *to, const void __user *from, - unsigned long n) -{ - stac(); -#ifdef CONFIG_X86_INTEL_USERCOPY - if (n > 64 && static_cpu_has(X86_FEATURE_XMM2)) - n = __copy_user_zeroing_intel_nocache(to, from, n); - else - __copy_user_zeroing(to, from, n); -#else - __copy_user_zeroing(to, from, n); -#endif - clac(); - return n; -} -EXPORT_SYMBOL(__copy_from_user_ll_nocache); - unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) { -- cgit v1.1 From 0b46a94e84c1323d54f8b82eacd3143400fb9521 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:34:44 -0500 Subject: xtensa: switch to generic extable.h Signed-off-by: Al Viro --- arch/xtensa/include/asm/Kbuild | 1 + arch/xtensa/include/asm/uaccess.h | 7 +------ 2 files changed, 2 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/Kbuild b/arch/xtensa/include/asm/Kbuild index f41408c..cc23e9e 100644 --- a/arch/xtensa/include/asm/Kbuild +++ b/arch/xtensa/include/asm/Kbuild @@ -6,6 +6,7 @@ generic-y += dma-contiguous.h generic-y += emergency-restart.h generic-y += errno.h generic-y += exec.h +generic-y += extable.h generic-y += fcntl.h generic-y += hardirq.h generic-y += ioctl.h diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 2651269..0f33877 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -18,6 +18,7 @@ #include #include +#include /* * The fs value determines whether argument validity checking should @@ -342,10 +343,4 @@ static inline long strnlen_user(const char *str, long len) return __strnlen_user(str, len); } - -struct exception_table_entry -{ - unsigned long insn, fixup; -}; - #endif /* _XTENSA_UACCESS_H */ -- cgit v1.1 From 3a0e75adecc8da026a5befb2c5828d08c999373c Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 22 Mar 2017 13:02:41 -0400 Subject: xtensa: get rid of zeroing, use RAW_COPY_USER Signed-off-by: Al Viro --- arch/xtensa/Kconfig | 1 + arch/xtensa/include/asm/uaccess.h | 54 +++--------------- arch/xtensa/lib/usercopy.S | 116 ++++++++++++++++---------------------- 3 files changed, 57 insertions(+), 114 deletions(-) (limited to 'arch') diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index f4126cf..043d37d 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig @@ -29,6 +29,7 @@ config XTENSA select NO_BOOTMEM select PERF_USE_VMALLOC select VIRT_TO_BUS + select ARCH_HAS_RAW_COPY_USER help Xtensa processors are 32-bit RISC machines designed by Tensilica primarily for embedded systems. These processors are both diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 0f33877..8e93ed8 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -234,60 +234,22 @@ __asm__ __volatile__( \ * Copy to/from user space */ -/* - * We use a generic, arbitrary-sized copy subroutine. The Xtensa - * architecture would cause heavy code bloat if we tried to inline - * these functions and provide __constant_copy_* equivalents like the - * i386 versions. __xtensa_copy_user is quite efficient. See the - * .fixup section of __xtensa_copy_user for a discussion on the - * X_zeroing equivalents for Xtensa. 
- */ - extern unsigned __xtensa_copy_user(void *to, const void *from, unsigned n); -#define __copy_user(to, from, size) __xtensa_copy_user(to, from, size) - static inline unsigned long -__generic_copy_from_user_nocheck(void *to, const void *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - return __copy_user(to, from, n); -} - -static inline unsigned long -__generic_copy_to_user_nocheck(void *to, const void *from, unsigned long n) -{ - return __copy_user(to, from, n); -} - -static inline unsigned long -__generic_copy_to_user(void *to, const void *from, unsigned long n) -{ - prefetch(from); - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to, from, n); - return n; + prefetchw(to); + return __xtensa_copy_user(to, (__force const void *)from, n); } - static inline unsigned long -__generic_copy_from_user(void *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - prefetchw(to); - if (access_ok(VERIFY_READ, from, n)) - return __copy_user(to, from, n); - else - memset(to, 0, n); - return n; + prefetchw(from); + return __xtensa_copy_user((__force void *)to, from, n); } - -#define copy_to_user(to, from, n) __generic_copy_to_user((to), (from), (n)) -#define copy_from_user(to, from, n) __generic_copy_from_user((to), (from), (n)) -#define __copy_to_user(to, from, n) \ - __generic_copy_to_user_nocheck((to), (from), (n)) -#define __copy_from_user(to, from, n) \ - __generic_copy_from_user_nocheck((to), (from), (n)) -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * We need to return the number of bytes not cleared. Our memset() diff --git a/arch/xtensa/lib/usercopy.S b/arch/xtensa/lib/usercopy.S index 7ea4dd6..d9cd766 100644 --- a/arch/xtensa/lib/usercopy.S +++ b/arch/xtensa/lib/usercopy.S @@ -102,9 +102,9 @@ __xtensa_copy_user: bltui a4, 7, .Lbytecopy # do short copies byte by byte # copy 1 byte - EX(l8ui, a6, a3, 0, l_fixup) + EX(l8ui, a6, a3, 0, fixup) addi a3, a3, 1 - EX(s8i, a6, a5, 0, s_fixup) + EX(s8i, a6, a5, 0, fixup) addi a5, a5, 1 addi a4, a4, -1 bbci.l a5, 1, .Ldstaligned # if dst is now aligned, then @@ -112,11 +112,11 @@ __xtensa_copy_user: .Ldst2mod4: # dst 16-bit aligned # copy 2 bytes bltui a4, 6, .Lbytecopy # do short copies byte by byte - EX(l8ui, a6, a3, 0, l_fixup) - EX(l8ui, a7, a3, 1, l_fixup) + EX(l8ui, a6, a3, 0, fixup) + EX(l8ui, a7, a3, 1, fixup) addi a3, a3, 2 - EX(s8i, a6, a5, 0, s_fixup) - EX(s8i, a7, a5, 1, s_fixup) + EX(s8i, a6, a5, 0, fixup) + EX(s8i, a7, a5, 1, fixup) addi a5, a5, 2 addi a4, a4, -2 j .Ldstaligned # dst is now aligned, return to main algorithm @@ -135,9 +135,9 @@ __xtensa_copy_user: add a7, a3, a4 # a7 = end address for source #endif /* !XCHAL_HAVE_LOOPS */ .Lnextbyte: - EX(l8ui, a6, a3, 0, l_fixup) + EX(l8ui, a6, a3, 0, fixup) addi a3, a3, 1 - EX(s8i, a6, a5, 0, s_fixup) + EX(s8i, a6, a5, 0, fixup) addi a5, a5, 1 #if !XCHAL_HAVE_LOOPS blt a3, a7, .Lnextbyte @@ -161,15 +161,15 @@ __xtensa_copy_user: add a8, a8, a3 # a8 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop1: - EX(l32i, a6, a3, 0, l_fixup) - EX(l32i, a7, a3, 4, l_fixup) - EX(s32i, a6, a5, 0, s_fixup) - EX(l32i, a6, a3, 8, l_fixup) - EX(s32i, a7, a5, 4, s_fixup) - EX(l32i, a7, a3, 12, l_fixup) - EX(s32i, a6, a5, 8, s_fixup) + EX(l32i, a6, a3, 0, fixup) + EX(l32i, a7, a3, 4, fixup) + EX(s32i, a6, a5, 0, fixup) + EX(l32i, a6, a3, 8, fixup) + EX(s32i, 
a7, a5, 4, fixup) + EX(l32i, a7, a3, 12, fixup) + EX(s32i, a6, a5, 8, fixup) addi a3, a3, 16 - EX(s32i, a7, a5, 12, s_fixup) + EX(s32i, a7, a5, 12, fixup) addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a8, .Loop1 @@ -177,31 +177,31 @@ __xtensa_copy_user: .Loop1done: bbci.l a4, 3, .L2 # copy 8 bytes - EX(l32i, a6, a3, 0, l_fixup) - EX(l32i, a7, a3, 4, l_fixup) + EX(l32i, a6, a3, 0, fixup) + EX(l32i, a7, a3, 4, fixup) addi a3, a3, 8 - EX(s32i, a6, a5, 0, s_fixup) - EX(s32i, a7, a5, 4, s_fixup) + EX(s32i, a6, a5, 0, fixup) + EX(s32i, a7, a5, 4, fixup) addi a5, a5, 8 .L2: bbci.l a4, 2, .L3 # copy 4 bytes - EX(l32i, a6, a3, 0, l_fixup) + EX(l32i, a6, a3, 0, fixup) addi a3, a3, 4 - EX(s32i, a6, a5, 0, s_fixup) + EX(s32i, a6, a5, 0, fixup) addi a5, a5, 4 .L3: bbci.l a4, 1, .L4 # copy 2 bytes - EX(l16ui, a6, a3, 0, l_fixup) + EX(l16ui, a6, a3, 0, fixup) addi a3, a3, 2 - EX(s16i, a6, a5, 0, s_fixup) + EX(s16i, a6, a5, 0, fixup) addi a5, a5, 2 .L4: bbci.l a4, 0, .L5 # copy 1 byte - EX(l8ui, a6, a3, 0, l_fixup) - EX(s8i, a6, a5, 0, s_fixup) + EX(l8ui, a6, a3, 0, fixup) + EX(s8i, a6, a5, 0, fixup) .L5: movi a2, 0 # return success for len bytes copied retw @@ -217,7 +217,7 @@ __xtensa_copy_user: # copy 16 bytes per iteration for word-aligned dst and unaligned src and a10, a3, a8 # save unalignment offset for below sub a3, a3, a10 # align a3 (to avoid sim warnings only; not needed for hardware) - EX(l32i, a6, a3, 0, l_fixup) # load first word + EX(l32i, a6, a3, 0, fixup) # load first word #if XCHAL_HAVE_LOOPS loopnez a7, .Loop2done #else /* !XCHAL_HAVE_LOOPS */ @@ -226,19 +226,19 @@ __xtensa_copy_user: add a12, a12, a3 # a12 = end of last 16B source chunk #endif /* !XCHAL_HAVE_LOOPS */ .Loop2: - EX(l32i, a7, a3, 4, l_fixup) - EX(l32i, a8, a3, 8, l_fixup) + EX(l32i, a7, a3, 4, fixup) + EX(l32i, a8, a3, 8, fixup) ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, s_fixup) - EX(l32i, a9, a3, 12, l_fixup) + EX(s32i, a6, a5, 0, fixup) + EX(l32i, a9, a3, 12, fixup) ALIGN( a7, a7, a8) - EX(s32i, a7, a5, 4, s_fixup) - EX(l32i, a6, a3, 16, l_fixup) + EX(s32i, a7, a5, 4, fixup) + EX(l32i, a6, a3, 16, fixup) ALIGN( a8, a8, a9) - EX(s32i, a8, a5, 8, s_fixup) + EX(s32i, a8, a5, 8, fixup) addi a3, a3, 16 ALIGN( a9, a9, a6) - EX(s32i, a9, a5, 12, s_fixup) + EX(s32i, a9, a5, 12, fixup) addi a5, a5, 16 #if !XCHAL_HAVE_LOOPS blt a3, a12, .Loop2 @@ -246,39 +246,39 @@ __xtensa_copy_user: .Loop2done: bbci.l a4, 3, .L12 # copy 8 bytes - EX(l32i, a7, a3, 4, l_fixup) - EX(l32i, a8, a3, 8, l_fixup) + EX(l32i, a7, a3, 4, fixup) + EX(l32i, a8, a3, 8, fixup) ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, s_fixup) + EX(s32i, a6, a5, 0, fixup) addi a3, a3, 8 ALIGN( a7, a7, a8) - EX(s32i, a7, a5, 4, s_fixup) + EX(s32i, a7, a5, 4, fixup) addi a5, a5, 8 mov a6, a8 .L12: bbci.l a4, 2, .L13 # copy 4 bytes - EX(l32i, a7, a3, 4, l_fixup) + EX(l32i, a7, a3, 4, fixup) addi a3, a3, 4 ALIGN( a6, a6, a7) - EX(s32i, a6, a5, 0, s_fixup) + EX(s32i, a6, a5, 0, fixup) addi a5, a5, 4 mov a6, a7 .L13: add a3, a3, a10 # readjust a3 with correct misalignment bbci.l a4, 1, .L14 # copy 2 bytes - EX(l8ui, a6, a3, 0, l_fixup) - EX(l8ui, a7, a3, 1, l_fixup) + EX(l8ui, a6, a3, 0, fixup) + EX(l8ui, a7, a3, 1, fixup) addi a3, a3, 2 - EX(s8i, a6, a5, 0, s_fixup) - EX(s8i, a7, a5, 1, s_fixup) + EX(s8i, a6, a5, 0, fixup) + EX(s8i, a7, a5, 1, fixup) addi a5, a5, 2 .L14: bbci.l a4, 0, .L15 # copy 1 byte - EX(l8ui, a6, a3, 0, l_fixup) - EX(s8i, a6, a5, 0, s_fixup) + EX(l8ui, a6, a3, 0, fixup) + EX(s8i, a6, a5, 0, fixup) .L15: movi a2, 0 # return success for len bytes copied retw @@ 
-291,30 +291,10 @@ __xtensa_copy_user: * bytes_copied = a5 - a2 * retval = bytes_not_copied = original len - bytes_copied * retval = a11 - (a5 - a2) - * - * Clearing the remaining pieces of kernel memory plugs security - * holes. This functionality is the equivalent of the *_zeroing - * functions that some architectures provide. */ -.Lmemset: - .word memset -s_fixup: +fixup: sub a2, a5, a2 /* a2 <-- bytes copied */ sub a2, a11, a2 /* a2 <-- bytes not copied */ retw - -l_fixup: - sub a2, a5, a2 /* a2 <-- bytes copied */ - sub a2, a11, a2 /* a2 <-- bytes not copied == return value */ - - /* void *memset(void *s, int c, size_t n); */ - mov a6, a5 /* s */ - movi a7, 0 /* c */ - mov a8, a2 /* n */ - l32r a4, .Lmemset - callx4 a4 - /* Ignore memset return value in a6. */ - /* a2 still contains bytes not copied. */ - retw -- cgit v1.1 From 839cc2954cbdb82511d2ecfa07ba06bab4198519 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 08:19:02 -0400 Subject: arc: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/arc/Kconfig | 1 + arch/arc/include/asm/uaccess.h | 8 ++++---- 2 files changed, 5 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig index c9f30f4..7e213ff 100644 --- a/arch/arc/Kconfig +++ b/arch/arc/Kconfig @@ -44,6 +44,7 @@ config ARC select HAVE_GENERIC_DMA_COHERENT select HAVE_KERNEL_GZIP select HAVE_KERNEL_LZMA + select ARCH_HAS_RAW_COPY_USER config MIGHT_HAVE_PCI bool diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index 657401f..c4d26e8 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -721,8 +721,8 @@ static inline long __arc_strnlen_user(const char __user *s, long n) } #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE -#define __copy_from_user(t, f, n) __arc_copy_from_user(t, f, n) -#define __copy_to_user(t, f, n) __arc_copy_to_user(t, f, n) +#define raw_copy_from_user __arc_copy_from_user +#define raw_copy_to_user __arc_copy_to_user #define __clear_user(d, n) __arc_clear_user(d, n) #define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n) #define __strnlen_user(s, n) __arc_strnlen_user(s, n) @@ -737,8 +737,8 @@ extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src, long count); extern long arc_strnlen_user_noinline(const char __user *src, long n); -#define __copy_from_user(t, f, n) arc_copy_from_user_noinline(t, f, n) -#define __copy_to_user(t, f, n) arc_copy_to_user_noinline(t, f, n) +#define raw_copy_from_user arc_copy_from_user_noinline +#define raw_copy_to_user arc_copy_to_user_noinline #define __clear_user(d, n) arc_clear_user_noinline(d, n) #define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n) #define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n) -- cgit v1.1 From a41e0d754240fe8ca9c4f2070bf67e3b0228aa22 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2017 19:38:23 -0400 Subject: x86: don't wank with magical size in __copy_in_user() ... 
especially since copy_in_user() doesn't Signed-off-by: Al Viro --- arch/x86/include/asm/uaccess_64.h | 58 ++------------------------------------- arch/x86/lib/usercopy_64.c | 9 ------ 2 files changed, 2 insertions(+), 65 deletions(-) (limited to 'arch') diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 242936b..497ca1b 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -185,62 +185,8 @@ int __copy_to_user(void __user *dst, const void *src, unsigned size) static __always_inline __must_check int __copy_in_user(void __user *dst, const void __user *src, unsigned size) { - int ret = 0; - - might_fault(); - if (!__builtin_constant_p(size)) - return copy_user_generic((__force void *)dst, - (__force void *)src, size); - switch (size) { - case 1: { - u8 tmp; - __uaccess_begin(); - __get_user_asm(tmp, (u8 __user *)src, - ret, "b", "b", "=q", 1); - if (likely(!ret)) - __put_user_asm(tmp, (u8 __user *)dst, - ret, "b", "b", "iq", 1); - __uaccess_end(); - return ret; - } - case 2: { - u16 tmp; - __uaccess_begin(); - __get_user_asm(tmp, (u16 __user *)src, - ret, "w", "w", "=r", 2); - if (likely(!ret)) - __put_user_asm(tmp, (u16 __user *)dst, - ret, "w", "w", "ir", 2); - __uaccess_end(); - return ret; - } - - case 4: { - u32 tmp; - __uaccess_begin(); - __get_user_asm(tmp, (u32 __user *)src, - ret, "l", "k", "=r", 4); - if (likely(!ret)) - __put_user_asm(tmp, (u32 __user *)dst, - ret, "l", "k", "ir", 4); - __uaccess_end(); - return ret; - } - case 8: { - u64 tmp; - __uaccess_begin(); - __get_user_asm(tmp, (u64 __user *)src, - ret, "q", "", "=r", 8); - if (likely(!ret)) - __put_user_asm(tmp, (u64 __user *)dst, - ret, "q", "", "er", 8); - __uaccess_end(); - return ret; - } - default: - return copy_user_generic((__force void *)dst, - (__force void *)src, size); - } + return copy_user_generic((__force void *)dst, + (__force void *)src, size); } static __must_check __always_inline int diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index 6c8b6a6..3b7c40a 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -54,15 +54,6 @@ unsigned long clear_user(void __user *to, unsigned long n) } EXPORT_SYMBOL(clear_user); -unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) -{ - if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { - return copy_user_generic((__force void *)to, (__force void *)from, len); - } - return len; -} -EXPORT_SYMBOL(copy_in_user); - /* * Try to copy last bytes and clear the rest if needed. 
* Since protection fault in copy_from/to_user is not a normal situation, -- cgit v1.1 From beba3a20bf90ce1b93e24592c3ebf0d0bb581bbe Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sat, 25 Mar 2017 19:33:21 -0400 Subject: x86: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/x86/Kconfig | 1 + arch/x86/include/asm/uaccess.h | 53 ------------ arch/x86/include/asm/uaccess_32.h | 95 ++++----------------- arch/x86/include/asm/uaccess_64.h | 45 ++-------- arch/x86/lib/usercopy.c | 55 +----------- arch/x86/lib/usercopy_32.c | 170 +------------------------------------- 6 files changed, 24 insertions(+), 395 deletions(-) (limited to 'arch') diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index cc98d5a..5f59fc3 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -175,6 +175,7 @@ config X86 select USER_STACKTRACE_SUPPORT select VIRT_TO_BUS select X86_FEATURE_NAMES if PROC_FS + select ARCH_HAS_RAW_COPY_USER config INSTRUCTION_DECODER def_bool y diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h index 26410af..68766b2 100644 --- a/arch/x86/include/asm/uaccess.h +++ b/arch/x86/include/asm/uaccess.h @@ -682,59 +682,6 @@ extern struct movsl_mask { # include #endif -unsigned long __must_check _copy_from_user(void *to, const void __user *from, - unsigned n); -unsigned long __must_check _copy_to_user(void __user *to, const void *from, - unsigned n); - -extern void __compiletime_error("usercopy buffer size is too small") -__bad_copy_user(void); - -static inline void copy_user_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - -static __always_inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - int sz = __compiletime_object_size(to); - - might_fault(); - - kasan_check_write(to, n); - - if (likely(sz < 0 || sz >= n)) { - check_object_size(to, n, false); - n = _copy_from_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - - return n; -} - -static __always_inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - int sz = __compiletime_object_size(from); - - kasan_check_read(from, n); - - might_fault(); - - if (likely(sz < 0 || sz >= n)) { - check_object_size(from, n, true); - n = _copy_to_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - - return n; -} - /* * We rely on the nested NMI work to allow atomic faults from the NMI path; the * nested NMI paths are careful to preserve CR2. diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h index 19e6c05..aeda9bb 100644 --- a/arch/x86/include/asm/uaccess_32.h +++ b/arch/x86/include/asm/uaccess_32.h @@ -8,113 +8,48 @@ #include #include -unsigned long __must_check __copy_to_user_ll - (void __user *to, const void *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll - (void *to, const void __user *from, unsigned long n); -unsigned long __must_check __copy_from_user_ll_nozero - (void *to, const void __user *from, unsigned long n); +unsigned long __must_check __copy_user_ll + (void *to, const void *from, unsigned long n); unsigned long __must_check __copy_from_user_ll_nocache_nozero (void *to, const void __user *from, unsigned long n); -/** - * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. 
- * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * The caller should also make sure he pins the user space address - * so that we don't result in page fault and sleep. - */ static __always_inline unsigned long __must_check -__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - check_object_size(from, n, true); - return __copy_to_user_ll(to, from, n); + return __copy_user_ll((__force void *)to, from, n); } -/** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static __always_inline unsigned long __must_check -__copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - return __copy_to_user_inatomic(to, from, n); -} - -static __always_inline unsigned long -__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) -{ - return __copy_from_user_ll_nozero(to, from, n); -} - -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - * - * An alternate version - __copy_from_user_inatomic() - may be called from - * atomic context and will fail rather than sleep. In this case the - * uncopied bytes will *NOT* be padded with zeros. See fs/filemap.h - * for explanation of why this is needed. 
- */ static __always_inline unsigned long -__copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - might_fault(); - check_object_size(to, n, false); if (__builtin_constant_p(n)) { unsigned long ret; switch (n) { case 1: + ret = 0; __uaccess_begin(); - __get_user_size(*(u8 *)to, from, 1, ret, 1); + __get_user_asm_nozero(*(u8 *)to, from, ret, + "b", "b", "=q", 1); __uaccess_end(); return ret; case 2: + ret = 0; __uaccess_begin(); - __get_user_size(*(u16 *)to, from, 2, ret, 2); + __get_user_asm_nozero(*(u16 *)to, from, ret, + "w", "w", "=r", 2); __uaccess_end(); return ret; case 4: + ret = 0; __uaccess_begin(); - __get_user_size(*(u32 *)to, from, 4, ret, 4); + __get_user_asm_nozero(*(u32 *)to, from, ret, + "l", "k", "=r", 4); __uaccess_end(); return ret; } } - return __copy_from_user_ll(to, from, n); + return __copy_user_ll(to, (__force const void *)from, n); } static __always_inline unsigned long diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h index 497ca1b..c5504b9 100644 --- a/arch/x86/include/asm/uaccess_64.h +++ b/arch/x86/include/asm/uaccess_64.h @@ -45,15 +45,11 @@ copy_user_generic(void *to, const void *from, unsigned len) return ret; } -__must_check unsigned long -copy_in_user(void __user *to, const void __user *from, unsigned len); - -static __always_inline __must_check -int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) +static __always_inline __must_check unsigned long +raw_copy_from_user(void *dst, const void __user *src, unsigned long size) { int ret = 0; - check_object_size(dst, size, false); if (!__builtin_constant_p(size)) return copy_user_generic(dst, (__force void *)src, size); switch (size) { @@ -106,20 +102,11 @@ int __copy_from_user_nocheck(void *dst, const void __user *src, unsigned size) } } -static __always_inline __must_check -int __copy_from_user(void *dst, const void __user *src, unsigned size) -{ - might_fault(); - kasan_check_write(dst, size); - return __copy_from_user_nocheck(dst, src, size); -} - -static __always_inline __must_check -int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) +static __always_inline __must_check unsigned long +raw_copy_to_user(void __user *dst, const void *src, unsigned long size) { int ret = 0; - check_object_size(src, size, true); if (!__builtin_constant_p(size)) return copy_user_generic((__force void *)dst, src, size); switch (size) { @@ -175,34 +162,12 @@ int __copy_to_user_nocheck(void __user *dst, const void *src, unsigned size) } static __always_inline __must_check -int __copy_to_user(void __user *dst, const void *src, unsigned size) -{ - might_fault(); - kasan_check_read(src, size); - return __copy_to_user_nocheck(dst, src, size); -} - -static __always_inline __must_check -int __copy_in_user(void __user *dst, const void __user *src, unsigned size) +unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long size) { return copy_user_generic((__force void *)dst, (__force void *)src, size); } -static __must_check __always_inline int -__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) -{ - kasan_check_write(dst, size); - return __copy_from_user_nocheck(dst, src, size); -} - -static __must_check __always_inline int -__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) -{ - kasan_check_read(src, size); - return __copy_to_user_nocheck(dst, src, size); -} - extern long 
__copy_user_nocache(void *dst, const void __user *src, unsigned size, int zerorest); diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c index a851f3d..c8c6ad0 100644 --- a/arch/x86/lib/usercopy.c +++ b/arch/x86/lib/usercopy.c @@ -4,12 +4,9 @@ * For licencing details see kernel-base/COPYING */ -#include +#include #include -#include -#include - /* * We rely on the nested NMI work to allow atomic faults from the NMI path; the * nested NMI paths are careful to preserve CR2. @@ -34,53 +31,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n) return ret; } EXPORT_SYMBOL_GPL(copy_from_user_nmi); - -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -unsigned long _copy_to_user(void __user *to, const void *from, unsigned n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - n = __copy_to_user(to, from, n); - return n; -} -EXPORT_SYMBOL(_copy_to_user); - -/** - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
- */ -unsigned long _copy_from_user(void *to, const void __user *from, unsigned n) -{ - unsigned long res = n; - if (access_ok(VERIFY_READ, from, n)) - res = __copy_from_user_inatomic(to, from, n); - if (unlikely(res)) - memset(to + n - res, 0, res); - return res; -} -EXPORT_SYMBOL(_copy_from_user); diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index 02aa7aa..bd057a4 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c @@ -5,12 +5,7 @@ * Copyright 1997 Andi Kleen * Copyright 1997 Linus Torvalds */ -#include -#include -#include #include -#include -#include #include #include #include @@ -201,98 +196,6 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size) return size; } -static unsigned long -__copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size) -{ - int d0, d1; - __asm__ __volatile__( - " .align 2,0x90\n" - "0: movl 32(%4), %%eax\n" - " cmpl $67, %0\n" - " jbe 2f\n" - "1: movl 64(%4), %%eax\n" - " .align 2,0x90\n" - "2: movl 0(%4), %%eax\n" - "21: movl 4(%4), %%edx\n" - " movl %%eax, 0(%3)\n" - " movl %%edx, 4(%3)\n" - "3: movl 8(%4), %%eax\n" - "31: movl 12(%4),%%edx\n" - " movl %%eax, 8(%3)\n" - " movl %%edx, 12(%3)\n" - "4: movl 16(%4), %%eax\n" - "41: movl 20(%4), %%edx\n" - " movl %%eax, 16(%3)\n" - " movl %%edx, 20(%3)\n" - "10: movl 24(%4), %%eax\n" - "51: movl 28(%4), %%edx\n" - " movl %%eax, 24(%3)\n" - " movl %%edx, 28(%3)\n" - "11: movl 32(%4), %%eax\n" - "61: movl 36(%4), %%edx\n" - " movl %%eax, 32(%3)\n" - " movl %%edx, 36(%3)\n" - "12: movl 40(%4), %%eax\n" - "71: movl 44(%4), %%edx\n" - " movl %%eax, 40(%3)\n" - " movl %%edx, 44(%3)\n" - "13: movl 48(%4), %%eax\n" - "81: movl 52(%4), %%edx\n" - " movl %%eax, 48(%3)\n" - " movl %%edx, 52(%3)\n" - "14: movl 56(%4), %%eax\n" - "91: movl 60(%4), %%edx\n" - " movl %%eax, 56(%3)\n" - " movl %%edx, 60(%3)\n" - " addl $-64, %0\n" - " addl $64, %4\n" - " addl $64, %3\n" - " cmpl $63, %0\n" - " ja 0b\n" - "5: movl %0, %%eax\n" - " shrl $2, %0\n" - " andl $3, %%eax\n" - " cld\n" - "6: rep; movsl\n" - " movl %%eax,%0\n" - "7: rep; movsb\n" - "8:\n" - ".section .fixup,\"ax\"\n" - "9: lea 0(%%eax,%0,4),%0\n" - "16: pushl %0\n" - " pushl %%eax\n" - " xorl %%eax,%%eax\n" - " rep; stosb\n" - " popl %%eax\n" - " popl %0\n" - " jmp 8b\n" - ".previous\n" - _ASM_EXTABLE(0b,16b) - _ASM_EXTABLE(1b,16b) - _ASM_EXTABLE(2b,16b) - _ASM_EXTABLE(21b,16b) - _ASM_EXTABLE(3b,16b) - _ASM_EXTABLE(31b,16b) - _ASM_EXTABLE(4b,16b) - _ASM_EXTABLE(41b,16b) - _ASM_EXTABLE(10b,16b) - _ASM_EXTABLE(51b,16b) - _ASM_EXTABLE(11b,16b) - _ASM_EXTABLE(61b,16b) - _ASM_EXTABLE(12b,16b) - _ASM_EXTABLE(71b,16b) - _ASM_EXTABLE(13b,16b) - _ASM_EXTABLE(81b,16b) - _ASM_EXTABLE(14b,16b) - _ASM_EXTABLE(91b,16b) - _ASM_EXTABLE(6b,9b) - _ASM_EXTABLE(7b,16b) - : "=&c"(size), "=&D" (d0), "=&S" (d1) - : "1"(to), "2"(from), "0"(size) - : "eax", "edx", "memory"); - return size; -} - static unsigned long __copy_user_intel_nocache(void *to, const void __user *from, unsigned long size) { @@ -387,8 +290,6 @@ static unsigned long __copy_user_intel_nocache(void *to, * Leave these declared but undefined. 
They should not be any references to * them */ -unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, - unsigned long size); unsigned long __copy_user_intel(void __user *to, const void *from, unsigned long size); #endif /* CONFIG_X86_INTEL_USERCOPY */ @@ -427,47 +328,7 @@ do { \ : "memory"); \ } while (0) -#define __copy_user_zeroing(to, from, size) \ -do { \ - int __d0, __d1, __d2; \ - __asm__ __volatile__( \ - " cmp $7,%0\n" \ - " jbe 1f\n" \ - " movl %1,%0\n" \ - " negl %0\n" \ - " andl $7,%0\n" \ - " subl %0,%3\n" \ - "4: rep; movsb\n" \ - " movl %3,%0\n" \ - " shrl $2,%0\n" \ - " andl $3,%3\n" \ - " .align 2,0x90\n" \ - "0: rep; movsl\n" \ - " movl %3,%0\n" \ - "1: rep; movsb\n" \ - "2:\n" \ - ".section .fixup,\"ax\"\n" \ - "5: addl %3,%0\n" \ - " jmp 6f\n" \ - "3: lea 0(%3,%0,4),%0\n" \ - "6: pushl %0\n" \ - " pushl %%eax\n" \ - " xorl %%eax,%%eax\n" \ - " rep; stosb\n" \ - " popl %%eax\n" \ - " popl %0\n" \ - " jmp 2b\n" \ - ".previous\n" \ - _ASM_EXTABLE(4b,5b) \ - _ASM_EXTABLE(0b,3b) \ - _ASM_EXTABLE(1b,6b) \ - : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ - : "3"(size), "0"(size), "1"(to), "2"(from) \ - : "memory"); \ -} while (0) - -unsigned long __copy_to_user_ll(void __user *to, const void *from, - unsigned long n) +unsigned long __copy_user_ll(void *to, const void *from, unsigned long n) { stac(); if (movsl_is_ok(to, from, n)) @@ -477,34 +338,7 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from, clac(); return n; } -EXPORT_SYMBOL(__copy_to_user_ll); - -unsigned long __copy_from_user_ll(void *to, const void __user *from, - unsigned long n) -{ - stac(); - if (movsl_is_ok(to, from, n)) - __copy_user_zeroing(to, from, n); - else - n = __copy_user_zeroing_intel(to, from, n); - clac(); - return n; -} -EXPORT_SYMBOL(__copy_from_user_ll); - -unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from, - unsigned long n) -{ - stac(); - if (movsl_is_ok(to, from, n)) - __copy_user(to, from, n); - else - n = __copy_user_intel((void __user *)to, - (const void *)from, n); - clac(); - return n; -} -EXPORT_SYMBOL(__copy_from_user_ll_nozero); +EXPORT_SYMBOL(__copy_user_ll); unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from, unsigned long n) -- cgit v1.1 From 554bfeceb8a22d448cd986fc9efce25e833278a1 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 29 Mar 2017 21:41:05 +0200 Subject: parisc: Fix access fault handling in pa_memcpy() pa_memcpy() is the major memcpy implementation in the parisc kernel, used for all kinds of userspace/kernel memory copies. Al Viro noticed various bugs in the implementation of pa_memcpy(), most notably that in case of faults it may report back that it copied more bytes than it actually did. Fixing those bugs is quite hard in the C implementation, because the compiler is messing around with the registers and we are not guaranteed that specific variables are always in the same processor registers. This makes proper fault handling complicated. This patch implements pa_memcpy() in assembler. That way we have correct fault handling, and adding a 64-bit copy routine was quite easy. Runtime tested with 32- and 64-bit kernels.
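To illustrate the contract the new assembler routine has to honour, a minimal C sketch follows. The wrapper below is hypothetical and not part of this patch; it mirrors the generic _copy_from_user() pattern reworked earlier in this series. The key invariant is that pa_memcpy() returns exactly the number of bytes it did NOT copy:

/* Hypothetical wrapper, for illustration only. */
static unsigned long example_copy_from_user(void *to,
		const void __user *from, unsigned long n)
{
	/* pa_memcpy() returns the number of bytes left uncopied;
	 * 0 means complete success */
	unsigned long left = pa_memcpy(to, (__force const void *)from, n);

	/* zero exactly the uncopied tail; if 'left' were
	 * under-reported after a fault, stale kernel data in 'to'
	 * would be treated as validly copied user data */
	if (unlikely(left))
		memset(to + (n - left), 0, left);
	return left;
}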
Reported-by: Al Viro Cc: # v4.9+ Signed-off-by: John David Anglin Signed-off-by: Helge Deller --- arch/parisc/lib/lusercopy.S | 318 ++++++++++++++++++++++++++++++ arch/parisc/lib/memcpy.c | 461 +------------------------------------------- 2 files changed, 321 insertions(+), 458 deletions(-) (limited to 'arch') diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S index 56845de..f01188c 100644 --- a/arch/parisc/lib/lusercopy.S +++ b/arch/parisc/lib/lusercopy.S @@ -5,6 +5,8 @@ * Copyright (C) 2000 Richard Hirst * Copyright (C) 2001 Matthieu Delahaye * Copyright (C) 2003 Randolph Chung + * Copyright (C) 2017 Helge Deller + * Copyright (C) 2017 John David Anglin * * * This program is free software; you can redistribute it and/or modify @@ -132,4 +134,320 @@ ENDPROC_CFI(lstrnlen_user) .procend + + +/* + * unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) + * + * Inputs: + * - sr1 already contains space of source region + * - sr2 already contains space of destination region + * + * Returns: + * - number of bytes that could not be copied. + * On success, this will be zero. + * + * This code is based on a C-implementation of a copy routine written by + * Randolph Chung, which in turn was derived from the glibc. + * + * Several strategies are tried to try to get the best performance for various + * conditions. In the optimal case, we copy by loops that copy 32- or 16-bytes + * at a time using general registers. Unaligned copies are handled either by + * aligning the destination and then using shift-and-write method, or in a few + * cases by falling back to a byte-at-a-time copy. + * + * Testing with various alignments and buffer sizes shows that this code is + * often >10x faster than a simple byte-at-a-time copy, even for strangely + * aligned operands. It is interesting to note that the glibc version of memcpy + * (written in C) is actually quite fast already. This routine is able to beat + * it by 30-40% for aligned copies because of the loop unrolling, but in some + * cases the glibc version is still slightly faster. This lends more + * credibility that gcc can generate very good code as long as we are careful. + * + * Possible optimizations: + * - add cache prefetching + * - try not to use the post-increment address modifiers; they may create + * additional interlocks. Assumption is that those were only efficient on old + * machines (pre PA8000 processors) + */ + + dst = arg0 + src = arg1 + len = arg2 + end = arg3 + t1 = r19 + t2 = r20 + t3 = r21 + t4 = r22 + srcspc = sr1 + dstspc = sr2 + + t0 = r1 + a1 = t1 + a2 = t2 + a3 = t3 + a0 = t4 + + save_src = ret0 + save_dst = ret1 + save_len = r31 + +ENTRY_CFI(pa_memcpy) + .proc + .callinfo NO_CALLS + .entry + + /* Last destination address */ + add dst,len,end + + /* short copy with less than 16 bytes? */ + cmpib,>>=,n 15,len,.Lbyte_loop + + /* same alignment? */ + xor src,dst,t0 + extru t0,31,2,t1 + cmpib,<>,n 0,t1,.Lunaligned_copy + +#ifdef CONFIG_64BIT + /* only do 64-bit copies if we can get aligned. 
*/ + extru t0,31,3,t1 + cmpib,<>,n 0,t1,.Lalign_loop32 + + /* loop until we are 64-bit aligned */ +.Lalign_loop64: + extru dst,31,3,t1 + cmpib,=,n 0,t1,.Lcopy_loop_16 +20: ldb,ma 1(srcspc,src),t1 +21: stb,ma t1,1(dstspc,dst) + b .Lalign_loop64 + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + + ldi 31,t0 +.Lcopy_loop_16: + cmpb,COND(>>=),n t0,len,.Lword_loop + +10: ldd 0(srcspc,src),t1 +11: ldd 8(srcspc,src),t2 + ldo 16(src),src +12: std,ma t1,8(dstspc,dst) +13: std,ma t2,8(dstspc,dst) +14: ldd 0(srcspc,src),t1 +15: ldd 8(srcspc,src),t2 + ldo 16(src),src +16: std,ma t1,8(dstspc,dst) +17: std,ma t2,8(dstspc,dst) + + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy16_fault) + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy16_fault) + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) + + b .Lcopy_loop_16 + ldo -32(len),len + +.Lword_loop: + cmpib,COND(>>=),n 3,len,.Lbyte_loop +20: ldw,ma 4(srcspc,src),t1 +21: stw,ma t1,4(dstspc,dst) + b .Lword_loop + ldo -4(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + +#endif /* CONFIG_64BIT */ + + /* loop until we are 32-bit aligned */ +.Lalign_loop32: + extru dst,31,2,t1 + cmpib,=,n 0,t1,.Lcopy_loop_4 +20: ldb,ma 1(srcspc,src),t1 +21: stb,ma t1,1(dstspc,dst) + b .Lalign_loop32 + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + + +.Lcopy_loop_4: + cmpib,COND(>>=),n 15,len,.Lbyte_loop + +10: ldw 0(srcspc,src),t1 +11: ldw 4(srcspc,src),t2 +12: stw,ma t1,4(dstspc,dst) +13: stw,ma t2,4(dstspc,dst) +14: ldw 8(srcspc,src),t1 +15: ldw 12(srcspc,src),t2 + ldo 16(src),src +16: stw,ma t1,4(dstspc,dst) +17: stw,ma t2,4(dstspc,dst) + + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(11b,.Lcopy8_fault) + ASM_EXCEPTIONTABLE_ENTRY(12b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(13b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(14b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(15b,.Lcopy8_fault) + ASM_EXCEPTIONTABLE_ENTRY(16b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(17b,.Lcopy_done) + + b .Lcopy_loop_4 + ldo -16(len),len + +.Lbyte_loop: + cmpclr,COND(<>) len,%r0,%r0 + b,n .Lcopy_done +20: ldb 0(srcspc,src),t1 + ldo 1(src),src +21: stb,ma t1,1(dstspc,dst) + b .Lbyte_loop + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + +.Lcopy_done: + bv %r0(%r2) + sub end,dst,ret0 + + + /* src and dst are not aligned the same way. */ + /* need to go the hard way */ +.Lunaligned_copy: + /* align until dst is 32bit-word-aligned */ + extru dst,31,2,t1 + cmpib,COND(=),n 0,t1,.Lcopy_dstaligned +20: ldb 0(srcspc,src),t1 + ldo 1(src),src +21: stb,ma t1,1(dstspc,dst) + b .Lunaligned_copy + ldo -1(len),len + + ASM_EXCEPTIONTABLE_ENTRY(20b,.Lcopy_done) + ASM_EXCEPTIONTABLE_ENTRY(21b,.Lcopy_done) + +.Lcopy_dstaligned: + + /* store src, dst and len in safe place */ + copy src,save_src + copy dst,save_dst + copy len,save_len + + /* len now needs give number of words to copy */ + SHRREG len,2,len + + /* + * Copy from a not-aligned src to an aligned dst using shifts. + * Handles 4 words per loop. + */ + + depw,z src,28,2,t0 + subi 32,t0,t0 + mtsar t0 + extru len,31,2,t0 + cmpib,= 2,t0,.Lcase2 + /* Make src aligned by rounding it down. 
*/ + depi 0,31,2,src + + cmpiclr,<> 3,t0,%r0 + b,n .Lcase3 + cmpiclr,<> 1,t0,%r0 + b,n .Lcase1 +.Lcase0: + cmpb,= %r0,len,.Lcda_finish + nop + +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b,n .Ldo3 +.Lcase1: +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + ldo -1(len),len + cmpb,=,n %r0,len,.Ldo0 +.Ldo4: +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a2, a3, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo3: +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a3, a0, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo2: +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a0, a1, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) +.Ldo1: +1: ldw,ma 4(srcspc,src), a3 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + shrpw a1, a2, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) + ldo -4(len),len + cmpb,<> %r0,len,.Ldo4 + nop +.Ldo0: + shrpw a2, a3, %sar, t0 +1: stw,ma t0, 4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcopy_done) + +.Lcda_rdfault: +.Lcda_finish: + /* calculate new src, dst and len and jump to byte-copy loop */ + sub dst,save_dst,t0 + add save_src,t0,src + b .Lbyte_loop + sub save_len,t0,len + +.Lcase3: +1: ldw,ma 4(srcspc,src), a0 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b .Ldo2 + ldo 1(len),len +.Lcase2: +1: ldw,ma 4(srcspc,src), a1 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) +1: ldw,ma 4(srcspc,src), a2 + ASM_EXCEPTIONTABLE_ENTRY(1b,.Lcda_rdfault) + b .Ldo1 + ldo 2(len),len + + + /* fault exception fixup handlers: */ +#ifdef CONFIG_64BIT +.Lcopy16_fault: +10: b .Lcopy_done + std,ma t1,8(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) +#endif + +.Lcopy8_fault: +10: b .Lcopy_done + stw,ma t1,4(dstspc,dst) + ASM_EXCEPTIONTABLE_ENTRY(10b,.Lcopy_done) + + .exit +ENDPROC_CFI(pa_memcpy) + .procend + .end diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index f82ff10..b3d47ec 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -2,7 +2,7 @@ * Optimized memory copy routines. * * Copyright (C) 2004 Randolph Chung - * Copyright (C) 2013 Helge Deller + * Copyright (C) 2013-2017 Helge Deller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -21,474 +21,21 @@ * Portions derived from the GNU C Library * Copyright (C) 1991, 1997, 2003 Free Software Foundation, Inc. * - * Several strategies are tried to try to get the best performance for various - * conditions. In the optimal case, we copy 64-bytes in an unrolled loop using - * fp regs. This is followed by loops that copy 32- or 16-bytes at a time using - * general registers. Unaligned copies are handled either by aligning the - * destination and then using shift-and-write method, or in a few cases by - * falling back to a byte-at-a-time copy. - * - * I chose to implement this in C because it is easier to maintain and debug, - * and in my experiments it appears that the C code generated by gcc (3.3/3.4 - * at the time of writing) is fairly optimal. 
Unfortunately some of the - * semantics of the copy routine (exception handling) is difficult to express - * in C, so we have to play some tricks to get it to work. - * - * All the loads and stores are done via explicit asm() code in order to use - * the right space registers. - * - * Testing with various alignments and buffer sizes shows that this code is - * often >10x faster than a simple byte-at-a-time copy, even for strangely - * aligned operands. It is interesting to note that the glibc version - * of memcpy (written in C) is actually quite fast already. This routine is - * able to beat it by 30-40% for aligned copies because of the loop unrolling, - * but in some cases the glibc version is still slightly faster. This lends - * more credibility that gcc can generate very good code as long as we are - * careful. - * - * TODO: - * - cache prefetching needs more experimentation to get optimal settings - * - try not to use the post-increment address modifiers; they create additional - * interlocks - * - replace byte-copy loops with stybs sequences */ -#ifdef __KERNEL__ #include #include #include -#define s_space "%%sr1" -#define d_space "%%sr2" -#else -#include "memcpy.h" -#define s_space "%%sr0" -#define d_space "%%sr0" -#define pa_memcpy new2_copy -#endif DECLARE_PER_CPU(struct exception_data, exception_data); -#define preserve_branch(label) do { \ - volatile int dummy = 0; \ - /* The following branch is never taken, it's just here to */ \ - /* prevent gcc from optimizing away our exception code. */ \ - if (unlikely(dummy != dummy)) \ - goto label; \ -} while (0) - #define get_user_space() (segment_eq(get_fs(), KERNEL_DS) ? 0 : mfsp(3)) #define get_kernel_space() (0) -#define MERGE(w0, sh_1, w1, sh_2) ({ \ - unsigned int _r; \ - asm volatile ( \ - "mtsar %3\n" \ - "shrpw %1, %2, %%sar, %0\n" \ - : "=r"(_r) \ - : "r"(w0), "r"(w1), "r"(sh_2) \ - ); \ - _r; \ -}) -#define THRESHOLD 16 - -#ifdef DEBUG_MEMCPY -#define DPRINTF(fmt, args...) do { printk(KERN_DEBUG "%s:%d:%s ", __FILE__, __LINE__, __func__ ); printk(KERN_DEBUG fmt, ##args ); } while (0) -#else -#define DPRINTF(fmt, args...) 
-#endif - -#define def_load_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn ",ma " #_sz "(" _s ",%1), %0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : _tt(_t), "+r"(_a) \ - : \ - : "r8") - -#define def_store_ai_insn(_insn,_sz,_tt,_s,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn ",ma %1, " #_sz "(" _s ",%0)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : "+r"(_a) \ - : _tt(_t) \ - : "r8") - -#define ldbma(_s, _a, _t, _e) def_load_ai_insn(ldbs,1,"=r",_s,_a,_t,_e) -#define stbma(_s, _t, _a, _e) def_store_ai_insn(stbs,1,"r",_s,_a,_t,_e) -#define ldwma(_s, _a, _t, _e) def_load_ai_insn(ldw,4,"=r",_s,_a,_t,_e) -#define stwma(_s, _t, _a, _e) def_store_ai_insn(stw,4,"r",_s,_a,_t,_e) -#define flddma(_s, _a, _t, _e) def_load_ai_insn(fldd,8,"=f",_s,_a,_t,_e) -#define fstdma(_s, _t, _a, _e) def_store_ai_insn(fstd,8,"f",_s,_a,_t,_e) - -#define def_load_insn(_insn,_tt,_s,_o,_a,_t,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn " " #_o "(" _s ",%1), %0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : _tt(_t) \ - : "r"(_a) \ - : "r8") - -#define def_store_insn(_insn,_tt,_s,_t,_o,_a,_e) \ - __asm__ __volatile__ ( \ - "1:\t" #_insn " %0, " #_o "(" _s ",%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b,_e) \ - : \ - : _tt(_t), "r"(_a) \ - : "r8") - -#define ldw(_s,_o,_a,_t,_e) def_load_insn(ldw,"=r",_s,_o,_a,_t,_e) -#define stw(_s,_t,_o,_a,_e) def_store_insn(stw,"r",_s,_t,_o,_a,_e) - -#ifdef CONFIG_PREFETCH -static inline void prefetch_src(const void *addr) -{ - __asm__("ldw 0(" s_space ",%0), %%r0" : : "r" (addr)); -} - -static inline void prefetch_dst(const void *addr) -{ - __asm__("ldd 0(" d_space ",%0), %%r0" : : "r" (addr)); -} -#else -#define prefetch_src(addr) do { } while(0) -#define prefetch_dst(addr) do { } while(0) -#endif - -#define PA_MEMCPY_OK 0 -#define PA_MEMCPY_LOAD_ERROR 1 -#define PA_MEMCPY_STORE_ERROR 2 - -/* Copy from a not-aligned src to an aligned dst, using shifts. Handles 4 words - * per loop. This code is derived from glibc. - */ -static noinline unsigned long copy_dstaligned(unsigned long dst, - unsigned long src, unsigned long len) -{ - /* gcc complains that a2 and a3 may be uninitialized, but actually - * they cannot be. Initialize a2/a3 to shut gcc up. - */ - register unsigned int a0, a1, a2 = 0, a3 = 0; - int sh_1, sh_2; - - /* prefetch_src((const void *)src); */ - - /* Calculate how to shift a word read at the memory operation - aligned srcp to make it aligned for copy. */ - sh_1 = 8 * (src % sizeof(unsigned int)); - sh_2 = 8 * sizeof(unsigned int) - sh_1; - - /* Make src aligned by rounding it down. 
*/ - src &= -sizeof(unsigned int); - - switch (len % 4) - { - case 2: - /* a1 = ((unsigned int *) src)[0]; - a2 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a1, cda_ldw_exc); - ldw(s_space, 4, src, a2, cda_ldw_exc); - src -= 1 * sizeof(unsigned int); - dst -= 3 * sizeof(unsigned int); - len += 2; - goto do1; - case 3: - /* a0 = ((unsigned int *) src)[0]; - a1 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a0, cda_ldw_exc); - ldw(s_space, 4, src, a1, cda_ldw_exc); - src -= 0 * sizeof(unsigned int); - dst -= 2 * sizeof(unsigned int); - len += 1; - goto do2; - case 0: - if (len == 0) - return PA_MEMCPY_OK; - /* a3 = ((unsigned int *) src)[0]; - a0 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a3, cda_ldw_exc); - ldw(s_space, 4, src, a0, cda_ldw_exc); - src -=-1 * sizeof(unsigned int); - dst -= 1 * sizeof(unsigned int); - len += 0; - goto do3; - case 1: - /* a2 = ((unsigned int *) src)[0]; - a3 = ((unsigned int *) src)[1]; */ - ldw(s_space, 0, src, a2, cda_ldw_exc); - ldw(s_space, 4, src, a3, cda_ldw_exc); - src -=-2 * sizeof(unsigned int); - dst -= 0 * sizeof(unsigned int); - len -= 1; - if (len == 0) - goto do0; - goto do4; /* No-op. */ - } - - do - { - /* prefetch_src((const void *)(src + 4 * sizeof(unsigned int))); */ -do4: - /* a0 = ((unsigned int *) src)[0]; */ - ldw(s_space, 0, src, a0, cda_ldw_exc); - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); -do3: - /* a1 = ((unsigned int *) src)[1]; */ - ldw(s_space, 4, src, a1, cda_ldw_exc); - /* ((unsigned int *) dst)[1] = MERGE (a3, sh_1, a0, sh_2); */ - stw(d_space, MERGE (a3, sh_1, a0, sh_2), 4, dst, cda_stw_exc); -do2: - /* a2 = ((unsigned int *) src)[2]; */ - ldw(s_space, 8, src, a2, cda_ldw_exc); - /* ((unsigned int *) dst)[2] = MERGE (a0, sh_1, a1, sh_2); */ - stw(d_space, MERGE (a0, sh_1, a1, sh_2), 8, dst, cda_stw_exc); -do1: - /* a3 = ((unsigned int *) src)[3]; */ - ldw(s_space, 12, src, a3, cda_ldw_exc); - /* ((unsigned int *) dst)[3] = MERGE (a1, sh_1, a2, sh_2); */ - stw(d_space, MERGE (a1, sh_1, a2, sh_2), 12, dst, cda_stw_exc); - - src += 4 * sizeof(unsigned int); - dst += 4 * sizeof(unsigned int); - len -= 4; - } - while (len != 0); - -do0: - /* ((unsigned int *) dst)[0] = MERGE (a2, sh_1, a3, sh_2); */ - stw(d_space, MERGE (a2, sh_1, a3, sh_2), 0, dst, cda_stw_exc); - - preserve_branch(handle_load_error); - preserve_branch(handle_store_error); - - return PA_MEMCPY_OK; - -handle_load_error: - __asm__ __volatile__ ("cda_ldw_exc:\n"); - return PA_MEMCPY_LOAD_ERROR; - -handle_store_error: - __asm__ __volatile__ ("cda_stw_exc:\n"); - return PA_MEMCPY_STORE_ERROR; -} - - -/* Returns PA_MEMCPY_OK, PA_MEMCPY_LOAD_ERROR or PA_MEMCPY_STORE_ERROR. - * In case of an access fault the faulty address can be read from the per_cpu - * exception data struct. */ -static noinline unsigned long pa_memcpy_internal(void *dstp, const void *srcp, - unsigned long len) -{ - register unsigned long src, dst, t1, t2, t3; - register unsigned char *pcs, *pcd; - register unsigned int *pws, *pwd; - register double *pds, *pdd; - unsigned long ret; - - src = (unsigned long)srcp; - dst = (unsigned long)dstp; - pcs = (unsigned char *)srcp; - pcd = (unsigned char *)dstp; - - /* prefetch_src((const void *)srcp); */ - - if (len < THRESHOLD) - goto byte_copy; - - /* Check alignment */ - t1 = (src ^ dst); - if (unlikely(t1 & (sizeof(double)-1))) - goto unaligned_copy; - - /* src and dst have same alignment. */ - - /* Copy bytes till we are double-aligned. 
*/ - t2 = src & (sizeof(double) - 1); - if (unlikely(t2 != 0)) { - t2 = sizeof(double) - t2; - while (t2 && len) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - len--; - stbma(d_space, t3, pcd, pmc_store_exc); - t2--; - } - } - - pds = (double *)pcs; - pdd = (double *)pcd; - -#if 0 - /* Copy 8 doubles at a time */ - while (len >= 8*sizeof(double)) { - register double r1, r2, r3, r4, r5, r6, r7, r8; - /* prefetch_src((char *)pds + L1_CACHE_BYTES); */ - flddma(s_space, pds, r1, pmc_load_exc); - flddma(s_space, pds, r2, pmc_load_exc); - flddma(s_space, pds, r3, pmc_load_exc); - flddma(s_space, pds, r4, pmc_load_exc); - fstdma(d_space, r1, pdd, pmc_store_exc); - fstdma(d_space, r2, pdd, pmc_store_exc); - fstdma(d_space, r3, pdd, pmc_store_exc); - fstdma(d_space, r4, pdd, pmc_store_exc); - -#if 0 - if (L1_CACHE_BYTES <= 32) - prefetch_src((char *)pds + L1_CACHE_BYTES); -#endif - flddma(s_space, pds, r5, pmc_load_exc); - flddma(s_space, pds, r6, pmc_load_exc); - flddma(s_space, pds, r7, pmc_load_exc); - flddma(s_space, pds, r8, pmc_load_exc); - fstdma(d_space, r5, pdd, pmc_store_exc); - fstdma(d_space, r6, pdd, pmc_store_exc); - fstdma(d_space, r7, pdd, pmc_store_exc); - fstdma(d_space, r8, pdd, pmc_store_exc); - len -= 8*sizeof(double); - } -#endif - - pws = (unsigned int *)pds; - pwd = (unsigned int *)pdd; - -word_copy: - while (len >= 8*sizeof(unsigned int)) { - register unsigned int r1,r2,r3,r4,r5,r6,r7,r8; - /* prefetch_src((char *)pws + L1_CACHE_BYTES); */ - ldwma(s_space, pws, r1, pmc_load_exc); - ldwma(s_space, pws, r2, pmc_load_exc); - ldwma(s_space, pws, r3, pmc_load_exc); - ldwma(s_space, pws, r4, pmc_load_exc); - stwma(d_space, r1, pwd, pmc_store_exc); - stwma(d_space, r2, pwd, pmc_store_exc); - stwma(d_space, r3, pwd, pmc_store_exc); - stwma(d_space, r4, pwd, pmc_store_exc); - - ldwma(s_space, pws, r5, pmc_load_exc); - ldwma(s_space, pws, r6, pmc_load_exc); - ldwma(s_space, pws, r7, pmc_load_exc); - ldwma(s_space, pws, r8, pmc_load_exc); - stwma(d_space, r5, pwd, pmc_store_exc); - stwma(d_space, r6, pwd, pmc_store_exc); - stwma(d_space, r7, pwd, pmc_store_exc); - stwma(d_space, r8, pwd, pmc_store_exc); - len -= 8*sizeof(unsigned int); - } - - while (len >= 4*sizeof(unsigned int)) { - register unsigned int r1,r2,r3,r4; - ldwma(s_space, pws, r1, pmc_load_exc); - ldwma(s_space, pws, r2, pmc_load_exc); - ldwma(s_space, pws, r3, pmc_load_exc); - ldwma(s_space, pws, r4, pmc_load_exc); - stwma(d_space, r1, pwd, pmc_store_exc); - stwma(d_space, r2, pwd, pmc_store_exc); - stwma(d_space, r3, pwd, pmc_store_exc); - stwma(d_space, r4, pwd, pmc_store_exc); - len -= 4*sizeof(unsigned int); - } - - pcs = (unsigned char *)pws; - pcd = (unsigned char *)pwd; - -byte_copy: - while (len) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - } - - return PA_MEMCPY_OK; - -unaligned_copy: - /* possibly we are aligned on a word, but not on a double... */ - if (likely((t1 & (sizeof(unsigned int)-1)) == 0)) { - t2 = src & (sizeof(unsigned int) - 1); - - if (unlikely(t2 != 0)) { - t2 = sizeof(unsigned int) - t2; - while (t2) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - t2--; - } - } - - pws = (unsigned int *)pcs; - pwd = (unsigned int *)pcd; - goto word_copy; - } - - /* Align the destination. 
*/ - if (unlikely((dst & (sizeof(unsigned int) - 1)) != 0)) { - t2 = sizeof(unsigned int) - (dst & (sizeof(unsigned int) - 1)); - while (t2) { - /* *pcd++ = *pcs++; */ - ldbma(s_space, pcs, t3, pmc_load_exc); - stbma(d_space, t3, pcd, pmc_store_exc); - len--; - t2--; - } - dst = (unsigned long)pcd; - src = (unsigned long)pcs; - } - - ret = copy_dstaligned(dst, src, len / sizeof(unsigned int)); - if (ret) - return ret; - - pcs += (len & -sizeof(unsigned int)); - pcd += (len & -sizeof(unsigned int)); - len %= sizeof(unsigned int); - - preserve_branch(handle_load_error); - preserve_branch(handle_store_error); - - goto byte_copy; - -handle_load_error: - __asm__ __volatile__ ("pmc_load_exc:\n"); - return PA_MEMCPY_LOAD_ERROR; - -handle_store_error: - __asm__ __volatile__ ("pmc_store_exc:\n"); - return PA_MEMCPY_STORE_ERROR; -} - - /* Returns 0 for success, otherwise, returns number of bytes not transferred. */ -static unsigned long pa_memcpy(void *dstp, const void *srcp, unsigned long len) -{ - unsigned long ret, fault_addr, reference; - struct exception_data *d; - - ret = pa_memcpy_internal(dstp, srcp, len); - if (likely(ret == PA_MEMCPY_OK)) - return 0; - - /* if a load or store fault occured we can get the faulty addr */ - d = this_cpu_ptr(&exception_data); - fault_addr = d->fault_addr; - - /* error in load or store? */ - if (ret == PA_MEMCPY_LOAD_ERROR) - reference = (unsigned long) srcp; - else - reference = (unsigned long) dstp; +extern unsigned long pa_memcpy(void *dst, const void *src, + unsigned long len); - DPRINTF("pa_memcpy: fault type = %lu, len=%lu fault_addr=%lu ref=%lu\n", - ret, len, fault_addr, reference); - - if (fault_addr >= reference) - return len - (fault_addr - reference); - else - return len; -} - -#ifdef __KERNEL__ unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long len) { @@ -537,5 +84,3 @@ long probe_kernel_read(void *dst, const void *src, size_t size) return __probe_kernel_read(dst, src, size); } - -#endif -- cgit v1.1 From d19f5e41b344a057bb2450024a807476f30978d2 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Sat, 25 Mar 2017 11:59:15 +0100 Subject: parisc: Clean up fixup routines for get_user()/put_user() Al Viro noticed that userspace accesses via get_user()/put_user() can be simplified a lot with regard to usage of the exception handling. This patch implements a fixup routine for get_user() and put_user() such that the exception handler will automatically load -EFAULT into the register %r8 (the error value) in case of a fault on a userspace access. Additionally the fixup routine will zero the target register on fault in case of a get_user() call. The target register is extracted from the faulting assembly instruction. This patch brings a few benefits over the old implementation: 1. Exception handling gets much cleaner, easier and smaller in size. 2. Helper functions like fixup_get_user_skip_1 (all of fixup.S) can be dropped. 3. No need to hardcode %r9 as target register for get_user() any longer. This helps the compiler register allocator and thus creates fewer assembler statements. 4. No dependency on the exception_data contents any longer. 5. Nested faults will be handled cleanly.
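To illustrate the caller-visible semantics, a minimal sketch (not part of the patch; "uptr" and "val" are made-up names):

	int val;
	long err = get_user(val, uptr);
	/* success: err == 0 and val holds the user word */
	/* fault: the fixup loads -EFAULT into %r8, so err == -EFAULT,
	 * and the target register is zeroed, so val == 0 */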
Reported-by: Al Viro Cc: # v4.9+ Signed-off-by: Helge Deller --- arch/parisc/include/asm/uaccess.h | 59 +++++++++++++---------- arch/parisc/kernel/parisc_ksyms.c | 10 ---- arch/parisc/lib/Makefile | 2 +- arch/parisc/lib/fixup.S | 98 --------------------------------------- arch/parisc/mm/fault.c | 17 +++++++ 5 files changed, 52 insertions(+), 134 deletions(-) delete mode 100644 arch/parisc/lib/fixup.S (limited to 'arch') diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index edfbf9d..8442727 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -65,6 +65,15 @@ struct exception_table_entry { ".previous\n" /* + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() creates a special exception table entry + * (with lowest bit set) for which the fault handler in fixup_exception() will + * load -EFAULT into %r8 for a read or write fault, and zeroes the target + * register in case of a read fault in get_user(). + */ +#define ASM_EXCEPTIONTABLE_ENTRY_EFAULT( fault_addr, except_addr )\ + ASM_EXCEPTIONTABLE_ENTRY( fault_addr, except_addr + 1) + +/* * The page fault handler stores, in a per-cpu area, the following information * if a fixup routine is available. */ @@ -91,7 +100,7 @@ struct exception_data { #define __get_user(x, ptr) \ ({ \ register long __gu_err __asm__ ("r8") = 0; \ - register long __gu_val __asm__ ("r9") = 0; \ + register long __gu_val; \ \ load_sr2(); \ switch (sizeof(*(ptr))) { \ @@ -107,22 +116,23 @@ struct exception_data { }) #define __get_user_asm(ldx, ptr) \ - __asm__("\n1:\t" ldx "\t0(%%sr2,%2),%0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_1)\ + __asm__("1: " ldx " 0(%%sr2,%2),%0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err) \ - : "r1"); + : "r"(ptr), "1"(__gu_err)); #if !defined(CONFIG_64BIT) #define __get_user_asm64(ptr) \ - __asm__("\n1:\tldw 0(%%sr2,%2),%0" \ - "\n2:\tldw 4(%%sr2,%2),%R0\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_get_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_get_user_skip_1)\ + __asm__(" copy %%r0,%R0\n" \ + "1: ldw 0(%%sr2,%2),%0\n" \ + "2: ldw 4(%%sr2,%2),%R0\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "=r"(__gu_val), "=r"(__gu_err) \ - : "r"(ptr), "1"(__gu_err) \ - : "r1"); + : "r"(ptr), "1"(__gu_err)); #endif /* !defined(CONFIG_64BIT) */ @@ -148,32 +158,31 @@ struct exception_data { * The "__put_user/kernel_asm()" macros tell gcc they read from memory * instead of writing. This is because they do not write to any memory * gcc knows about, so there are no aliasing issues. These macros must - * also be aware that "fixup_put_user_skip_[12]" are executed in the - * context of the fault, and any registers used there must be listed - * as clobbers. In this case only "r1" is used by the current routines. - * r8/r9 are already listed as err/val. + * also be aware that fixups are executed in the context of the fault, + * and any registers used there must be listed as clobbers. + * r8 is already listed as err. 
*/ #define __put_user_asm(stx, x, ptr) \ __asm__ __volatile__ ( \ - "\n1:\t" stx "\t%2,0(%%sr2,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_1)\ + "1: " stx " %2,0(%%sr2,%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(x), "0"(__pu_err) \ - : "r1") + : "r"(ptr), "r"(x), "0"(__pu_err)) #if !defined(CONFIG_64BIT) #define __put_user_asm64(__val, ptr) do { \ __asm__ __volatile__ ( \ - "\n1:\tstw %2,0(%%sr2,%1)" \ - "\n2:\tstw %R2,4(%%sr2,%1)\n\t" \ - ASM_EXCEPTIONTABLE_ENTRY(1b, fixup_put_user_skip_2)\ - ASM_EXCEPTIONTABLE_ENTRY(2b, fixup_put_user_skip_1)\ + "1: stw %2,0(%%sr2,%1)\n" \ + "2: stw %R2,4(%%sr2,%1)\n" \ + "9:\n" \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(1b, 9b) \ + ASM_EXCEPTIONTABLE_ENTRY_EFAULT(2b, 9b) \ : "=r"(__pu_err) \ - : "r"(ptr), "r"(__val), "0"(__pu_err) \ - : "r1"); \ + : "r"(ptr), "r"(__val), "0"(__pu_err)); \ } while (0) #endif /* !defined(CONFIG_64BIT) */ diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c index 7484b3d..c6d6272 100644 --- a/arch/parisc/kernel/parisc_ksyms.c +++ b/arch/parisc/kernel/parisc_ksyms.c @@ -47,16 +47,6 @@ EXPORT_SYMBOL(__cmpxchg_u64); EXPORT_SYMBOL(lclear_user); EXPORT_SYMBOL(lstrnlen_user); -/* Global fixups - defined as int to avoid creation of function pointers */ -extern int fixup_get_user_skip_1; -extern int fixup_get_user_skip_2; -extern int fixup_put_user_skip_1; -extern int fixup_put_user_skip_2; -EXPORT_SYMBOL(fixup_get_user_skip_1); -EXPORT_SYMBOL(fixup_get_user_skip_2); -EXPORT_SYMBOL(fixup_put_user_skip_1); -EXPORT_SYMBOL(fixup_put_user_skip_2); - #ifndef CONFIG_64BIT /* Needed so insmod can set dp value */ extern int $global$; diff --git a/arch/parisc/lib/Makefile b/arch/parisc/lib/Makefile index 8fa92b8..f2dac4d 100644 --- a/arch/parisc/lib/Makefile +++ b/arch/parisc/lib/Makefile @@ -2,7 +2,7 @@ # Makefile for parisc-specific library files # -lib-y := lusercopy.o bitops.o checksum.o io.o memset.o fixup.o memcpy.o \ +lib-y := lusercopy.o bitops.o checksum.o io.o memset.o memcpy.o \ ucmpdi2.o delay.o obj-y := iomap.o diff --git a/arch/parisc/lib/fixup.S b/arch/parisc/lib/fixup.S deleted file mode 100644 index a5b72f2..0000000 --- a/arch/parisc/lib/fixup.S +++ /dev/null @@ -1,98 +0,0 @@ -/* - * Linux/PA-RISC Project (http://www.parisc-linux.org/) - * - * Copyright (C) 2004 Randolph Chung - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2, or (at your option) - * any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. - * - * Fixup routines for kernel exception handling. 
- */ -#include -#include -#include -#include - -#ifdef CONFIG_SMP - .macro get_fault_ip t1 t2 - loadgp - addil LT%__per_cpu_offset,%r27 - LDREG RT%__per_cpu_offset(%r1),\t1 - /* t2 = smp_processor_id() */ - mfctl 30,\t2 - ldw TI_CPU(\t2),\t2 -#ifdef CONFIG_64BIT - extrd,u \t2,63,32,\t2 -#endif - /* t2 = &__per_cpu_offset[smp_processor_id()]; */ - LDREGX \t2(\t1),\t2 - addil LT%exception_data,%r27 - LDREG RT%exception_data(%r1),\t1 - /* t1 = this_cpu_ptr(&exception_data) */ - add,l \t1,\t2,\t1 - /* %r27 = t1->fault_gp - restore gp */ - LDREG EXCDATA_GP(\t1), %r27 - /* t1 = t1->fault_ip */ - LDREG EXCDATA_IP(\t1), \t1 - .endm -#else - .macro get_fault_ip t1 t2 - loadgp - /* t1 = this_cpu_ptr(&exception_data) */ - addil LT%exception_data,%r27 - LDREG RT%exception_data(%r1),\t2 - /* %r27 = t2->fault_gp - restore gp */ - LDREG EXCDATA_GP(\t2), %r27 - /* t1 = t2->fault_ip */ - LDREG EXCDATA_IP(\t2), \t1 - .endm -#endif - - .level LEVEL - - .text - .section .fixup, "ax" - - /* get_user() fixups, store -EFAULT in r8, and 0 in r9 */ -ENTRY_CFI(fixup_get_user_skip_1) - get_fault_ip %r1,%r8 - ldo 4(%r1), %r1 - ldi -EFAULT, %r8 - bv %r0(%r1) - copy %r0, %r9 -ENDPROC_CFI(fixup_get_user_skip_1) - -ENTRY_CFI(fixup_get_user_skip_2) - get_fault_ip %r1,%r8 - ldo 8(%r1), %r1 - ldi -EFAULT, %r8 - bv %r0(%r1) - copy %r0, %r9 -ENDPROC_CFI(fixup_get_user_skip_2) - - /* put_user() fixups, store -EFAULT in r8 */ -ENTRY_CFI(fixup_put_user_skip_1) - get_fault_ip %r1,%r8 - ldo 4(%r1), %r1 - bv %r0(%r1) - ldi -EFAULT, %r8 -ENDPROC_CFI(fixup_put_user_skip_1) - -ENTRY_CFI(fixup_put_user_skip_2) - get_fault_ip %r1,%r8 - ldo 8(%r1), %r1 - bv %r0(%r1) - ldi -EFAULT, %r8 -ENDPROC_CFI(fixup_put_user_skip_2) - diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c index deab89a..32ec221 100644 --- a/arch/parisc/mm/fault.c +++ b/arch/parisc/mm/fault.c @@ -150,6 +150,23 @@ int fixup_exception(struct pt_regs *regs) d->fault_space = regs->isr; d->fault_addr = regs->ior; + /* + * Fix up get_user() and put_user(). + * ASM_EXCEPTIONTABLE_ENTRY_EFAULT() sets the least-significant + * bit in the relative address of the fixup routine to indicate + * that %r8 should be loaded with -EFAULT to report a userspace + * access error. + */ + if (fix->fixup & 1) { + regs->gr[8] = -EFAULT; + + /* zero target register for get_user() */ + if (parisc_acctyp(0, regs->iir) == VM_READ) { + int treg = regs->iir & 0x1f; + regs->gr[treg] = 0; + } + } + regs->iaoq[0] = (unsigned long)&fix->fixup + fix->fixup; regs->iaoq[0] &= ~3; /* -- cgit v1.1 From 476e75a44b56038bee9207242d4bc718f6b4de06 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 29 Mar 2017 08:25:30 +0200 Subject: parisc: Avoid stalled CPU warnings after system shutdown Commit 73580dac7618 ("parisc: Fix system shutdown halt") introduced an endless loop for systems which don't provide a software power off function. But the soft lockup detector will detect this and report stalled CPUs after some time. Avoid those unwanted warnings by disabling the soft lockup detector. 
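The resulting shutdown path then looks roughly like this (sketch only, simplified from the hunk below):

	void machine_power_off(void)
	{
		/* ... print the shutdown message ... */
		rcu_sysrq_start();	/* prevent soft lockup/stalled CPU
					 * messages for the endless loop */
		for (;;)
			;		/* wait for the operator to cut power */
	}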
Fixes: 73580dac7618 ("parisc: Fix system shutdown halt") Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # 4.9+ --- arch/parisc/kernel/process.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'arch') diff --git a/arch/parisc/kernel/process.c b/arch/parisc/kernel/process.c index b76f503..4516a5b 100644 --- a/arch/parisc/kernel/process.c +++ b/arch/parisc/kernel/process.c @@ -143,6 +143,8 @@ void machine_power_off(void) printk(KERN_EMERG "System shut down completed.\n" "Please power this system off now."); + /* prevent soft lockup/stalled CPU messages for endless loop. */ + rcu_sysrq_start(); for (;;); } -- cgit v1.1 From e13909a4acc4b68b30527d8442d4267658963757 Mon Sep 17 00:00:00 2001 From: Vineet Gupta Date: Wed, 29 Mar 2017 11:53:33 -0700 Subject: ARC: uaccess: enable INLINE_COPY_{TO,FROM}_USER ... ... and switch to generic out of line version in lib/usercopy.c Signed-off-by: Vineet Gupta Signed-off-by: Al Viro --- arch/arc/include/asm/uaccess.h | 16 ++++++---------- arch/arc/mm/extable.c | 14 -------------- 2 files changed, 6 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/arc/include/asm/uaccess.h b/arch/arc/include/asm/uaccess.h index c4d26e8..f35974e 100644 --- a/arch/arc/include/asm/uaccess.h +++ b/arch/arc/include/asm/uaccess.h @@ -168,7 +168,7 @@ static inline unsigned long -__arc_copy_from_user(void *to, const void __user *from, unsigned long n) +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { long res = 0; char val; @@ -395,7 +395,7 @@ __arc_copy_from_user(void *to, const void __user *from, unsigned long n) } static inline unsigned long -__arc_copy_to_user(void __user *to, const void *from, unsigned long n) +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { long res = 0; char val; @@ -721,24 +721,20 @@ static inline long __arc_strnlen_user(const char __user *s, long n) } #ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE -#define raw_copy_from_user __arc_copy_from_user -#define raw_copy_to_user __arc_copy_to_user + +#define INLINE_COPY_TO_USER +#define INLINE_COPY_FROM_USER + #define __clear_user(d, n) __arc_clear_user(d, n) #define __strncpy_from_user(d, s, n) __arc_strncpy_from_user(d, s, n) #define __strnlen_user(s, n) __arc_strnlen_user(s, n) #else -extern long arc_copy_from_user_noinline(void *to, const void __user * from, - unsigned long n); -extern long arc_copy_to_user_noinline(void __user *to, const void *from, - unsigned long n); extern unsigned long arc_clear_user_noinline(void __user *to, unsigned long n); extern long arc_strncpy_from_user_noinline (char *dst, const char __user *src, long count); extern long arc_strnlen_user_noinline(const char __user *src, long n); -#define raw_copy_from_user arc_copy_from_user_noinline -#define raw_copy_to_user arc_copy_to_user_noinline #define __clear_user(d, n) arc_clear_user_noinline(d, n) #define __strncpy_from_user(d, s, n) arc_strncpy_from_user_noinline(d, s, n) #define __strnlen_user(s, n) arc_strnlen_user_noinline(s, n) diff --git a/arch/arc/mm/extable.c b/arch/arc/mm/extable.c index c86906b..72125a3 100644 --- a/arch/arc/mm/extable.c +++ b/arch/arc/mm/extable.c @@ -28,20 +28,6 @@ int fixup_exception(struct pt_regs *regs) #ifdef CONFIG_CC_OPTIMIZE_FOR_SIZE -long arc_copy_from_user_noinline(void *to, const void __user *from, - unsigned long n) -{ - return __arc_copy_from_user(to, from, n); -} -EXPORT_SYMBOL(arc_copy_from_user_noinline); - -long arc_copy_to_user_noinline(void __user *to, const void *from, - unsigned long n) -{ - return __arc_copy_to_user(to, from, 
n); -} -EXPORT_SYMBOL(arc_copy_to_user_noinline); - unsigned long arc_clear_user_noinline(void __user *to, unsigned long n) { -- cgit v1.1 From 37096003c8a7de5d24c6cf86234004c635fdd617 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 15:06:24 -0400 Subject: s390: get rid of zeroing, switch to RAW_COPY_USER [folded a fix from Martin] Signed-off-by: Al Viro --- arch/s390/Kconfig | 1 + arch/s390/include/asm/uaccess.h | 120 +++------------------------------------- arch/s390/lib/uaccess.c | 68 ++++++++--------------- 3 files changed, 33 insertions(+), 156 deletions(-) (limited to 'arch') diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index a2dcef0..0724dbf 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig @@ -178,6 +178,7 @@ config S390 select ARCH_HAS_SCALED_CPUTIME select VIRT_TO_BUS select HAVE_NMI + select ARCH_HAS_RAW_COPY_USER config SCHED_OMIT_FRAME_POINTER diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 29f5bf2..72d69b9 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h @@ -60,47 +60,14 @@ static inline int __range_ok(unsigned long addr, unsigned long size) #define access_ok(type, addr, size) __access_ok(addr, size) -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -unsigned long __must_check __copy_from_user(void *to, const void __user *from, - unsigned long n); +unsigned long __must_check +raw_copy_from_user(void *to, const void __user *from, unsigned long n); -/** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -unsigned long __must_check __copy_to_user(void __user *to, const void *from, - unsigned long n); +unsigned long __must_check +raw_copy_to_user(void __user *to, const void *from, unsigned long n); -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #ifdef CONFIG_HAVE_MARCH_Z10_FEATURES @@ -189,13 +156,13 @@ static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long s static inline int __put_user_fn(void *x, void __user *ptr, unsigned long size) { - size = __copy_to_user(ptr, x, size); + size = raw_copy_to_user(ptr, x, size); return size ? -EFAULT : 0; } static inline int __get_user_fn(void *x, const void __user *ptr, unsigned long size) { - size = __copy_from_user(x, ptr, size); + size = raw_copy_from_user(x, ptr, size); return size ? 
-EFAULT : 0; } @@ -285,77 +252,8 @@ int __get_user_bad(void) __attribute__((noreturn)); #define __put_user_unaligned __put_user #define __get_user_unaligned __get_user -extern void __compiletime_error("usercopy buffer size is too small") -__bad_copy_user(void); - -static inline void copy_user_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -static inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - might_fault(); - return __copy_to_user(to, from, n); -} - -/** - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -static inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned int sz = __compiletime_object_size(to); - - might_fault(); - if (unlikely(sz != -1 && sz < n)) { - if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - return n; - } - return __copy_from_user(to, from, n); -} - unsigned long __must_check -__copy_in_user(void __user *to, const void __user *from, unsigned long n); - -static inline unsigned long __must_check -copy_in_user(void __user *to, const void __user *from, unsigned long n) -{ - might_fault(); - return __copy_in_user(to, from, n); -} +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n); /* * Copy a null terminated string from userspace. diff --git a/arch/s390/lib/uaccess.c b/arch/s390/lib/uaccess.c index f481fcd..1e5bb2b 100644 --- a/arch/s390/lib/uaccess.c +++ b/arch/s390/lib/uaccess.c @@ -26,7 +26,7 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr tmp1 = -4096UL; asm volatile( "0: .insn ss,0xc80000000000,0(%0,%2),0(%1),0\n" - "9: jz 7f\n" + "6: jz 4f\n" "1: algr %0,%3\n" " slgr %1,%3\n" " slgr %2,%3\n" @@ -35,23 +35,13 @@ static inline unsigned long copy_from_user_mvcos(void *x, const void __user *ptr " nr %4,%3\n" /* %4 = (ptr + 4095) & -4096 */ " slgr %4,%1\n" " clgr %0,%4\n" /* copy crosses next page boundary? 
*/ - " jnh 4f\n" + " jnh 5f\n" "3: .insn ss,0xc80000000000,0(%4,%2),0(%1),0\n" - "10:slgr %0,%4\n" - " algr %2,%4\n" - "4: lghi %4,-1\n" - " algr %4,%0\n" /* copy remaining size, subtract 1 */ - " bras %3,6f\n" /* memset loop */ - " xc 0(1,%2),0(%2)\n" - "5: xc 0(256,%2),0(%2)\n" - " la %2,256(%2)\n" - "6: aghi %4,-256\n" - " jnm 5b\n" - " ex %4,0(%3)\n" - " j 8f\n" - "7: slgr %0,%0\n" - "8:\n" - EX_TABLE(0b,2b) EX_TABLE(3b,4b) EX_TABLE(9b,2b) EX_TABLE(10b,4b) + "7: slgr %0,%4\n" + " j 5f\n" + "4: slgr %0,%0\n" + "5:\n" + EX_TABLE(0b,2b) EX_TABLE(3b,5b) EX_TABLE(6b,2b) EX_TABLE(7b,5b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : "d" (reg0) : "cc", "memory"); return size; @@ -67,49 +57,38 @@ static inline unsigned long copy_from_user_mvcp(void *x, const void __user *ptr, asm volatile( " sacf 0\n" "0: mvcp 0(%0,%2),0(%1),%3\n" - "10:jz 8f\n" + "7: jz 5f\n" "1: algr %0,%3\n" " la %1,256(%1)\n" " la %2,256(%2)\n" "2: mvcp 0(%0,%2),0(%1),%3\n" - "11:jnz 1b\n" - " j 8f\n" + "8: jnz 1b\n" + " j 5f\n" "3: la %4,255(%1)\n" /* %4 = ptr + 255 */ " lghi %3,-4096\n" " nr %4,%3\n" /* %4 = (ptr + 255) & -4096 */ " slgr %4,%1\n" " clgr %0,%4\n" /* copy crosses next page boundary? */ - " jnh 5f\n" + " jnh 6f\n" "4: mvcp 0(%4,%2),0(%1),%3\n" - "12:slgr %0,%4\n" - " algr %2,%4\n" - "5: lghi %4,-1\n" - " algr %4,%0\n" /* copy remaining size, subtract 1 */ - " bras %3,7f\n" /* memset loop */ - " xc 0(1,%2),0(%2)\n" - "6: xc 0(256,%2),0(%2)\n" - " la %2,256(%2)\n" - "7: aghi %4,-256\n" - " jnm 6b\n" - " ex %4,0(%3)\n" - " j 9f\n" - "8: slgr %0,%0\n" - "9: sacf 768\n" - EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,5b) - EX_TABLE(10b,3b) EX_TABLE(11b,3b) EX_TABLE(12b,5b) + "9: slgr %0,%4\n" + " j 6f\n" + "5: slgr %0,%0\n" + "6: sacf 768\n" + EX_TABLE(0b,3b) EX_TABLE(2b,3b) EX_TABLE(4b,6b) + EX_TABLE(7b,3b) EX_TABLE(8b,3b) EX_TABLE(9b,6b) : "+a" (size), "+a" (ptr), "+a" (x), "+a" (tmp1), "=a" (tmp2) : : "cc", "memory"); return size; } -unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) +unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - check_object_size(to, n, false); if (static_branch_likely(&have_mvcos)) return copy_from_user_mvcos(to, from, n); return copy_from_user_mvcp(to, from, n); } -EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(raw_copy_from_user); static inline unsigned long copy_to_user_mvcos(void __user *ptr, const void *x, unsigned long size) @@ -176,14 +155,13 @@ static inline unsigned long copy_to_user_mvcs(void __user *ptr, const void *x, return size; } -unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) +unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - check_object_size(from, n, true); if (static_branch_likely(&have_mvcos)) return copy_to_user_mvcos(to, from, n); return copy_to_user_mvcs(to, from, n); } -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(raw_copy_to_user); static inline unsigned long copy_in_user_mvcos(void __user *to, const void __user *from, unsigned long size) @@ -240,13 +218,13 @@ static inline unsigned long copy_in_user_mvc(void __user *to, const void __user return size; } -unsigned long __copy_in_user(void __user *to, const void __user *from, unsigned long n) +unsigned long raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) { if (static_branch_likely(&have_mvcos)) return copy_in_user_mvcos(to, from, n); return copy_in_user_mvc(to, from, n); } -EXPORT_SYMBOL(__copy_in_user); 
+EXPORT_SYMBOL(raw_copy_in_user); static inline unsigned long clear_user_mvcos(void __user *to, unsigned long size) { -- cgit v1.1 From f64fd180ec2c0bcca2a141314db3b5d3b6bfef2f Mon Sep 17 00:00:00 2001 From: Al Viro Date: Fri, 24 Mar 2017 23:11:26 -0400 Subject: parisc: switch to RAW_COPY_USER ... and remove dead declarations, while we are at it Signed-off-by: Al Viro --- arch/parisc/Kconfig | 1 + arch/parisc/include/asm/uaccess.h | 64 +++++---------------------------------- arch/parisc/lib/memcpy.c | 14 ++++----- 3 files changed, 16 insertions(+), 63 deletions(-) (limited to 'arch') diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index ad294b3..15b7b27 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig @@ -41,6 +41,7 @@ config PARISC select GENERIC_CLOCKEVENTS select ARCH_NO_COHERENT_DMA_MMAP select CPU_NO_EFFICIENT_FFS + select ARCH_HAS_RAW_COPY_USER help The PA-RISC microprocessor is designed by Hewlett-Packard and used diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h index cbb0aac..852322f 100644 --- a/arch/parisc/include/asm/uaccess.h +++ b/arch/parisc/include/asm/uaccess.h @@ -187,9 +187,6 @@ struct exception_data { * Complex access routines -- external declarations */ -extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); -extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); -extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); extern long strncpy_from_user(char *, const char __user *, long); extern unsigned lclear_user(void __user *, unsigned long); extern long lstrnlen_user(const char __user *, long); @@ -203,59 +200,14 @@ extern long lstrnlen_user(const char __user *, long); #define clear_user lclear_user #define __clear_user lclear_user -unsigned long __must_check __copy_to_user(void __user *dst, const void *src, - unsigned long len); -unsigned long __must_check __copy_from_user(void *dst, const void __user *src, - unsigned long len); -unsigned long copy_in_user(void __user *dst, const void __user *src, - unsigned long len); -#define __copy_in_user copy_in_user -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -extern void __compiletime_error("usercopy buffer size is too small") -__bad_copy_user(void); - -static inline void copy_user_overflow(int size, unsigned long count) -{ - WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count); -} - -static __always_inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - int sz = __compiletime_object_size(to); - unsigned long ret = n; - - if (likely(sz < 0 || sz >= n)) { - check_object_size(to, n, false); - ret = __copy_from_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - - if (unlikely(ret)) - memset(to + (n - ret), 0, ret); - - return ret; -} - -static __always_inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long n) -{ - int sz = __compiletime_object_size(from); - - if (likely(sz < 0 || sz >= n)) { - check_object_size(from, n, true); - n = __copy_to_user(to, from, n); - } else if (!__builtin_constant_p(n)) - copy_user_overflow(sz, n); - else - __bad_copy_user(); - - return n; -} +unsigned long __must_check raw_copy_to_user(void __user *dst, const void *src, + unsigned long len); +unsigned long __must_check raw_copy_from_user(void *dst, const void __user *src, + unsigned long len); 
+unsigned long __must_check raw_copy_in_user(void __user *dst, const void __user *src, + unsigned long len); +#define INLINE_COPY_TO_USER +#define INLINE_COPY_FROM_USER struct pt_regs; int fixup_exception(struct pt_regs *regs); diff --git a/arch/parisc/lib/memcpy.c b/arch/parisc/lib/memcpy.c index e88e4a5..99115cd 100644 --- a/arch/parisc/lib/memcpy.c +++ b/arch/parisc/lib/memcpy.c @@ -36,25 +36,25 @@ DECLARE_PER_CPU(struct exception_data, exception_data); extern unsigned long pa_memcpy(void *dst, const void *src, unsigned long len); -unsigned long __copy_to_user(void __user *dst, const void *src, - unsigned long len) +unsigned long raw_copy_to_user(void __user *dst, const void *src, + unsigned long len) { mtsp(get_kernel_space(), 1); mtsp(get_user_space(), 2); return pa_memcpy((void __force *)dst, src, len); } -EXPORT_SYMBOL(__copy_to_user); +EXPORT_SYMBOL(raw_copy_to_user); -unsigned long __copy_from_user(void *dst, const void __user *src, +unsigned long raw_copy_from_user(void *dst, const void __user *src, unsigned long len) { mtsp(get_user_space(), 1); mtsp(get_kernel_space(), 2); return pa_memcpy(dst, (void __force *)src, len); } -EXPORT_SYMBOL(__copy_from_user); +EXPORT_SYMBOL(raw_copy_from_user); -unsigned long copy_in_user(void __user *dst, const void __user *src, unsigned long len) +unsigned long raw_copy_in_user(void __user *dst, const void __user *src, unsigned long len) { mtsp(get_user_space(), 1); mtsp(get_user_space(), 2); @@ -70,7 +70,7 @@ void * memcpy(void * dst,const void *src, size_t count) return dst; } -EXPORT_SYMBOL(copy_in_user); +EXPORT_SYMBOL(raw_copy_in_user); EXPORT_SYMBOL(memcpy); long probe_kernel_read(void *dst, const void *src, size_t size) -- cgit v1.1 From 31af2f36d50e3b9b2fb7f17aa430c11c91f946c4 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 17:04:45 -0400 Subject: sparc: switch to RAW_COPY_USER ... and drop zeroing in sparc32. 
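Dropping the zeroing from the arch code is safe because the generic wrapper now takes care of it; roughly (a sketch of the generic helper, not part of this patch):

	static inline unsigned long
	copy_from_user(void *to, const void __user *from, unsigned long n)
	{
		unsigned long res = n;

		might_fault();
		if (likely(access_ok(VERIFY_READ, from, n)))
			res = raw_copy_from_user(to, from, n);
		if (unlikely(res))
			memset(to + (n - res), 0, res);	/* zero uncopied tail */
		return res;
	}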
Signed-off-by: Al Viro --- arch/sparc/Kconfig | 1 + arch/sparc/include/asm/uaccess_32.h | 29 ++++------------------------- arch/sparc/include/asm/uaccess_64.h | 33 +++++---------------------------- arch/sparc/lib/GENcopy_from_user.S | 2 +- arch/sparc/lib/GENcopy_to_user.S | 2 +- arch/sparc/lib/GENpatch.S | 4 ++-- arch/sparc/lib/NG2copy_from_user.S | 2 +- arch/sparc/lib/NG2copy_to_user.S | 2 +- arch/sparc/lib/NG2patch.S | 4 ++-- arch/sparc/lib/NG4copy_from_user.S | 2 +- arch/sparc/lib/NG4copy_to_user.S | 2 +- arch/sparc/lib/NG4patch.S | 4 ++-- arch/sparc/lib/NGcopy_from_user.S | 2 +- arch/sparc/lib/NGcopy_to_user.S | 2 +- arch/sparc/lib/NGpatch.S | 4 ++-- arch/sparc/lib/U1copy_from_user.S | 4 ++-- arch/sparc/lib/U1copy_to_user.S | 4 ++-- arch/sparc/lib/U3copy_to_user.S | 2 +- arch/sparc/lib/U3patch.S | 4 ++-- arch/sparc/lib/copy_in_user.S | 6 +++--- arch/sparc/lib/copy_user.S | 16 +--------------- 21 files changed, 37 insertions(+), 94 deletions(-) (limited to 'arch') diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 68ac5c7..6835304 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -45,6 +45,7 @@ config SPARC select HAVE_ARCH_HARDENED_USERCOPY select PROVE_LOCKING_SMALL if PROVE_LOCKING select ARCH_WANT_RELAX_ORDER + select ARCH_HAS_RAW_COPY_USER config SPARC32 def_bool !64BIT diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h index 9391084..12ebee2 100644 --- a/arch/sparc/include/asm/uaccess_32.h +++ b/arch/sparc/include/asm/uaccess_32.h @@ -235,39 +235,18 @@ int __get_user_bad(void); unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size); -static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - if (n && __access_ok((unsigned long) to, n)) { - check_object_size(from, n, true); - return __copy_user(to, (__force void __user *) from, n); - } else - return n; -} - -static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) -{ - check_object_size(from, n, true); return __copy_user(to, (__force void __user *) from, n); } -static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (n && __access_ok((unsigned long) from, n)) { - check_object_size(to, n, false); - return __copy_user((__force void __user *) to, from, n); - } else { - memset(to, 0, n); - return n; - } -} - -static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) +static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { return __copy_user((__force void __user *) to, from, n); } -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER static inline unsigned long __clear_user(void __user *addr, unsigned long size) { diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h index 7afb4f6..6096d67 100644 --- a/arch/sparc/include/asm/uaccess_64.h +++ b/arch/sparc/include/asm/uaccess_64.h @@ -176,39 +176,19 @@ __asm__ __volatile__( \ int __get_user_bad(void); -unsigned long __must_check ___copy_from_user(void *to, +unsigned long __must_check raw_copy_from_user(void *to, const void __user *from, unsigned long size); -static inline unsigned long __must_check -copy_from_user(void *to, const void __user *from, unsigned long size) 
-{ - check_object_size(to, size, false); - - return ___copy_from_user(to, from, size); -} -#define __copy_from_user copy_from_user -unsigned long __must_check ___copy_to_user(void __user *to, +unsigned long __must_check raw_copy_to_user(void __user *to, const void *from, unsigned long size); -static inline unsigned long __must_check -copy_to_user(void __user *to, const void *from, unsigned long size) -{ - check_object_size(from, size, true); +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER - return ___copy_to_user(to, from, size); -} -#define __copy_to_user copy_to_user - -unsigned long __must_check ___copy_in_user(void __user *to, +unsigned long __must_check raw_copy_in_user(void __user *to, const void __user *from, unsigned long size); -static inline unsigned long __must_check -copy_in_user(void __user *to, void __user *from, unsigned long size) -{ - return ___copy_in_user(to, from, size); -} -#define __copy_in_user copy_in_user unsigned long __must_check __clear_user(void __user *, unsigned long); @@ -217,9 +197,6 @@ unsigned long __must_check __clear_user(void __user *, unsigned long); __must_check long strlen_user(const char __user *str); __must_check long strnlen_user(const char __user *str, long n); -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - struct pt_regs; unsigned long compute_effective_address(struct pt_regs *, unsigned int insn, diff --git a/arch/sparc/lib/GENcopy_from_user.S b/arch/sparc/lib/GENcopy_from_user.S index 69a439f..8aa16ef 100644 --- a/arch/sparc/lib/GENcopy_from_user.S +++ b/arch/sparc/lib/GENcopy_from_user.S @@ -23,7 +23,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/GENcopy_to_user.S b/arch/sparc/lib/GENcopy_to_user.S index 9947427..311c8fa 100644 --- a/arch/sparc/lib/GENcopy_to_user.S +++ b/arch/sparc/lib/GENcopy_to_user.S @@ -27,7 +27,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/GENpatch.S b/arch/sparc/lib/GENpatch.S index fab9e89..95e2f1f 100644 --- a/arch/sparc/lib/GENpatch.S +++ b/arch/sparc/lib/GENpatch.S @@ -26,8 +26,8 @@ .type generic_patch_copyops,#function generic_patch_copyops: GEN_DO_PATCH(memcpy, GENmemcpy) - GEN_DO_PATCH(___copy_from_user, GENcopy_from_user) - GEN_DO_PATCH(___copy_to_user, GENcopy_to_user) + GEN_DO_PATCH(raw_copy_from_user, GENcopy_from_user) + GEN_DO_PATCH(raw_copy_to_user, GENcopy_to_user) retl nop .size generic_patch_copyops,.-generic_patch_copyops diff --git a/arch/sparc/lib/NG2copy_from_user.S b/arch/sparc/lib/NG2copy_from_user.S index b79a699..0d8a018 100644 --- a/arch/sparc/lib/NG2copy_from_user.S +++ b/arch/sparc/lib/NG2copy_from_user.S @@ -36,7 +36,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NG2copy_to_user.S b/arch/sparc/lib/NG2copy_to_user.S index dcec55f..a7a0ea0 100644 --- a/arch/sparc/lib/NG2copy_to_user.S +++ b/arch/sparc/lib/NG2copy_to_user.S @@ -45,7 +45,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NG2patch.S b/arch/sparc/lib/NG2patch.S index 28c36f0..56ccc19 100644 --- a/arch/sparc/lib/NG2patch.S +++ b/arch/sparc/lib/NG2patch.S @@ -26,8 +26,8 @@ .type 
niagara2_patch_copyops,#function niagara2_patch_copyops: NG_DO_PATCH(memcpy, NG2memcpy) - NG_DO_PATCH(___copy_from_user, NG2copy_from_user) - NG_DO_PATCH(___copy_to_user, NG2copy_to_user) + NG_DO_PATCH(raw_copy_from_user, NG2copy_from_user) + NG_DO_PATCH(raw_copy_to_user, NG2copy_to_user) retl nop .size niagara2_patch_copyops,.-niagara2_patch_copyops diff --git a/arch/sparc/lib/NG4copy_from_user.S b/arch/sparc/lib/NG4copy_from_user.S index 16a286c..5bb506b 100644 --- a/arch/sparc/lib/NG4copy_from_user.S +++ b/arch/sparc/lib/NG4copy_from_user.S @@ -31,7 +31,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NG4copy_to_user.S b/arch/sparc/lib/NG4copy_to_user.S index 6b0276f..a82d4d4 100644 --- a/arch/sparc/lib/NG4copy_to_user.S +++ b/arch/sparc/lib/NG4copy_to_user.S @@ -40,7 +40,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NG4patch.S b/arch/sparc/lib/NG4patch.S index a114cbc..3cc0f8c 100644 --- a/arch/sparc/lib/NG4patch.S +++ b/arch/sparc/lib/NG4patch.S @@ -26,8 +26,8 @@ .type niagara4_patch_copyops,#function niagara4_patch_copyops: NG_DO_PATCH(memcpy, NG4memcpy) - NG_DO_PATCH(___copy_from_user, NG4copy_from_user) - NG_DO_PATCH(___copy_to_user, NG4copy_to_user) + NG_DO_PATCH(raw_copy_from_user, NG4copy_from_user) + NG_DO_PATCH(raw_copy_to_user, NG4copy_to_user) retl nop .size niagara4_patch_copyops,.-niagara4_patch_copyops diff --git a/arch/sparc/lib/NGcopy_from_user.S b/arch/sparc/lib/NGcopy_from_user.S index 9cd42fc..2333b6f 100644 --- a/arch/sparc/lib/NGcopy_from_user.S +++ b/arch/sparc/lib/NGcopy_from_user.S @@ -25,7 +25,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NGcopy_to_user.S b/arch/sparc/lib/NGcopy_to_user.S index 5c358af..07ba20b 100644 --- a/arch/sparc/lib/NGcopy_to_user.S +++ b/arch/sparc/lib/NGcopy_to_user.S @@ -28,7 +28,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop #endif diff --git a/arch/sparc/lib/NGpatch.S b/arch/sparc/lib/NGpatch.S index 3b0674f..62ccda7 100644 --- a/arch/sparc/lib/NGpatch.S +++ b/arch/sparc/lib/NGpatch.S @@ -26,8 +26,8 @@ .type niagara_patch_copyops,#function niagara_patch_copyops: NG_DO_PATCH(memcpy, NGmemcpy) - NG_DO_PATCH(___copy_from_user, NGcopy_from_user) - NG_DO_PATCH(___copy_to_user, NGcopy_to_user) + NG_DO_PATCH(raw_copy_from_user, NGcopy_from_user) + NG_DO_PATCH(raw_copy_to_user, NGcopy_to_user) retl nop .size niagara_patch_copyops,.-niagara_patch_copyops diff --git a/arch/sparc/lib/U1copy_from_user.S b/arch/sparc/lib/U1copy_from_user.S index bb6ff73..9a6e68a 100644 --- a/arch/sparc/lib/U1copy_from_user.S +++ b/arch/sparc/lib/U1copy_from_user.S @@ -19,7 +19,7 @@ .text; \ .align 4; -#define FUNC_NAME ___copy_from_user +#define FUNC_NAME raw_copy_from_user #define LOAD(type,addr,dest) type##a [addr] %asi, dest #define LOAD_BLK(addr,dest) ldda [addr] ASI_BLK_AIUS, dest #define EX_RETVAL(x) 0 @@ -31,7 +31,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop; \ #include "U1memcpy.S" diff --git a/arch/sparc/lib/U1copy_to_user.S b/arch/sparc/lib/U1copy_to_user.S index ed92ce73..d7b2849 100644 --- 
a/arch/sparc/lib/U1copy_to_user.S +++ b/arch/sparc/lib/U1copy_to_user.S @@ -19,7 +19,7 @@ .text; \ .align 4; -#define FUNC_NAME ___copy_to_user +#define FUNC_NAME raw_copy_to_user #define STORE(type,src,addr) type##a src, [addr] ASI_AIUS #define STORE_BLK(src,addr) stda src, [addr] ASI_BLK_AIUS #define EX_RETVAL(x) 0 @@ -31,7 +31,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop; \ #include "U1memcpy.S" diff --git a/arch/sparc/lib/U3copy_to_user.S b/arch/sparc/lib/U3copy_to_user.S index c4ee858..f48fb87 100644 --- a/arch/sparc/lib/U3copy_to_user.S +++ b/arch/sparc/lib/U3copy_to_user.S @@ -31,7 +31,7 @@ #define PREAMBLE \ rd %asi, %g1; \ cmp %g1, ASI_AIUS; \ - bne,pn %icc, ___copy_in_user; \ + bne,pn %icc, raw_copy_in_user; \ nop; \ #include "U3memcpy.S" diff --git a/arch/sparc/lib/U3patch.S b/arch/sparc/lib/U3patch.S index ecc3026..91cd653 100644 --- a/arch/sparc/lib/U3patch.S +++ b/arch/sparc/lib/U3patch.S @@ -26,8 +26,8 @@ .type cheetah_patch_copyops,#function cheetah_patch_copyops: ULTRA3_DO_PATCH(memcpy, U3memcpy) - ULTRA3_DO_PATCH(___copy_from_user, U3copy_from_user) - ULTRA3_DO_PATCH(___copy_to_user, U3copy_to_user) + ULTRA3_DO_PATCH(raw_copy_from_user, U3copy_from_user) + ULTRA3_DO_PATCH(raw_copy_to_user, U3copy_to_user) retl nop .size cheetah_patch_copyops,.-cheetah_patch_copyops diff --git a/arch/sparc/lib/copy_in_user.S b/arch/sparc/lib/copy_in_user.S index 0252b21..1b73bb8 100644 --- a/arch/sparc/lib/copy_in_user.S +++ b/arch/sparc/lib/copy_in_user.S @@ -44,7 +44,7 @@ __retl_o2_plus_1: * to copy register windows around during thread cloning. */ -ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ +ENTRY(raw_copy_in_user) /* %o0=dst, %o1=src, %o2=len */ cmp %o2, 0 be,pn %XCC, 85f or %o0, %o1, %o3 @@ -105,5 +105,5 @@ ENTRY(___copy_in_user) /* %o0=dst, %o1=src, %o2=len */ add %o0, 1, %o0 retl clr %o0 -ENDPROC(___copy_in_user) -EXPORT_SYMBOL(___copy_in_user) +ENDPROC(raw_copy_in_user) +EXPORT_SYMBOL(raw_copy_in_user) diff --git a/arch/sparc/lib/copy_user.S b/arch/sparc/lib/copy_user.S index cea644d..bc243ee 100644 --- a/arch/sparc/lib/copy_user.S +++ b/arch/sparc/lib/copy_user.S @@ -364,21 +364,7 @@ short_aligned_end: 97: mov %o2, %g3 fixupretl: - sethi %hi(PAGE_OFFSET), %g1 - cmp %o0, %g1 - blu 1f - cmp %o1, %g1 - bgeu 1f - ld [%g6 + TI_PREEMPT], %g1 - cmp %g1, 0 - bne 1f - nop - save %sp, -64, %sp - mov %i0, %o0 - call __bzero - mov %g3, %o1 - restore -1: retl + retl mov %g3, %o0 /* exception routine sets %g2 to (broken_insn - first_insn)>>2 */ -- cgit v1.1 From 7d4914db8fda6d38d92b1b8a740bafbd6c6d89a1 Mon Sep 17 00:00:00 2001 From: Max Filippov Date: Tue, 4 Apr 2017 13:26:29 -0700 Subject: xtensa: fix prefetch in the raw_copy_to_user 'from' is the input buffer, it should be prefetched with prefetch, not prefetchw. 
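For reference, the two hints differ as follows (generic semantics of the <linux/prefetch.h> helpers):

	prefetch(p);	/* hint: *p will be read soon */
	prefetchw(p);	/* hint: *p will be written soon, so fetch the
			 * cache line in an exclusive/writable state */

raw_copy_to_user() only ever reads 'from', hence the plain read hint is the correct one.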
Tested-by: Max Filippov Signed-off-by: Max Filippov Signed-off-by: Al Viro --- arch/xtensa/include/asm/uaccess.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'arch') diff --git a/arch/xtensa/include/asm/uaccess.h b/arch/xtensa/include/asm/uaccess.h index 8e93ed8..2e7bac0 100644 --- a/arch/xtensa/include/asm/uaccess.h +++ b/arch/xtensa/include/asm/uaccess.h @@ -245,7 +245,7 @@ raw_copy_from_user(void *to, const void __user *from, unsigned long n) static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - prefetchw(from); + prefetch(from); return __xtensa_copy_user((__force void *)to, from, n); } #define INLINE_COPY_FROM_USER -- cgit v1.1 From ef62a2d81f73d9cddef14bc3d9097a57010d551c Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 31 Mar 2017 10:37:44 +0100 Subject: metag/usercopy: Drop unused macros Metag's lib/usercopy.c has a bunch of copy_from_user macros for larger copies between 5 and 16 bytes which are completely unused. Before fixing zeroing, let's drop these macros so there is less to fix. Signed-off-by: James Hogan Cc: Al Viro Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 113 ---------------------------------------------- 1 file changed, 113 deletions(-) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index b3ebfe9..b4eb1f1 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -651,119 +651,6 @@ EXPORT_SYMBOL(__copy_user); #define __asm_copy_from_user_4(to, from, ret) \ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") -#define __asm_copy_from_user_5(to, from, ret) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 4b,5b\n") - -#define __asm_copy_from_user_6x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "4: SETW [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_6(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_7(to, from, ret) \ - __asm_copy_from_user_6x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_8x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_4x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "4: SETD [%0++],D1Ar1\n" COPY, \ - "5: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 4b,5b\n" TENTRY) - -#define __asm_copy_from_user_8(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_9(to, from, ret) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "6: SETB [%0++],D1Ar1\n", \ - "7: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 6b,7b\n") - -#define __asm_copy_from_user_10x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "6: SETW [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_10(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_11(to, from, ret) \ - __asm_copy_from_user_10x_cont(to, from,
ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_12x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_8x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "6: SETD [%0++],D1Ar1\n" COPY, \ - "7: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 6b,7b\n" TENTRY) - -#define __asm_copy_from_user_12(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_13(to, from, ret) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "8: SETB [%0++],D1Ar1\n", \ - "9: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 8b,9b\n") - -#define __asm_copy_from_user_14x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETW D1Ar1,[%1++]\n" \ - "8: SETW [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_14(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, "", "", "") - -#define __asm_copy_from_user_15(to, from, ret) \ - __asm_copy_from_user_14x_cont(to, from, ret, \ - " GETB D1Ar1,[%1++]\n" \ - "10: SETB [%0++],D1Ar1\n", \ - "11: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ - " .long 10b,11b\n") - -#define __asm_copy_from_user_16x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ - __asm_copy_from_user_12x_cont(to, from, ret, \ - " GETD D1Ar1,[%1++]\n" \ - "8: SETD [%0++],D1Ar1\n" COPY, \ - "9: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ - " .long 8b,9b\n" TENTRY) - -#define __asm_copy_from_user_16(to, from, ret) \ - __asm_copy_from_user_16x_cont(to, from, ret, "", "", "") #define __asm_copy_from_user_8x64(to, from, ret) \ asm volatile ( \ -- cgit v1.1 From 2257211942bbbf6c798ab70b487d7e62f7835a1a Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 31 Mar 2017 11:23:18 +0100 Subject: metag/usercopy: Fix alignment error checking Fix the error checking of the alignment adjustment code in raw_copy_from_user(), which mistakenly considers it safe to skip the error check when aligning the source buffer on a 2 or 4 byte boundary. If the destination buffer was unaligned it may have started to copy using byte or word accesses, which could well be at the start of a new (valid) source page. This would result in it appearing to have copied 1 or 2 bytes at the end of the first (invalid) page rather than none at all. 
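Schematically, the fix checks retn right after each alignment copy (simplified from the hunks below; copy_exception_bytes is the existing zeroing path):

	if ((unsigned long) src & 1) {
		__asm_copy_from_user_1(dst, src, retn);
		n--;
		if (retn)	/* even this first byte may have faulted */
			goto copy_exception_bytes;
	}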
Fixes: 373cd784d0fc ("metag: Memory handling") Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index b4eb1f1..a6ced96 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -717,6 +717,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, if ((unsigned long) src & 1) { __asm_copy_from_user_1(dst, src, retn); n--; + if (retn) + goto copy_exception_bytes; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ @@ -730,6 +732,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2(dst, src, retn); n -= 2; + if (retn) + goto copy_exception_bytes; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ @@ -741,12 +745,6 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, } } - /* We only need one check after the unalignment-adjustments, - because if both adjustments were done, either both or - neither reference had an exception. */ - if (retn != 0) - goto copy_exception_bytes; - #ifdef USE_RAPF /* 64 bit copy loop */ if (!(((unsigned long) src | (unsigned long) dst) & 7)) { -- cgit v1.1 From fb8ea062a8f2e85256e13f55696c5c5f0dfdcc8b Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 31 Mar 2017 13:35:01 +0100 Subject: metag/usercopy: Add early abort to copy_to_user When copying to userland on Meta, abort the copy immediately if any fault is encountered, instead of continuing on, repeatedly faulting, and, worse, potentially copying further bytes successfully to subsequent valid pages.
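Schematically, every copy step now bails out as soon as retn becomes non-zero (simplified from the hunks below):

	while (n >= 4) {
		__asm_copy_to_user_4(dst, src, retn);
		n -= 4;
		if (retn)
			return retn + n;	/* total bytes not copied */
	}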
Fixes: 373cd784d0fc ("metag: Memory handling") Reported-by: Al Viro Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index a6ced96..714d856 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -538,23 +538,31 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, if ((unsigned long) src & 1) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ while (n > 0) { __asm_copy_to_user_1(dst, src, retn); n--; + if (retn) + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ while (n >= 2) { __asm_copy_to_user_2(dst, src, retn); n -= 2; + if (retn) + return retn + n; } } @@ -569,6 +577,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } if (n >= RAPF_MIN_BUF_SIZE) { @@ -581,6 +591,8 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 8) { __asm_copy_to_user_8x64(dst, src, retn); n -= 8; + if (retn) + return retn + n; } } #endif @@ -588,11 +600,15 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, while (n >= 16) { __asm_copy_to_user_16(dst, src, retn); n -= 16; + if (retn) + return retn + n; } while (n >= 4) { __asm_copy_to_user_4(dst, src, retn); n -= 4; + if (retn) + return retn + n; } switch (n) { @@ -609,6 +625,10 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, break; } + /* + * If we get here, retn correctly reflects the number of failing + * bytes. + */ return retn; } EXPORT_SYMBOL(__copy_user); -- cgit v1.1 From 563ddc1076109f2b3f88e6d355eab7b6fd4662cb Mon Sep 17 00:00:00 2001 From: James Hogan Date: Fri, 31 Mar 2017 11:14:02 +0100 Subject: metag/usercopy: Zero rest of buffer from copy_from_user Currently we try to zero the destination for a failed read from userland in fixup code in the usercopy.c macros. The rest of the destination buffer is then zeroed from __copy_user_zeroing(), which is used for both copy_from_user() and __copy_from_user(). Unfortunately we fail to zero in the fixup code as D1Ar1 is set to 0 before the fixup code entry labels, and __copy_from_user() shouldn't even be zeroing the rest of the buffer. Move the zeroing out into copy_from_user() and rename __copy_user_zeroing() to raw_copy_from_user() since it no longer does any zeroing. This also conveniently matches the name needed for RAW_COPY_USER support in a later patch. 
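The caller-visible contract after this change can be illustrated with made-up numbers:

	char buf[100];
	unsigned long res = copy_from_user(buf, uptr, 100);
	/* if the copy faults with 60 bytes done: res == 40,
	 * buf[0..59] holds user data and buf[60..99] is zeroed by
	 * copy_from_user(); raw_copy_from_user() alone would leave
	 * buf[60..99] untouched */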
Fixes: 373cd784d0fc ("metag: Memory handling") Reported-by: Al Viro Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/include/asm/uaccess.h | 15 ++++++----- arch/metag/lib/usercopy.c | 57 +++++++++++++--------------------------- 2 files changed, 26 insertions(+), 46 deletions(-) (limited to 'arch') diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 273e612..07238b3 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -197,20 +197,21 @@ extern long __must_check strnlen_user(const char __user *src, long count); #define strlen_user(str) strnlen_user(str, 32767) -extern unsigned long __must_check __copy_user_zeroing(void *to, - const void __user *from, - unsigned long n); +extern unsigned long raw_copy_from_user(void *to, const void __user *from, + unsigned long n); static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { + unsigned long res = n; if (likely(access_ok(VERIFY_READ, from, n))) - return __copy_user_zeroing(to, from, n); - memset(to, 0, n); - return n; + res = raw_copy_from_user(to, from, n); + if (unlikely(res)) + memset(to + (n - res), 0, res); + return res; } -#define __copy_from_user(to, from, n) __copy_user_zeroing(to, from, n) +#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) #define __copy_from_user_inatomic __copy_from_user extern unsigned long __must_check __copy_user(void __user *to, diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index 714d856..e1d5538 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -29,7 +29,6 @@ COPY \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ FIXUP \ " MOVT D1Ar1,#HI(1b)\n" \ " JUMP D1Ar1,#LO(1b)\n" \ @@ -637,16 +636,14 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_user_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "2: SETB [%0++],D1Ar1\n", \ - "3: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "3: ADD %2,%2,#1\n", \ " .long 2b,3b\n") #define __asm_copy_from_user_2x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETW D1Ar1,[%1++]\n" \ "2: SETW [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#2\n" \ - " SETW [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#2\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_2(to, from, ret) \ @@ -656,32 +653,26 @@ EXPORT_SYMBOL(__copy_user); __asm_copy_from_user_2x_cont(to, from, ret, \ " GETB D1Ar1,[%1++]\n" \ "4: SETB [%0++],D1Ar1\n", \ - "5: ADD %2,%2,#1\n" \ - " SETB [%0++],D1Ar1\n", \ + "5: ADD %2,%2,#1\n", \ " .long 4b,5b\n") #define __asm_copy_from_user_4x_cont(to, from, ret, COPY, FIXUP, TENTRY) \ __asm_copy_user_cont(to, from, ret, \ " GETD D1Ar1,[%1++]\n" \ "2: SETD [%0++],D1Ar1\n" COPY, \ - "3: ADD %2,%2,#4\n" \ - " SETD [%0++],D1Ar1\n" FIXUP, \ + "3: ADD %2,%2,#4\n" FIXUP, \ " .long 2b,3b\n" TENTRY) #define __asm_copy_from_user_4(to, from, ret) \ __asm_copy_from_user_4x_cont(to, from, ret, "", "", "") - #define __asm_copy_from_user_8x64(to, from, ret) \ asm volatile ( \ " GETL D0Ar2,D1Ar1,[%1++]\n" \ "2: SETL [%0++],D0Ar2,D1Ar1\n" \ "1:\n" \ " .section .fixup,\"ax\"\n" \ - " MOV D1Ar1,#0\n" \ - " MOV D0Ar2,#0\n" \ "3: ADD %2,%2,#8\n" \ - " SETL [%0++],D0Ar2,D1Ar1\n" \ " MOVT D0Ar2,#HI(1b)\n" \ " JUMP D0Ar2,#LO(1b)\n" \ " .previous\n" \ @@ -721,11 +712,12 @@ EXPORT_SYMBOL(__copy_user); "SUB %1, %1, #4\n") -/* Copy from user to kernel, zeroing the bytes that were inaccessible in - userland. 
The return-value is the number of bytes that were - inaccessible. */ -unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, - unsigned long n) +/* + * Copy from user to kernel. The return-value is the number of bytes that were + * inaccessible. + */ +unsigned long raw_copy_from_user(void *pdst, const void __user *psrc, + unsigned long n) { register char *dst asm ("A0.2") = pdst; register const char __user *src asm ("A1.2") = psrc; @@ -738,7 +730,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_1(dst, src, retn); n--; if (retn) - goto copy_exception_bytes; + return retn + n; } if ((unsigned long) dst & 1) { /* Worst case - byte copy */ @@ -746,14 +738,14 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_1(dst, src, retn); n--; if (retn) - goto copy_exception_bytes; + return retn + n; } } if (((unsigned long) src & 2) && n >= 2) { __asm_copy_from_user_2(dst, src, retn); n -= 2; if (retn) - goto copy_exception_bytes; + return retn + n; } if ((unsigned long) dst & 2) { /* Second worst case - word copy */ @@ -761,7 +753,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_2(dst, src, retn); n -= 2; if (retn) - goto copy_exception_bytes; + return retn + n; } } @@ -777,7 +769,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } @@ -793,7 +785,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, __asm_copy_from_user_8x64(dst, src, retn); n -= 8; if (retn) - goto copy_exception_bytes; + return retn + n; } } #endif @@ -803,7 +795,7 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, n -= 4; if (retn) - goto copy_exception_bytes; + return retn + n; } /* If we get here, there were no memory read faults. */ @@ -829,21 +821,8 @@ unsigned long __copy_user_zeroing(void *pdst, const void __user *psrc, /* If we get here, retn correctly reflects the number of failing bytes. */ return retn; - - copy_exception_bytes: - /* We already have "retn" bytes cleared, and need to clear the - remaining "n" bytes. A non-optimized simple byte-for-byte in-line - memset is preferred here, since this isn't speed-critical code and - we'd rather have this a leaf-function than calling memset. */ - { - char *endp; - for (endp = dst + n; dst < endp; dst++) - *dst = 0; - } - - return retn + n; } -EXPORT_SYMBOL(__copy_user_zeroing); +EXPORT_SYMBOL(raw_copy_from_user); #define __asm_clear_8x64(to, ret) \ asm volatile ( \ -- cgit v1.1 From fd40eee1290ad7add7aa665e3ce6b0f9fe9734b4 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Apr 2017 11:43:26 +0100 Subject: metag/usercopy: Set flags before ADDZ The fixup code for the copy_to_user rapf loops reads TXStatus.LSM_STEP to decide how far to rewind the source pointer. There is a special case for the last execution of an MGETL/MGETD, since it leaves LSM_STEP=0 even though the number of MGETLs/MGETDs attempted was 4. This uses ADDZ which is conditional upon the Z condition flag, but the AND instruction which masked the TXStatus.LSM_STEP field didn't set the condition flags based on the result. Fix that now by using ANDS which does set the flags, and also marking the condition codes as clobbered by the inline assembly. 
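Two separate contracts are at work here, and both had to be fixed: inside the asm, only a flags-setting instruction (ANDS rather than AND) makes the following conditional ADDZ meaningful; towards the compiler, the "cc" clobber declares that the asm trashes the condition codes. The same pairing in ARM (A32) flavour, purely as an illustration and not taken from the patch:

    /* Illustration only (hypothetical helper, ARM A32 assembly): SUBS
     * sets the flags that the conditional MOVLO consumes, and "cc" in
     * the clobber list tells the compiler the flags are destroyed. */
    static inline unsigned int sub_clamp0(unsigned int a, unsigned int b)
    {
            unsigned int res;

            asm("subs  %0, %1, %2\n\t"      /* flags-setting subtract */
                "movlo %0, #0"              /* executes only if a < b */
                : "=r" (res)
                : "r" (a), "r" (b)
                : "cc");
            return res;                     /* a >= b ? a - b : 0 */
    }

Without the "cc" clobber the compiler is free to assume any flag state it computed before the asm statement is still valid afterwards, which can miscompile surrounding code even when the asm itself is correct.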
Fixes: 373cd784d0fc ("metag: Memory handling") Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index e1d5538..4422928 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -315,7 +315,7 @@ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -341,7 +341,7 @@ #define __asm_copy_to_user_64bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ @@ -486,7 +486,7 @@ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ - : "D1Ar1", "D0Ar2", "memory") + : "D1Ar1", "D0Ar2", "cc", "memory") /* rewind 'to' and 'from' pointers when a fault occurs * @@ -512,7 +512,7 @@ #define __asm_copy_to_user_32bit_rapf_loop(to, from, ret, n, id)\ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ "LSR D0Ar2, D0Ar2, #8\n" \ - "AND D0Ar2, D0Ar2, #0x7\n" \ + "ANDS D0Ar2, D0Ar2, #0x7\n" \ "ADDZ D0Ar2, D0Ar2, #4\n" \ "SUB D0Ar2, D0Ar2, #1\n" \ "MOV D1Ar1, #4\n" \ -- cgit v1.1 From 2c0b1df88b987a12d95ea1d6beaf01894f3cc725 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Mon, 3 Apr 2017 17:41:40 +0100 Subject: metag/usercopy: Fix src fixup in from user rapf loops The fixup code to rewind the source pointer in __asm_copy_from_user_{32,64}bit_rapf_loop() always rewound the source by a single unit (4 or 8 bytes), however this is insufficient if the fault didn't occur on the first load in the loop, as the source pointer will have been incremented but nothing will have been stored until all 4 register [pairs] are loaded. Read the LSM_STEP field of TXSTATUS (which is already loaded into a register), a bit like the copy_to_user versions, to determine how many iterations of MGET[DL] have taken place, all of which need rewinding. Fixes: 373cd784d0fc ("metag: Memory handling") Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 36 ++++++++++++++++++++++++++++-------- 1 file changed, 28 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index 4422928..e09c95b 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -687,29 +687,49 @@ EXPORT_SYMBOL(__copy_user); * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 8 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*8 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. 
 */ #define __asm_copy_from_user_64bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_64bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #8\n") + "LSR D0Ar2, D0Ar2, #5\n" \ + "ANDS D0Ar2, D0Ar2, #0x38\n" \ + "ADDZ D0Ar2, D0Ar2, #32\n" \ + "SUB %1, %1, D0Ar2\n") /* rewind 'from' pointer when a fault occurs * * Rationale: * A fault occurs while reading from user buffer, which is the - * source. Since the fault is at a single address, we only - * need to rewind by 4 bytes. + * source. * Since we don't write to kernel buffer until we read first, * the kernel buffer is at the right state and needn't be - * corrected. + * corrected, but the source must be rewound to the beginning of + * the block, which is LSM_STEP*4 bytes. + * LSM_STEP is bits 10:8 in TXSTATUS which is already read + * and stored in D0Ar2 + * + * NOTE: If a fault occurs at the last operation in M{G,S}ETL + * LSM_STEP will be 0. ie: we do 4 writes in our case, if + * a fault happens at the 4th write, LSM_STEP will be 0 + * instead of 4. The code copes with that. */ #define __asm_copy_from_user_32bit_rapf_loop(to, from, ret, n, id) \ __asm_copy_user_32bit_rapf_loop(to, from, ret, n, id, \ - "SUB %1, %1, #4\n") + "LSR D0Ar2, D0Ar2, #6\n" \ + "ANDS D0Ar2, D0Ar2, #0x1c\n" \ + "ADDZ D0Ar2, D0Ar2, #16\n" \ + "SUB %1, %1, D0Ar2\n") /* -- cgit v1.1 From b884a190afcecdbef34ca508ea5ee88bb7c77861 Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Apr 2017 08:51:34 +0100 Subject: metag/usercopy: Add missing fixups The rapf copy loops in the Meta usercopy code are missing some extable entries for HTP cores with unaligned access checking enabled, where faults occur on the instruction immediately after the faulting access. Add the fixup labels and extable entries for these cases so that corner-case user copy failures don't cause kernel crashes. 
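Concretely, each instruction that can now trap gets its own local label and a matching entry in the __ex_table search table. Abbreviated shape, with labels and entries taken from the hunk below and operands elided:

    "27:\n"
    "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n"  /* the access itself       */
    "28:\n"
    "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n"
    "29:\n"
    "SUB %3, %3, #32\n"                        /* HTP fault can land here */
    ...
    " .long 27b,3b\n"   /* fault at 27 -> fixup code at 3 */
    " .long 28b,3b\n"
    " .long 29b,3b\n"   /* previously missing */

Without the 29b entry, a fault raised on the SUB has no landing pad, so the exception is treated as a kernel bug rather than a recoverable user copy failure.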
Fixes: 373cd784d0fc ("metag: Memory handling") Signed-off-by: James Hogan Cc: linux-metag@vger.kernel.org Cc: stable@vger.kernel.org --- arch/metag/lib/usercopy.c | 72 +++++++++++++++++++++++++++++++---------------- 1 file changed, 48 insertions(+), 24 deletions(-) (limited to 'arch') diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index e09c95b..2792fc6 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -259,27 +259,31 @@ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #32\n" \ "23:\n" \ - "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "SUB %3, %3, #32\n" \ "24:\n" \ + "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "25:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #32\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "25:\n" \ + "27:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "28:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ "SUB %3, %3, #32\n" \ - "27:\n" \ + "30:\n" \ "MGETL D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "28:\n" \ + "31:\n" \ "MSETL [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %0, %0, #8\n" \ - "29:\n" \ + "33:\n" \ "SETL [%0++], D0.7, D1.7\n" \ "SUB %3, %3, #32\n" \ "1:" \ @@ -311,7 +315,11 @@ " .long 26b,3b\n" \ " .long 27b,3b\n" \ " .long 28b,3b\n" \ - " .long 29b,4b\n" \ + " .long 29b,3b\n" \ + " .long 30b,3b\n" \ + " .long 31b,3b\n" \ + " .long 32b,3b\n" \ + " .long 33b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" (ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ @@ -402,47 +410,55 @@ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "22:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ - "SUB %3, %3, #16\n" \ "23:\n" \ - "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "24:\n" \ - "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ "SUB %3, %3, #16\n" \ - "25:\n" \ + "24:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "26:\n" \ + "25:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "26:\n" \ "SUB %3, %3, #16\n" \ "27:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ "28:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "29:\n" \ + "SUB %3, %3, #16\n" \ + "30:\n" \ + "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ + "31:\n" \ + "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "32:\n" \ "SUB %3, %3, #16\n" \ "DCACHE [%1+#-64], D0Ar6\n" \ "BR $Lloop"id"\n" \ \ "MOV RAPF, %1\n" \ - "29:\n" \ + "33:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "30:\n" \ + "34:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "35:\n" \ "SUB %3, %3, #16\n" \ - "31:\n" \ + "36:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "32:\n" \ + "37:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "38:\n" \ "SUB %3, %3, #16\n" \ - "33:\n" \ + "39:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "34:\n" \ + "40:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "41:\n" \ "SUB %3, %3, #16\n" \ - "35:\n" \ + "42:\n" \ "MGETD D0FrT, D0.5, D0.6, D0.7, [%1++]\n" \ - "36:\n" \ + "43:\n" \ "MSETD [%0++], D0FrT, D0.5, D0.6, D0.7\n" \ + "44:\n" \ "SUB %0, %0, #4\n" \ - "37:\n" \ + "45:\n" \ "SETD [%0++], D0.7\n" \ "SUB %3, %3, #16\n" \ "1:" \ @@ -482,7 +498,15 @@ " .long 34b,3b\n" \ " .long 35b,3b\n" \ " .long 36b,3b\n" \ - " .long 37b,4b\n" \ + " .long 37b,3b\n" \ + " .long 38b,3b\n" \ + " .long 39b,3b\n" \ + " .long 40b,3b\n" \ + " .long 41b,3b\n" \ + " .long 42b,3b\n" \ + " .long 43b,3b\n" \ + " .long 44b,3b\n" \ + " .long 45b,4b\n" \ " .previous\n" \ : "=r" (to), "=r" (from), "=r" 
(ret), "=d" (n) \ : "0" (to), "1" (from), "2" (ret), "3" (n) \ -- cgit v1.1 From 840db3f93849830c4a55c795ddd6057ad40d4f2d Mon Sep 17 00:00:00 2001 From: James Hogan Date: Tue, 4 Apr 2017 15:46:52 +0100 Subject: metag/usercopy: Switch to RAW_COPY_USER Switch to using raw user copy instead of providing metag specific [__]copy_{to,from}_user[_inatomic](). This simplifies the metag uaccess.h and allows us to take advantage of extra checking in the generic versions. Signed-off-by: James Hogan Cc: Al Viro Cc: linux-metag@vger.kernel.org Signed-off-by: Al Viro --- arch/metag/Kconfig | 1 + arch/metag/include/asm/uaccess.h | 31 ++----------------------------- arch/metag/lib/usercopy.c | 6 +++--- 3 files changed, 6 insertions(+), 32 deletions(-) (limited to 'arch') diff --git a/arch/metag/Kconfig b/arch/metag/Kconfig index 5b7a45d..ecce0c5 100644 --- a/arch/metag/Kconfig +++ b/arch/metag/Kconfig @@ -1,5 +1,6 @@ config METAG def_bool y + select ARCH_HAS_RAW_COPY_USER select EMBEDDED select GENERIC_ATOMIC64 select GENERIC_CLOCKEVENTS diff --git a/arch/metag/include/asm/uaccess.h b/arch/metag/include/asm/uaccess.h index 61805d9..5ebc285 100644 --- a/arch/metag/include/asm/uaccess.h +++ b/arch/metag/include/asm/uaccess.h @@ -174,35 +174,8 @@ extern long __must_check strnlen_user(const char __user *src, long count); extern unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); - -static inline unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long res = n; - if (likely(access_ok(VERIFY_READ, from, n))) - res = raw_copy_from_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; -} - -#define __copy_from_user(to, from, n) raw_copy_from_user(to, from, n) -#define __copy_from_user_inatomic __copy_from_user - -extern unsigned long __must_check __copy_user(void __user *to, - const void *from, - unsigned long n); - -static inline unsigned long copy_to_user(void __user *to, const void *from, - unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_user(to, from, n); - return n; -} - -#define __copy_to_user(to, from, n) __copy_user(to, from, n) -#define __copy_to_user_inatomic __copy_to_user +extern unsigned long raw_copy_to_user(void __user *to, const void *from, + unsigned long n); /* * Zero Userspace diff --git a/arch/metag/lib/usercopy.c b/arch/metag/lib/usercopy.c index 2792fc6..e8a4ea8 100644 --- a/arch/metag/lib/usercopy.c +++ b/arch/metag/lib/usercopy.c @@ -548,8 +548,8 @@ "SUB %1, %1, D0Ar2\n" \ "SUB %3, %3, D1Ar1\n") -unsigned long __copy_user(void __user *pdst, const void *psrc, - unsigned long n) +unsigned long raw_copy_to_user(void __user *pdst, const void *psrc, + unsigned long n) { register char __user *dst asm ("A0.2") = pdst; register const char *src asm ("A1.2") = psrc; @@ -654,7 +654,7 @@ unsigned long __copy_user(void __user *pdst, const void *psrc, */ return retn; } -EXPORT_SYMBOL(__copy_user); +EXPORT_SYMBOL(raw_copy_to_user); #define __asm_copy_from_user_1(to, from, ret) \ __asm_copy_user_cont(to, from, ret, \ -- cgit v1.1 From f0a955f4eeec0f16bdbdd0fb15d8ec0937d1de23 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 10:10:53 -0500 Subject: mips: sanitize __access_ok() for one thing, the last argument is always __access_mask and had been such since 2.4.0-test3pre8; for another, it can bloody well be a static inline - -O2 or -Os, __builtin_constant_p() propagates through static inline calls. 
Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 21 ++++++--------------- arch/mips/kernel/mips-r2-to-r6-emul.c | 24 ++++++++++++------------ arch/mips/kernel/syscall.c | 2 +- arch/mips/oprofile/backtrace.c | 2 +- 4 files changed, 20 insertions(+), 29 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 70ca8ee..0c59911 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -128,23 +128,14 @@ static inline bool eva_kernel_access(void) * this function, memory access functions may still return -EFAULT. */ -#define __access_mask get_fs().seg - -#define __access_ok(addr, size, mask) \ -({ \ - unsigned long __addr = (unsigned long) (addr); \ - unsigned long __size = size; \ - unsigned long __mask = mask; \ - unsigned long __ok; \ - \ - __chk_user_ptr(addr); \ - __ok = (signed long)(__mask & (__addr | (__addr + __size) | \ - __ua_size(__size))); \ - __ok == 0; \ -}) +static inline int __access_ok(const void __user *p, unsigned long size) +{ + unsigned long addr = (unsigned long)p; + return (get_fs().seg & (addr | (addr + size) | __ua_size(size))) == 0; +} #define access_ok(type, addr, size) \ - likely(__access_ok((addr), (size), __access_mask)) + likely(__access_ok((addr), (size))) /* * put_user: - Write a simple value into user space. diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c index d8f1cf1..550e7d0 100644 --- a/arch/mips/kernel/mips-r2-to-r6-emul.c +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c @@ -1200,7 +1200,7 @@ fpu_emul: case lwl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1273,7 +1273,7 @@ fpu_emul: case lwr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1347,7 +1347,7 @@ fpu_emul: case swl_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1417,7 +1417,7 @@ fpu_emul: case swr_op: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1492,7 +1492,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1611,7 +1611,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1730,7 +1730,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if 
(!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1848,7 +1848,7 @@ fpu_emul: rt = regs->regs[MIPSInst_RT(inst)]; vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGSEGV; break; @@ -1965,7 +1965,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, vaddr, 4)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2021,7 +2021,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, vaddr, 4)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 4)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2084,7 +2084,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_READ, vaddr, 8)) { + if (!access_ok(VERIFY_READ, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; @@ -2145,7 +2145,7 @@ fpu_emul: err = SIGBUS; break; } - if (!access_ok(VERIFY_WRITE, vaddr, 8)) { + if (!access_ok(VERIFY_WRITE, (void __user *)vaddr, 8)) { current->thread.cp0_baduaddr = vaddr; err = SIGBUS; break; diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c index f1d17ec..1dfa7f5 100644 --- a/arch/mips/kernel/syscall.c +++ b/arch/mips/kernel/syscall.c @@ -98,7 +98,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new) if (unlikely(addr & 3)) return -EINVAL; - if (unlikely(!access_ok(VERIFY_WRITE, addr, 4))) + if (unlikely(!access_ok(VERIFY_WRITE, (const void __user *)addr, 4))) return -EINVAL; if (cpu_has_llsc && R10000_LLSC_WAR) { diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c index 5e645c9..16ace55 100644 --- a/arch/mips/oprofile/backtrace.c +++ b/arch/mips/oprofile/backtrace.c @@ -18,7 +18,7 @@ struct stackframe { static inline int get_mem(unsigned long addr, unsigned long *result) { unsigned long *address = (unsigned long *) addr; - if (!access_ok(VERIFY_READ, addr, sizeof(unsigned long))) + if (!access_ok(VERIFY_READ, address, sizeof(unsigned long))) return -1; if (__copy_from_user_inatomic(result, address, sizeof(unsigned long))) return -3; -- cgit v1.1 From c12a1d7ad916105fc5f7809f9273ea4fe7ce4b28 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:40:15 -0400 Subject: mips: consolidate __invoke_... 
wrappers Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 144 ++++++++++++---------------------------- 1 file changed, 44 insertions(+), 100 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 0c59911..0cd0acc 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -797,8 +797,30 @@ extern void __put_user_unaligned_unknown(void); extern size_t __copy_user(void *__to, const void *__from, size_t __n); -#ifndef CONFIG_EVA -#define __invoke_copy_to_user(to, from, n) \ +#define __invoke_copy_from(func, to, from, n) \ +({ \ + register void *__cu_to_r __asm__("$4"); \ + register const void __user *__cu_from_r __asm__("$5"); \ + register long __cu_len_r __asm__("$6"); \ + \ + __cu_to_r = (to); \ + __cu_from_r = (from); \ + __cu_len_r = (n); \ + __asm__ __volatile__( \ + ".set\tnoreorder\n\t" \ + __MODULE_JAL(func) \ + ".set\tnoat\n\t" \ + __UA_ADDU "\t$1, %1, %2\n\t" \ + ".set\tat\n\t" \ + ".set\treorder" \ + : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ + : \ + : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ + DADDI_SCRATCH, "memory"); \ + __cu_len_r; \ +}) + +#define __invoke_copy_to(func, to, from, n) \ ({ \ register void __user *__cu_to_r __asm__("$4"); \ register const void *__cu_from_r __asm__("$5"); \ @@ -808,7 +830,7 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_from_r = (from); \ __cu_len_r = (n); \ __asm__ __volatile__( \ - __MODULE_JAL(__copy_user) \ + __MODULE_JAL(func) \ : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ : \ : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ @@ -816,8 +838,12 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r; \ }) +#ifndef CONFIG_EVA +#define __invoke_copy_to_user(to, from, n) \ + __invoke_copy_to(__copy_user, to, from, n) + #define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to_user(to, from, n) + __invoke_copy_to(__copy_user, to, from, n) #endif @@ -948,64 +974,24 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); #ifndef CONFIG_EVA #define __invoke_copy_from_user(to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) /* For userland <-> userland operations */ #define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) /* For kernel <-> kernel operations */ #define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from_user(to, from, n) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_user_inatomic(to, from, n) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - 
\ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(__copy_user_inatomic) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) + __invoke_copy_from(__copy_user_inatomic, to, from, n) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) #else @@ -1019,79 +1005,37 @@ extern size_t __copy_to_user_eva(void *__to, const void *__from, size_t __n); extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); -#define __invoke_copy_from_user_eva_generic(to, from, n, func_ptr) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - ".set\tnoreorder\n\t" \ - __MODULE_JAL(func_ptr) \ - ".set\tnoat\n\t" \ - __UA_ADDU "\t$1, %1, %2\n\t" \ - ".set\tat\n\t" \ - ".set\treorder" \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - -#define __invoke_copy_to_user_eva_generic(to, from, n, func_ptr) \ -({ \ - register void *__cu_to_r __asm__("$4"); \ - register const void __user *__cu_from_r __asm__("$5"); \ - register long __cu_len_r __asm__("$6"); \ - \ - __cu_to_r = (to); \ - __cu_from_r = (from); \ - __cu_len_r = (n); \ - __asm__ __volatile__( \ - __MODULE_JAL(func_ptr) \ - : "+r" (__cu_to_r), "+r" (__cu_from_r), "+r" (__cu_len_r) \ - : \ - : "$8", "$9", "$10", "$11", "$12", "$14", "$15", "$24", "$31", \ - DADDI_SCRATCH, "memory"); \ - __cu_len_r; \ -}) - /* * Source or destination address is in userland. We need to go through * the TLB */ #define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_from_user_eva) + __invoke_copy_from(__copy_from_user_eva, to, from, n) #define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, \ - __copy_user_inatomic_eva) + __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) #define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to_user_eva_generic(to, from, n, __copy_to_user_eva) + __invoke_copy_to(__copy_to_user_eva, to, from, n) #define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_in_user_eva) + __invoke_copy_from(__copy_in_user_eva, to, from, n) /* * Source or destination address in the kernel. 
We are not going through * the TLB */ #define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_from(__copy_user, to, from, n) #define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user_inatomic) + __invoke_copy_from(__copy_user_inatomic, to, from, n) #define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_to(__copy_user, to, from, n) #define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from_user_eva_generic(to, from, n, __copy_user) + __invoke_copy_from(__copy_user, to, from, n) #endif /* CONFIG_EVA */ -- cgit v1.1 From b0bb945c14e81b3b33075d59469d5bf5b736a4fb Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:51:34 -0400 Subject: mips: clean and reorder the forest of macros... Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 120 ++++++++++++++++------------------------ 1 file changed, 49 insertions(+), 71 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 0cd0acc..9c05262 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -838,14 +838,60 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); __cu_len_r; \ }) +#define __invoke_copy_from_kernel(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + +#define __invoke_copy_from_kernel_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) + +#define __invoke_copy_to_kernel(to, from, n) \ + __invoke_copy_to(__copy_user, to, from, n) + +#define ___invoke_copy_in_kernel(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + #ifndef CONFIG_EVA +#define __invoke_copy_from_user(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) + +#define __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic, to, from, n) + #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) +#define ___invoke_copy_in_user(to, from, n) \ + __invoke_copy_from(__copy_user, to, from, n) -#endif +#else + +/* EVA specific functions */ + +extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_from_user_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_to_user_eva(void *__to, const void *__from, + size_t __n); +extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); + +/* + * Source or destination address is in userland. We need to go through + * the TLB + */ +#define __invoke_copy_from_user(to, from, n) \ + __invoke_copy_from(__copy_from_user_eva, to, from, n) + +#define __invoke_copy_from_user_inatomic(to, from, n) \ + __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) + +#define __invoke_copy_to_user(to, from, n) \ + __invoke_copy_to(__copy_to_user_eva, to, from, n) + +#define ___invoke_copy_in_user(to, from, n) \ + __invoke_copy_from(__copy_in_user_eva, to, from, n) + +#endif /* CONFIG_EVA */ /* * __copy_to_user: - Copy a block of data into user space, with less checking. 
@@ -971,74 +1017,6 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); __cu_len; \ }) -#ifndef CONFIG_EVA - -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -/* For userland <-> userland operations */ -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -/* For kernel <-> kernel operations */ -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#else - -/* EVA specific functions */ - -extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_from_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_to_user_eva(void *__to, const void *__from, - size_t __n); -extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); - -/* - * Source or destination address is in userland. We need to go through - * the TLB - */ -#define __invoke_copy_from_user(to, from, n) \ - __invoke_copy_from(__copy_from_user_eva, to, from, n) - -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) - -#define __invoke_copy_to_user(to, from, n) \ - __invoke_copy_to(__copy_to_user_eva, to, from, n) - -#define ___invoke_copy_in_user(to, from, n) \ - __invoke_copy_from(__copy_in_user_eva, to, from, n) - -/* - * Source or destination address in the kernel. We are not going through - * the TLB - */ -#define __invoke_copy_from_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - -#define __invoke_copy_to_kernel(to, from, n) \ - __invoke_copy_to(__copy_user, to, from, n) - -#define ___invoke_copy_in_kernel(to, from, n) \ - __invoke_copy_from(__copy_user, to, from, n) - -#endif /* CONFIG_EVA */ - /* * __copy_from_user: - Copy a block of data from user space, with less checking. * @to: Destination address, in kernel space. 
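The two reorganisation commits above boil down to one shape: a single worker macro parameterized by the asm helper, with every old entry point kept as a one-line wrapper. A hypothetical miniature of that shape:

    /* Hypothetical miniature of the consolidation: one parameterized
     * worker, thin per-case wrappers on top (compare __invoke_copy_from
     * and its callers above). */
    #define invoke_copy(func, to, from, n)       func((to), (from), (n))

    #define invoke_copy_from_user(to, from, n) \
            invoke_copy(copy_user_helper, to, from, n)
    #define invoke_copy_from_kernel(to, from, n) \
            invoke_copy(copy_user_helper, to, from, n)

The payoff shows in the diffstats: each consolidation removes dozens of duplicated inline-asm lines while the EVA and non-EVA variants keep identical call sites.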
-- cgit v1.1 From ab0aca27c4dfcfa72eb1339ae94edbfbdd751ae8 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 11:54:21 -0400 Subject: mips: make copy_from_user() zero tail explicitly Signed-off-by: Al Viro --- arch/mips/include/asm/uaccess.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) (limited to 'arch') diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 9c05262..712dc40 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -1080,29 +1080,29 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); ({ \ void *__cu_to; \ const void __user *__cu_from; \ - long __cu_len; \ + long __cu_len, __cu_res; \ \ __cu_to = (to); \ __cu_from = (from); \ - __cu_len = (n); \ + __cu_res = __cu_len = (n); \ \ check_object_size(__cu_to, __cu_len, false); \ \ if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_from_kernel(__cu_to, \ + __cu_res = __invoke_copy_from_kernel(__cu_to, \ __cu_from, \ __cu_len); \ } else { \ if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, \ + __cu_res = __invoke_copy_from_user(__cu_to, \ __cu_from, \ __cu_len); \ - } else { \ - memset(__cu_to, 0, __cu_len); \ } \ } \ - __cu_len; \ + if (unlikely(__cu_res)) \ + memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \ + __cu_res; \ }) #define __copy_in_user(to, from, n) \ -- cgit v1.1 From 1a4fded6d32596053d4f0bc8c49028faf4114a17 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Mon, 20 Mar 2017 12:03:16 -0400 Subject: mips: get rid of tail-zeroing in primitives Signed-off-by: Al Viro --- arch/mips/cavium-octeon/octeon-memcpy.S | 31 +-------------------- arch/mips/include/asm/uaccess.h | 19 ++----------- arch/mips/lib/memcpy.S | 49 --------------------------------- 3 files changed, 3 insertions(+), 96 deletions(-) (limited to 'arch') diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index cfd97f6..0a7c983 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -140,15 +140,6 @@ .set noat /* - * t7 is used as a flag to note inatomic mode. - */ -LEAF(__copy_user_inatomic) -EXPORT_SYMBOL(__copy_user_inatomic) - b __copy_user_common - li t7, 1 - END(__copy_user_inatomic) - -/* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. @@ -161,8 +152,6 @@ EXPORT_SYMBOL(memcpy) __memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) - li t7, 0 /* not inatomic */ -__copy_user_common: /* * Note: dst & src may be unaligned, len may be 0 * Temps @@ -414,25 +403,7 @@ l_exc: LOAD t0, TI_TASK($28) LOAD t0, THREAD_BUADDR(t0) # t0 is just past last good address SUB len, AT, t0 # len number of uncopied bytes - bnez t7, 2f /* Skip the zeroing out part if inatomic */ - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. - * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... 
- */ - beqz len, done - SUB src, len, 1 -1: sb zero, 0(dst) - ADD dst, dst, 1 - bnez src, 1b - SUB src, src, 1 -2: jr ra + jr ra nop diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 712dc40..988cd3b 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -841,9 +841,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_kernel(to, from, n) \ __invoke_copy_from(__copy_user, to, from, n) -#define __invoke_copy_from_kernel_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - #define __invoke_copy_to_kernel(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) @@ -854,9 +851,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ __invoke_copy_from(__copy_user, to, from, n) -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic, to, from, n) - #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_user, to, from, n) @@ -867,8 +861,6 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n); /* EVA specific functions */ -extern size_t __copy_user_inatomic_eva(void *__to, const void *__from, - size_t __n); extern size_t __copy_from_user_eva(void *__to, const void *__from, size_t __n); extern size_t __copy_to_user_eva(void *__to, const void *__from, @@ -882,9 +874,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #define __invoke_copy_from_user(to, from, n) \ __invoke_copy_from(__copy_from_user_eva, to, from, n) -#define __invoke_copy_from_user_inatomic(to, from, n) \ - __invoke_copy_from(__copy_user_inatomic_eva, to, from, n) - #define __invoke_copy_to_user(to, from, n) \ __invoke_copy_to(__copy_to_user_eva, to, from, n) @@ -930,8 +919,6 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); __cu_len; \ }) -extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); - #define __copy_to_user_inatomic(to, from, n) \ ({ \ void __user *__cu_to; \ @@ -966,12 +953,10 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n); check_object_size(__cu_to, __cu_len, false); \ \ if (eva_kernel_access()) \ - __cu_len = __invoke_copy_from_kernel_inatomic(__cu_to, \ - __cu_from,\ + __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\ __cu_len);\ else \ - __cu_len = __invoke_copy_from_user_inatomic(__cu_to, \ - __cu_from, \ + __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ __cu_len); \ __cu_len; \ }) diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S index c3031f1..3114a2e 100644 --- a/arch/mips/lib/memcpy.S +++ b/arch/mips/lib/memcpy.S @@ -562,39 +562,9 @@ LOADK t0, THREAD_BUADDR(t0) # t0 is just past last good address nop SUB len, AT, t0 # len number of uncopied bytes - bnez t6, .Ldone\@ /* Skip the zeroing part if inatomic */ - /* - * Here's where we rely on src and dst being incremented in tandem, - * See (3) above. - * dst += (fault addr - src) to put dst at first byte to clear - */ - ADD dst, t0 # compute start address in a1 - SUB dst, src - /* - * Clear len bytes starting at dst. Can't call __bzero because it - * might modify len. An inefficient loop for these rare times... 
- */ - .set reorder /* DADDI_WAR */ - SUB src, len, 1 - beqz len, .Ldone\@ - .set noreorder -1: sb zero, 0(dst) - ADD dst, dst, 1 -#ifndef CONFIG_CPU_DADDI_WORKAROUNDS - bnez src, 1b - SUB src, src, 1 -#else - .set push - .set noat - li v1, 1 - bnez src, 1b - SUB src, src, v1 - .set pop -#endif jr ra nop - #define SEXC(n) \ .set reorder; /* DADDI_WAR */ \ .Ls_exc_p ## n ## u\@: \ @@ -673,15 +643,6 @@ LEAF(__rmemcpy) /* a0=dst a1=src a2=len */ END(__rmemcpy) /* - * t6 is used as a flag to note inatomic mode. - */ -LEAF(__copy_user_inatomic) -EXPORT_SYMBOL(__copy_user_inatomic) - b __copy_user_common - li t6, 1 - END(__copy_user_inatomic) - -/* * A combined memcpy/__copy_user * __copy_user sets len to 0 for success; else to an upper bound of * the number of uncopied bytes. @@ -694,8 +655,6 @@ EXPORT_SYMBOL(memcpy) .L__memcpy: FEXPORT(__copy_user) EXPORT_SYMBOL(__copy_user) - li t6, 0 /* not inatomic */ -__copy_user_common: /* Legacy Mode, user <-> user */ __BUILD_COPY_USER LEGACY_MODE USEROP USEROP @@ -708,20 +667,12 @@ __copy_user_common: * space */ -LEAF(__copy_user_inatomic_eva) -EXPORT_SYMBOL(__copy_user_inatomic_eva) - b __copy_from_user_common - li t6, 1 - END(__copy_user_inatomic_eva) - /* * __copy_from_user (EVA) */ LEAF(__copy_from_user_eva) EXPORT_SYMBOL(__copy_from_user_eva) - li t6, 0 /* not inatomic */ -__copy_from_user_common: __BUILD_COPY_USER EVA_MODE USEROP KERNELOP END(__copy_from_user_eva) -- cgit v1.1 From 2260ea86c0e7623d4e99886638b2b83ef21efa60 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 15:59:26 -0400 Subject: mips: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/mips/Kconfig | 1 + arch/mips/include/asm/uaccess.h | 274 ++++------------------------------------ 2 files changed, 27 insertions(+), 248 deletions(-) (limited to 'arch') diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index a008a9f..aff5633 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig @@ -69,6 +69,7 @@ config MIPS select HAVE_EXIT_THREAD select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_ARCH_HARDENED_USERCOPY + select ARCH_HAS_RAW_COPY_USER menu "Machine selection" diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h index 988cd3b..99e629a 100644 --- a/arch/mips/include/asm/uaccess.h +++ b/arch/mips/include/asm/uaccess.h @@ -882,257 +882,35 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n); #endif /* CONFIG_EVA */ -/* - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. 
- */ -#define __copy_to_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - might_fault(); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ - __cu_len); \ - else \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -#define __copy_to_user_inatomic(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_to_kernel(__cu_to, __cu_from, \ - __cu_len); \ - else \ - __cu_len = __invoke_copy_to_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -#define __copy_from_user_inatomic(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) \ - __cu_len = __invoke_copy_from_kernel(__cu_to, __cu_from,\ - __cu_len);\ - else \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ - __cu_len; \ -}) - -/* - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -#define copy_to_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_from, __cu_len, true); \ - \ - if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_to_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - if (access_ok(VERIFY_WRITE, __cu_to, __cu_len)) { \ - might_fault(); \ - __cu_len = __invoke_copy_to_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - __cu_len; \ -}) - -/* - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. 
- */ -#define __copy_from_user(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) { \ - __cu_len = __invoke_copy_from_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - might_fault(); \ - __cu_len = __invoke_copy_from_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ - __cu_len; \ -}) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + if (eva_kernel_access()) + return __invoke_copy_to_kernel(to, from, n); + else + return __invoke_copy_to_user(to, from, n); +} -/* - * copy_from_user: - Copy a block of data from user space. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes. - */ -#define copy_from_user(to, from, n) \ -({ \ - void *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len, __cu_res; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_res = __cu_len = (n); \ - \ - check_object_size(__cu_to, __cu_len, false); \ - \ - if (eva_kernel_access()) { \ - __cu_res = __invoke_copy_from_kernel(__cu_to, \ - __cu_from, \ - __cu_len); \ - } else { \ - if (access_ok(VERIFY_READ, __cu_from, __cu_len)) { \ - might_fault(); \ - __cu_res = __invoke_copy_from_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - if (unlikely(__cu_res)) \ - memset(__cu_to + __cu_len - __cu_res, 0, __cu_res); \ - __cu_res; \ -}) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + if (eva_kernel_access()) + return __invoke_copy_from_kernel(to, from, n); + else + return __invoke_copy_from_user(to, from, n); +} -#define __copy_in_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - if (eva_kernel_access()) { \ - __cu_len = ___invoke_copy_in_kernel(__cu_to, __cu_from, \ - __cu_len); \ - } else { \ - might_fault(); \ - __cu_len = ___invoke_copy_in_user(__cu_to, __cu_from, \ - __cu_len); \ - } \ - __cu_len; \ -}) +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER -#define copy_in_user(to, from, n) \ -({ \ - void __user *__cu_to; \ - const void __user *__cu_from; \ - long __cu_len; \ - \ - __cu_to = (to); \ - __cu_from = (from); \ - __cu_len = (n); \ - if (eva_kernel_access()) { \ - __cu_len = ___invoke_copy_in_kernel(__cu_to,__cu_from, \ - __cu_len); \ - } else { \ - if (likely(access_ok(VERIFY_READ, __cu_from, __cu_len) &&\ - access_ok(VERIFY_WRITE, __cu_to, __cu_len))) {\ - might_fault(); \ - __cu_len = ___invoke_copy_in_user(__cu_to, \ - __cu_from, \ - __cu_len); \ - } \ - } \ - __cu_len; \ -}) +static inline unsigned long +raw_copy_in_user(void __user*to, const void __user *from, unsigned long n) +{ + if (eva_kernel_access()) + return ___invoke_copy_in_kernel(to, from, n); + else + return ___invoke_copy_in_user(to, from, n); +} extern __kernel_size_t __bzero_kernel(void __user *addr, __kernel_size_t size); extern __kernel_size_t __bzero(void 
__user *addr, __kernel_size_t size); -- cgit v1.1 From 3448890c32c32c482c3ec20baa8fdd2ab4f94cc0 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 16:35:08 -0400 Subject: powerpc: get rid of zeroing, switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/powerpc/Kconfig | 1 + arch/powerpc/include/asm/uaccess.h | 59 ++++++-------------------------------- arch/powerpc/lib/Makefile | 2 +- arch/powerpc/lib/copy_32.S | 14 --------- arch/powerpc/lib/copyuser_64.S | 35 ++-------------------- arch/powerpc/lib/usercopy_64.c | 41 -------------------------- 6 files changed, 14 insertions(+), 138 deletions(-) delete mode 100644 arch/powerpc/lib/usercopy_64.c (limited to 'arch') diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 97a8bc8..839f088 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -87,6 +87,7 @@ config PPC select ARCH_HAS_DMA_SET_COHERENT_MASK select ARCH_HAS_ELF_RANDOMIZE select ARCH_HAS_GCOV_PROFILE_ALL + select ARCH_HAS_RAW_COPY_USER select ARCH_HAS_SCALED_CPUTIME if VIRT_CPU_ACCOUNTING_NATIVE select ARCH_HAS_SG_CHAIN select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h index b19883f..5c0d8a8 100644 --- a/arch/powerpc/include/asm/uaccess.h +++ b/arch/powerpc/include/asm/uaccess.h @@ -269,42 +269,19 @@ extern unsigned long __copy_tofrom_user(void __user *to, #ifndef __powerpc64__ -static inline unsigned long copy_from_user(void *to, - const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) { - check_object_size(to, n, false); - return __copy_tofrom_user((__force void __user *)to, from, n); - } - memset(to, 0, n); - return n; -} - -static inline unsigned long copy_to_user(void __user *to, - const void *from, unsigned long n) -{ - if (access_ok(VERIFY_WRITE, to, n)) { - check_object_size(from, n, true); - return __copy_tofrom_user(to, (__force void __user *)from, n); - } - return n; -} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER #else /* __powerpc64__ */ -#define __copy_in_user(to, from, size) \ - __copy_tofrom_user((to), (from), (size)) - -extern unsigned long copy_from_user(void *to, const void __user *from, - unsigned long n); -extern unsigned long copy_to_user(void __user *to, const void *from, - unsigned long n); -extern unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n); - +static inline unsigned long +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) +{ + return __copy_tofrom_user(to, from, n); +} #endif /* __powerpc64__ */ -static inline unsigned long __copy_from_user_inatomic(void *to, +static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) { if (__builtin_constant_p(n) && (n <= 8)) { @@ -328,12 +305,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to, return 0; } - check_object_size(to, n, false); - return __copy_tofrom_user((__force void __user *)to, from, n); } -static inline unsigned long __copy_to_user_inatomic(void __user *to, +static inline unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n) { if (__builtin_constant_p(n) && (n <= 8)) { @@ -357,25 +332,9 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to, return 0; } - check_object_size(from, n, true); - return __copy_tofrom_user(to, (__force const void __user *)from, n); } -static inline unsigned long __copy_from_user(void *to, - const void __user *from, unsigned long 
size) -{ - might_fault(); - return __copy_from_user_inatomic(to, from, size); -} - -static inline unsigned long __copy_to_user(void __user *to, - const void *from, unsigned long size) -{ - might_fault(); - return __copy_to_user_inatomic(to, from, size); -} - extern unsigned long __clear_user(void __user *addr, unsigned long size); static inline unsigned long clear_user(void __user *addr, unsigned long size) diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 2b5e090..ed7dfce 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile @@ -14,7 +14,7 @@ obj-y += string.o alloc.o crtsavres.o code-patching.o \ obj-$(CONFIG_PPC32) += div64.o copy_32.o -obj64-y += copypage_64.o copyuser_64.o usercopy_64.o mem_64.o hweight_64.o \ +obj64-y += copypage_64.o copyuser_64.o mem_64.o hweight_64.o \ copyuser_power7.o string_64.o copypage_power7.o memcpy_power7.o \ memcpy_64.o memcmp_64.o diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index ff0d894..8aedbb5 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S @@ -477,18 +477,6 @@ _GLOBAL(__copy_tofrom_user) bdnz 130b /* then clear out the destination: r3 bytes starting at 4(r6) */ 132: mfctr r3 - srwi. r0,r3,2 - li r9,0 - mtctr r0 - beq 113f -112: stwu r9,4(r6) - bdnz 112b -113: andi. r0,r3,3 - mtctr r0 - beq 120f -114: stb r9,4(r6) - addi r6,r6,1 - bdnz 114b 120: blr EX_TABLE(30b,108b) @@ -497,7 +485,5 @@ _GLOBAL(__copy_tofrom_user) EX_TABLE(41b,111b) EX_TABLE(130b,132b) EX_TABLE(131b,120b) - EX_TABLE(112b,120b) - EX_TABLE(114b,120b) EXPORT_SYMBOL(__copy_tofrom_user) diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S index aee6e24..08da06e 100644 --- a/arch/powerpc/lib/copyuser_64.S +++ b/arch/powerpc/lib/copyuser_64.S @@ -319,32 +319,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) blr /* - * here we have trapped again, need to clear ctr bytes starting at r3 + * here we have trapped again, amount remaining is in ctr. */ -143: mfctr r5 - li r0,0 - mr r4,r3 - mr r3,r5 /* return the number of bytes not copied */ -1: andi. r9,r4,7 - beq 3f -90: stb r0,0(r4) - addic. r5,r5,-1 - addi r4,r4,1 - bne 1b - blr -3: cmpldi cr1,r5,8 - srdi r9,r5,3 - andi. r5,r5,7 - blt cr1,93f - mtctr r9 -91: std r0,0(r4) - addi r4,r4,8 - bdnz 91b -93: beqlr - mtctr r5 -92: stb r0,0(r4) - addi r4,r4,1 - bdnz 92b +143: mfctr r3 blr /* @@ -389,10 +366,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) ld r5,-8(r1) add r6,r6,r5 subf r3,r3,r6 /* #bytes not copied */ -190: -191: -192: - blr /* #bytes not copied in r3 */ + blr EX_TABLE(20b,120b) EX_TABLE(220b,320b) @@ -451,9 +425,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD) EX_TABLE(88b,188b) EX_TABLE(43b,143b) EX_TABLE(89b,189b) - EX_TABLE(90b,190b) - EX_TABLE(91b,191b) - EX_TABLE(92b,192b) /* * Routine to copy a whole page of data, optimized for POWER4. diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c deleted file mode 100644 index 9bd3a3d..0000000 --- a/arch/powerpc/lib/usercopy_64.c +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Functions which are too large to be inlined. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
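With ARCH_HAS_RAW_COPY_USER selected, the clearing loops removed from copy_32.S and copyuser_64.S above become unnecessary: zero-filling the uncopied tail is now done once, in the generic helper behind copy_from_user(). A simplified sketch of that generic logic (a paraphrase, not the exact kernel text):

unsigned long _copy_from_user(void *to, const void __user *from,
                              unsigned long n)
{
        unsigned long res = n;

        might_fault();
        if (likely(access_ok(VERIFY_READ, from, n)))
                res = raw_copy_from_user(to, from, n); /* returns bytes NOT copied */
        if (unlikely(res))
                memset(to + (n - res), 0, res); /* zero the uncopied tail */
        return res;
}

The arch primitive only reports how much it failed to copy; access_ok() and the zeroing are the generic layer's job, which is also why the out-of-line wrappers in usercopy_64.c (deleted below) go away.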
- */ -#include -#include - -unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_READ, from, n))) - n = __copy_from_user(to, from, n); - else - memset(to, 0, n); - return n; -} - -unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) -{ - if (likely(access_ok(VERIFY_WRITE, to, n))) - n = __copy_to_user(to, from, n); - return n; -} - -unsigned long copy_in_user(void __user *to, const void __user *from, - unsigned long n) -{ - might_sleep(); - if (likely(access_ok(VERIFY_READ, from, n) && - access_ok(VERIFY_WRITE, to, n))) - n =__copy_tofrom_user(to, from, n); - return n; -} - -EXPORT_SYMBOL(copy_from_user); -EXPORT_SYMBOL(copy_to_user); -EXPORT_SYMBOL(copy_in_user); - -- cgit v1.1 From 8bec2717265601516bfe129acb4100a3070806ce Mon Sep 17 00:00:00 2001 From: Al Viro Date: Sun, 25 Dec 2016 14:24:35 -0500 Subject: ia64: add extable.h Signed-off-by: Al Viro --- arch/ia64/include/asm/extable.h | 11 +++++++++++ arch/ia64/include/asm/uaccess.h | 8 +------- arch/ia64/mm/extable.c | 5 ++++- 3 files changed, 16 insertions(+), 8 deletions(-) create mode 100644 arch/ia64/include/asm/extable.h (limited to 'arch') diff --git a/arch/ia64/include/asm/extable.h b/arch/ia64/include/asm/extable.h new file mode 100644 index 0000000..20376e7 --- /dev/null +++ b/arch/ia64/include/asm/extable.h @@ -0,0 +1,11 @@ +#ifndef _ASM_IA64_EXTABLE_H +#define _ASM_IA64_EXTABLE_H + +#define ARCH_HAS_RELATIVE_EXTABLE + +struct exception_table_entry { + int insn; /* location-relative address of insn this fixup is for */ + int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */ +}; + +#endif diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index d471d1a..3f5ca0b 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -39,6 +39,7 @@ #include #include #include +#include /* * For historical reasons, the following macros are grossly misnamed: @@ -341,13 +342,6 @@ extern unsigned long __strnlen_user (const char __user *, long); __su_ret; \ }) -#define ARCH_HAS_RELATIVE_EXTABLE - -struct exception_table_entry { - int insn; /* location-relative address of insn this fixup is for */ - int fixup; /* location-relative continuation addr.; if bit 2 is set, r9 is set to 0 */ -}; - #define ARCH_HAS_TRANSLATE_MEM_PTR 1 static __inline__ void * xlate_dev_mem_ptr(phys_addr_t p) diff --git a/arch/ia64/mm/extable.c b/arch/ia64/mm/extable.c index 4edb816..10dd4a6 100644 --- a/arch/ia64/mm/extable.c +++ b/arch/ia64/mm/extable.c @@ -5,7 +5,10 @@ * David Mosberger-Tang */ -#include +#include +#include +#include +#include void ia64_handle_exception (struct pt_regs *regs, const struct exception_table_entry *e) -- cgit v1.1 From 1bd5986bbccf1c44bc365f3cd5280350e8aa76bc Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 10:19:22 -0500 Subject: ia64: get rid of 'segment' argument of __{get,put}_user_check() always equal to get_fs() Signed-off-by: Al Viro --- arch/ia64/include/asm/uaccess.h | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 3f5ca0b..4132497 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -76,8 +76,8 @@ * (a) re-use the arguments for side effects (sizeof/typeof is ok) * (b) require any knowledge of processes at this stage */ -#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), 
sizeof(*(ptr)), get_fs()) -#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr)), get_fs()) +#define put_user(x, ptr) __put_user_check((__typeof__(*(ptr))) (x), (ptr), sizeof(*(ptr))) +#define get_user(x, ptr) __get_user_check((x), (ptr), sizeof(*(ptr))) /* * The "__xxx" versions do not do address space checking, useful when @@ -199,7 +199,7 @@ extern void __get_user_unknown (void); }) #define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS) -#define __get_user_check(x, ptr, size, segment) __do_get_user(1, x, ptr, size, segment) +#define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size, get_fs()) extern void __put_user_unknown (void); @@ -226,7 +226,7 @@ extern void __put_user_unknown (void); }) #define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS) -#define __put_user_check(x, ptr, size, segment) __do_put_user(1, x, ptr, size, segment) +#define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size, get_fs()) /* * Complex access routines -- cgit v1.1 From 11836ecee96e8e1a62d5bd3ddd60eb9ecc01c375 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 10:21:09 -0500 Subject: ia64: get rid of 'segment' argument of __do_{get,put}_user() it's only evaluated if the first argument is not 0, and in those cases it's always equal to get_fs() Signed-off-by: Al Viro --- arch/ia64/include/asm/uaccess.h | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 4132497..24b3934 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -180,13 +180,13 @@ extern void __get_user_unknown (void); * could clobber r8 and r9 (among others). Thus, be careful not to evaluate it while * using r8/r9. */ -#define __do_get_user(check, x, ptr, size, segment) \ +#define __do_get_user(check, x, ptr, size) \ ({ \ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ __typeof__ (size) __gu_size = (size); \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ - if (!check || __access_ok(__gu_ptr, size, segment)) \ + if (!check || __access_ok(__gu_ptr, size, get_fs())) \ switch (__gu_size) { \ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \ @@ -198,8 +198,8 @@ extern void __get_user_unknown (void); __gu_err; \ }) -#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size, KERNEL_DS) -#define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size, get_fs()) +#define __get_user_nocheck(x, ptr, size) __do_get_user(0, x, ptr, size) +#define __get_user_check(x, ptr, size) __do_get_user(1, x, ptr, size) extern void __put_user_unknown (void); @@ -207,14 +207,14 @@ extern void __put_user_unknown (void); * Evaluating arguments X, PTR, SIZE, and SEGMENT may involve subroutine-calls, which * could clobber r8 (among others). Thus, be careful not to evaluate them while using r8. 
*/ -#define __do_put_user(check, x, ptr, size, segment) \ +#define __do_put_user(check, x, ptr, size) \ ({ \ __typeof__ (x) __pu_x = (x); \ __typeof__ (*(ptr)) __user *__pu_ptr = (ptr); \ __typeof__ (size) __pu_size = (size); \ long __pu_err = -EFAULT; \ \ - if (!check || __access_ok(__pu_ptr, __pu_size, segment)) \ + if (!check || __access_ok(__pu_ptr, __pu_size, get_fs())) \ switch (__pu_size) { \ case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \ case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \ @@ -225,8 +225,8 @@ extern void __put_user_unknown (void); __pu_err; \ }) -#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size, KERNEL_DS) -#define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size, get_fs()) +#define __put_user_nocheck(x, ptr, size) __do_put_user(0, x, ptr, size) +#define __put_user_check(x, ptr, size) __do_put_user(1, x, ptr, size) /* * Complex access routines -- cgit v1.1 From 7bb8a503e8fad51e4fb5ed2453b9e23bedd82012 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 27 Dec 2016 10:29:34 -0500 Subject: ia64: sanitize __access_ok() turn into static inline, kill the 'segment' argument. Signed-off-by: Al Viro --- arch/ia64/include/asm/uaccess.h | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 24b3934..22c4e6d 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -59,14 +59,14 @@ * address TASK_SIZE is never valid. We also need to make sure that the address doesn't * point inside the virtually mapped linear page table. */ -#define __access_ok(addr, size, segment) \ -({ \ - __chk_user_ptr(addr); \ - (likely((unsigned long) (addr) <= (segment).seg) \ - && ((segment).seg == KERNEL_DS.seg \ - || likely(REGION_OFFSET((unsigned long) (addr)) < RGN_MAP_LIMIT))); \ -}) -#define access_ok(type, addr, size) __access_ok((addr), (size), get_fs()) +static inline int __access_ok(const void __user *p, unsigned long size) +{ + unsigned long addr = (unsigned long)p; + unsigned long seg = get_fs().seg; + return likely(addr <= seg) && + (seg == KERNEL_DS.seg || likely(REGION_OFFSET(addr) < RGN_MAP_LIMIT)); +} +#define access_ok(type, addr, size) __access_ok((addr), (size)) /* * These are the main single-value transfer routines. 
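For context, a usage sketch of the simplified interface -- a hypothetical driver snippet, not code from this series; get_user()/put_user() now pick up the segment limit internally via get_fs() rather than having it threaded through every macro level:

static int example_bump(int __user *uarg)
{
        int val;

        if (get_user(val, uarg))        /* 0 on success, -EFAULT on a bad pointer */
                return -EFAULT;
        return put_user(val + 1, uarg); /* likewise 0 or -EFAULT */
}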
They automatically @@ -186,7 +186,7 @@ extern void __get_user_unknown (void); __typeof__ (size) __gu_size = (size); \ long __gu_err = -EFAULT; \ unsigned long __gu_val = 0; \ - if (!check || __access_ok(__gu_ptr, size, get_fs())) \ + if (!check || __access_ok(__gu_ptr, size)) \ switch (__gu_size) { \ case 1: __get_user_size(__gu_val, __gu_ptr, 1, __gu_err); break; \ case 2: __get_user_size(__gu_val, __gu_ptr, 2, __gu_err); break; \ @@ -214,7 +214,7 @@ extern void __put_user_unknown (void); __typeof__ (size) __pu_size = (size); \ long __pu_err = -EFAULT; \ \ - if (!check || __access_ok(__pu_ptr, __pu_size, get_fs())) \ + if (!check || __access_ok(__pu_ptr, __pu_size)) \ switch (__pu_size) { \ case 1: __put_user_size(__pu_x, __pu_ptr, 1, __pu_err); break; \ case 2: __put_user_size(__pu_x, __pu_ptr, 2, __pu_err); break; \ @@ -258,7 +258,7 @@ __copy_from_user (void *to, const void __user *from, unsigned long count) const void *__cu_from = (from); \ long __cu_len = (n); \ \ - if (__access_ok(__cu_to, __cu_len, get_fs())) { \ + if (__access_ok(__cu_to, __cu_len)) { \ check_object_size(__cu_from, __cu_len, true); \ __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ } \ @@ -269,7 +269,7 @@ static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) { check_object_size(to, n, false); - if (likely(__access_ok(from, n, get_fs()))) + if (likely(__access_ok(from, n))) n = __copy_user((__force void __user *) to, from, n); else memset(to, 0, n); @@ -293,7 +293,7 @@ extern unsigned long __do_clear_user (void __user *, unsigned long); #define clear_user(to, n) \ ({ \ unsigned long __cu_len = (n); \ - if (__access_ok(to, __cu_len, get_fs())) \ + if (__access_ok(to, __cu_len)) \ __cu_len = __do_clear_user(to, __cu_len); \ __cu_len; \ }) @@ -309,7 +309,7 @@ extern long __must_check __strncpy_from_user (char *to, const char __user *from, ({ \ const char __user * __sfu_from = (from); \ long __sfu_ret = -EFAULT; \ - if (__access_ok(__sfu_from, 0, get_fs())) \ + if (__access_ok(__sfu_from, 0)) \ __sfu_ret = __strncpy_from_user((to), __sfu_from, (n)); \ __sfu_ret; \ }) @@ -321,7 +321,7 @@ extern unsigned long __strlen_user (const char __user *); ({ \ const char __user *__su_str = (str); \ unsigned long __su_ret = 0; \ - if (__access_ok(__su_str, 0, get_fs())) \ + if (__access_ok(__su_str, 0)) \ __su_ret = __strlen_user(__su_str); \ __su_ret; \ }) @@ -337,7 +337,7 @@ extern unsigned long __strnlen_user (const char __user *, long); ({ \ const char __user *__su_str = (str); \ unsigned long __su_ret = 0; \ - if (__access_ok(__su_str, 0, get_fs())) \ + if (__access_ok(__su_str, 0)) \ __su_ret = __strnlen_user(__su_str, len); \ __su_ret; \ }) -- cgit v1.1 From 652c1aaca2a026d374d139058e8e7f3175e861d6 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 17:29:42 -0400 Subject: ia64: get rid of copy_in_user() it hadn't been biarch for years Signed-off-by: Al Viro --- arch/ia64/include/asm/uaccess.h | 10 ---------- 1 file changed, 10 deletions(-) (limited to 'arch') diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index 22c4e6d..bd15972 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -276,16 +276,6 @@ copy_from_user(void *to, const void __user *from, unsigned long n) return n; } -#define __copy_in_user(to, from, size) __copy_user((to), (from), (size)) - -static inline unsigned long -copy_in_user (void __user *to, const void __user *from, unsigned long n) -{ - if 
(likely(access_ok(VERIFY_READ, from, n) && access_ok(VERIFY_WRITE, to, n))) - n = __copy_user(to, from, n); - return n; -} - extern unsigned long __do_clear_user (void __user *, unsigned long); #define __clear_user(to, n) __do_clear_user(to, n) -- cgit v1.1 From b3622d3217eb351717b157d64ce0895e662b7657 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Wed, 5 Apr 2017 18:56:55 -0400 Subject: ia64: get rid of padding, switch to RAW_COPY_USER Merced is fucked, so what else is new? Signed-off-by: Al Viro --- arch/ia64/Kconfig | 1 + arch/ia64/include/asm/uaccess.h | 35 ++++------------------------------- arch/ia64/lib/memcpy_mck.S | 13 +------------ 3 files changed, 6 insertions(+), 43 deletions(-) (limited to 'arch') diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index 18ca6a9..0ed0e44 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig @@ -53,6 +53,7 @@ config IA64 select ARCH_USE_CMPXCHG_LOCKREF select HAVE_ARCH_AUDITSYSCALL select HAVE_ARCH_HARDENED_USERCOPY + select ARCH_HAS_RAW_COPY_USER default y help The Itanium Processor Family is Intel's 64-bit successor to diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h index bd15972..82a7646 100644 --- a/arch/ia64/include/asm/uaccess.h +++ b/arch/ia64/include/asm/uaccess.h @@ -235,46 +235,19 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __user *from, unsigned long count); static inline unsigned long -__copy_to_user (void __user *to, const void *from, unsigned long count) +raw_copy_to_user(void __user *to, const void *from, unsigned long count) { - check_object_size(from, count, true); - return __copy_user(to, (__force void __user *) from, count); } static inline unsigned long -__copy_from_user (void *to, const void __user *from, unsigned long count) +raw_copy_from_user(void *to, const void __user *from, unsigned long count) { - check_object_size(to, count, false); - return __copy_user((__force void __user *) to, from, count); } -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user -#define copy_to_user(to, from, n) \ -({ \ - void __user *__cu_to = (to); \ - const void *__cu_from = (from); \ - long __cu_len = (n); \ - \ - if (__access_ok(__cu_to, __cu_len)) { \ - check_object_size(__cu_from, __cu_len, true); \ - __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ - } \ - __cu_len; \ -}) - -static inline unsigned long -copy_from_user(void *to, const void __user *from, unsigned long n) -{ - check_object_size(to, n, false); - if (likely(__access_ok(from, n))) - n = __copy_user((__force void __user *) to, from, n); - else - memset(to, 0, n); - return n; -} +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER extern unsigned long __do_clear_user (void __user *, unsigned long); diff --git a/arch/ia64/lib/memcpy_mck.S b/arch/ia64/lib/memcpy_mck.S index b264b6a..bbbadc4 100644 --- a/arch/ia64/lib/memcpy_mck.S +++ b/arch/ia64/lib/memcpy_mck.S @@ -556,9 +556,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ #define D r22 #define F r28 -#define memset_arg0 r32 -#define memset_arg2 r33 - #define saved_retval loc0 #define saved_rtlink loc1 #define saved_pfs_stack loc2 @@ -622,7 +619,7 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ * (faulting_addr - orig_dst) -> len to faulting st address * B = (cur_dst - orig_dst) -> len copied so far * C = A - B -> len need to be copied - * D = orig_len - A -> len need to be zeroed + * D = orig_len - A -> len need to be left alone */ (p6) sub A = F, saved_in0 (p7) sub A = F, saved_in1 @@ -638,9 +635,6
@@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ sub D = saved_in2, A ;; cmp.gt p8,p0=C,r0 // more than 1 byte? - add memset_arg0=saved_in0, A -(p6) mov memset_arg2=0 // copy_to_user should not call memset -(p7) mov memset_arg2=D // copy_from_user need to have kbuf zeroed mov r8=0 mov saved_retval = D mov saved_rtlink = b0 @@ -652,11 +646,6 @@ EK(.ex_handler, (p17) st8 [dst1]=r39,8); \ ;; add saved_retval=saved_retval,r8 // above might return non-zero value - cmp.gt p8,p0=memset_arg2,r0 // more than 1 byte? - mov out0=memset_arg0 // *s - mov out1=r0 // c - mov out2=memset_arg2 // n -(p8) br.call.sptk.few b0=memset ;; mov retval=saved_retval -- cgit v1.1 From d491afb8654e84f972ed00cb0e087ad2fae5a1ac Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 12:10:12 -0400 Subject: microblaze: switch to RAW_COPY_USER [kudos to Piotr Sroka for spotting a braino in the previous variant] Signed-off-by: Al Viro --- arch/microblaze/Kconfig | 1 + arch/microblaze/include/asm/uaccess.h | 36 ++++++++--------------------------- 2 files changed, 9 insertions(+), 28 deletions(-) (limited to 'arch') diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index 85885a5..1aff365 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig @@ -34,6 +34,7 @@ config MICROBLAZE select TRACING_SUPPORT select VIRT_TO_BUS select CPU_NO_EFFICIENT_FFS + select ARCH_HAS_RAW_COPY_USER config SWAP def_bool n diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h index 2c39375..38f2c9c 100644 --- a/arch/microblaze/include/asm/uaccess.h +++ b/arch/microblaze/include/asm/uaccess.h @@ -336,39 +336,19 @@ extern long __user_bad(void); __gu_err; \ }) - -/* copy_to_from_user */ -#define __copy_from_user(to, from, n) \ - __copy_tofrom_user((__force void __user *)(to), \ - (void __user *)(from), (n)) -#define __copy_from_user_inatomic(to, from, n) \ - __copy_from_user((to), (from), (n)) - -static inline long copy_from_user(void *to, - const void __user *from, unsigned long n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { - unsigned long res = n; - might_fault(); - if (likely(access_ok(VERIFY_READ, from, n))) - res = __copy_from_user(to, from, n); - if (unlikely(res)) - memset(to + (n - res), 0, res); - return res; + return __copy_tofrom_user((__force void __user *)to, from, n); } -#define __copy_to_user(to, from, n) \ - __copy_tofrom_user((void __user *)(to), \ - (__force const void __user *)(from), (n)) -#define __copy_to_user_inatomic(to, from, n) __copy_to_user((to), (from), (n)) - -static inline long copy_to_user(void __user *to, - const void *from, unsigned long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { - might_fault(); - if (access_ok(VERIFY_WRITE, to, n)) - return __copy_to_user(to, from, n); - return n; + return __copy_tofrom_user(to, (__force const void __user *)from, n); } +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER /* * Copy a null terminated string from userspace. 
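Two points about the microblaze conversion above. Defining INLINE_COPY_FROM_USER and INLINE_COPY_TO_USER asks the generic code to instantiate the copy_{from,to}_user() wrappers inline in the header instead of using the out-of-line copies in lib/usercopy.c. And the caller-visible contract is unchanged by the switch; a hedged illustration (hypothetical snippet, not from the patch):

static int example_fetch(void *kbuf, const void __user *ubuf,
                         unsigned long len)
{
        unsigned long uncopied = copy_from_user(kbuf, ubuf, len);

        if (uncopied)
                return -EFAULT; /* generic code already zeroed the tail of kbuf */
        return 0;
}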
-- cgit v1.1 From ac4691fac8ad9bbb72268b6e4c73a168e670a196 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 28 Mar 2017 01:28:09 -0400 Subject: hexagon: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/hexagon/Kconfig | 1 + arch/hexagon/include/asm/uaccess.h | 23 ++++++++++------------- arch/hexagon/kernel/hexagon_ksyms.c | 4 ++-- arch/hexagon/mm/copy_from_user.S | 2 +- arch/hexagon/mm/copy_to_user.S | 2 +- 5 files changed, 15 insertions(+), 17 deletions(-) (limited to 'arch') diff --git a/arch/hexagon/Kconfig b/arch/hexagon/Kconfig index 1941e4b..0c536a8 100644 --- a/arch/hexagon/Kconfig +++ b/arch/hexagon/Kconfig @@ -26,6 +26,7 @@ config HEXAGON select GENERIC_CLOCKEVENTS_BROADCAST select MODULES_USE_ELF_RELA select GENERIC_CPU_DEVICES + select ARCH_HAS_RAW_COPY_USER ---help--- Qualcomm Hexagon is a processor architecture designed for high performance and low power across a wide variety of applications. diff --git a/arch/hexagon/include/asm/uaccess.h b/arch/hexagon/include/asm/uaccess.h index 3a7f818..458b698 100644 --- a/arch/hexagon/include/asm/uaccess.h +++ b/arch/hexagon/include/asm/uaccess.h @@ -65,19 +65,12 @@ */ /* Assembly somewhat optimized copy routines */ -unsigned long __copy_from_user_hexagon(void *to, const void __user *from, +unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n); -unsigned long __copy_to_user_hexagon(void __user *to, const void *from, +unsigned long raw_copy_to_user(void __user *to, const void *from, unsigned long n); - -#define __copy_from_user(to, from, n) __copy_from_user_hexagon(to, from, n) -#define __copy_to_user(to, from, n) __copy_to_user_hexagon(to, from, n) - -/* - * XXX todo: some additional performance gain is possible by - * implementing __copy_to/from_user_inatomic, which is much - * like __copy_to/from_user, but performs slightly less checking.
- */ +#define INLINE_COPY_FROM_USER +#define INLINE_COPY_TO_USER __kernel_size_t __clear_user_hexagon(void __user *dest, unsigned long count); #define __clear_user(a, s) __clear_user_hexagon((a), (s)) @@ -104,10 +97,14 @@ static inline long hexagon_strncpy_from_user(char *dst, const char __user *src, return -EFAULT; if (res > n) { - copy_from_user(dst, src, n); + long left = raw_copy_from_user(dst, src, n); + if (unlikely(left)) + memset(dst + (n - left), 0, left); return n; } else { - copy_from_user(dst, src, res); + long left = raw_copy_from_user(dst, src, res); + if (unlikely(left)) + memset(dst + (res - left), 0, left); return res-1; } } diff --git a/arch/hexagon/kernel/hexagon_ksyms.c b/arch/hexagon/kernel/hexagon_ksyms.c index af9dec4..00bcad9 100644 --- a/arch/hexagon/kernel/hexagon_ksyms.c +++ b/arch/hexagon/kernel/hexagon_ksyms.c @@ -25,8 +25,8 @@ /* Additional functions */ EXPORT_SYMBOL(__clear_user_hexagon); -EXPORT_SYMBOL(__copy_from_user_hexagon); -EXPORT_SYMBOL(__copy_to_user_hexagon); +EXPORT_SYMBOL(raw_copy_from_user); +EXPORT_SYMBOL(raw_copy_to_user); EXPORT_SYMBOL(__iounmap); EXPORT_SYMBOL(__strnlen_user); EXPORT_SYMBOL(__vmgetie); diff --git a/arch/hexagon/mm/copy_from_user.S b/arch/hexagon/mm/copy_from_user.S index 7fc94f3..7da066f 100644 --- a/arch/hexagon/mm/copy_from_user.S +++ b/arch/hexagon/mm/copy_from_user.S @@ -44,7 +44,7 @@ #define bytes r2 #define loopcount r5 -#define FUNCNAME __copy_from_user_hexagon +#define FUNCNAME raw_copy_from_user #include "copy_user_template.S" /* LOAD FAULTS from COPY_FROM_USER */ diff --git a/arch/hexagon/mm/copy_to_user.S b/arch/hexagon/mm/copy_to_user.S index 0cfbcc0..a7b7f8d 100644 --- a/arch/hexagon/mm/copy_to_user.S +++ b/arch/hexagon/mm/copy_to_user.S @@ -43,7 +43,7 @@ #define bytes r2 #define loopcount r5 -#define FUNCNAME __copy_to_user_hexagon +#define FUNCNAME raw_copy_to_user #include "copy_user_template.S" /* STORE FAULTS from COPY_TO_USER */ -- cgit v1.1 From 9a677341cd8de01a9eaf2c24a4b735939e507943 Mon Sep 17 00:00:00 2001 From: Al Viro Date: Tue, 21 Mar 2017 12:02:25 -0400 Subject: m32r: switch to RAW_COPY_USER Signed-off-by: Al Viro --- arch/m32r/Kconfig | 1 + arch/m32r/include/asm/uaccess.h | 98 +++-------------------------------------- arch/m32r/kernel/m32r_ksyms.c | 2 - arch/m32r/lib/usercopy.c | 22 --------- 4 files changed, 8 insertions(+), 115 deletions(-) (limited to 'arch') diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 9547446..b3e82bd 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig @@ -19,6 +19,7 @@ config M32R select HAVE_DEBUG_STACKOVERFLOW select CPU_NO_EFFICIENT_FFS select DMA_NOOP_OPS + select ARCH_HAS_RAW_COPY_USER config SBUS bool diff --git a/arch/m32r/include/asm/uaccess.h b/arch/m32r/include/asm/uaccess.h index d5c5e68..07be349 100644 --- a/arch/m32r/include/asm/uaccess.h +++ b/arch/m32r/include/asm/uaccess.h @@ -13,6 +13,7 @@ */ #include #include +#include /* * The fs value determines whether argument validity checking should be @@ -463,107 +464,22 @@ do { \ /* We let the __ versions of copy_from/to_user inline, because they're often * used in fast paths and have only a small space overhead. 
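In the m32r hunks that follow, the prefetchw(to)/prefetch(from) calls preserve the cache hints that previously lived in the deleted out-of-line helpers: prefetchw() warms the destination lines for writing and prefetch() the source lines for reading, before the bulk copy starts. A minimal sketch of the pattern (hypothetical helper; memcpy() standing in for the arch copy loop):

#include <linux/prefetch.h>
#include <linux/string.h>

static inline void copy_bulk_hinted(void *dst, const void *src,
                                    unsigned long n)
{
        prefetchw(dst);      /* destination lines will be written */
        prefetch(src);       /* source lines will be read */
        memcpy(dst, src, n); /* stand-in for the real copy loop */
}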
*/ -static inline unsigned long __generic_copy_from_user_nocheck(void *to, - const void __user *from, unsigned long n) +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) { + prefetchw(to); __copy_user(to, from, n); return n; } -static inline unsigned long __generic_copy_to_user_nocheck(void __user *to, - const void *from, unsigned long n) +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) { + prefetch(from); __copy_user(to, from, n); return n; } -unsigned long __generic_copy_to_user(void __user *, const void *, unsigned long); -unsigned long __generic_copy_from_user(void *, const void __user *, unsigned long); - -/** - * __copy_to_user: - Copy a block of data into user space, with less checking. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -#define __copy_to_user(to, from, n) \ - __generic_copy_to_user_nocheck((to), (from), (n)) - -#define __copy_to_user_inatomic __copy_to_user -#define __copy_from_user_inatomic __copy_from_user - -/** - * copy_to_user: - Copy a block of data into user space. - * @to: Destination address, in user space. - * @from: Source address, in kernel space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from kernel space to user space. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - */ -#define copy_to_user(to, from, n) \ -({ \ - might_fault(); \ - __generic_copy_to_user((to), (from), (n)); \ -}) - -/** - * __copy_from_user: - Copy a block of data from user space, with less checking. - * @to: Destination address, in kernel space. - * @from: Source address, in user space. - * @n: Number of bytes to copy. - * - * Context: User context only. This function may sleep if pagefaults are - * enabled. - * - * Copy data from user space to kernel space. Caller must check - * the specified block with access_ok() before calling this function. - * - * Returns number of bytes that could not be copied. - * On success, this will be zero. - * - * If some data could not be copied, this function will pad the copied - * data to the requested size using zero bytes.
- */ -#define copy_from_user(to, from, n) \ -({ \ - might_fault(); \ - __generic_copy_from_user((to), (from), (n)); \ -}) - long __must_check strncpy_from_user(char *dst, const char __user *src, long count); long __must_check __strncpy_from_user(char *dst, diff --git a/arch/m32r/kernel/m32r_ksyms.c b/arch/m32r/kernel/m32r_ksyms.c index d763f0b..a4d43b5 100644 --- a/arch/m32r/kernel/m32r_ksyms.c +++ b/arch/m32r/kernel/m32r_ksyms.c @@ -26,8 +26,6 @@ EXPORT_SYMBOL(strncpy_from_user); EXPORT_SYMBOL(__strncpy_from_user); EXPORT_SYMBOL(clear_user); EXPORT_SYMBOL(__clear_user); -EXPORT_SYMBOL(__generic_copy_from_user); -EXPORT_SYMBOL(__generic_copy_to_user); EXPORT_SYMBOL(strnlen_user); #ifdef CONFIG_SMP diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c index 6aacf5b..b3ef2c8 100644 --- a/arch/m32r/lib/usercopy.c +++ b/arch/m32r/lib/usercopy.c @@ -11,28 +11,6 @@ #include #include -unsigned long -__generic_copy_to_user(void __user *to, const void *from, unsigned long n) -{ - prefetch(from); - if (access_ok(VERIFY_WRITE, to, n)) - __copy_user(to,from,n); - return n; -} - -unsigned long -__generic_copy_from_user(void *to, const void __user *from, unsigned long n) -{ - unsigned long ret = n; - prefetchw(to); - if (access_ok(VERIFY_READ, from, n)) - ret = __copy_user(to,from,n); - if (unlikely(ret)) - memset(to + n - ret, 0, ret); - return ret; -} - - /* * Copy a null terminated string from userspace. */ -- cgit v1.1
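Taken together, these conversions reduce each architecture's user-copy obligation to two primitives with a single contract: copy as much as possible, return the number of bytes not copied, and leave access_ok() checking and tail zeroing to generic code. A hedged template of what a port supplies (hypothetical arch "foo"; __arch_copy_tofrom_user() is a stand-in for the port's optimized copier):

/* arch/foo/include/asm/uaccess.h -- illustrative sketch only */
extern unsigned long __arch_copy_tofrom_user(void __user *to,
                                             const void __user *from,
                                             unsigned long n);

static inline unsigned long
raw_copy_from_user(void *to, const void __user *from, unsigned long n)
{
        /* no access_ok(), no zeroing: just report bytes NOT copied */
        return __arch_copy_tofrom_user((__force void __user *)to, from, n);
}

static inline unsigned long
raw_copy_to_user(void __user *to, const void *from, unsigned long n)
{
        return __arch_copy_tofrom_user(to, (__force const void __user *)from, n);
}

That, plus "select ARCH_HAS_RAW_COPY_USER" in the arch Kconfig -- and optionally INLINE_COPY_FROM_USER/INLINE_COPY_TO_USER where inlining the wrappers is a win -- is the whole interface.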