| field | value | date |
|---|---|---|
| author | Dave Airlie <airlied@redhat.com> | 2015-01-22 10:44:41 +1000 |
| committer | Dave Airlie <airlied@redhat.com> | 2015-01-22 10:44:41 +1000 |
| commit | 281d1bbd34b734e4f22b30b6f3b673dda46a7470 (patch) | |
| tree | f67a2d45d248e9deaa1c68d3f8f33cdf476daacd | |
| parent | bfa55bd4990815b055162b6d5d031f37a39942a5 (diff) | |
| parent | b942c653ae265abbd31032f3b4f5f857e5c7c723 (diff) | |
| download | op-kernel-dev-281d1bbd34b734e4f22b30b6f3b673dda46a7470.zip, op-kernel-dev-281d1bbd34b734e4f22b30b6f3b673dda46a7470.tar.gz | |
Merge remote-tracking branch 'origin/master' into drm-next
Backmerge Linus' tree after rc5 + drm-fixes went in.
There were a few amdkfd conflicts I wanted to avoid,
and Ben requested this for nouveau also.
Conflicts:
drivers/gpu/drm/amd/amdkfd/Makefile
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/include/kgd_kfd_interface.h
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/radeon/radeon_kfd.c
551 files changed, 5301 insertions, 2821 deletions
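
For readers who have not done one before, a backmerge like the one described above is an ordinary merge of Linus' branch into the development branch, with the listed conflicts resolved by hand. The sketch below is illustrative only: the remote and branch names follow the commit subject, the single edited path is one of the conflicting files listed above, and the actual resolutions are whatever this commit records.

```sh
# Illustrative sketch of the backmerge workflow, not the literal commands
# used for this commit; remote/branch names follow the commit subject.
git checkout drm-next
git fetch origin                   # pick up Linus' master (v3.19-rc5 + drm-fixes)
git merge origin/master            # the merge stops on the conflicts listed above
git status                         # shows the unmerged paths
$EDITOR drivers/gpu/drm/amd/amdkfd/kfd_priv.h   # resolve each conflict by hand
git add drivers/gpu/drm/amd/amdkfd/kfd_priv.h   # stage the resolution
git commit                         # conclude the merge commit
```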
The merge pulls the following changes from Linus' tree into drm-next:

- .mailmap: add Henrik Rydberg's new address (rydberg@bitmath.org).
- Documentation: new /sys/class/mei/meiN/fw_status ABI entry describing the ME firmware status registers; the davinci_emac binding gains the "ti,dm816-emac" compatible; new i8042.kbdreset kernel parameter; ip-sysctl notes that route/max_size is deprecated for IPv4 since 3.6 (the route cache is gone); tcm_mod_builder.py drops the obsolete stop_session/fall_back_to_erl0/sess_logged_in/is_state_remove callbacks, adds aborted_task, makes queue_tm_rsp return void, uses the full fabric module name and fixes the Makefile/Kconfig prompts; cpu-cooling-api.txt documents of_cpufreq_cooling_register().
- MAINTAINERS: update Henrik Rydberg's address and mark his entries "Odd fixes"; add David Sterba for btrfs; point the CAN trees at kernel.org; reorganize the libata entries (subsystem, PATA drivers, Arasan CF, AHCI platform, Promise TX2/TX4) under the libata git tree; add an iSER target entry; new contacts for ibmveth and qla4xxx; drop Dmitry Torokhov's duplicate address; add the pinctrl git tree; narrow the dmaengine file pattern; adjust the TI bandgap entry.
- Makefile: 3.19-rc3 becomes 3.19-rc5; add arch/$(hdr-arch)/include/generated/uapi to LINUXINCLUDE.
- ARM device trees: at91sam9263 LCD clocks; berlin2q sdhci clocks and SM GPIO nodes; dra7-evm QSPI partition layout; exynos5250/5420 DP video PHY via the PMU syscon and CCI disabled on arndale-octa; imx25 CSPI clocks; imx51-babbage USB hub reset regulator; imx6qdl VPU interrupt order; imx6sx-sdb and vf610-twr FEC MDIO/PHY handles; ls1021a scfg marked big-endian; omap3-n900 ethernet node enabled; rk3288-evb sdmmc drive strength; sama5d3xmb audio routing; sama5d4 PIT register size; ste-nomadik-nhk15 pinctrl property names.
- ARM defconfigs: exynos_defconfig enables HWMON/LM90, Exynos DRM and backlight support; omap2plus_defconfig moves from GENERIC_CPUFREQ_CPU0 to CPUFREQ_DT.
- ARM core and platforms: wire up execveat; add perf_get_regs_user(); KSZ8081 PHY fixup for sama5d4ek; imx6q/imx6sx clock fixes; OMAP gains a Nokia N900 DT machine entry, HYP-mode secondary startup and DRA7 errata i856 handling in the realtime counter setup; rockchip disables automatic JTAG/sdmmc switching at timer init; shmobile r8a7740 legacy GIC setup and sh73a0 irqpin control_parent; mm fixes in dump.c, init.c (LPAE read-only protection) and mmu.c (phys_addr_t section bounds).
- arm64: record ID_DFR0 and MVFR0-2 in cpuinfo and its sanity checks; wire up compat execveat; EFI early_memunmap/early_ioremap_reset fixes; KVM IPA TLB-flush shift and HCR_RW handling for 32-bit guests; perf_get_regs_user(); simplified free_initrd_mem(); assorted missing includes.
- Other architectures: blackfin stamp board include fix; ia64 renames acpi_map_lsapic()/acpi_unmap_lsapic() to acpi_map_cpu()/acpi_unmap_cpu(); m68k and s390 wire up execveat; powerpc adds a "sha1" crypto module alias, reworks current_thread_info() and fixes an OPAL tracepoint wrapper; s390 TOD clock, irqflags and hypfs fixes plus uprobes PER event handling.
(!test_facility(34)) \ __rc = EMU_ILLEGAL_OP; \ - else if ((u64 __force)ptr & mask) \ + else if ((u64 __force)__ptr & mask) \ __rc = EMU_SPECIFICATION; \ - else if (put_user(*(input), ptr)) \ + else if (put_user(*(input), __ptr)) \ __rc = EMU_ADDRESSING; \ + if (__rc == 0) \ + sim_stor_event(regs, __ptr, mask + 1); \ __rc; \ }) @@ -198,6 +230,25 @@ union split_register { }; /* + * If user per registers are setup to trace storage alterations and an + * emulated store took place on a fitting address a user trap is generated. + */ +static void sim_stor_event(struct pt_regs *regs, void *addr, int len) +{ + if (!(regs->psw.mask & PSW_MASK_PER)) + return; + if (!(current->thread.per_user.control & PER_EVENT_STORE)) + return; + if ((void *)current->thread.per_user.start > (addr + len)) + return; + if ((void *)current->thread.per_user.end < addr) + return; + current->thread.per_event.address = regs->psw.addr; + current->thread.per_event.cause = PER_EVENT_STORE >> 16; + set_pt_regs_flag(regs, PIF_PER_TRAP); +} + +/* * pc relative instructions are emulated, since parameters may not be * accessible from the xol area due to range limitations. */ @@ -249,13 +300,13 @@ static void handle_insn_ril(struct arch_uprobe *auprobe, struct pt_regs *regs) rc = emu_load_ril((u32 __user *)uptr, &rx->u64); break; case 0x07: /* sthrl */ - rc = emu_store_ril((u16 __user *)uptr, &rx->u16[3]); + rc = emu_store_ril(regs, (u16 __user *)uptr, &rx->u16[3]); break; case 0x0b: /* stgrl */ - rc = emu_store_ril((u64 __user *)uptr, &rx->u64); + rc = emu_store_ril(regs, (u64 __user *)uptr, &rx->u64); break; case 0x0f: /* strl */ - rc = emu_store_ril((u32 __user *)uptr, &rx->u32[1]); + rc = emu_store_ril(regs, (u32 __user *)uptr, &rx->u32[1]); break; } break; diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 7f0089d..e34122e5 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c @@ -128,8 +128,6 @@ void vtime_account_irq_enter(struct task_struct *tsk) struct thread_info *ti = task_thread_info(tsk); u64 timer, system; - WARN_ON_ONCE(!irqs_disabled()); - timer = S390_lowcore.last_update_timer; S390_lowcore.last_update_timer = get_vtimer(); S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c index be99357..3cf8cc0 100644 --- a/arch/s390/mm/pgtable.c +++ b/arch/s390/mm/pgtable.c @@ -322,11 +322,12 @@ static int gmap_alloc_table(struct gmap *gmap, unsigned long *table, static unsigned long __gmap_segment_gaddr(unsigned long *entry) { struct page *page; - unsigned long offset; + unsigned long offset, mask; offset = (unsigned long) entry / sizeof(unsigned long); offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE; - page = pmd_to_page((pmd_t *) entry); + mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1); + page = virt_to_page((void *)((unsigned long) entry & mask)); return page->index + offset; } diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c index c52ac77..524496d 100644 --- a/arch/s390/net/bpf_jit_comp.c +++ b/arch/s390/net/bpf_jit_comp.c @@ -431,8 +431,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct sock_filter *filter, EMIT4_DISP(0x88500000, K); break; case BPF_ALU | BPF_NEG: /* A = -A */ - /* lnr %r5,%r5 */ - EMIT2(0x1155); + /* lcr %r5,%r5 */ + EMIT2(0x1355); break; case BPF_JMP | BPF_JA: /* ip += K */ offset = addrs[i + K] + jit->start - jit->prg; @@ -502,8 +502,8 @@ branch: if (filter->jt == filter->jf) { xbranch: /* Emit compare if the branch targets are different */ if (filter->jt != 
filter->jf) { jit->seen |= SEEN_XREG; - /* cr %r5,%r12 */ - EMIT2(0x195c); + /* clr %r5,%r12 */ + EMIT2(0x155c); } goto branch; case BPF_JMP | BPF_JSET | BPF_X: /* ip += (A & X) ? jt : jf */ diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile index 5b016e2..3db07f3 100644 --- a/arch/x86/boot/Makefile +++ b/arch/x86/boot/Makefile @@ -51,6 +51,7 @@ targets += cpustr.h $(obj)/cpustr.h: $(obj)/mkcpustr FORCE $(call if_changed,cpustr) endif +clean-files += cpustr.h # --------------------------------------------------------------------------- diff --git a/arch/x86/crypto/Makefile b/arch/x86/crypto/Makefile index fd0f848..5a4a089 100644 --- a/arch/x86/crypto/Makefile +++ b/arch/x86/crypto/Makefile @@ -26,7 +26,6 @@ obj-$(CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL) += ghash-clmulni-intel.o obj-$(CONFIG_CRYPTO_CRC32C_INTEL) += crc32c-intel.o obj-$(CONFIG_CRYPTO_SHA1_SSSE3) += sha1-ssse3.o -obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ obj-$(CONFIG_CRYPTO_CRC32_PCLMUL) += crc32-pclmul.o obj-$(CONFIG_CRYPTO_SHA256_SSSE3) += sha256-ssse3.o obj-$(CONFIG_CRYPTO_SHA512_SSSE3) += sha512-ssse3.o @@ -46,6 +45,7 @@ endif ifeq ($(avx2_supported),yes) obj-$(CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64) += camellia-aesni-avx2.o obj-$(CONFIG_CRYPTO_SERPENT_AVX2_X86_64) += serpent-avx2.o + obj-$(CONFIG_CRYPTO_SHA1_MB) += sha-mb/ endif aes-i586-y := aes-i586-asm_32.o aes_glue.o diff --git a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S index 2df2a02..a916c4a 100644 --- a/arch/x86/crypto/aes_ctrby8_avx-x86_64.S +++ b/arch/x86/crypto/aes_ctrby8_avx-x86_64.S @@ -208,7 +208,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 3*16(p_keys), xkeyA + vmovdqa 3*16(p_keys), xkey4 .endif .else vmovdqa 3*16(p_keys), xkeyA @@ -224,7 +224,7 @@ ddq_add_8: add $(16*by), p_in .if (klen == KEY_128) - vmovdqa 4*16(p_keys), xkey4 + vmovdqa 4*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 4*16(p_keys), xkey4 @@ -234,7 +234,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* key 3 */ + /* key 3 */ + .if (klen == KEY_128) + vaesenc xkey4, var_xdata, var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -243,13 +248,18 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey4, var_xdata, var_xdata /* key 4 */ + /* key 4 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey4, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) .if (load_keys) - vmovdqa 6*16(p_keys), xkeyB + vmovdqa 6*16(p_keys), xkey8 .endif .else vmovdqa 6*16(p_keys), xkeyB @@ -267,12 +277,17 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyB, var_xdata, var_xdata /* key 6 */ + /* key 6 */ + .if (klen == KEY_128) + vaesenc xkey8, var_xdata, var_xdata + .else + vaesenc xkeyB, var_xdata, var_xdata + .endif .set i, (i +1) .endr .if (klen == KEY_128) - vmovdqa 8*16(p_keys), xkey8 + vmovdqa 8*16(p_keys), xkeyB .else .if (load_keys) vmovdqa 8*16(p_keys), xkey8 @@ -288,7 +303,7 @@ ddq_add_8: .if (klen == KEY_128) .if (load_keys) - vmovdqa 9*16(p_keys), xkeyA + vmovdqa 9*16(p_keys), xkey12 .endif .else vmovdqa 9*16(p_keys), xkeyA @@ -297,7 +312,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkey8, var_xdata, var_xdata /* key 8 */ + /* key 8 */ + .if (klen == KEY_128) + vaesenc xkeyB, var_xdata, var_xdata + .else + vaesenc xkey8, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -306,7 +326,12 @@ ddq_add_8: .set i, 0 .rept by club XDATA, i - vaesenc xkeyA, var_xdata, var_xdata /* 
key 9 */ + /* key 9 */ + .if (klen == KEY_128) + vaesenc xkey12, var_xdata, var_xdata + .else + vaesenc xkeyA, var_xdata, var_xdata + .endif .set i, (i +1) .endr @@ -412,7 +437,6 @@ ddq_add_8: /* main body of aes ctr load */ .macro do_aes_ctrmain key_len - cmp $16, num_bytes jb .Ldo_return2\key_len diff --git a/arch/x86/crypto/sha-mb/sha1_mb.c b/arch/x86/crypto/sha-mb/sha1_mb.c index a225a5ca..fd9f6b0 100644 --- a/arch/x86/crypto/sha-mb/sha1_mb.c +++ b/arch/x86/crypto/sha-mb/sha1_mb.c @@ -931,4 +931,4 @@ module_exit(sha1_mb_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, multi buffer accelerated"); -MODULE_ALIAS("sha1"); +MODULE_ALIAS_CRYPTO("sha1"); diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index e7e9682..f556c48 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h @@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void) /* * Load per CPU data from GDT. LSL is faster than RDTSCP and - * works on all CPUs. + * works on all CPUs. This is volatile so that it orders + * correctly wrt barrier() and to keep gcc from cleverly + * hoisting it out of the calling function. */ - asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); + asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG)); return p; } diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c index 4433a4b..d162636 100644 --- a/arch/x86/kernel/acpi/boot.c +++ b/arch/x86/kernel/acpi/boot.c @@ -750,13 +750,13 @@ static int _acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) } /* wrapper to silence section mismatch warning */ -int __ref acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu) +int __ref acpi_map_cpu(acpi_handle handle, int physid, int *pcpu) { return _acpi_map_lsapic(handle, physid, pcpu); } -EXPORT_SYMBOL(acpi_map_lsapic); +EXPORT_SYMBOL(acpi_map_cpu); -int acpi_unmap_lsapic(int cpu) +int acpi_unmap_cpu(int cpu) { #ifdef CONFIG_ACPI_NUMA set_apicid_to_node(per_cpu(x86_cpu_to_apicid, cpu), NUMA_NO_NODE); @@ -768,8 +768,7 @@ int acpi_unmap_lsapic(int cpu) return (0); } - -EXPORT_SYMBOL(acpi_unmap_lsapic); +EXPORT_SYMBOL(acpi_unmap_cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base) diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index e27b49d..80091ae 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -66,3 +66,4 @@ targets += capflags.c $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE $(call if_changed,mkcapflags) endif +clean-files += capflags.c diff --git a/arch/x86/kernel/cpu/mkcapflags.sh b/arch/x86/kernel/cpu/mkcapflags.sh index e2b22df..36d99a3 100644 --- a/arch/x86/kernel/cpu/mkcapflags.sh +++ b/arch/x86/kernel/cpu/mkcapflags.sh @@ -28,7 +28,7 @@ function dump_array() # If the /* comment */ starts with a quote string, grab that. 
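The vgtod.h hunk above only adds "volatile", but that single qualifier is what stops GCC from assuming two identical asm reads return the same value and folding or hoisting them past barrier(). A stand-alone userspace sketch of the compiler behaviour involved, with rdtsc standing in for the segment-limit read (illustration only, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Without "volatile", GCC may treat identical asm statements with the same
 * (empty) inputs as producing the same value and fold them into one. */
static inline uint64_t tsc_plain(void)
{
        uint32_t lo, hi;
        asm("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
}

/* "volatile" forces every call to execute the instruction again. */
static inline uint64_t tsc_volatile(void)
{
        uint32_t lo, hi;
        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        uint64_t a = tsc_plain(), b = tsc_plain();
        uint64_t c = tsc_volatile(), d = tsc_volatile();

        /* at -O2 the first delta may be 0 if both reads were folded */
        printf("plain:    %llu\n", (unsigned long long)(b - a));
        printf("volatile: %llu\n", (unsigned long long)(d - c));
        return 0;
}

Built with optimization, the plain pair may collapse into a single read; that is exactly the hoisting the __getcpu() change rules out for the per-CPU segment-limit load.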
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')" [ -z "$VALUE" ] && VALUE="\"$NAME\"" - [ "$VALUE" == '""' ] && continue + [ "$VALUE" = '""' ] && continue # Name is uppercase, VALUE is all lowercase VALUE="$(echo "$VALUE" | tr A-Z a-z)" diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c index 3c895d4..0739833 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c @@ -568,8 +568,8 @@ struct event_constraint intel_atom_pebs_event_constraints[] = { }; struct event_constraint intel_slm_pebs_event_constraints[] = { - /* UOPS_RETIRED.ALL, inv=1, cmask=16 (cycles:p). */ - INTEL_FLAGS_EVENT_CONSTRAINT(0x108001c2, 0xf), + /* INST_RETIRED.ANY_P, inv=1, cmask=16 (cycles:p). */ + INTEL_FLAGS_EVENT_CONSTRAINT(0x108000c0, 0x1), /* Allow all events as PEBS with no flags */ INTEL_ALL_EVENT_CONSTRAINT(0, 0x1), EVENT_CONSTRAINT_END diff --git a/arch/x86/kernel/cpu/perf_event_intel_rapl.c b/arch/x86/kernel/cpu/perf_event_intel_rapl.c index 673f930..6e434f8 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_rapl.c +++ b/arch/x86/kernel/cpu/perf_event_intel_rapl.c @@ -103,6 +103,13 @@ static struct kobj_attribute format_attr_##_var = \ #define RAPL_CNTR_WIDTH 32 /* 32-bit rapl counters */ +#define RAPL_EVENT_ATTR_STR(_name, v, str) \ +static struct perf_pmu_events_attr event_attr_##v = { \ + .attr = __ATTR(_name, 0444, rapl_sysfs_show, NULL), \ + .id = 0, \ + .event_str = str, \ +}; + struct rapl_pmu { spinlock_t lock; int hw_unit; /* 1/2^hw_unit Joule */ @@ -379,23 +386,36 @@ static struct attribute_group rapl_pmu_attr_group = { .attrs = rapl_pmu_attrs, }; -EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); -EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); -EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); -EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); +static ssize_t rapl_sysfs_show(struct device *dev, + struct device_attribute *attr, + char *page) +{ + struct perf_pmu_events_attr *pmu_attr = \ + container_of(attr, struct perf_pmu_events_attr, attr); + + if (pmu_attr->event_str) + return sprintf(page, "%s", pmu_attr->event_str); + + return 0; +} + +RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01"); +RAPL_EVENT_ATTR_STR(energy-pkg , rapl_pkg, "event=0x02"); +RAPL_EVENT_ATTR_STR(energy-ram , rapl_ram, "event=0x03"); +RAPL_EVENT_ATTR_STR(energy-gpu , rapl_gpu, "event=0x04"); -EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); -EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); -EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); -EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); +RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_cores_unit, "Joules"); +RAPL_EVENT_ATTR_STR(energy-pkg.unit , rapl_pkg_unit, "Joules"); +RAPL_EVENT_ATTR_STR(energy-ram.unit , rapl_ram_unit, "Joules"); +RAPL_EVENT_ATTR_STR(energy-gpu.unit , rapl_gpu_unit, "Joules"); /* * we compute in 0.23 nJ increments regardless of MSR */ -EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); -EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); -EVENT_ATTR_STR(energy-ram.scale, rapl_ram_scale, "2.3283064365386962890625e-10"); -EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_cores_scale, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_pkg_scale, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-ram.scale, 
rapl_ram_scale, "2.3283064365386962890625e-10"); +RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_gpu_scale, "2.3283064365386962890625e-10"); static struct attribute *rapl_events_srv_attr[] = { EVENT_PTR(rapl_cores), diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h index 18eb78b..863d9b0 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h @@ -17,7 +17,7 @@ #define UNCORE_PCI_DEV_TYPE(data) ((data >> 8) & 0xff) #define UNCORE_PCI_DEV_IDX(data) (data & 0xff) #define UNCORE_EXTRA_PCI_DEV 0xff -#define UNCORE_EXTRA_PCI_DEV_MAX 2 +#define UNCORE_EXTRA_PCI_DEV_MAX 3 /* support up to 8 sockets */ #define UNCORE_SOCKET_MAX 8 diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c index 745b158..21af6149e 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c @@ -891,6 +891,7 @@ void snbep_uncore_cpu_init(void) enum { SNBEP_PCI_QPI_PORT0_FILTER, SNBEP_PCI_QPI_PORT1_FILTER, + HSWEP_PCI_PCU_3, }; static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event) @@ -2026,6 +2027,17 @@ void hswep_uncore_cpu_init(void) { if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores) hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores; + + /* Detect 6-8 core systems with only two SBOXes */ + if (uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3]) { + u32 capid4; + + pci_read_config_dword(uncore_extra_pci_dev[0][HSWEP_PCI_PCU_3], + 0x94, &capid4); + if (((capid4 >> 6) & 0x3) == 0) + hswep_uncore_sbox.num_boxes = 2; + } + uncore_msr_uncores = hswep_msr_uncores; } @@ -2287,6 +2299,11 @@ static DEFINE_PCI_DEVICE_TABLE(hswep_uncore_pci_ids) = { .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, SNBEP_PCI_QPI_PORT1_FILTER), }, + { /* PCU.3 (for Capability registers) */ + PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x2fc0), + .driver_data = UNCORE_PCI_DEV_DATA(UNCORE_EXTRA_PCI_DEV, + HSWEP_PCI_PCU_3), + }, { /* end: all zeroes */ } }; diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c index f7e3cd5..98f654d 100644 --- a/arch/x86/kernel/kprobes/core.c +++ b/arch/x86/kernel/kprobes/core.c @@ -1020,6 +1020,15 @@ int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) regs->flags &= ~X86_EFLAGS_IF; trace_hardirqs_off(); regs->ip = (unsigned long)(jp->entry); + + /* + * jprobes use jprobe_return() which skips the normal return + * path of the function, and this messes up the accounting of the + * function graph tracer to get messed up. + * + * Pause function graph tracing while performing the jprobe function. 
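The comment above belongs to the setjmp_pre_handler() change; the matching unpause lands in longjmp_break_handler() a little further down. A distilled sketch of that pairing, with hypothetical handler names around the real pause_graph_tracing()/unpause_graph_tracing() helpers from <linux/ftrace.h>:

#include <linux/ftrace.h>
#include <linux/kprobes.h>

/* Entry side: the jprobe body is reached through a rewritten regs->ip and
 * never returns normally, so graph tracing is paused for the duration. */
static int example_jprobe_enter(struct kprobe *p, struct pt_regs *regs)
{
        /* ...save regs and stack, point regs->ip at the jprobe handler... */
        pause_graph_tracing();
        return 1;       /* non-zero: regs->ip was changed by the handler */
}

/* Exit side: jprobe_return() traps back into the break handler, the saved
 * state is restored and the graph tracer's accounting is consistent again. */
static int example_jprobe_exit(struct kprobe *p, struct pt_regs *regs)
{
        unpause_graph_tracing();
        /* ...restore saved regs and stack... */
        return 1;
}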
+ */ + pause_graph_tracing(); return 1; } NOKPROBE_SYMBOL(setjmp_pre_handler); @@ -1048,24 +1057,25 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); u8 *addr = (u8 *) (regs->ip - 1); struct jprobe *jp = container_of(p, struct jprobe, kp); + void *saved_sp = kcb->jprobe_saved_sp; if ((addr > (u8 *) jprobe_return) && (addr < (u8 *) jprobe_return_end)) { - if (stack_addr(regs) != kcb->jprobe_saved_sp) { + if (stack_addr(regs) != saved_sp) { struct pt_regs *saved_regs = &kcb->jprobe_saved_regs; printk(KERN_ERR "current sp %p does not match saved sp %p\n", - stack_addr(regs), kcb->jprobe_saved_sp); + stack_addr(regs), saved_sp); printk(KERN_ERR "Saved registers for jprobe %p\n", jp); show_regs(saved_regs); printk(KERN_ERR "Current registers\n"); show_regs(regs); BUG(); } + /* It's OK to start function graph tracing again */ + unpause_graph_tracing(); *regs = kcb->jprobe_saved_regs; - memcpy((kprobe_opcode_t *)(kcb->jprobe_saved_sp), - kcb->jprobes_stack, - MIN_STACK_SIZE(kcb->jprobe_saved_sp)); + memcpy(saved_sp, kcb->jprobes_stack, MIN_STACK_SIZE(saved_sp)); preempt_enable_no_resched(); return 1; } diff --git a/arch/x86/kernel/perf_regs.c b/arch/x86/kernel/perf_regs.c index e309cc5..781861c 100644 --- a/arch/x86/kernel/perf_regs.c +++ b/arch/x86/kernel/perf_regs.c @@ -78,6 +78,14 @@ u64 perf_reg_abi(struct task_struct *task) { return PERF_SAMPLE_REGS_ABI_32; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} #else /* CONFIG_X86_64 */ #define REG_NOSUPPORT ((1ULL << PERF_REG_X86_DS) | \ (1ULL << PERF_REG_X86_ES) | \ @@ -102,4 +110,86 @@ u64 perf_reg_abi(struct task_struct *task) else return PERF_SAMPLE_REGS_ABI_64; } + +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + struct pt_regs *user_regs = task_pt_regs(current); + + /* + * If we're in an NMI that interrupted task_pt_regs setup, then + * we can't sample user regs at all. This check isn't really + * sufficient, though, as we could be in an NMI inside an interrupt + * that happened during task_pt_regs setup. + */ + if (regs->sp > (unsigned long)&user_regs->r11 && + regs->sp <= (unsigned long)(user_regs + 1)) { + regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; + regs_user->regs = NULL; + return; + } + + /* + * RIP, flags, and the argument registers are usually saved. + * orig_ax is probably okay, too. + */ + regs_user_copy->ip = user_regs->ip; + regs_user_copy->cx = user_regs->cx; + regs_user_copy->dx = user_regs->dx; + regs_user_copy->si = user_regs->si; + regs_user_copy->di = user_regs->di; + regs_user_copy->r8 = user_regs->r8; + regs_user_copy->r9 = user_regs->r9; + regs_user_copy->r10 = user_regs->r10; + regs_user_copy->r11 = user_regs->r11; + regs_user_copy->orig_ax = user_regs->orig_ax; + regs_user_copy->flags = user_regs->flags; + + /* + * Don't even try to report the "rest" regs. + */ + regs_user_copy->bx = -1; + regs_user_copy->bp = -1; + regs_user_copy->r12 = -1; + regs_user_copy->r13 = -1; + regs_user_copy->r14 = -1; + regs_user_copy->r15 = -1; + + /* + * For this to be at all useful, we need a reasonable guess for + * sp and the ABI. Be careful: we're in NMI context, and we're + * considering current to be the current task, so we should + * be careful not to look at any other percpu variables that might + * change during context switches. 
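The range test that opens the 64-bit perf_get_regs_user() above can be read as one small predicate: if the NMI's own stack pointer lies inside the task's pt_regs save area, the entry code was still writing those registers when the NMI fired, so they must not be sampled. A sketch with a hypothetical helper name (x86-64 pt_regs layout assumed):

#include <linux/ptrace.h>
#include <linux/types.h>

static bool pt_regs_save_in_progress(struct pt_regs *nmi_regs,
                                     struct pt_regs *task_regs)
{
        unsigned long lo = (unsigned long)&task_regs->r11;
        unsigned long hi = (unsigned long)(task_regs + 1);

        /* same bounds as the hunk above: sp in (lo, hi] means the NMI
         * interrupted the entry code mid-way through filling the area */
        return nmi_regs->sp > lo && nmi_regs->sp <= hi;
}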
+ */ + if (IS_ENABLED(CONFIG_IA32_EMULATION) && + task_thread_info(current)->status & TS_COMPAT) { + /* Easy case: we're in a compat syscall. */ + regs_user->abi = PERF_SAMPLE_REGS_ABI_32; + regs_user_copy->sp = user_regs->sp; + regs_user_copy->cs = user_regs->cs; + regs_user_copy->ss = user_regs->ss; + } else if (user_regs->orig_ax != -1) { + /* + * We're probably in a 64-bit syscall. + * Warning: this code is severely racy. At least it's better + * than just blindly copying user_regs. + */ + regs_user->abi = PERF_SAMPLE_REGS_ABI_64; + regs_user_copy->sp = this_cpu_read(old_rsp); + regs_user_copy->cs = __USER_CS; + regs_user_copy->ss = __USER_DS; + regs_user_copy->cx = -1; /* usually contains garbage */ + } else { + /* We're probably in an interrupt or exception. */ + regs_user->abi = user_64bit_mode(user_regs) ? + PERF_SAMPLE_REGS_ABI_64 : PERF_SAMPLE_REGS_ABI_32; + regs_user_copy->sp = user_regs->sp; + regs_user_copy->cs = user_regs->cs; + regs_user_copy->ss = user_regs->ss; + } + + regs_user->regs = regs_user_copy; +} #endif /* CONFIG_X86_32 */ diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c index 2480978b..1313ae6 100644 --- a/arch/x86/lib/insn.c +++ b/arch/x86/lib/insn.c @@ -28,7 +28,7 @@ /* Verify next sizeof(t) bytes can be on the same instruction */ #define validate_next(t, insn, n) \ - ((insn)->next_byte + sizeof(t) + n < (insn)->end_kaddr) + ((insn)->next_byte + sizeof(t) + n <= (insn)->end_kaddr) #define __get_next(t, insn) \ ({ t r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index a97ee08..08a7d31 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping( static unsigned long __init get_new_step_size(unsigned long step_size) { /* - * Explain why we shift by 5 and why we don't have to worry about - * 'step_size << 5' overflowing: - * - * initial mapped size is PMD_SIZE (2M). + * Initial mapped size is PMD_SIZE (2M). * We can not set step_size to be PUD_SIZE (1G) yet. * In worse case, when we cross the 1G boundary, and * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k) - * to map 1G range with PTE. Use 5 as shift for now. + * to map 1G range with PTE. Hence we use one less than the + * difference of page table level shifts. * - * Don't need to worry about overflow, on 32bit, when step_size - * is 0, round_down() returns 0 for start, and that turns it - * into 0x100000000ULL. + * Don't need to worry about overflow in the top-down case, on 32bit, + * when step_size is 0, round_down() returns 0 for start, and that + * turns it into 0x100000000ULL. + * In the bottom-up case, round_up(x, 0) returns 0 though too, which + * needs to be taken into consideration by the code below. 
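With standard x86-64 4K paging the shift described above (and applied just below) is PMD_SHIFT - PAGE_SHIFT - 1 = 8, so the step may grow 256-fold each time the previously mapped range reaches one full step. A stand-alone illustration of the progression; the constants are the usual x86-64 values, not taken from the patch:

#include <stdio.h>

#define PAGE_SHIFT      12
#define PMD_SHIFT       21                              /* 2M large pages */
#define STEP_GROWTH     (PMD_SHIFT - PAGE_SHIFT - 1)    /* == 8 */

int main(void)
{
        unsigned long long step = 1ULL << PMD_SHIFT;    /* initial 2M step */
        int i;

        for (i = 0; i < 3; i++) {
                printf("step %d: %llu MiB\n", i, step >> 20);
                step <<= STEP_GROWTH;
        }
        /* prints 2, 512, 131072 MiB: the page tables needed for each step
         * fit easily inside the range mapped by the previous steps */
        return 0;
}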
*/ - return step_size << 5; + return step_size << (PMD_SHIFT - PAGE_SHIFT - 1); } /** @@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start, unsigned long step_size; unsigned long addr; unsigned long mapped_ram_size = 0; - unsigned long new_mapped_ram_size; /* xen has big range in reserved near end of ram, skip it at first.*/ addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE); @@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start, start = map_start; } else start = map_start; - new_mapped_ram_size = init_range_memory_mapping(start, + mapped_ram_size += init_range_memory_mapping(start, last_start); last_start = start; min_pfn_mapped = last_start >> PAGE_SHIFT; - /* only increase step_size after big range get mapped */ - if (new_mapped_ram_size > mapped_ram_size) + if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); - mapped_ram_size += new_mapped_ram_size; } if (real_end < map_end) @@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start, static void __init memory_map_bottom_up(unsigned long map_start, unsigned long map_end) { - unsigned long next, new_mapped_ram_size, start; + unsigned long next, start; unsigned long mapped_ram_size = 0; /* step_size need to be small so pgt_buf from BRK could cover it */ unsigned long step_size = PMD_SIZE; @@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start, * for page table. */ while (start < map_end) { - if (map_end - start > step_size) { + if (step_size && map_end - start > step_size) { next = round_up(start + 1, step_size); if (next > map_end) next = map_end; - } else + } else { next = map_end; + } - new_mapped_ram_size = init_range_memory_mapping(start, next); + mapped_ram_size += init_range_memory_mapping(start, next); start = next; - if (new_mapped_ram_size > mapped_ram_size) + if (mapped_ram_size >= step_size) step_size = get_new_step_size(step_size); - mapped_ram_size += new_mapped_ram_size; } } diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 009495b..1c9f750 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c @@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image) struct linux_binprm; -/* Put the vdso above the (randomized) stack with another randomized offset. - This way there is no hole in the middle of address space. - To save memory make sure it is still in the same PTE as the stack top. - This doesn't give that many random bits. - - Only used for the 64-bit and x32 vdsos. */ +/* + * Put the vdso above the (randomized) stack with another randomized + * offset. This way there is no hole in the middle of address space. + * To save memory make sure it is still in the same PTE as the stack + * top. This doesn't give that many random bits. + * + * Note that this algorithm is imperfect: the distribution of the vdso + * start address within a PMD is biased toward the end. + * + * Only used for the 64-bit and x32 vdsos. + */ static unsigned long vdso_addr(unsigned long start, unsigned len) { #ifdef CONFIG_X86_32 @@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len) #else unsigned long addr, end; unsigned offset; - end = (start + PMD_SIZE - 1) & PMD_MASK; + + /* + * Round up the start address. It can start out unaligned as a result + * of stack start randomization. + */ + start = PAGE_ALIGN(start); + + /* Round the lowest possible end address up to a PMD boundary. 
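The offset computation being removed just below masked get_random_int() to a 0-511 page offset and clamped anything past "end", which concentrates most of the probability on the last usable slot; the replacement takes a modulo over the real range and is uniform. A stand-alone simulation with made-up numbers:

#include <stdio.h>
#include <stdlib.h>

#define SLOTS   200             /* usable page slots between start and end */
#define SAMPLES 1000000

int main(void)
{
        long clamp_last = 0, mod_last = 0;
        int i;

        srand(1);
        for (i = 0; i < SAMPLES; i++) {
                int r = rand() & 511;           /* old: mask to 0..511 */
                if (r >= SLOTS)
                        r = SLOTS - 1;          /* old: clamp to the end */
                if (r == SLOTS - 1)
                        clamp_last++;

                if (rand() % SLOTS == SLOTS - 1)        /* new: uniform */
                        mod_last++;
        }

        printf("mask+clamp, last slot: %.1f%%\n",
               100.0 * clamp_last / SAMPLES);   /* ~61% of placements */
        printf("modulo,     last slot: %.1f%%\n",
               100.0 * mod_last / SAMPLES);     /* ~0.5% */
        return 0;
}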
*/ + end = (start + len + PMD_SIZE - 1) & PMD_MASK; if (end >= TASK_SIZE_MAX) end = TASK_SIZE_MAX; end -= len; - /* This loses some more bits than a modulo, but is cheaper */ - offset = get_random_int() & (PTRS_PER_PTE - 1); - addr = start + (offset << PAGE_SHIFT); - if (addr >= end) - addr = end; + + if (end > start) { + offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1); + addr = start + (offset << PAGE_SHIFT); + } else { + addr = start; + } /* - * page-align it here so that get_unmapped_area doesn't - * align it wrongfully again to the next page. addr can come in 4K - * unaligned here as a result of stack start randomization. + * Forcibly align the final address in case we have a hardware + * issue that requires alignment for performance reasons. */ - addr = PAGE_ALIGN(addr); addr = align_vdso_addr(addr); return addr; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 6bf3a13..78a881b 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -40,6 +40,7 @@ #include <xen/interface/physdev.h> #include <xen/interface/vcpu.h> #include <xen/interface/memory.h> +#include <xen/interface/nmi.h> #include <xen/interface/xen-mca.h> #include <xen/features.h> #include <xen/page.h> @@ -66,6 +67,7 @@ #include <asm/reboot.h> #include <asm/stackprotector.h> #include <asm/hypervisor.h> +#include <asm/mach_traps.h> #include <asm/mwait.h> #include <asm/pci_x86.h> #include <asm/pat.h> @@ -1351,6 +1353,21 @@ static const struct machine_ops xen_machine_ops __initconst = { .emergency_restart = xen_emergency_restart, }; +static unsigned char xen_get_nmi_reason(void) +{ + unsigned char reason = 0; + + /* Construct a value which looks like it came from port 0x61. */ + if (test_bit(_XEN_NMIREASON_io_error, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_IOCHK; + if (test_bit(_XEN_NMIREASON_pci_serr, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_SERR; + + return reason; +} + static void __init xen_boot_params_init_edd(void) { #if IS_ENABLED(CONFIG_EDD) @@ -1535,9 +1552,12 @@ asmlinkage __visible void __init xen_start_kernel(void) pv_info = xen_info; pv_init_ops = xen_init_ops; pv_apic_ops = xen_apic_ops; - if (!xen_pvh_domain()) + if (!xen_pvh_domain()) { pv_cpu_ops = xen_cpu_ops; + x86_platform.get_nmi_reason = xen_get_nmi_reason; + } + if (xen_feature(XENFEAT_auto_translated_physmap)) x86_init.resources.memory_setup = xen_auto_xlated_memory_setup; else diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c index edbc7a6..70fb507 100644 --- a/arch/x86/xen/p2m.c +++ b/arch/x86/xen/p2m.c @@ -167,10 +167,13 @@ static void * __ref alloc_p2m_page(void) return (void *)__get_free_page(GFP_KERNEL | __GFP_REPEAT); } -/* Only to be called in case of a race for a page just allocated! */ -static void free_p2m_page(void *p) +static void __ref free_p2m_page(void *p) { - BUG_ON(!slab_is_available()); + if (unlikely(!slab_is_available())) { + free_bootmem((unsigned long)p, PAGE_SIZE); + return; + } + free_page((unsigned long)p); } @@ -375,7 +378,7 @@ static void __init xen_rebuild_p2m_list(unsigned long *p2m) p2m_missing_pte : p2m_identity_pte; for (i = 0; i < PMDS_PER_MID_PAGE; i++) { pmdp = populate_extra_pmd( - (unsigned long)(p2m + pfn + i * PTRS_PER_PTE)); + (unsigned long)(p2m + pfn) + i * PMD_SIZE); set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); } } @@ -436,10 +439,9 @@ EXPORT_SYMBOL_GPL(get_phys_to_machine); * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual * pmd. 
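The xen_rebuild_p2m_list() hunk above is a pointer-arithmetic fix: p2m is an unsigned long *, so adding i * PTRS_PER_PTE to the pointer advances by only one page's worth of bytes per iteration, while the intent was to step one PMD-covered virtual range at a time. A stand-alone illustration (assumes a 64-bit build):

#include <stdio.h>

#define PAGE_SIZE       4096UL
#define PMD_SIZE        (512 * PAGE_SIZE)       /* 2 MiB */
#define PTRS_PER_PTE    512

static unsigned long p2m[2 * PTRS_PER_PTE];

int main(void)
{
        int i = 1;

        /* old form: offset scaled by sizeof(unsigned long) -> one 4K page */
        unsigned long old_off = (unsigned long)(p2m + i * PTRS_PER_PTE) -
                                (unsigned long)p2m;
        /* new form: cast first, then add plain bytes -> one 2M PMD range */
        unsigned long new_off = ((unsigned long)p2m + i * PMD_SIZE) -
                                (unsigned long)p2m;

        printf("old step: %lu bytes\n", old_off);       /* 4096 */
        printf("new step: %lu bytes\n", new_off);       /* 2097152 */
        return 0;
}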
In case of PAE/x86-32 there are multiple pmds to allocate! */ -static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) +static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) { pte_t *ptechk; - pte_t *pteret = ptep; pte_t *pte_newpg[PMDS_PER_MID_PAGE]; pmd_t *pmdp; unsigned int level; @@ -473,8 +475,6 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) if (ptechk == pte_pg) { set_pmd(pmdp, __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); - if (vaddr == (addr & ~(PMD_SIZE - 1))) - pteret = pte_offset_kernel(pmdp, addr); pte_newpg[i] = NULL; } @@ -488,7 +488,7 @@ static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *ptep, pte_t *pte_pg) vaddr += PMD_SIZE; } - return pteret; + return lookup_address(addr, &level); } /* @@ -517,7 +517,7 @@ static bool alloc_p2m(unsigned long pfn) if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { /* PMD level is missing, allocate a new one */ - ptep = alloc_p2m_pmd(addr, ptep, pte_pg); + ptep = alloc_p2m_pmd(addr, pte_pg); if (!ptep) return false; } diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c index dfd77de..865e56c 100644 --- a/arch/x86/xen/setup.c +++ b/arch/x86/xen/setup.c @@ -140,7 +140,7 @@ static void __init xen_del_extra_mem(u64 start, u64 size) unsigned long __ref xen_chk_extra_mem(unsigned long pfn) { int i; - unsigned long addr = PFN_PHYS(pfn); + phys_addr_t addr = PFN_PHYS(pfn); for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { if (addr >= xen_extra_mem[i].start && @@ -160,6 +160,8 @@ void __init xen_inv_extra_mem(void) int i; for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + if (!xen_extra_mem[i].size) + continue; pfn_s = PFN_DOWN(xen_extra_mem[i].start); pfn_e = PFN_UP(xen_extra_mem[i].start + xen_extra_mem[i].size); for (pfn = pfn_s; pfn < pfn_e; pfn++) @@ -229,15 +231,14 @@ static int __init xen_free_mfn(unsigned long mfn) * as a fallback if the remapping fails. */ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, - unsigned long end_pfn, unsigned long nr_pages, unsigned long *identity, - unsigned long *released) + unsigned long end_pfn, unsigned long nr_pages, unsigned long *released) { - unsigned long len = 0; unsigned long pfn, end; int ret; WARN_ON(start_pfn > end_pfn); + /* Release pages first. */ end = min(end_pfn, nr_pages); for (pfn = start_pfn; pfn < end; pfn++) { unsigned long mfn = pfn_to_mfn(pfn); @@ -250,16 +251,14 @@ static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); if (ret == 1) { + (*released)++; if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) break; - len++; } else break; } - /* Need to release pages first */ - *released += len; - *identity += set_phys_range_identity(start_pfn, end_pfn); + set_phys_range_identity(start_pfn, end_pfn); } /* @@ -287,7 +286,7 @@ static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) } /* Update kernel mapping, but not for highmem. 
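The xen_chk_extra_mem() change above and the highmem test just below it dodge the same trap: shifting a frame number into an unsigned long truncates above 4 GiB on a 32-bit build, whereas a 64-bit phys_addr_t (or comparing frame numbers directly) preserves the value. A stand-alone illustration with a made-up frame number:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t pfn = 0x180000;        /* frame at 6 GiB, plausible with PAE */

        /* what a 32-bit unsigned long keeps of pfn << PAGE_SHIFT */
        uint32_t truncated = (uint32_t)(pfn << PAGE_SHIFT);
        /* what a 64-bit phys_addr_t preserves */
        uint64_t full = pfn << PAGE_SHIFT;

        printf("truncated: 0x%08x\n", truncated);                 /* 0x80000000 */
        printf("full:      0x%llx\n", (unsigned long long)full);  /* 0x180000000 */
        return 0;
}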
*/ - if ((pfn << PAGE_SHIFT) >= __pa(high_memory)) + if (pfn >= PFN_UP(__pa(high_memory - 1))) return; if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), @@ -318,7 +317,6 @@ static void __init xen_do_set_identity_and_remap_chunk( unsigned long ident_pfn_iter, remap_pfn_iter; unsigned long ident_end_pfn = start_pfn + size; unsigned long left = size; - unsigned long ident_cnt = 0; unsigned int i, chunk; WARN_ON(size == 0); @@ -347,8 +345,7 @@ static void __init xen_do_set_identity_and_remap_chunk( xen_remap_mfn = mfn; /* Set identity map */ - ident_cnt += set_phys_range_identity(ident_pfn_iter, - ident_pfn_iter + chunk); + set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk); left -= chunk; } @@ -371,7 +368,7 @@ static void __init xen_do_set_identity_and_remap_chunk( static unsigned long __init xen_set_identity_and_remap_chunk( const struct e820entry *list, size_t map_size, unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, unsigned long remap_pfn, - unsigned long *identity, unsigned long *released) + unsigned long *released, unsigned long *remapped) { unsigned long pfn; unsigned long i = 0; @@ -386,8 +383,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( /* Do not remap pages beyond the current allocation */ if (cur_pfn >= nr_pages) { /* Identity map remaining pages */ - *identity += set_phys_range_identity(cur_pfn, - cur_pfn + size); + set_phys_range_identity(cur_pfn, cur_pfn + size); break; } if (cur_pfn + size > nr_pages) @@ -398,7 +394,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( if (!remap_range_size) { pr_warning("Unable to find available pfn range, not remapping identity pages\n"); xen_set_identity_and_release_chunk(cur_pfn, - cur_pfn + left, nr_pages, identity, released); + cur_pfn + left, nr_pages, released); break; } /* Adjust size to fit in current e820 RAM region */ @@ -410,7 +406,7 @@ static unsigned long __init xen_set_identity_and_remap_chunk( /* Update variables to reflect new mappings. */ i += size; remap_pfn += size; - *identity += size; + *remapped += size; } /* @@ -427,13 +423,13 @@ static unsigned long __init xen_set_identity_and_remap_chunk( static void __init xen_set_identity_and_remap( const struct e820entry *list, size_t map_size, unsigned long nr_pages, - unsigned long *released) + unsigned long *released, unsigned long *remapped) { phys_addr_t start = 0; - unsigned long identity = 0; unsigned long last_pfn = nr_pages; const struct e820entry *entry; unsigned long num_released = 0; + unsigned long num_remapped = 0; int i; /* @@ -460,14 +456,14 @@ static void __init xen_set_identity_and_remap( last_pfn = xen_set_identity_and_remap_chunk( list, map_size, start_pfn, end_pfn, nr_pages, last_pfn, - &identity, &num_released); + &num_released, &num_remapped); start = end; } } *released = num_released; + *remapped = num_remapped; - pr_info("Set %ld page(s) to 1-1 mapping\n", identity); pr_info("Released %ld page(s)\n", num_released); } @@ -586,6 +582,7 @@ char * __init xen_memory_setup(void) struct xen_memory_map memmap; unsigned long max_pages; unsigned long extra_pages = 0; + unsigned long remapped_pages; int i; int op; @@ -635,9 +632,10 @@ char * __init xen_memory_setup(void) * underlying RAM. 
*/ xen_set_identity_and_remap(map, memmap.nr_entries, max_pfn, - &xen_released_pages); + &xen_released_pages, &remapped_pages); extra_pages += xen_released_pages; + extra_pages += remapped_pages; /* * Clamp the amount of extra memory to a EXTRA_MEM_RATIO diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index f473d26..6908734 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -391,7 +391,7 @@ static const struct clock_event_device *xen_clockevent = struct xen_clock_event_device { struct clock_event_device evt; - char *name; + char name[16]; }; static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; @@ -420,46 +420,38 @@ void xen_teardown_timer(int cpu) if (evt->irq >= 0) { unbind_from_irqhandler(evt->irq, NULL); evt->irq = -1; - kfree(per_cpu(xen_clock_events, cpu).name); - per_cpu(xen_clock_events, cpu).name = NULL; } } void xen_setup_timer(int cpu) { - char *name; - struct clock_event_device *evt; + struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); + struct clock_event_device *evt = &xevt->evt; int irq; - evt = &per_cpu(xen_clock_events, cpu).evt; WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); if (evt->irq >= 0) xen_teardown_timer(cpu); printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); - name = kasprintf(GFP_KERNEL, "timer%d", cpu); - if (!name) - name = "<timer kasprintf failed>"; + snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu); irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, - name, NULL); + xevt->name, NULL); (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); memcpy(evt, xen_clockevent, sizeof(*evt)); evt->cpumask = cpumask_of(cpu); evt->irq = irq; - per_cpu(xen_clock_events, cpu).name = name; } void xen_setup_cpu_clockevents(void) { - BUG_ON(preemptible()); - clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); } diff --git a/block/blk-core.c b/block/blk-core.c index 30f6153..3ad4055 100644 --- a/block/blk-core.c +++ b/block/blk-core.c @@ -473,6 +473,25 @@ void blk_queue_bypass_end(struct request_queue *q) } EXPORT_SYMBOL_GPL(blk_queue_bypass_end); +void blk_set_queue_dying(struct request_queue *q) +{ + queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + + if (q->mq_ops) + blk_mq_wake_waiters(q); + else { + struct request_list *rl; + + blk_queue_for_each_rl(rl, q) { + if (rl->rq_pool) { + wake_up(&rl->wait[BLK_RW_SYNC]); + wake_up(&rl->wait[BLK_RW_ASYNC]); + } + } + } +} +EXPORT_SYMBOL_GPL(blk_set_queue_dying); + /** * blk_cleanup_queue - shutdown a request queue * @q: request queue to shutdown @@ -486,7 +505,7 @@ void blk_cleanup_queue(struct request_queue *q) /* mark @q DYING, no new request or merges will be allowed afterwards */ mutex_lock(&q->sysfs_lock); - queue_flag_set_unlocked(QUEUE_FLAG_DYING, q); + blk_set_queue_dying(q); spin_lock_irq(lock); /* diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 32e8dbb..60c9d4a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c @@ -68,9 +68,9 @@ bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx) } /* - * Wakeup all potentially sleeping on normal (non-reserved) tags + * Wakeup all potentially sleeping on tags */ -static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) +void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve) { struct blk_mq_bitmap_tags *bt; int i, wake_index; @@ -85,6 +85,12 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags) wake_index = 
bt_index_inc(wake_index); } + + if (include_reserve) { + bt = &tags->breserved_tags; + if (waitqueue_active(&bt->bs[0].wait)) + wake_up(&bt->bs[0].wait); + } } /* @@ -100,7 +106,7 @@ void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx) atomic_dec(&tags->active_queues); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); } /* @@ -584,7 +590,7 @@ int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth) * static and should never need resizing. */ bt_update_count(&tags->bitmap_tags, tdepth); - blk_mq_tag_wakeup_all(tags); + blk_mq_tag_wakeup_all(tags, false); return 0; } diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index 6206ed1..a6fa0fc 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h @@ -54,6 +54,7 @@ extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); extern void blk_mq_tag_init_last_tag(struct blk_mq_tags *tags, unsigned int *last_tag); extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth); +extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool); enum { BLK_MQ_TAG_CACHE_MIN = 1, diff --git a/block/blk-mq.c b/block/blk-mq.c index da1ab56..2f95747 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -107,7 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref) wake_up_all(&q->mq_freeze_wq); } -static void blk_mq_freeze_queue_start(struct request_queue *q) +void blk_mq_freeze_queue_start(struct request_queue *q) { bool freeze; @@ -120,6 +120,7 @@ static void blk_mq_freeze_queue_start(struct request_queue *q) blk_mq_run_queues(q, false); } } +EXPORT_SYMBOL_GPL(blk_mq_freeze_queue_start); static void blk_mq_freeze_queue_wait(struct request_queue *q) { @@ -136,7 +137,7 @@ void blk_mq_freeze_queue(struct request_queue *q) blk_mq_freeze_queue_wait(q); } -static void blk_mq_unfreeze_queue(struct request_queue *q) +void blk_mq_unfreeze_queue(struct request_queue *q) { bool wake; @@ -149,6 +150,24 @@ static void blk_mq_unfreeze_queue(struct request_queue *q) wake_up_all(&q->mq_freeze_wq); } } +EXPORT_SYMBOL_GPL(blk_mq_unfreeze_queue); + +void blk_mq_wake_waiters(struct request_queue *q) +{ + struct blk_mq_hw_ctx *hctx; + unsigned int i; + + queue_for_each_hw_ctx(q, hctx, i) + if (blk_mq_hw_queue_mapped(hctx)) + blk_mq_tag_wakeup_all(hctx->tags, true); + + /* + * If we are called because the queue has now been marked as + * dying, we need to ensure that processes currently waiting on + * the queue are notified as well. 
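A driver would normally reach the wake-up path being added here through the new blk_set_queue_dying() from the blk-core.c hunk above. A hypothetical teardown sequence (function name and ordering are illustrative, not taken from the patch):

#include <linux/blkdev.h>
#include <linux/genhd.h>

static void example_remove_disk(struct gendisk *disk)
{
        struct request_queue *q = disk->queue;

        /* mark the queue dying first: tag waiters and freeze waiters are
         * woken instead of sleeping forever on dead hardware */
        blk_set_queue_dying(q);

        del_gendisk(disk);
        blk_cleanup_queue(q);   /* normal teardown completes the job */
}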
+ */ + wake_up_all(&q->mq_freeze_wq); +} bool blk_mq_can_queue(struct blk_mq_hw_ctx *hctx) { @@ -258,8 +277,10 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, ctx = alloc_data.ctx; } blk_mq_put_ctx(ctx); - if (!rq) + if (!rq) { + blk_mq_queue_exit(q); return ERR_PTR(-EWOULDBLOCK); + } return rq; } EXPORT_SYMBOL(blk_mq_alloc_request); @@ -383,6 +404,12 @@ void blk_mq_complete_request(struct request *rq) } EXPORT_SYMBOL(blk_mq_complete_request); +int blk_mq_request_started(struct request *rq) +{ + return test_bit(REQ_ATOM_STARTED, &rq->atomic_flags); +} +EXPORT_SYMBOL_GPL(blk_mq_request_started); + void blk_mq_start_request(struct request *rq) { struct request_queue *q = rq->q; @@ -500,12 +527,38 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head) } EXPORT_SYMBOL(blk_mq_add_to_requeue_list); +void blk_mq_cancel_requeue_work(struct request_queue *q) +{ + cancel_work_sync(&q->requeue_work); +} +EXPORT_SYMBOL_GPL(blk_mq_cancel_requeue_work); + void blk_mq_kick_requeue_list(struct request_queue *q) { kblockd_schedule_work(&q->requeue_work); } EXPORT_SYMBOL(blk_mq_kick_requeue_list); +void blk_mq_abort_requeue_list(struct request_queue *q) +{ + unsigned long flags; + LIST_HEAD(rq_list); + + spin_lock_irqsave(&q->requeue_lock, flags); + list_splice_init(&q->requeue_list, &rq_list); + spin_unlock_irqrestore(&q->requeue_lock, flags); + + while (!list_empty(&rq_list)) { + struct request *rq; + + rq = list_first_entry(&rq_list, struct request, queuelist); + list_del_init(&rq->queuelist); + rq->errors = -EIO; + blk_mq_end_request(rq, rq->errors); + } +} +EXPORT_SYMBOL(blk_mq_abort_requeue_list); + static inline bool is_flush_request(struct request *rq, struct blk_flush_queue *fq, unsigned int tag) { @@ -566,13 +619,24 @@ void blk_mq_rq_timed_out(struct request *req, bool reserved) break; } } - + static void blk_mq_check_expired(struct blk_mq_hw_ctx *hctx, struct request *rq, void *priv, bool reserved) { struct blk_mq_timeout_data *data = priv; - if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) + if (!test_bit(REQ_ATOM_STARTED, &rq->atomic_flags)) { + /* + * If a request wasn't started before the queue was + * marked dying, kill it here or it'll go unnoticed. 
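The helpers exported above (blk_mq_request_started(), blk_mq_abort_requeue_list(), ...) appear aimed at drivers that must fail outstanding requests once the hardware is gone, mirroring the dying-queue handling added to blk_mq_check_expired() just below. A hypothetical per-request cancel helper; how the driver iterates its outstanding requests is left out:

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

static void example_cancel_request(struct request *rq)
{
        rq->errors = -EIO;

        if (blk_mq_request_started(rq))
                blk_mq_complete_request(rq);            /* started: complete with error */
        else
                blk_mq_end_request(rq, rq->errors);     /* never started: just end it */
}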
+ */ + if (unlikely(blk_queue_dying(rq->q))) { + rq->errors = -EIO; + blk_mq_complete_request(rq); + } + return; + } + if (rq->cmd_flags & REQ_NO_TIMEOUT) return; if (time_after_eq(jiffies, rq->deadline)) { @@ -1601,7 +1665,6 @@ static int blk_mq_init_hctx(struct request_queue *q, hctx->queue = q; hctx->queue_num = hctx_idx; hctx->flags = set->flags; - hctx->cmd_size = set->cmd_size; blk_mq_init_cpu_notifier(&hctx->cpu_notifier, blk_mq_hctx_notify, hctx); diff --git a/block/blk-mq.h b/block/blk-mq.h index 206230e..4f4f943 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -32,6 +32,7 @@ void blk_mq_free_queue(struct request_queue *q); void blk_mq_clone_flush_request(struct request *flush_rq, struct request *orig_rq); int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr); +void blk_mq_wake_waiters(struct request_queue *q); /* * CPU hotplug helpers diff --git a/block/blk-timeout.c b/block/blk-timeout.c index 56c0258..246dfb1 100644 --- a/block/blk-timeout.c +++ b/block/blk-timeout.c @@ -190,6 +190,9 @@ void blk_add_timer(struct request *req) struct request_queue *q = req->q; unsigned long expiry; + if (req->cmd_flags & REQ_NO_TIMEOUT) + return; + /* blk-mq has its own handler, so we don't need ->rq_timed_out_fn */ if (!q->mq_ops && !q->rq_timed_out_fn) return; diff --git a/crypto/aes_generic.c b/crypto/aes_generic.c index 9b3c54c..3dd1011 100644 --- a/crypto/aes_generic.c +++ b/crypto/aes_generic.c @@ -1475,3 +1475,4 @@ module_exit(aes_fini); MODULE_DESCRIPTION("Rijndael (AES) Cipher Algorithm"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_ALIAS_CRYPTO("aes"); +MODULE_ALIAS_CRYPTO("aes-generic"); diff --git a/crypto/ansi_cprng.c b/crypto/ansi_cprng.c index b4485a1..6f5bebc 100644 --- a/crypto/ansi_cprng.c +++ b/crypto/ansi_cprng.c @@ -477,3 +477,4 @@ MODULE_PARM_DESC(dbg, "Boolean to enable debugging (0/1 == off/on)"); module_init(prng_mod_init); module_exit(prng_mod_fini); MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_ALIAS_CRYPTO("ansi_cprng"); diff --git a/crypto/blowfish_generic.c b/crypto/blowfish_generic.c index 7bd71f0..87b392a 100644 --- a/crypto/blowfish_generic.c +++ b/crypto/blowfish_generic.c @@ -139,3 +139,4 @@ module_exit(blowfish_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Blowfish Cipher Algorithm"); MODULE_ALIAS_CRYPTO("blowfish"); +MODULE_ALIAS_CRYPTO("blowfish-generic"); diff --git a/crypto/camellia_generic.c b/crypto/camellia_generic.c index 1b74c5a..a02286b 100644 --- a/crypto/camellia_generic.c +++ b/crypto/camellia_generic.c @@ -1099,3 +1099,4 @@ module_exit(camellia_fini); MODULE_DESCRIPTION("Camellia Cipher Algorithm"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("camellia"); +MODULE_ALIAS_CRYPTO("camellia-generic"); diff --git a/crypto/cast5_generic.c b/crypto/cast5_generic.c index 84c86db..df5c726 100644 --- a/crypto/cast5_generic.c +++ b/crypto/cast5_generic.c @@ -550,3 +550,4 @@ module_exit(cast5_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast5 Cipher Algorithm"); MODULE_ALIAS_CRYPTO("cast5"); +MODULE_ALIAS_CRYPTO("cast5-generic"); diff --git a/crypto/cast6_generic.c b/crypto/cast6_generic.c index f408f0b..058c8d7 100644 --- a/crypto/cast6_generic.c +++ b/crypto/cast6_generic.c @@ -292,3 +292,4 @@ module_exit(cast6_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cast6 Cipher Algorithm"); MODULE_ALIAS_CRYPTO("cast6"); +MODULE_ALIAS_CRYPTO("cast6-generic"); diff --git a/crypto/crc32c_generic.c b/crypto/crc32c_generic.c index 2a06202..06f1b60 100644 --- a/crypto/crc32c_generic.c +++ b/crypto/crc32c_generic.c @@ -171,4 +171,5 @@ 
MODULE_AUTHOR("Clay Haapala <chaapala@cisco.com>"); MODULE_DESCRIPTION("CRC32c (Castagnoli) calculations wrapper for lib/crc32c"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crc32c"); +MODULE_ALIAS_CRYPTO("crc32c-generic"); MODULE_SOFTDEP("pre: crc32c"); diff --git a/crypto/crct10dif_generic.c b/crypto/crct10dif_generic.c index 08bb4f5..c1229614 100644 --- a/crypto/crct10dif_generic.c +++ b/crypto/crct10dif_generic.c @@ -125,3 +125,4 @@ MODULE_AUTHOR("Tim Chen <tim.c.chen@linux.intel.com>"); MODULE_DESCRIPTION("T10 DIF CRC calculation."); MODULE_LICENSE("GPL"); MODULE_ALIAS_CRYPTO("crct10dif"); +MODULE_ALIAS_CRYPTO("crct10dif-generic"); diff --git a/crypto/des_generic.c b/crypto/des_generic.c index 4291294..a717205 100644 --- a/crypto/des_generic.c +++ b/crypto/des_generic.c @@ -983,8 +983,6 @@ static struct crypto_alg des_algs[2] = { { .cia_decrypt = des3_ede_decrypt } } } }; -MODULE_ALIAS_CRYPTO("des3_ede"); - static int __init des_generic_mod_init(void) { return crypto_register_algs(des_algs, ARRAY_SIZE(des_algs)); @@ -1001,4 +999,7 @@ module_exit(des_generic_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("DES & Triple DES EDE Cipher Algorithms"); MODULE_AUTHOR("Dag Arne Osvik <da@osvik.no>"); -MODULE_ALIAS("des"); +MODULE_ALIAS_CRYPTO("des"); +MODULE_ALIAS_CRYPTO("des-generic"); +MODULE_ALIAS_CRYPTO("des3_ede"); +MODULE_ALIAS_CRYPTO("des3_ede-generic"); diff --git a/crypto/ghash-generic.c b/crypto/ghash-generic.c index 4e97fae..bac7099 100644 --- a/crypto/ghash-generic.c +++ b/crypto/ghash-generic.c @@ -173,3 +173,4 @@ module_exit(ghash_mod_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("GHASH Message Digest Algorithm"); MODULE_ALIAS_CRYPTO("ghash"); +MODULE_ALIAS_CRYPTO("ghash-generic"); diff --git a/crypto/krng.c b/crypto/krng.c index 67c88b3..0224841 100644 --- a/crypto/krng.c +++ b/crypto/krng.c @@ -63,3 +63,4 @@ module_exit(krng_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Kernel Random Number Generator"); MODULE_ALIAS_CRYPTO("stdrng"); +MODULE_ALIAS_CRYPTO("krng"); diff --git a/crypto/salsa20_generic.c b/crypto/salsa20_generic.c index 3d0f9df..f550b5d 100644 --- a/crypto/salsa20_generic.c +++ b/crypto/salsa20_generic.c @@ -249,3 +249,4 @@ module_exit(salsa20_generic_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Salsa20 stream cipher algorithm"); MODULE_ALIAS_CRYPTO("salsa20"); +MODULE_ALIAS_CRYPTO("salsa20-generic"); diff --git a/crypto/serpent_generic.c b/crypto/serpent_generic.c index a53b5e2..94970a7 100644 --- a/crypto/serpent_generic.c +++ b/crypto/serpent_generic.c @@ -667,3 +667,4 @@ MODULE_DESCRIPTION("Serpent and tnepres (kerneli compatible serpent reversed) Ci MODULE_AUTHOR("Dag Arne Osvik <osvik@ii.uib.no>"); MODULE_ALIAS_CRYPTO("tnepres"); MODULE_ALIAS_CRYPTO("serpent"); +MODULE_ALIAS_CRYPTO("serpent-generic"); diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c index 039e58c..a3e50c3 100644 --- a/crypto/sha1_generic.c +++ b/crypto/sha1_generic.c @@ -154,3 +154,4 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm"); MODULE_ALIAS_CRYPTO("sha1"); +MODULE_ALIAS_CRYPTO("sha1-generic"); diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c index 5eb21b1..b001ff5 100644 --- a/crypto/sha256_generic.c +++ b/crypto/sha256_generic.c @@ -385,4 +385,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-224 and SHA-256 Secure Hash Algorithm"); MODULE_ALIAS_CRYPTO("sha224"); +MODULE_ALIAS_CRYPTO("sha224-generic"); MODULE_ALIAS_CRYPTO("sha256"); +MODULE_ALIAS_CRYPTO("sha256-generic"); diff --git a/crypto/sha512_generic.c 
b/crypto/sha512_generic.c index 8d0b19e..1c3c376 100644 --- a/crypto/sha512_generic.c +++ b/crypto/sha512_generic.c @@ -289,4 +289,6 @@ MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA-512 and SHA-384 Secure Hash Algorithms"); MODULE_ALIAS_CRYPTO("sha384"); +MODULE_ALIAS_CRYPTO("sha384-generic"); MODULE_ALIAS_CRYPTO("sha512"); +MODULE_ALIAS_CRYPTO("sha512-generic"); diff --git a/crypto/tea.c b/crypto/tea.c index 495be2d..b70b441 100644 --- a/crypto/tea.c +++ b/crypto/tea.c @@ -270,6 +270,7 @@ static void __exit tea_mod_fini(void) crypto_unregister_algs(tea_algs, ARRAY_SIZE(tea_algs)); } +MODULE_ALIAS_CRYPTO("tea"); MODULE_ALIAS_CRYPTO("xtea"); MODULE_ALIAS_CRYPTO("xeta"); diff --git a/crypto/tgr192.c b/crypto/tgr192.c index 6e5651c..321bc6f 100644 --- a/crypto/tgr192.c +++ b/crypto/tgr192.c @@ -676,6 +676,7 @@ static void __exit tgr192_mod_fini(void) crypto_unregister_shashes(tgr_algs, ARRAY_SIZE(tgr_algs)); } +MODULE_ALIAS_CRYPTO("tgr192"); MODULE_ALIAS_CRYPTO("tgr160"); MODULE_ALIAS_CRYPTO("tgr128"); diff --git a/crypto/twofish_generic.c b/crypto/twofish_generic.c index 523ad8c..ebf7a3e 100644 --- a/crypto/twofish_generic.c +++ b/crypto/twofish_generic.c @@ -212,3 +212,4 @@ module_exit(twofish_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION ("Twofish Cipher Algorithm"); MODULE_ALIAS_CRYPTO("twofish"); +MODULE_ALIAS_CRYPTO("twofish-generic"); diff --git a/crypto/wp512.c b/crypto/wp512.c index 0de42eb..7ee5a04 100644 --- a/crypto/wp512.c +++ b/crypto/wp512.c @@ -1167,6 +1167,7 @@ static void __exit wp512_mod_fini(void) crypto_unregister_shashes(wp_algs, ARRAY_SIZE(wp_algs)); } +MODULE_ALIAS_CRYPTO("wp512"); MODULE_ALIAS_CRYPTO("wp384"); MODULE_ALIAS_CRYPTO("wp256"); diff --git a/drivers/Makefile b/drivers/Makefile index 67d2334..527a6da 100644 --- a/drivers/Makefile +++ b/drivers/Makefile @@ -50,7 +50,10 @@ obj-$(CONFIG_RESET_CONTROLLER) += reset/ obj-y += tty/ obj-y += char/ -# gpu/ comes after char for AGP vs DRM startup +# iommu/ comes before gpu as gpu are using iommu controllers +obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ + +# gpu/ comes after char for AGP vs DRM startup and after iommu obj-y += gpu/ obj-$(CONFIG_CONNECTOR) += connector/ @@ -141,7 +144,6 @@ obj-y += clk/ obj-$(CONFIG_MAILBOX) += mailbox/ obj-$(CONFIG_HWSPINLOCK) += hwspinlock/ -obj-$(CONFIG_IOMMU_SUPPORT) += iommu/ obj-$(CONFIG_REMOTEPROC) += remoteproc/ obj-$(CONFIG_RPMSG) += rpmsg/ diff --git a/drivers/acpi/acpi_processor.c b/drivers/acpi/acpi_processor.c index 1fdf5e0..1020b1b 100644 --- a/drivers/acpi/acpi_processor.c +++ b/drivers/acpi/acpi_processor.c @@ -170,7 +170,7 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) acpi_status status; int ret; - if (pr->apic_id == -1) + if (pr->phys_id == -1) return -ENODEV; status = acpi_evaluate_integer(pr->handle, "_STA", NULL, &sta); @@ -180,13 +180,13 @@ static int acpi_processor_hotadd_init(struct acpi_processor *pr) cpu_maps_update_begin(); cpu_hotplug_begin(); - ret = acpi_map_lsapic(pr->handle, pr->apic_id, &pr->id); + ret = acpi_map_cpu(pr->handle, pr->phys_id, &pr->id); if (ret) goto out; ret = arch_register_cpu(pr->id); if (ret) { - acpi_unmap_lsapic(pr->id); + acpi_unmap_cpu(pr->id); goto out; } @@ -215,7 +215,7 @@ static int acpi_processor_get_info(struct acpi_device *device) union acpi_object object = { 0 }; struct acpi_buffer buffer = { sizeof(union acpi_object), &object }; struct acpi_processor *pr = acpi_driver_data(device); - int apic_id, cpu_index, device_declaration = 0; + int phys_id, cpu_index, device_declaration = 0; acpi_status status = 
AE_OK; static int cpu0_initialized; unsigned long long value; @@ -262,15 +262,18 @@ static int acpi_processor_get_info(struct acpi_device *device) pr->acpi_id = value; } - apic_id = acpi_get_apicid(pr->handle, device_declaration, pr->acpi_id); - if (apic_id < 0) - acpi_handle_debug(pr->handle, "failed to get CPU APIC ID.\n"); - pr->apic_id = apic_id; + phys_id = acpi_get_phys_id(pr->handle, device_declaration, pr->acpi_id); + if (phys_id < 0) + acpi_handle_debug(pr->handle, "failed to get CPU physical ID.\n"); + pr->phys_id = phys_id; - cpu_index = acpi_map_cpuid(pr->apic_id, pr->acpi_id); + cpu_index = acpi_map_cpuid(pr->phys_id, pr->acpi_id); if (!cpu0_initialized && !acpi_has_cpu_in_madt()) { cpu0_initialized = 1; - /* Handle UP system running SMP kernel, with no LAPIC in MADT */ + /* + * Handle UP system running SMP kernel, with no CPU + * entry in MADT + */ if ((cpu_index == -1) && (num_online_cpus() == 1)) cpu_index = 0; } @@ -458,7 +461,7 @@ static void acpi_processor_remove(struct acpi_device *device) /* Remove the CPU. */ arch_unregister_cpu(pr->id); - acpi_unmap_lsapic(pr->id); + acpi_unmap_cpu(pr->id); cpu_hotplug_done(); cpu_maps_update_done(); diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c index c2daa85..c0d44d3 100644 --- a/drivers/acpi/device_pm.c +++ b/drivers/acpi/device_pm.c @@ -257,7 +257,7 @@ int acpi_bus_init_power(struct acpi_device *device) device->power.state = ACPI_STATE_UNKNOWN; if (!acpi_device_is_present(device)) - return 0; + return -ENXIO; result = acpi_device_get_power(device, &state); if (result) diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c index a27d31d..9dcf836 100644 --- a/drivers/acpi/int340x_thermal.c +++ b/drivers/acpi/int340x_thermal.c @@ -14,10 +14,10 @@ #include "internal.h" -#define DO_ENUMERATION 0x01 +#define INT3401_DEVICE 0X01 static const struct acpi_device_id int340x_thermal_device_ids[] = { - {"INT3400", DO_ENUMERATION }, - {"INT3401"}, + {"INT3400"}, + {"INT3401", INT3401_DEVICE}, {"INT3402"}, {"INT3403"}, {"INT3404"}, @@ -34,7 +34,10 @@ static int int340x_thermal_handler_attach(struct acpi_device *adev, const struct acpi_device_id *id) { #if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE) - if (id->driver_data == DO_ENUMERATION) + acpi_create_platform_device(adev); +#elif defined(INTEL_SOC_DTS_THERMAL) || defined(INTEL_SOC_DTS_THERMAL_MODULE) + /* Intel SoC DTS thermal driver needs INT3401 to set IRQ descriptor */ + if (id->driver_data == INT3401_DEVICE) acpi_create_platform_device(adev); #endif return 1; diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c index 342942f..02e4839 100644 --- a/drivers/acpi/processor_core.c +++ b/drivers/acpi/processor_core.c @@ -69,7 +69,7 @@ static int map_madt_entry(int type, u32 acpi_id) unsigned long madt_end, entry; static struct acpi_table_madt *madt; static int read_madt; - int apic_id = -1; + int phys_id = -1; /* CPU hardware ID */ if (!read_madt) { if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0, @@ -79,7 +79,7 @@ static int map_madt_entry(int type, u32 acpi_id) } if (!madt) - return apic_id; + return phys_id; entry = (unsigned long)madt; madt_end = entry + madt->header.length; @@ -91,18 +91,18 @@ static int map_madt_entry(int type, u32 acpi_id) struct acpi_subtable_header *header = (struct acpi_subtable_header *)entry; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) { - if (!map_lapic_id(header, acpi_id, &apic_id)) + if (!map_lapic_id(header, acpi_id, &phys_id)) break; } else if (header->type == 
ACPI_MADT_TYPE_LOCAL_X2APIC) { - if (!map_x2apic_id(header, type, acpi_id, &apic_id)) + if (!map_x2apic_id(header, type, acpi_id, &phys_id)) break; } else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) { - if (!map_lsapic_id(header, type, acpi_id, &apic_id)) + if (!map_lsapic_id(header, type, acpi_id, &phys_id)) break; } entry += header->length; } - return apic_id; + return phys_id; } static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) @@ -110,7 +110,7 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct acpi_subtable_header *header; - int apic_id = -1; + int phys_id = -1; if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer))) goto exit; @@ -126,38 +126,38 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id) header = (struct acpi_subtable_header *)obj->buffer.pointer; if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) - map_lapic_id(header, acpi_id, &apic_id); + map_lapic_id(header, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) - map_lsapic_id(header, type, acpi_id, &apic_id); + map_lsapic_id(header, type, acpi_id, &phys_id); else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) - map_x2apic_id(header, type, acpi_id, &apic_id); + map_x2apic_id(header, type, acpi_id, &phys_id); exit: kfree(buffer.pointer); - return apic_id; + return phys_id; } -int acpi_get_apicid(acpi_handle handle, int type, u32 acpi_id) +int acpi_get_phys_id(acpi_handle handle, int type, u32 acpi_id) { - int apic_id; + int phys_id; - apic_id = map_mat_entry(handle, type, acpi_id); - if (apic_id == -1) - apic_id = map_madt_entry(type, acpi_id); + phys_id = map_mat_entry(handle, type, acpi_id); + if (phys_id == -1) + phys_id = map_madt_entry(type, acpi_id); - return apic_id; + return phys_id; } -int acpi_map_cpuid(int apic_id, u32 acpi_id) +int acpi_map_cpuid(int phys_id, u32 acpi_id) { #ifdef CONFIG_SMP int i; #endif - if (apic_id == -1) { + if (phys_id == -1) { /* * On UP processor, there is no _MAT or MADT table. - * So above apic_id is always set to -1. + * So above phys_id is always set to -1. * * BIOS may define multiple CPU handles even for UP processor. * For example, @@ -170,7 +170,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) * Processor (CPU3, 0x03, 0x00000410, 0x06) {} * } * - * Ignores apic_id and always returns 0 for the processor + * Ignores phys_id and always returns 0 for the processor * handle with acpi id 0 if nr_cpu_ids is 1. * This should be the case if SMP tables are not found. * Return -1 for other CPU's handle. 
@@ -178,28 +178,28 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id) if (nr_cpu_ids <= 1 && acpi_id == 0) return acpi_id; else - return apic_id; + return phys_id; } #ifdef CONFIG_SMP for_each_possible_cpu(i) { - if (cpu_physical_id(i) == apic_id) + if (cpu_physical_id(i) == phys_id) return i; } #else /* In UP kernel, only processor 0 is valid */ - if (apic_id == 0) - return apic_id; + if (phys_id == 0) + return phys_id; #endif return -1; } int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id) { - int apic_id; + int phys_id; - apic_id = acpi_get_apicid(handle, type, acpi_id); + phys_id = acpi_get_phys_id(handle, type, acpi_id); - return acpi_map_cpuid(apic_id, acpi_id); + return acpi_map_cpuid(phys_id, acpi_id); } EXPORT_SYMBOL_GPL(acpi_get_cpuid); diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c index 16914cc..dc4d896 100644 --- a/drivers/acpi/scan.c +++ b/drivers/acpi/scan.c @@ -1001,7 +1001,7 @@ static void acpi_free_power_resources_lists(struct acpi_device *device) if (device->wakeup.flags.valid) acpi_power_resources_list_free(&device->wakeup.resources); - if (!device->flags.power_manageable) + if (!device->power.flags.power_resources) return; for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) { @@ -1744,10 +1744,8 @@ static void acpi_bus_get_power_flags(struct acpi_device *device) device->power.flags.power_resources) device->power.states[ACPI_STATE_D3_COLD].flags.os_accessible = 1; - if (acpi_bus_init_power(device)) { - acpi_free_power_resources_lists(device); + if (acpi_bus_init_power(device)) device->flags.power_manageable = 0; - } } static void acpi_bus_get_flags(struct acpi_device *device) @@ -2371,13 +2369,18 @@ static void acpi_bus_attach(struct acpi_device *device) /* Skip devices that are not present. */ if (!acpi_device_is_present(device)) { device->flags.visited = false; + device->flags.power_manageable = 0; return; } if (device->handler) goto ok; if (!device->flags.initialized) { - acpi_bus_update_power(device, NULL); + device->flags.power_manageable = + device->power.states[ACPI_STATE_D0].flags.valid; + if (acpi_bus_init_power(device)) + device->flags.power_manageable = 0; + device->flags.initialized = true; } device->flags.visited = false; diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index c72e79d2c5..032db45 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -522,6 +522,16 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "370R4E/370R4V/370R5E/3570RE/370R5V"), }, }, + + { + /* https://bugzilla.redhat.com/show_bug.cgi?id=1163574 */ + .callback = video_disable_native_backlight, + .ident = "Dell XPS15 L521X", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "XPS L521X"), + }, + }, {} }; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index a3a1360..5f60155 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -835,6 +835,7 @@ config PATA_AT32 config PATA_AT91 tristate "PATA support for AT91SAM9260" depends on ARM && SOC_AT91SAM9 + depends on !ARCH_MULTIPLATFORM help This option enables support for IDE devices on the Atmel AT91SAM9260 SoC. 
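Aside: the drivers/acpi/processor_core.c hunks earlier in this section rename the APIC-ID-centric lookup (acpi_get_apicid()/apic_id) to a generic physical-ID lookup (acpi_get_phys_id()/phys_id), while keeping the acpi_map_cpuid() fallback for UP systems unchanged. The following is only a minimal userspace sketch of that mapping control flow, not kernel code; the table contents, NR_CPUS value and helper names are assumptions made for illustration.

/*
 * Illustrative sketch of an acpi_map_cpuid()-style lookup after the
 * apic_id -> phys_id rename: given a firmware-provided physical CPU id,
 * find the matching logical CPU index, keeping the same UP fallback
 * (no MADT/_MAT entry, single CPU, acpi_id 0 maps to CPU 0).
 */
#include <stdio.h>

#define NR_CPUS 4                      /* assumed possible-CPU count */

/* assumed per-logical-CPU physical ids (e.g. APIC ids on x86) */
static const int cpu_physical_id[NR_CPUS] = { 0, 2, 4, 6 };
static const int nr_cpu_ids = NR_CPUS;

static int map_cpuid(int phys_id, unsigned int acpi_id)
{
	int i;

	if (phys_id == -1) {
		/* No MADT/_MAT entry: only trust acpi_id 0 on a UP system. */
		if (nr_cpu_ids <= 1 && acpi_id == 0)
			return (int)acpi_id;
		return -1;
	}

	for (i = 0; i < nr_cpu_ids; i++)
		if (cpu_physical_id[i] == phys_id)
			return i;

	return -1;
}

int main(void)
{
	printf("phys_id 4  -> logical CPU %d\n", map_cpuid(4, 2));   /* 2 */
	printf("phys_id 99 -> logical CPU %d\n", map_cpuid(99, 3));  /* -1 */
	return 0;
}

In the real driver the physical id comes from MADT/_MAT parsing (map_madt_entry()/map_mat_entry()) and the scan uses for_each_possible_cpu(); the sketch only mirrors the control flow the rename preserves.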
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 49f1e68..33bb06e 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -325,7 +325,6 @@ static const struct pci_device_id ahci_pci_tbl[] = { { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */ { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */ - { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */ { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */ diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c index feeb8f1..cbcd208 100644 --- a/drivers/ata/ahci_xgene.c +++ b/drivers/ata/ahci_xgene.c @@ -125,10 +125,11 @@ static int xgene_ahci_restart_engine(struct ata_port *ap) * xgene_ahci_qc_issue - Issue commands to the device * @qc: Command to issue * - * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot - * clear the BSY bit after receiving the PIO setup FIS. This results in the dma - * state machine goes into the CMFatalErrorUpdate state and locks up. By - * restarting the dma engine, it removes the controller out of lock up state. + * Due to Hardware errata for IDENTIFY DEVICE command and PACKET + * command of ATAPI protocol set, the controller cannot clear the BSY bit + * after receiving the PIO setup FIS. This results in the DMA state machine + * going into the CMFatalErrorUpdate state and locks up. By restarting the + * DMA engine, it removes the controller out of lock up state. */ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) { @@ -137,7 +138,8 @@ static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc) struct xgene_ahci_context *ctx = hpriv->plat_data; int rc = 0; - if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA)) + if (unlikely((ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA) || + (ctx->last_cmd[ap->port_no] == ATA_CMD_PACKET))) xgene_ahci_restart_engine(ap); rc = ahci_qc_issue(qc); @@ -188,7 +190,7 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev, * * Clear reserved bit 8 (DEVSLP bit) as we don't support DEVSLP */ - id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); + id[ATA_ID_FEATURE_SUPP] &= cpu_to_le16(~(1 << 8)); return 0; } diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 97683e4..61a9c07 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -2003,7 +2003,7 @@ static void ahci_set_aggressive_devslp(struct ata_port *ap, bool sleep) devslp = readl(port_mmio + PORT_DEVSLP); if (!(devslp & PORT_DEVSLP_DSP)) { - dev_err(ap->host->dev, "port does not support device sleep\n"); + dev_info(ap->host->dev, "port does not support device sleep\n"); return; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5c84fb5..d1a05f9 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4233,10 +4233,33 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { { "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER }, /* devices that don't properly handle queued TRIM commands */ - { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Micron_M550*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT*M550SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Micron_M[56]*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | + ATA_HORKAGE_ZERO_AFTER_TRIM, 
}, + { "Crucial_CT*SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + + /* + * As defined, the DRAT (Deterministic Read After Trim) and RZAT + * (Return Zero After Trim) flags in the ATA Command Set are + * unreliable in the sense that they only define what happens if + * the device successfully executed the DSM TRIM command. TRIM + * is only advisory, however, and the device is free to silently + * ignore all or parts of the request. + * + * Whitelist drives that are known to reliably return zeroes + * after TRIM. + */ + + /* + * The intel 510 drive has buggy DRAT/RZAT. Explicitly exclude + * that model before whitelisting all other intel SSDs. + */ + { "INTEL*SSDSC2MH*", NULL, 0, }, + + { "INTEL*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SSD*INTEL*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "Samsung*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "SAMSUNG*SSD*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, + { "ST[1248][0248]0[FH]*", NULL, ATA_HORKAGE_ZERO_AFTER_TRIM, }, /* * Some WD SATA-I drives spin up and down erratically when the link @@ -4748,7 +4771,10 @@ static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap) return NULL; for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) { - tag = tag < max_queue ? tag : 0; + if (ap->flags & ATA_FLAG_LOWTAG) + tag = i; + else + tag = tag < max_queue ? tag : 0; /* the last tag is reserved for internal command. */ if (tag == ATA_TAG_INTERNAL) diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c index 3dbec89..8d00c26 100644 --- a/drivers/ata/libata-eh.c +++ b/drivers/ata/libata-eh.c @@ -2389,6 +2389,7 @@ const char *ata_get_cmd_descript(u8 command) return NULL; } +EXPORT_SYMBOL_GPL(ata_get_cmd_descript); /** * ata_eh_link_report - report error handling to user diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c index e364e86..6abd17a 100644 --- a/drivers/ata/libata-scsi.c +++ b/drivers/ata/libata-scsi.c @@ -2532,13 +2532,15 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf) rbuf[15] = lowest_aligned; if (ata_id_has_trim(args->id)) { - rbuf[14] |= 0x80; /* TPE */ + rbuf[14] |= 0x80; /* LBPME */ - if (ata_id_has_zero_after_trim(args->id)) - rbuf[14] |= 0x40; /* TPRZ */ + if (ata_id_has_zero_after_trim(args->id) && + dev->horkage & ATA_HORKAGE_ZERO_AFTER_TRIM) { + ata_dev_info(dev, "Enabling discard_zeroes_data\n"); + rbuf[14] |= 0x40; /* LBPRZ */ + } } } - return 0; } diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c index db90aa3..2e86e3b 100644 --- a/drivers/ata/libata-sff.c +++ b/drivers/ata/libata-sff.c @@ -1333,7 +1333,19 @@ void ata_sff_flush_pio_task(struct ata_port *ap) DPRINTK("ENTER\n"); cancel_delayed_work_sync(&ap->sff_pio_task); + + /* + * We wanna reset the HSM state to IDLE. If we do so without + * grabbing the port lock, critical sections protected by it which + * expect the HSM state to stay stable may get surprised. For + * example, we may set IDLE in between the time + * __ata_sff_port_intr() checks for HSM_ST_IDLE and before it calls + * ata_sff_hsm_move() causing ata_sff_hsm_move() to BUG(). 
+ */ + spin_lock_irq(ap->lock); ap->hsm_task_state = HSM_ST_IDLE; + spin_unlock_irq(ap->lock); + ap->sff_pio_task_link = NULL; if (ata_msg_ctl(ap)) diff --git a/drivers/ata/sata_dwc_460ex.c b/drivers/ata/sata_dwc_460ex.c index c7ddef8..8e824817 100644 --- a/drivers/ata/sata_dwc_460ex.c +++ b/drivers/ata/sata_dwc_460ex.c @@ -797,7 +797,7 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) if (err) { dev_err(host_pvt.dwc_dev, "%s: dma_request_interrupts returns" " %d\n", __func__, err); - goto error_out; + return err; } /* Enabe DMA */ @@ -808,11 +808,6 @@ static int dma_dwc_init(struct sata_dwc_device *hsdev, int irq) sata_dma_regs); return 0; - -error_out: - dma_dwc_exit(hsdev); - - return err; } static int sata_dwc_scr_read(struct ata_link *link, unsigned int scr, u32 *val) @@ -1662,7 +1657,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) char *ver = (char *)&versionr; u8 *base = NULL; int err = 0; - int irq, rc; + int irq; struct ata_host *host; struct ata_port_info pi = sata_dwc_port_info[0]; const struct ata_port_info *ppi[] = { &pi, NULL }; @@ -1725,7 +1720,7 @@ static int sata_dwc_probe(struct platform_device *ofdev) if (irq == NO_IRQ) { dev_err(&ofdev->dev, "no SATA DMA irq\n"); err = -ENODEV; - goto error_out; + goto error_iomap; } /* Get physical SATA DMA register base address */ @@ -1734,14 +1729,16 @@ static int sata_dwc_probe(struct platform_device *ofdev) dev_err(&ofdev->dev, "ioremap failed for AHBDMA register" " address\n"); err = -ENODEV; - goto error_out; + goto error_iomap; } /* Save dev for later use in dev_xxx() routines */ host_pvt.dwc_dev = &ofdev->dev; /* Initialize AHB DMAC */ - dma_dwc_init(hsdev, irq); + err = dma_dwc_init(hsdev, irq); + if (err) + goto error_dma_iomap; /* Enable SATA Interrupts */ sata_dwc_enable_interrupts(hsdev); @@ -1759,9 +1756,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) * device discovery process, invoking our port_start() handler & * error_handler() to execute a dummy Softreset EH session */ - rc = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); - - if (rc != 0) + err = ata_host_activate(host, irq, sata_dwc_isr, 0, &sata_dwc_sht); + if (err) dev_err(&ofdev->dev, "failed to activate host"); dev_set_drvdata(&ofdev->dev, host); @@ -1770,7 +1766,8 @@ static int sata_dwc_probe(struct platform_device *ofdev) error_out: /* Free SATA DMA resources */ dma_dwc_exit(hsdev); - +error_dma_iomap: + iounmap((void __iomem *)host_pvt.sata_dma_regs); error_iomap: iounmap(base); error_kmalloc: @@ -1791,6 +1788,7 @@ static int sata_dwc_remove(struct platform_device *ofdev) /* Free SATA DMA resources */ dma_dwc_exit(hsdev); + iounmap((void __iomem *)host_pvt.sata_dma_regs); iounmap(hsdev->reg_base); kfree(hsdev); kfree(host); diff --git a/drivers/ata/sata_sil24.c b/drivers/ata/sata_sil24.c index d81b20d..ea65594 100644 --- a/drivers/ata/sata_sil24.c +++ b/drivers/ata/sata_sil24.c @@ -246,7 +246,7 @@ enum { /* host flags */ SIL24_COMMON_FLAGS = ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ | ATA_FLAG_ACPI_SATA | - ATA_FLAG_AN | ATA_FLAG_PMP, + ATA_FLAG_AN | ATA_FLAG_PMP | ATA_FLAG_LOWTAG, SIL24_FLAG_PCIX_IRQ_WOC = (1 << 24), /* IRQ loss errata on PCI-X */ IRQ_STAT_4PORTS = 0xf, diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index ae9f615..aa2224a 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -530,7 +530,7 @@ static int null_add_dev(void) goto out_cleanup_queues; nullb->q = blk_mq_init_queue(&nullb->tag_set); - if (!nullb->q) { + if (IS_ERR(nullb->q)) { rv = 
-ENOMEM; goto out_cleanup_tags; } diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c index b1d5d87..cb529e9 100644 --- a/drivers/block/nvme-core.c +++ b/drivers/block/nvme-core.c @@ -215,6 +215,7 @@ static void nvme_set_info(struct nvme_cmd_info *cmd, void *ctx, cmd->fn = handler; cmd->ctx = ctx; cmd->aborted = 0; + blk_mq_start_request(blk_mq_rq_from_pdu(cmd)); } /* Special values must be less than 0x1000 */ @@ -431,8 +432,13 @@ static void req_completion(struct nvme_queue *nvmeq, void *ctx, if (unlikely(status)) { if (!(status & NVME_SC_DNR || blk_noretry_request(req)) && (jiffies - req->start_time) < req->timeout) { + unsigned long flags; + blk_mq_requeue_request(req); - blk_mq_kick_requeue_list(req->q); + spin_lock_irqsave(req->q->queue_lock, flags); + if (!blk_queue_stopped(req->q)) + blk_mq_kick_requeue_list(req->q); + spin_unlock_irqrestore(req->q->queue_lock, flags); return; } req->errors = nvme_error_status(status); @@ -664,8 +670,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx, } } - blk_mq_start_request(req); - nvme_set_info(cmd, iod, req_completion); spin_lock_irq(&nvmeq->q_lock); if (req->cmd_flags & REQ_DISCARD) @@ -835,6 +839,7 @@ static int nvme_submit_async_admin_req(struct nvme_dev *dev) if (IS_ERR(req)) return PTR_ERR(req); + req->cmd_flags |= REQ_NO_TIMEOUT; cmd_info = blk_mq_rq_to_pdu(req); nvme_set_info(cmd_info, req, async_req_completion); @@ -1016,14 +1021,19 @@ static void nvme_abort_req(struct request *req) struct nvme_command cmd; if (!nvmeq->qid || cmd_rq->aborted) { + unsigned long flags; + + spin_lock_irqsave(&dev_list_lock, flags); if (work_busy(&dev->reset_work)) - return; + goto out; list_del_init(&dev->node); dev_warn(&dev->pci_dev->dev, "I/O %d QID %d timeout, reset controller\n", req->tag, nvmeq->qid); dev->reset_workfn = nvme_reset_failed_dev; queue_work(nvme_workq, &dev->reset_work); + out: + spin_unlock_irqrestore(&dev_list_lock, flags); return; } @@ -1064,15 +1074,22 @@ static void nvme_cancel_queue_ios(struct blk_mq_hw_ctx *hctx, void *ctx; nvme_completion_fn fn; struct nvme_cmd_info *cmd; - static struct nvme_completion cqe = { - .status = cpu_to_le16(NVME_SC_ABORT_REQ << 1), - }; + struct nvme_completion cqe; + + if (!blk_mq_request_started(req)) + return; cmd = blk_mq_rq_to_pdu(req); if (cmd->ctx == CMD_CTX_CANCELLED) return; + if (blk_queue_dying(req->q)) + cqe.status = cpu_to_le16((NVME_SC_ABORT_REQ | NVME_SC_DNR) << 1); + else + cqe.status = cpu_to_le16(NVME_SC_ABORT_REQ << 1); + + dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid); ctx = cancel_cmd_info(cmd, &fn); @@ -1084,17 +1101,29 @@ static enum blk_eh_timer_return nvme_timeout(struct request *req, bool reserved) struct nvme_cmd_info *cmd = blk_mq_rq_to_pdu(req); struct nvme_queue *nvmeq = cmd->nvmeq; - dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, - nvmeq->qid); - if (nvmeq->dev->initialized) - nvme_abort_req(req); - /* * The aborted req will be completed on receiving the abort req. * We enable the timer again. If hit twice, it'll cause a device reset, * as the device then is in a faulty state. */ - return BLK_EH_RESET_TIMER; + int ret = BLK_EH_RESET_TIMER; + + dev_warn(nvmeq->q_dmadev, "Timeout I/O %d QID %d\n", req->tag, + nvmeq->qid); + + spin_lock_irq(&nvmeq->q_lock); + if (!nvmeq->dev->initialized) { + /* + * Force cancelled command frees the request, which requires we + * return BLK_EH_NOT_HANDLED. 
+ */ + nvme_cancel_queue_ios(nvmeq->hctx, req, nvmeq, reserved); + ret = BLK_EH_NOT_HANDLED; + } else + nvme_abort_req(req); + spin_unlock_irq(&nvmeq->q_lock); + + return ret; } static void nvme_free_queue(struct nvme_queue *nvmeq) @@ -1131,10 +1160,16 @@ static void nvme_free_queues(struct nvme_dev *dev, int lowest) */ static int nvme_suspend_queue(struct nvme_queue *nvmeq) { - int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; + int vector; spin_lock_irq(&nvmeq->q_lock); + if (nvmeq->cq_vector == -1) { + spin_unlock_irq(&nvmeq->q_lock); + return 1; + } + vector = nvmeq->dev->entry[nvmeq->cq_vector].vector; nvmeq->dev->online_queues--; + nvmeq->cq_vector = -1; spin_unlock_irq(&nvmeq->q_lock); irq_set_affinity_hint(vector, NULL); @@ -1169,11 +1204,13 @@ static void nvme_disable_queue(struct nvme_dev *dev, int qid) adapter_delete_sq(dev, qid); adapter_delete_cq(dev, qid); } + if (!qid && dev->admin_q) + blk_mq_freeze_queue_start(dev->admin_q); nvme_clear_queue(nvmeq); } static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, - int depth, int vector) + int depth) { struct device *dmadev = &dev->pci_dev->dev; struct nvme_queue *nvmeq = kzalloc(sizeof(*nvmeq), GFP_KERNEL); @@ -1199,7 +1236,6 @@ static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid, nvmeq->cq_phase = 1; nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride]; nvmeq->q_depth = depth; - nvmeq->cq_vector = vector; nvmeq->qid = qid; dev->queue_count++; dev->queues[qid] = nvmeq; @@ -1244,6 +1280,7 @@ static int nvme_create_queue(struct nvme_queue *nvmeq, int qid) struct nvme_dev *dev = nvmeq->dev; int result; + nvmeq->cq_vector = qid - 1; result = adapter_alloc_cq(dev, qid, nvmeq); if (result < 0) return result; @@ -1355,6 +1392,14 @@ static struct blk_mq_ops nvme_mq_ops = { .timeout = nvme_timeout, }; +static void nvme_dev_remove_admin(struct nvme_dev *dev) +{ + if (dev->admin_q && !blk_queue_dying(dev->admin_q)) { + blk_cleanup_queue(dev->admin_q); + blk_mq_free_tag_set(&dev->admin_tagset); + } +} + static int nvme_alloc_admin_tags(struct nvme_dev *dev) { if (!dev->admin_q) { @@ -1370,21 +1415,20 @@ static int nvme_alloc_admin_tags(struct nvme_dev *dev) return -ENOMEM; dev->admin_q = blk_mq_init_queue(&dev->admin_tagset); - if (!dev->admin_q) { + if (IS_ERR(dev->admin_q)) { blk_mq_free_tag_set(&dev->admin_tagset); return -ENOMEM; } - } + if (!blk_get_queue(dev->admin_q)) { + nvme_dev_remove_admin(dev); + return -ENODEV; + } + } else + blk_mq_unfreeze_queue(dev->admin_q); return 0; } -static void nvme_free_admin_tags(struct nvme_dev *dev) -{ - if (dev->admin_q) - blk_mq_free_tag_set(&dev->admin_tagset); -} - static int nvme_configure_admin_queue(struct nvme_dev *dev) { int result; @@ -1416,7 +1460,7 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) nvmeq = dev->queues[0]; if (!nvmeq) { - nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH, 0); + nvmeq = nvme_alloc_queue(dev, 0, NVME_AQ_DEPTH); if (!nvmeq) return -ENOMEM; } @@ -1439,18 +1483,13 @@ static int nvme_configure_admin_queue(struct nvme_dev *dev) if (result) goto free_nvmeq; - result = nvme_alloc_admin_tags(dev); - if (result) - goto free_nvmeq; - + nvmeq->cq_vector = 0; result = queue_request_irq(dev, nvmeq, nvmeq->irqname); if (result) - goto free_tags; + goto free_nvmeq; return result; - free_tags: - nvme_free_admin_tags(dev); free_nvmeq: nvme_free_queues(dev, 0); return result; @@ -1944,7 +1983,7 @@ static void nvme_create_io_queues(struct nvme_dev *dev) unsigned i; for (i = dev->queue_count; i <= dev->max_qid; i++) - if 
(!nvme_alloc_queue(dev, i, dev->q_depth, i - 1)) + if (!nvme_alloc_queue(dev, i, dev->q_depth)) break; for (i = dev->online_queues; i <= dev->queue_count - 1; i++) @@ -2235,13 +2274,18 @@ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev) break; if (!schedule_timeout(ADMIN_TIMEOUT) || fatal_signal_pending(current)) { + /* + * Disable the controller first since we can't trust it + * at this point, but leave the admin queue enabled + * until all queue deletion requests are flushed. + * FIXME: This may take a while if there are more h/w + * queues than admin tags. + */ set_current_state(TASK_RUNNING); - nvme_disable_ctrl(dev, readq(&dev->bar->cap)); - nvme_disable_queue(dev, 0); - - send_sig(SIGKILL, dq->worker->task, 1); + nvme_clear_queue(dev->queues[0]); flush_kthread_worker(dq->worker); + nvme_disable_queue(dev, 0); return; } } @@ -2318,7 +2362,6 @@ static void nvme_del_queue_start(struct kthread_work *work) { struct nvme_queue *nvmeq = container_of(work, struct nvme_queue, cmdinfo.work); - allow_signal(SIGKILL); if (nvme_delete_sq(nvmeq)) nvme_del_queue_end(nvmeq); } @@ -2376,6 +2419,34 @@ static void nvme_dev_list_remove(struct nvme_dev *dev) kthread_stop(tmp); } +static void nvme_freeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + blk_mq_freeze_queue_start(ns->queue); + + spin_lock(ns->queue->queue_lock); + queue_flag_set(QUEUE_FLAG_STOPPED, ns->queue); + spin_unlock(ns->queue->queue_lock); + + blk_mq_cancel_requeue_work(ns->queue); + blk_mq_stop_hw_queues(ns->queue); + } +} + +static void nvme_unfreeze_queues(struct nvme_dev *dev) +{ + struct nvme_ns *ns; + + list_for_each_entry(ns, &dev->namespaces, list) { + queue_flag_clear_unlocked(QUEUE_FLAG_STOPPED, ns->queue); + blk_mq_unfreeze_queue(ns->queue); + blk_mq_start_stopped_hw_queues(ns->queue, true); + blk_mq_kick_requeue_list(ns->queue); + } +} + static void nvme_dev_shutdown(struct nvme_dev *dev) { int i; @@ -2384,8 +2455,10 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) dev->initialized = 0; nvme_dev_list_remove(dev); - if (dev->bar) + if (dev->bar) { + nvme_freeze_queues(dev); csts = readl(&dev->bar->csts); + } if (csts & NVME_CSTS_CFS || !(csts & NVME_CSTS_RDY)) { for (i = dev->queue_count - 1; i >= 0; i--) { struct nvme_queue *nvmeq = dev->queues[i]; @@ -2400,12 +2473,6 @@ static void nvme_dev_shutdown(struct nvme_dev *dev) nvme_dev_unmap(dev); } -static void nvme_dev_remove_admin(struct nvme_dev *dev) -{ - if (dev->admin_q && !blk_queue_dying(dev->admin_q)) - blk_cleanup_queue(dev->admin_q); -} - static void nvme_dev_remove(struct nvme_dev *dev) { struct nvme_ns *ns; @@ -2413,8 +2480,10 @@ static void nvme_dev_remove(struct nvme_dev *dev) list_for_each_entry(ns, &dev->namespaces, list) { if (ns->disk->flags & GENHD_FL_UP) del_gendisk(ns->disk); - if (!blk_queue_dying(ns->queue)) + if (!blk_queue_dying(ns->queue)) { + blk_mq_abort_requeue_list(ns->queue); blk_cleanup_queue(ns->queue); + } } } @@ -2495,6 +2564,7 @@ static void nvme_free_dev(struct kref *kref) nvme_free_namespaces(dev); nvme_release_instance(dev); blk_mq_free_tag_set(&dev->tagset); + blk_put_queue(dev->admin_q); kfree(dev->queues); kfree(dev->entry); kfree(dev); @@ -2591,15 +2661,20 @@ static int nvme_dev_start(struct nvme_dev *dev) } nvme_init_queue(dev->queues[0], 0); + result = nvme_alloc_admin_tags(dev); + if (result) + goto disable; result = nvme_setup_io_queues(dev); if (result) - goto disable; + goto free_tags; nvme_set_irq_hints(dev); return result; + 
free_tags: + nvme_dev_remove_admin(dev); disable: nvme_disable_queue(dev, 0); nvme_dev_list_remove(dev); @@ -2639,6 +2714,9 @@ static int nvme_dev_resume(struct nvme_dev *dev) dev->reset_workfn = nvme_remove_disks; queue_work(nvme_workq, &dev->reset_work); spin_unlock(&dev_list_lock); + } else { + nvme_unfreeze_queues(dev); + nvme_set_irq_hints(dev); } dev->initialized = 1; return 0; @@ -2776,11 +2854,10 @@ static void nvme_remove(struct pci_dev *pdev) pci_set_drvdata(pdev, NULL); flush_work(&dev->reset_work); misc_deregister(&dev->miscdev); - nvme_dev_remove(dev); nvme_dev_shutdown(dev); + nvme_dev_remove(dev); nvme_dev_remove_admin(dev); nvme_free_queues(dev, 0); - nvme_free_admin_tags(dev); nvme_release_prp_pools(dev); kref_put(&dev->kref, nvme_free_dev); } diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c index 7ef7c09..cdfbd21 100644 --- a/drivers/block/virtio_blk.c +++ b/drivers/block/virtio_blk.c @@ -638,7 +638,7 @@ static int virtblk_probe(struct virtio_device *vdev) goto out_put_disk; q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set); - if (!q) { + if (IS_ERR(q)) { err = -ENOMEM; goto out_free_tags; } diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c index 860da40..0ce5e2d 100644 --- a/drivers/bus/arm-cci.c +++ b/drivers/bus/arm-cci.c @@ -1312,6 +1312,9 @@ static int cci_probe(void) if (!np) return -ENODEV; + if (!of_device_is_available(np)) + return -ENODEV; + cci_config = of_match_node(arm_cci_matches, np)->data; if (!cci_config) return -ENODEV; diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c index fd5a5e8..982b963 100644 --- a/drivers/char/ipmi/ipmi_ssif.c +++ b/drivers/char/ipmi/ipmi_ssif.c @@ -969,7 +969,8 @@ static void sender(void *send_info, do_gettimeofday(&t); pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n", - msg->data[0], msg->data[1], t.tv_sec, t.tv_usec); + msg->data[0], msg->data[1], + (long) t.tv_sec, (long) t.tv_usec); } } diff --git a/drivers/clk/at91/clk-slow.c b/drivers/clk/at91/clk-slow.c index 32f7c1b..2f13bd5 100644 --- a/drivers/clk/at91/clk-slow.c +++ b/drivers/clk/at91/clk-slow.c @@ -70,6 +70,7 @@ struct clk_sam9x5_slow { #define to_clk_sam9x5_slow(hw) container_of(hw, struct clk_sam9x5_slow, hw) +static struct clk *slow_clk; static int clk_slow_osc_prepare(struct clk_hw *hw) { @@ -357,6 +358,8 @@ at91_clk_register_sam9x5_slow(void __iomem *sckcr, clk = clk_register(NULL, &slowck->hw); if (IS_ERR(clk)) kfree(slowck); + else + slow_clk = clk; return clk; } @@ -433,6 +436,8 @@ at91_clk_register_sam9260_slow(struct at91_pmc *pmc, clk = clk_register(NULL, &slowck->hw); if (IS_ERR(clk)) kfree(slowck); + else + slow_clk = clk; return clk; } @@ -465,3 +470,25 @@ void __init of_at91sam9260_clk_slow_setup(struct device_node *np, of_clk_add_provider(np, of_clk_src_simple_get, clk); } + +/* + * FIXME: All slow clk users are not properly claiming it (get + prepare + + * enable) before using it. + * If all users properly claiming this clock decide that they don't need it + * anymore (or are removed), it is disabled while faulty users are still + * requiring it, and the system hangs. + * Prevent this clock from being disabled until all users are properly + * requesting it. + * Once this is done we should remove this function and the slow_clk variable. 
+ */ +static int __init of_at91_clk_slow_retain(void) +{ + if (!slow_clk) + return 0; + + __clk_get(slow_clk); + clk_prepare_enable(slow_clk); + + return 0; +} +arch_initcall(of_at91_clk_slow_retain); diff --git a/drivers/clk/berlin/bg2q.c b/drivers/clk/berlin/bg2q.c index 21784e4..440ef81 100644 --- a/drivers/clk/berlin/bg2q.c +++ b/drivers/clk/berlin/bg2q.c @@ -285,7 +285,6 @@ static const struct berlin2_gate_data bg2q_gates[] __initconst = { { "pbridge", "perif", 15, CLK_IGNORE_UNUSED }, { "sdio", "perif", 16, CLK_IGNORE_UNUSED }, { "nfc", "perif", 18 }, - { "smemc", "perif", 19 }, { "pcie", "perif", 22 }, }; diff --git a/drivers/clk/clk-ppc-corenet.c b/drivers/clk/clk-ppc-corenet.c index b6e6c85..0a47d6f 100644 --- a/drivers/clk/clk-ppc-corenet.c +++ b/drivers/clk/clk-ppc-corenet.c @@ -291,7 +291,7 @@ static const struct of_device_id ppc_clk_ids[] __initconst = { {} }; -static struct platform_driver ppc_corenet_clk_driver __initdata = { +static struct platform_driver ppc_corenet_clk_driver = { .driver = { .name = "ppc_corenet_clock", .of_match_table = ppc_clk_ids, diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c index f4963b7..d48ac71 100644 --- a/drivers/clk/clk.c +++ b/drivers/clk/clk.c @@ -1366,7 +1366,7 @@ static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) new_rate = clk->ops->determine_rate(clk->hw, rate, &best_parent_rate, &parent_hw); - parent = parent_hw->clk; + parent = parent_hw ? parent_hw->clk : NULL; } else if (clk->ops->round_rate) { new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate); diff --git a/drivers/clk/rockchip/clk-cpu.c b/drivers/clk/rockchip/clk-cpu.c index 75c8c45..8539c4f 100644 --- a/drivers/clk/rockchip/clk-cpu.c +++ b/drivers/clk/rockchip/clk-cpu.c @@ -124,10 +124,11 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, { const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; unsigned long alt_prate, alt_div; + unsigned long flags; alt_prate = clk_get_rate(cpuclk->alt_parent); - spin_lock(cpuclk->lock); + spin_lock_irqsave(cpuclk->lock, flags); /* * If the old parent clock speed is less than the clock speed @@ -164,7 +165,7 @@ static int rockchip_cpuclk_pre_rate_change(struct rockchip_cpuclk *cpuclk, cpuclk->reg_base + reg_data->core_reg); } - spin_unlock(cpuclk->lock); + spin_unlock_irqrestore(cpuclk->lock, flags); return 0; } @@ -173,6 +174,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk, { const struct rockchip_cpuclk_reg_data *reg_data = cpuclk->reg_data; const struct rockchip_cpuclk_rate_table *rate; + unsigned long flags; rate = rockchip_get_cpuclk_settings(cpuclk, ndata->new_rate); if (!rate) { @@ -181,7 +183,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk, return -EINVAL; } - spin_lock(cpuclk->lock); + spin_lock_irqsave(cpuclk->lock, flags); if (ndata->old_rate < ndata->new_rate) rockchip_cpuclk_set_dividers(cpuclk, rate); @@ -201,7 +203,7 @@ static int rockchip_cpuclk_post_rate_change(struct rockchip_cpuclk *cpuclk, if (ndata->old_rate > ndata->new_rate) rockchip_cpuclk_set_dividers(cpuclk, rate); - spin_unlock(cpuclk->lock); + spin_unlock_irqrestore(cpuclk->lock, flags); return 0; } diff --git a/drivers/clk/rockchip/clk-rk3188.c b/drivers/clk/rockchip/clk-rk3188.c index c540789..7eb684c 100644 --- a/drivers/clk/rockchip/clk-rk3188.c +++ b/drivers/clk/rockchip/clk-rk3188.c @@ -210,6 +210,17 @@ PNAME(mux_sclk_hsadc_p) = { "hsadc_src", "hsadc_frac", "ext_hsadc" }; PNAME(mux_mac_p) = { "gpll", "dpll" }; 
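Aside: the at91 clk-slow.c hunk above works around consumers that use the slow clock without claiming it — an arch_initcall takes a permanent reference (__clk_get() plus clk_prepare_enable()) so the framework's refcounting can never gate the clock while such users remain. Below is a tiny userspace refcount sketch of why that extra reference is enough; the fake_clk type and function names are made up for illustration and are not the common clock framework API.

/*
 * Illustrative refcount sketch: a clock is gated only when its enable
 * count drops to zero, so one extra "retain" enable keeps it running
 * even after every well-behaved user has disabled it.
 */
#include <stdio.h>

struct fake_clk {
	const char *name;
	int enable_count;
	int running;
};

static void fake_clk_enable(struct fake_clk *clk)
{
	if (clk->enable_count++ == 0)
		clk->running = 1;
}

static void fake_clk_disable(struct fake_clk *clk)
{
	if (--clk->enable_count == 0)
		clk->running = 0;
}

int main(void)
{
	struct fake_clk slow = { .name = "slow_clk" };

	fake_clk_enable(&slow);   /* retain reference, as in the initcall */
	fake_clk_enable(&slow);   /* a well-behaved consumer */

	fake_clk_disable(&slow);  /* consumer goes away ... */
	printf("%s running: %d\n", slow.name, slow.running);  /* still 1 */

	fake_clk_disable(&slow);  /* only happens if the retain ref is dropped */
	printf("%s running: %d\n", slow.name, slow.running);  /* now 0 */
	return 0;
}

Once all slow-clock users claim the clock themselves, the retain reference (and the FIXME quoted in the hunk) can be removed, as the patch comment itself says.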
PNAME(mux_sclk_macref_p) = { "mac_src", "ext_rmii" }; +static struct rockchip_pll_clock rk3066_pll_clks[] __initdata = { + [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), + RK2928_MODE_CON, 0, 5, 0, rk3188_pll_rates), + [dpll] = PLL(pll_rk3066, PLL_DPLL, "dpll", mux_pll_p, 0, RK2928_PLL_CON(4), + RK2928_MODE_CON, 4, 4, 0, NULL), + [cpll] = PLL(pll_rk3066, PLL_CPLL, "cpll", mux_pll_p, 0, RK2928_PLL_CON(8), + RK2928_MODE_CON, 8, 6, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates), + [gpll] = PLL(pll_rk3066, PLL_GPLL, "gpll", mux_pll_p, 0, RK2928_PLL_CON(12), + RK2928_MODE_CON, 12, 7, ROCKCHIP_PLL_SYNC_RATE, rk3188_pll_rates), +}; + static struct rockchip_pll_clock rk3188_pll_clks[] __initdata = { [apll] = PLL(pll_rk3066, PLL_APLL, "apll", mux_pll_p, 0, RK2928_PLL_CON(0), RK2928_MODE_CON, 0, 6, 0, rk3188_pll_rates), @@ -427,11 +438,11 @@ static struct rockchip_clk_branch common_clk_branches[] __initdata = { /* hclk_peri gates */ GATE(0, "hclk_peri_axi_matrix", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 0, GFLAGS), GATE(0, "hclk_peri_ahb_arbi", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 6, GFLAGS), - GATE(0, "hclk_emem_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 7, GFLAGS), + GATE(0, "hclk_emem_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 7, GFLAGS), GATE(HCLK_EMAC, "hclk_emac", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 0, GFLAGS), GATE(HCLK_NANDC0, "hclk_nandc0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 9, GFLAGS), - GATE(0, "hclk_usb_peri", "hclk_peri", 0, RK2928_CLKGATE_CON(4), 5, GFLAGS), - GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 13, GFLAGS), + GATE(0, "hclk_usb_peri", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(4), 5, GFLAGS), + GATE(HCLK_OTG0, "hclk_usbotg0", "hclk_peri", CLK_IGNORE_UNUSED, RK2928_CLKGATE_CON(5), 13, GFLAGS), GATE(HCLK_HSADC, "hclk_hsadc", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 5, GFLAGS), GATE(HCLK_PIDF, "hclk_pidfilter", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 6, GFLAGS), GATE(HCLK_SDMMC, "hclk_sdmmc", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 10, GFLAGS), @@ -592,7 +603,8 @@ static struct rockchip_clk_branch rk3066a_clk_branches[] __initdata = { GATE(0, "hclk_cif1", "hclk_cpu", 0, RK2928_CLKGATE_CON(6), 6, GFLAGS), GATE(0, "hclk_hdmi", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), - GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(5), 14, GFLAGS), + GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED, + RK2928_CLKGATE_CON(5), 14, GFLAGS), GATE(0, "aclk_cif1", "aclk_vio1", 0, RK2928_CLKGATE_CON(6), 7, GFLAGS), @@ -680,7 +692,8 @@ static struct rockchip_clk_branch rk3188_clk_branches[] __initdata = { GATE(0, "hclk_imem0", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 14, GFLAGS), GATE(0, "hclk_imem1", "hclk_cpu", 0, RK2928_CLKGATE_CON(4), 15, GFLAGS), - GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 3, GFLAGS), + GATE(HCLK_OTG1, "hclk_usbotg1", "hclk_peri", CLK_IGNORE_UNUSED, + RK2928_CLKGATE_CON(7), 3, GFLAGS), GATE(HCLK_HSIC, "hclk_hsic", "hclk_peri", 0, RK2928_CLKGATE_CON(7), 4, GFLAGS), GATE(PCLK_TIMER3, "pclk_timer3", "pclk_cpu", 0, RK2928_CLKGATE_CON(7), 9, GFLAGS), @@ -735,8 +748,8 @@ static void __init rk3188_common_clk_init(struct device_node *np) static void __init rk3066a_clk_init(struct device_node *np) { rk3188_common_clk_init(np); - rockchip_clk_register_plls(rk3188_pll_clks, - ARRAY_SIZE(rk3188_pll_clks), + rockchip_clk_register_plls(rk3066_pll_clks, + ARRAY_SIZE(rk3066_pll_clks), RK3066_GRF_SOC_STATUS); 
rockchip_clk_register_branches(rk3066a_clk_branches, ARRAY_SIZE(rk3066a_clk_branches)); diff --git a/drivers/clk/rockchip/clk-rk3288.c b/drivers/clk/rockchip/clk-rk3288.c index ac6be7c..11194b8 100644 --- a/drivers/clk/rockchip/clk-rk3288.c +++ b/drivers/clk/rockchip/clk-rk3288.c @@ -145,20 +145,20 @@ struct rockchip_pll_rate_table rk3288_pll_rates[] = { } static struct rockchip_cpuclk_rate_table rk3288_cpuclk_rates[] __initdata = { - RK3288_CPUCLK_RATE(1800000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1704000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1608000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1512000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1416000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1200000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE(1008000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 816000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 696000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 600000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 408000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 312000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 216000000, 2, 4, 2, 4, 4), - RK3288_CPUCLK_RATE( 126000000, 2, 4, 2, 4, 4), + RK3288_CPUCLK_RATE(1800000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1704000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1608000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1512000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1416000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1200000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE(1008000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 816000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 696000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 600000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 408000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 312000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 216000000, 1, 3, 1, 3, 3), + RK3288_CPUCLK_RATE( 126000000, 1, 3, 1, 3, 3), }; static const struct rockchip_cpuclk_reg_data rk3288_cpuclk_data = { diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index 3804785..5c06254 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -1505,7 +1505,6 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dw->regs = chip->regs; chip->dw = dw; - pm_runtime_enable(chip->dev); pm_runtime_get_sync(chip->dev); dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); @@ -1703,7 +1702,6 @@ int dw_dma_remove(struct dw_dma_chip *chip) } pm_runtime_put_sync_suspend(chip->dev); - pm_runtime_disable(chip->dev); return 0; } EXPORT_SYMBOL_GPL(dw_dma_remove); diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index a630161..32ea1ac 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -15,6 +15,7 @@ #include <linux/module.h> #include <linux/device.h> #include <linux/clk.h> +#include <linux/pm_runtime.h> #include <linux/platform_device.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> @@ -185,6 +186,8 @@ static int dw_probe(struct platform_device *pdev) if (err) return err; + pm_runtime_enable(&pdev->dev); + err = dw_dma_probe(chip, pdata); if (err) goto err_dw_dma_probe; @@ -205,6 +208,7 @@ static int dw_probe(struct platform_device *pdev) return 0; err_dw_dma_probe: + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(chip->clk); return err; } @@ -217,6 +221,7 @@ static int dw_remove(struct platform_device *pdev) of_dma_controller_free(pdev->dev.of_node); dw_dma_remove(chip); + pm_runtime_disable(&pdev->dev); clk_disable_unprepare(chip->clk); return 0; diff --git a/drivers/gpio/gpio-crystalcove.c b/drivers/gpio/gpio-crystalcove.c index 55d4803..3d9e08f 100644 --- 
a/drivers/gpio/gpio-crystalcove.c +++ b/drivers/gpio/gpio-crystalcove.c @@ -272,7 +272,7 @@ static irqreturn_t crystalcove_gpio_irq_handler(int irq, void *data) for (gpio = 0; gpio < CRYSTALCOVE_GPIO_NUM; gpio++) { if (pending & BIT(gpio)) { virq = irq_find_mapping(cg->chip.irqdomain, gpio); - generic_handle_irq(virq); + handle_nested_irq(virq); } } diff --git a/drivers/gpio/gpio-dln2.c b/drivers/gpio/gpio-dln2.c index 978b51e..ce3c155 100644 --- a/drivers/gpio/gpio-dln2.c +++ b/drivers/gpio/gpio-dln2.c @@ -47,13 +47,6 @@ #define DLN2_GPIO_MAX_PINS 32 -struct dln2_irq_work { - struct work_struct work; - struct dln2_gpio *dln2; - int pin; - int type; -}; - struct dln2_gpio { struct platform_device *pdev; struct gpio_chip gpio; @@ -64,10 +57,12 @@ struct dln2_gpio { */ DECLARE_BITMAP(output_enabled, DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_masked, DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_enabled, DLN2_GPIO_MAX_PINS); - DECLARE_BITMAP(irqs_pending, DLN2_GPIO_MAX_PINS); - struct dln2_irq_work *irq_work; + /* active IRQs - not synced to hardware */ + DECLARE_BITMAP(unmasked_irqs, DLN2_GPIO_MAX_PINS); + /* active IRQS - synced to hardware */ + DECLARE_BITMAP(enabled_irqs, DLN2_GPIO_MAX_PINS); + int irq_type[DLN2_GPIO_MAX_PINS]; + struct mutex irq_lock; }; struct dln2_gpio_pin { @@ -141,16 +136,16 @@ static int dln2_gpio_pin_get_out_val(struct dln2_gpio *dln2, unsigned int pin) return !!ret; } -static void dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, - unsigned int pin, int value) +static int dln2_gpio_pin_set_out_val(struct dln2_gpio *dln2, + unsigned int pin, int value) { struct dln2_gpio_pin_val req = { .pin = cpu_to_le16(pin), .value = value, }; - dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, - sizeof(req)); + return dln2_transfer_tx(dln2->pdev, DLN2_GPIO_PIN_SET_OUT_VAL, &req, + sizeof(req)); } #define DLN2_GPIO_DIRECTION_IN 0 @@ -267,6 +262,13 @@ static int dln2_gpio_direction_input(struct gpio_chip *chip, unsigned offset) static int dln2_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int value) { + struct dln2_gpio *dln2 = container_of(chip, struct dln2_gpio, gpio); + int ret; + + ret = dln2_gpio_pin_set_out_val(dln2, offset, value); + if (ret < 0) + return ret; + return dln2_gpio_set_direction(chip, offset, DLN2_GPIO_DIRECTION_OUT); } @@ -297,36 +299,13 @@ static int dln2_gpio_set_event_cfg(struct dln2_gpio *dln2, unsigned pin, &req, sizeof(req)); } -static void dln2_irq_work(struct work_struct *w) -{ - struct dln2_irq_work *iw = container_of(w, struct dln2_irq_work, work); - struct dln2_gpio *dln2 = iw->dln2; - u8 type = iw->type & DLN2_GPIO_EVENT_MASK; - - if (test_bit(iw->pin, dln2->irqs_enabled)) - dln2_gpio_set_event_cfg(dln2, iw->pin, type, 0); - else - dln2_gpio_set_event_cfg(dln2, iw->pin, DLN2_GPIO_EVENT_NONE, 0); -} - -static void dln2_irq_enable(struct irq_data *irqd) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); - int pin = irqd_to_hwirq(irqd); - - set_bit(pin, dln2->irqs_enabled); - schedule_work(&dln2->irq_work[pin].work); -} - -static void dln2_irq_disable(struct irq_data *irqd) +static void dln2_irq_unmask(struct irq_data *irqd) { struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); int pin = irqd_to_hwirq(irqd); - clear_bit(pin, dln2->irqs_enabled); - schedule_work(&dln2->irq_work[pin].work); + set_bit(pin, dln2->unmasked_irqs); } static void dln2_irq_mask(struct irq_data *irqd) 
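Aside: the gpio-dln2.c rework above, and the bus-unlock hunk that follows, replace the per-pin workqueue with two bitmaps — irq_mask()/irq_unmask() only record the wanted state in unmasked_irqs, and the sleeping USB transfer that actually programs the event configuration is deferred to irq_bus_sync_unlock(). Here is a compact userspace sketch of that "record now, sync on unlock" pattern; the bitmap layout and the hw_set_event() stand-in are assumptions for illustration, not the driver's interface.

/*
 * Illustrative sketch of the slow-bus IRQ chip pattern: mask/unmask only
 * flip bits in software; bus_sync_unlock() later compares wanted state
 * with what is already programmed and issues one transfer per changed pin.
 */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define NPINS 32

static uint32_t unmasked_irqs;  /* wanted state, set by mask/unmask */
static uint32_t enabled_irqs;   /* state already programmed in hardware */

/* stand-in for the transfer that configures a pin's event reporting */
static void hw_set_event(int pin, bool enable)
{
	printf("hw: pin %d event %s\n", pin, enable ? "on" : "off");
}

static void irq_unmask(int pin) { unmasked_irqs |=  (1u << pin); }
static void irq_mask(int pin)   { unmasked_irqs &= ~(1u << pin); }

/* what irq_bus_sync_unlock() does in the driver, minus the mutex */
static void bus_sync_unlock(void)
{
	for (int pin = 0; pin < NPINS; pin++) {
		bool want = unmasked_irqs & (1u << pin);
		bool have = enabled_irqs & (1u << pin);

		if (want == have)
			continue;
		hw_set_event(pin, want);
		if (want)
			enabled_irqs |= (1u << pin);
		else
			enabled_irqs &= ~(1u << pin);
	}
}

int main(void)
{
	irq_unmask(3);
	irq_unmask(7);
	irq_mask(3);         /* state changed again before the sync */
	bus_sync_unlock();   /* only pin 7 is programmed */
	return 0;
}

Deferring the hardware access to the bus-unlock callback is what lets the driver drop the work items and the irqs_pending bookkeeping removed in the hunks above.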
@@ -335,27 +314,7 @@ static void dln2_irq_mask(struct irq_data *irqd) struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); int pin = irqd_to_hwirq(irqd); - set_bit(pin, dln2->irqs_masked); -} - -static void dln2_irq_unmask(struct irq_data *irqd) -{ - struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); - struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); - struct device *dev = dln2->gpio.dev; - int pin = irqd_to_hwirq(irqd); - - if (test_and_clear_bit(pin, dln2->irqs_pending)) { - int irq; - - irq = irq_find_mapping(dln2->gpio.irqdomain, pin); - if (!irq) { - dev_err(dev, "pin %d not mapped to IRQ\n", pin); - return; - } - - generic_handle_irq(irq); - } + clear_bit(pin, dln2->unmasked_irqs); } static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) @@ -366,19 +325,19 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) switch (type) { case IRQ_TYPE_LEVEL_HIGH: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_HIGH; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_HIGH; break; case IRQ_TYPE_LEVEL_LOW: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_LVL_LOW; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_LVL_LOW; break; case IRQ_TYPE_EDGE_BOTH: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE; break; case IRQ_TYPE_EDGE_RISING: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_RISING; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_RISING; break; case IRQ_TYPE_EDGE_FALLING: - dln2->irq_work[pin].type = DLN2_GPIO_EVENT_CHANGE_FALLING; + dln2->irq_type[pin] = DLN2_GPIO_EVENT_CHANGE_FALLING; break; default: return -EINVAL; @@ -387,13 +346,50 @@ static int dln2_irq_set_type(struct irq_data *irqd, unsigned type) return 0; } +static void dln2_irq_bus_lock(struct irq_data *irqd) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); + + mutex_lock(&dln2->irq_lock); +} + +static void dln2_irq_bus_unlock(struct irq_data *irqd) +{ + struct gpio_chip *gc = irq_data_get_irq_chip_data(irqd); + struct dln2_gpio *dln2 = container_of(gc, struct dln2_gpio, gpio); + int pin = irqd_to_hwirq(irqd); + int enabled, unmasked; + unsigned type; + int ret; + + enabled = test_bit(pin, dln2->enabled_irqs); + unmasked = test_bit(pin, dln2->unmasked_irqs); + + if (enabled != unmasked) { + if (unmasked) { + type = dln2->irq_type[pin] & DLN2_GPIO_EVENT_MASK; + set_bit(pin, dln2->enabled_irqs); + } else { + type = DLN2_GPIO_EVENT_NONE; + clear_bit(pin, dln2->enabled_irqs); + } + + ret = dln2_gpio_set_event_cfg(dln2, pin, type, 0); + if (ret) + dev_err(dln2->gpio.dev, "failed to set event\n"); + } + + mutex_unlock(&dln2->irq_lock); +} + static struct irq_chip dln2_gpio_irqchip = { .name = "dln2-irq", - .irq_enable = dln2_irq_enable, - .irq_disable = dln2_irq_disable, .irq_mask = dln2_irq_mask, .irq_unmask = dln2_irq_unmask, .irq_set_type = dln2_irq_set_type, + .irq_bus_lock = dln2_irq_bus_lock, + .irq_bus_sync_unlock = dln2_irq_bus_unlock, }; static void dln2_gpio_event(struct platform_device *pdev, u16 echo, @@ -425,14 +421,7 @@ static void dln2_gpio_event(struct platform_device *pdev, u16 echo, return; } - if (!test_bit(pin, dln2->irqs_enabled)) - return; - if (test_bit(pin, dln2->irqs_masked)) { - set_bit(pin, dln2->irqs_pending); - return; - } - - switch (dln2->irq_work[pin].type) { + switch (dln2->irq_type[pin]) { case DLN2_GPIO_EVENT_CHANGE_RISING: if (event->value) generic_handle_irq(irq); @@ -451,7 +440,7 @@ static int dln2_gpio_probe(struct 
platform_device *pdev) struct dln2_gpio *dln2; struct device *dev = &pdev->dev; int pins; - int i, ret; + int ret; pins = dln2_gpio_get_pin_count(pdev); if (pins < 0) { @@ -467,15 +456,7 @@ static int dln2_gpio_probe(struct platform_device *pdev) if (!dln2) return -ENOMEM; - dln2->irq_work = devm_kcalloc(&pdev->dev, pins, - sizeof(struct dln2_irq_work), GFP_KERNEL); - if (!dln2->irq_work) - return -ENOMEM; - for (i = 0; i < pins; i++) { - INIT_WORK(&dln2->irq_work[i].work, dln2_irq_work); - dln2->irq_work[i].pin = i; - dln2->irq_work[i].dln2 = dln2; - } + mutex_init(&dln2->irq_lock); dln2->pdev = pdev; @@ -529,11 +510,8 @@ out: static int dln2_gpio_remove(struct platform_device *pdev) { struct dln2_gpio *dln2 = platform_get_drvdata(pdev); - int i; dln2_unregister_event_cb(pdev, DLN2_GPIO_CONDITION_MET_EV); - for (i = 0; i < dln2->gpio.ngpio; i++) - flush_work(&dln2->irq_work[i].work); gpiochip_remove(&dln2->gpio); return 0; diff --git a/drivers/gpio/gpio-grgpio.c b/drivers/gpio/gpio-grgpio.c index 09daaf2..3a5a710 100644 --- a/drivers/gpio/gpio-grgpio.c +++ b/drivers/gpio/gpio-grgpio.c @@ -441,7 +441,8 @@ static int grgpio_probe(struct platform_device *ofdev) err = gpiochip_add(gc); if (err) { dev_err(&ofdev->dev, "Could not add gpiochip\n"); - irq_domain_remove(priv->domain); + if (priv->domain) + irq_domain_remove(priv->domain); return err; } diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c index 604dbe6..08261f2 100644 --- a/drivers/gpio/gpiolib-of.c +++ b/drivers/gpio/gpiolib-of.c @@ -45,8 +45,14 @@ static int of_gpiochip_find_and_xlate(struct gpio_chip *gc, void *data) return false; ret = gc->of_xlate(gc, &gg_data->gpiospec, gg_data->flags); - if (ret < 0) - return false; + if (ret < 0) { + /* We've found the gpio chip, but the translation failed. + * Return true to stop looking and return the translation + * error via out_gpio + */ + gg_data->out_gpio = ERR_PTR(ret); + return true; + } gg_data->out_gpio = gpiochip_get_desc(gc, ret); return true; diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c index 2ac1800..f62aa11 100644 --- a/drivers/gpio/gpiolib-sysfs.c +++ b/drivers/gpio/gpiolib-sysfs.c @@ -128,7 +128,7 @@ static ssize_t gpio_value_store(struct device *dev, return status; } -static const DEVICE_ATTR(value, 0644, +static DEVICE_ATTR(value, 0644, gpio_value_show, gpio_value_store); static irqreturn_t gpio_sysfs_irq(int irq, void *priv) @@ -353,17 +353,46 @@ static ssize_t gpio_active_low_store(struct device *dev, return status ? 
: size; } -static const DEVICE_ATTR(active_low, 0644, +static DEVICE_ATTR(active_low, 0644, gpio_active_low_show, gpio_active_low_store); -static const struct attribute *gpio_attrs[] = { +static umode_t gpio_is_visible(struct kobject *kobj, struct attribute *attr, + int n) +{ + struct device *dev = container_of(kobj, struct device, kobj); + struct gpio_desc *desc = dev_get_drvdata(dev); + umode_t mode = attr->mode; + bool show_direction = test_bit(FLAG_SYSFS_DIR, &desc->flags); + + if (attr == &dev_attr_direction.attr) { + if (!show_direction) + mode = 0; + } else if (attr == &dev_attr_edge.attr) { + if (gpiod_to_irq(desc) < 0) + mode = 0; + if (!show_direction && test_bit(FLAG_IS_OUT, &desc->flags)) + mode = 0; + } + + return mode; +} + +static struct attribute *gpio_attrs[] = { + &dev_attr_direction.attr, + &dev_attr_edge.attr, &dev_attr_value.attr, &dev_attr_active_low.attr, NULL, }; -static const struct attribute_group gpio_attr_group = { - .attrs = (struct attribute **) gpio_attrs, +static const struct attribute_group gpio_group = { + .attrs = gpio_attrs, + .is_visible = gpio_is_visible, +}; + +static const struct attribute_group *gpio_groups[] = { + &gpio_group, + NULL }; /* @@ -400,16 +429,13 @@ static ssize_t chip_ngpio_show(struct device *dev, } static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL); -static const struct attribute *gpiochip_attrs[] = { +static struct attribute *gpiochip_attrs[] = { &dev_attr_base.attr, &dev_attr_label.attr, &dev_attr_ngpio.attr, NULL, }; - -static const struct attribute_group gpiochip_attr_group = { - .attrs = (struct attribute **) gpiochip_attrs, -}; +ATTRIBUTE_GROUPS(gpiochip); /* * /sys/class/gpio/export ... write-only @@ -556,45 +582,30 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change) goto fail_unlock; } - if (!desc->chip->direction_input || !desc->chip->direction_output) - direction_may_change = false; + if (desc->chip->direction_input && desc->chip->direction_output && + direction_may_change) { + set_bit(FLAG_SYSFS_DIR, &desc->flags); + } + spin_unlock_irqrestore(&gpio_lock, flags); offset = gpio_chip_hwgpio(desc); if (desc->chip->names && desc->chip->names[offset]) ioname = desc->chip->names[offset]; - dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0), - desc, ioname ? ioname : "gpio%u", - desc_to_gpio(desc)); + dev = device_create_with_groups(&gpio_class, desc->chip->dev, + MKDEV(0, 0), desc, gpio_groups, + ioname ? 
ioname : "gpio%u", + desc_to_gpio(desc)); if (IS_ERR(dev)) { status = PTR_ERR(dev); goto fail_unlock; } - status = sysfs_create_group(&dev->kobj, &gpio_attr_group); - if (status) - goto fail_unregister_device; - - if (direction_may_change) { - status = device_create_file(dev, &dev_attr_direction); - if (status) - goto fail_unregister_device; - } - - if (gpiod_to_irq(desc) >= 0 && (direction_may_change || - !test_bit(FLAG_IS_OUT, &desc->flags))) { - status = device_create_file(dev, &dev_attr_edge); - if (status) - goto fail_unregister_device; - } - set_bit(FLAG_EXPORT, &desc->flags); mutex_unlock(&sysfs_lock); return 0; -fail_unregister_device: - device_unregister(dev); fail_unlock: mutex_unlock(&sysfs_lock); gpiod_dbg(desc, "%s: status %d\n", __func__, status); @@ -718,6 +729,7 @@ void gpiod_unexport(struct gpio_desc *desc) dev = class_find_device(&gpio_class, NULL, desc, match_export); if (dev) { gpio_setup_irq(desc, dev, 0); + clear_bit(FLAG_SYSFS_DIR, &desc->flags); clear_bit(FLAG_EXPORT, &desc->flags); } else status = -ENODEV; @@ -750,13 +762,13 @@ int gpiochip_export(struct gpio_chip *chip) /* use chip->base for the ID; it's already known to be unique */ mutex_lock(&sysfs_lock); - dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip, - "gpiochip%d", chip->base); - if (!IS_ERR(dev)) { - status = sysfs_create_group(&dev->kobj, - &gpiochip_attr_group); - } else + dev = device_create_with_groups(&gpio_class, chip->dev, MKDEV(0, 0), + chip, gpiochip_groups, + "gpiochip%d", chip->base); + if (IS_ERR(dev)) status = PTR_ERR(dev); + else + status = 0; chip->exported = (status == 0); mutex_unlock(&sysfs_lock); diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 487afe6..568aa2b 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c @@ -248,29 +248,30 @@ int gpiochip_add(struct gpio_chip *chip) base = gpiochip_find_base(chip->ngpio); if (base < 0) { status = base; - goto unlock; + spin_unlock_irqrestore(&gpio_lock, flags); + goto err_free_descs; } chip->base = base; } status = gpiochip_add_to_list(chip); + if (status) { + spin_unlock_irqrestore(&gpio_lock, flags); + goto err_free_descs; + } - if (status == 0) { - for (id = 0; id < chip->ngpio; id++) { - struct gpio_desc *desc = &descs[id]; - desc->chip = chip; - - /* REVISIT: most hardware initializes GPIOs as - * inputs (often with pullups enabled) so power - * usage is minimized. Linux code should set the - * gpio direction first thing; but until it does, - * and in case chip->get_direction is not set, - * we may expose the wrong direction in sysfs. - */ - desc->flags = !chip->direction_input - ? (1 << FLAG_IS_OUT) - : 0; - } + for (id = 0; id < chip->ngpio; id++) { + struct gpio_desc *desc = &descs[id]; + + desc->chip = chip; + + /* REVISIT: most hardware initializes GPIOs as inputs (often + * with pullups enabled) so power usage is minimized. Linux + * code should set the gpio direction first thing; but until + * it does, and in case chip->get_direction is not set, we may + * expose the wrong direction in sysfs. + */ + desc->flags = !chip->direction_input ? 
(1 << FLAG_IS_OUT) : 0; } chip->desc = descs; @@ -284,12 +285,9 @@ int gpiochip_add(struct gpio_chip *chip) of_gpiochip_add(chip); acpi_gpiochip_add(chip); - if (status) - goto fail; - status = gpiochip_export(chip); if (status) - goto fail; + goto err_remove_chip; pr_debug("%s: registered GPIOs %d to %d on device: %s\n", __func__, chip->base, chip->base + chip->ngpio - 1, @@ -297,11 +295,15 @@ int gpiochip_add(struct gpio_chip *chip) return 0; -unlock: +err_remove_chip: + acpi_gpiochip_remove(chip); + of_gpiochip_remove(chip); + spin_lock_irqsave(&gpio_lock, flags); + list_del(&chip->list); spin_unlock_irqrestore(&gpio_lock, flags); -fail: - kfree(descs); chip->desc = NULL; +err_free_descs: + kfree(descs); /* failures here can mean systems won't boot... */ pr_err("%s: GPIOs %d..%d (%s) failed to register\n", __func__, @@ -325,14 +327,15 @@ void gpiochip_remove(struct gpio_chip *chip) unsigned long flags; unsigned id; - acpi_gpiochip_remove(chip); - - spin_lock_irqsave(&gpio_lock, flags); + gpiochip_unexport(chip); gpiochip_irqchip_remove(chip); + + acpi_gpiochip_remove(chip); gpiochip_remove_pin_ranges(chip); of_gpiochip_remove(chip); + spin_lock_irqsave(&gpio_lock, flags); for (id = 0; id < chip->ngpio; id++) { if (test_bit(FLAG_REQUESTED, &chip->desc[id].flags)) dev_crit(chip->dev, "REMOVING GPIOCHIP WITH GPIOS STILL REQUESTED\n"); @@ -342,7 +345,6 @@ void gpiochip_remove(struct gpio_chip *chip) list_del(&chip->list); spin_unlock_irqrestore(&gpio_lock, flags); - gpiochip_unexport(chip); kfree(chip->desc); chip->desc = NULL; diff --git a/drivers/gpio/gpiolib.h b/drivers/gpio/gpiolib.h index e3a5211..550a5ea 100644 --- a/drivers/gpio/gpiolib.h +++ b/drivers/gpio/gpiolib.h @@ -77,6 +77,7 @@ struct gpio_desc { #define FLAG_OPEN_DRAIN 7 /* Gpio is open drain type */ #define FLAG_OPEN_SOURCE 8 /* Gpio is open source type */ #define FLAG_USED_AS_IRQ 9 /* GPIO is connected to an IRQ */ +#define FLAG_SYSFS_DIR 10 /* show sysfs direction attribute */ #define ID_SHIFT 16 /* add new flags before this one */ diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile index 9d92ba3..cf0eed8 100644 --- a/drivers/gpu/drm/Makefile +++ b/drivers/gpu/drm/Makefile @@ -37,6 +37,7 @@ obj-$(CONFIG_DRM_MIPI_DSI) += drm_mipi_dsi.o obj-$(CONFIG_DRM_TTM) += ttm/ obj-$(CONFIG_DRM_TDFX) += tdfx/ obj-$(CONFIG_DRM_R128) += r128/ +obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ obj-$(CONFIG_DRM_RADEON)+= radeon/ obj-$(CONFIG_DRM_MGA) += mga/ obj-$(CONFIG_DRM_I810) += i810/ @@ -68,4 +69,3 @@ obj-$(CONFIG_DRM_IMX) += imx/ obj-y += i2c/ obj-y += panel/ obj-y += bridge/ -obj-$(CONFIG_HSA_AMD) += amd/amdkfd/ diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index cd09c05..0f49601 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -12,6 +12,5 @@ amdkfd-y := kfd_module.o kfd_device.o kfd_chardev.o kfd_topology.o \ kfd_kernel_queue_vi.o kfd_packet_manager.o \ kfd_process_queue_manager.o kfd_device_queue_manager.o \ kfd_device_queue_manager_cik.o kfd_device_queue_manager_vi.o \ - kfd_interrupt.o obj-$(CONFIG_HSA_AMD) += amdkfd.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 38b6150..732087dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -31,7 +31,6 @@ #include <uapi/linux/kfd_ioctl.h> #include <linux/time.h> #include <linux/mm.h> -#include <linux/uaccess.h> #include <uapi/asm-generic/mman-common.h> #include <asm/processor.h> #include 
"kfd_priv.h" @@ -127,17 +126,14 @@ static int kfd_open(struct inode *inode, struct file *filep) return 0; } -static long kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, - void __user *arg) +static int kfd_ioctl_get_version(struct file *filep, struct kfd_process *p, + void *data) { - struct kfd_ioctl_get_version_args args; + struct kfd_ioctl_get_version_args *args = data; int err = 0; - args.major_version = KFD_IOCTL_MAJOR_VERSION; - args.minor_version = KFD_IOCTL_MINOR_VERSION; - - if (copy_to_user(arg, &args, sizeof(args))) - err = -EFAULT; + args->major_version = KFD_IOCTL_MAJOR_VERSION; + args->minor_version = KFD_IOCTL_MINOR_VERSION; return err; } @@ -249,10 +245,10 @@ static int set_queue_properties_from_user(struct queue_properties *q_properties, return 0; } -static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, - void __user *arg) +static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, + void *data) { - struct kfd_ioctl_create_queue_args args; + struct kfd_ioctl_create_queue_args *args = data; struct kfd_dev *dev; int err = 0; unsigned int queue_id; @@ -261,19 +257,16 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, memset(&q_properties, 0, sizeof(struct queue_properties)); - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - pr_debug("kfd: creating queue ioctl\n"); - err = set_queue_properties_from_user(&q_properties, &args); + err = set_queue_properties_from_user(&q_properties, args); if (err) return err; - pr_debug("kfd: looking for gpu id 0x%x\n", args.gpu_id); - dev = kfd_device_by_id(args.gpu_id); + pr_debug("kfd: looking for gpu id 0x%x\n", args->gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) { - pr_debug("kfd: gpu id 0x%x was not found\n", args.gpu_id); + pr_debug("kfd: gpu id 0x%x was not found\n", args->gpu_id); return -EINVAL; } @@ -281,7 +274,7 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { - err = PTR_ERR(pdd); + err = -ESRCH; goto err_bind_process; } @@ -294,33 +287,26 @@ static long kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, if (err != 0) goto err_create_queue; - args.queue_id = queue_id; + args->queue_id = queue_id; /* Return gpu_id as doorbell offset for mmap usage */ - args.doorbell_offset = args.gpu_id << PAGE_SHIFT; - - if (copy_to_user(arg, &args, sizeof(args))) { - err = -EFAULT; - goto err_copy_args_out; - } + args->doorbell_offset = args->gpu_id << PAGE_SHIFT; mutex_unlock(&p->mutex); - pr_debug("kfd: queue id %d was created successfully\n", args.queue_id); + pr_debug("kfd: queue id %d was created successfully\n", args->queue_id); pr_debug("ring buffer address == 0x%016llX\n", - args.ring_base_address); + args->ring_base_address); pr_debug("read ptr address == 0x%016llX\n", - args.read_pointer_address); + args->read_pointer_address); pr_debug("write ptr address == 0x%016llX\n", - args.write_pointer_address); + args->write_pointer_address); return 0; -err_copy_args_out: - pqm_destroy_queue(&p->pqm, queue_id); err_create_queue: err_bind_process: mutex_unlock(&p->mutex); @@ -328,99 +314,90 @@ err_bind_process: } static int kfd_ioctl_destroy_queue(struct file *filp, struct kfd_process *p, - void __user *arg) + void *data) { int retval; - struct kfd_ioctl_destroy_queue_args args; - - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; + struct kfd_ioctl_destroy_queue_args *args = data; pr_debug("kfd: 
destroying queue id %d for PASID %d\n", - args.queue_id, + args->queue_id, p->pasid); mutex_lock(&p->mutex); - retval = pqm_destroy_queue(&p->pqm, args.queue_id); + retval = pqm_destroy_queue(&p->pqm, args->queue_id); mutex_unlock(&p->mutex); return retval; } static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p, - void __user *arg) + void *data) { int retval; - struct kfd_ioctl_update_queue_args args; + struct kfd_ioctl_update_queue_args *args = data; struct queue_properties properties; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - if (args.queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { + if (args->queue_percentage > KFD_MAX_QUEUE_PERCENTAGE) { pr_err("kfd: queue percentage must be between 0 to KFD_MAX_QUEUE_PERCENTAGE\n"); return -EINVAL; } - if (args.queue_priority > KFD_MAX_QUEUE_PRIORITY) { + if (args->queue_priority > KFD_MAX_QUEUE_PRIORITY) { pr_err("kfd: queue priority must be between 0 to KFD_MAX_QUEUE_PRIORITY\n"); return -EINVAL; } - if ((args.ring_base_address) && + if ((args->ring_base_address) && (!access_ok(VERIFY_WRITE, - (const void __user *) args.ring_base_address, + (const void __user *) args->ring_base_address, sizeof(uint64_t)))) { pr_err("kfd: can't access ring base address\n"); return -EFAULT; } - if (!is_power_of_2(args.ring_size) && (args.ring_size != 0)) { + if (!is_power_of_2(args->ring_size) && (args->ring_size != 0)) { pr_err("kfd: ring size must be a power of 2 or 0\n"); return -EINVAL; } - properties.queue_address = args.ring_base_address; - properties.queue_size = args.ring_size; - properties.queue_percent = args.queue_percentage; - properties.priority = args.queue_priority; + properties.queue_address = args->ring_base_address; + properties.queue_size = args->ring_size; + properties.queue_percent = args->queue_percentage; + properties.priority = args->queue_priority; pr_debug("kfd: updating queue id %d for PASID %d\n", - args.queue_id, p->pasid); + args->queue_id, p->pasid); mutex_lock(&p->mutex); - retval = pqm_update_queue(&p->pqm, args.queue_id, &properties); + retval = pqm_update_queue(&p->pqm, args->queue_id, &properties); mutex_unlock(&p->mutex); return retval; } -static long kfd_ioctl_set_memory_policy(struct file *filep, - struct kfd_process *p, void __user *arg) +static int kfd_ioctl_set_memory_policy(struct file *filep, + struct kfd_process *p, void *data) { - struct kfd_ioctl_set_memory_policy_args args; + struct kfd_ioctl_set_memory_policy_args *args = data; struct kfd_dev *dev; int err = 0; struct kfd_process_device *pdd; enum cache_policy default_policy, alternate_policy; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - if (args.default_policy != KFD_IOC_CACHE_POLICY_COHERENT - && args.default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + if (args->default_policy != KFD_IOC_CACHE_POLICY_COHERENT + && args->default_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } - if (args.alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT - && args.alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { + if (args->alternate_policy != KFD_IOC_CACHE_POLICY_COHERENT + && args->alternate_policy != KFD_IOC_CACHE_POLICY_NONCOHERENT) { return -EINVAL; } - dev = kfd_device_by_id(args.gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; @@ -428,23 +405,23 @@ static long kfd_ioctl_set_memory_policy(struct file *filep, pdd = kfd_bind_process_to_device(dev, p); if (IS_ERR(pdd)) { - err = PTR_ERR(pdd); + err = -ESRCH; goto out; } - default_policy = 
(args.default_policy == KFD_IOC_CACHE_POLICY_COHERENT) + default_policy = (args->default_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; alternate_policy = - (args.alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) + (args->alternate_policy == KFD_IOC_CACHE_POLICY_COHERENT) ? cache_policy_coherent : cache_policy_noncoherent; if (!dev->dqm->ops.set_cache_memory_policy(dev->dqm, &pdd->qpd, default_policy, alternate_policy, - (void __user *)args.alternate_aperture_base, - args.alternate_aperture_size)) + (void __user *)args->alternate_aperture_base, + args->alternate_aperture_size)) err = -EINVAL; out: @@ -453,53 +430,44 @@ out: return err; } -static long kfd_ioctl_get_clock_counters(struct file *filep, - struct kfd_process *p, void __user *arg) +static int kfd_ioctl_get_clock_counters(struct file *filep, + struct kfd_process *p, void *data) { - struct kfd_ioctl_get_clock_counters_args args; + struct kfd_ioctl_get_clock_counters_args *args = data; struct kfd_dev *dev; struct timespec time; - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - dev = kfd_device_by_id(args.gpu_id); + dev = kfd_device_by_id(args->gpu_id); if (dev == NULL) return -EINVAL; /* Reading GPU clock counter from KGD */ - args.gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); + args->gpu_clock_counter = kfd2kgd->get_gpu_clock_counter(dev->kgd); /* No access to rdtsc. Using raw monotonic time */ getrawmonotonic(&time); - args.cpu_clock_counter = (uint64_t)timespec_to_ns(&time); + args->cpu_clock_counter = (uint64_t)timespec_to_ns(&time); get_monotonic_boottime(&time); - args.system_clock_counter = (uint64_t)timespec_to_ns(&time); + args->system_clock_counter = (uint64_t)timespec_to_ns(&time); /* Since the counter is in nano-seconds we use 1GHz frequency */ - args.system_clock_freq = 1000000000; - - if (copy_to_user(arg, &args, sizeof(args))) - return -EFAULT; + args->system_clock_freq = 1000000000; return 0; } static int kfd_ioctl_get_process_apertures(struct file *filp, - struct kfd_process *p, void __user *arg) + struct kfd_process *p, void *data) { - struct kfd_ioctl_get_process_apertures_args args; + struct kfd_ioctl_get_process_apertures_args *args = data; struct kfd_process_device_apertures *pAperture; struct kfd_process_device *pdd; dev_dbg(kfd_device, "get apertures for PASID %d", p->pasid); - if (copy_from_user(&args, arg, sizeof(args))) - return -EFAULT; - - args.num_of_nodes = 0; + args->num_of_nodes = 0; mutex_lock(&p->mutex); @@ -508,7 +476,8 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, /* Run over all pdd of the process */ pdd = kfd_get_first_process_device_data(p); do { - pAperture = &args.process_apertures[args.num_of_nodes]; + pAperture = + &args->process_apertures[args->num_of_nodes]; pAperture->gpu_id = pdd->dev->id; pAperture->lds_base = pdd->lds_base; pAperture->lds_limit = pdd->lds_limit; @@ -518,7 +487,7 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, pAperture->scratch_limit = pdd->scratch_limit; dev_dbg(kfd_device, - "node id %u\n", args.num_of_nodes); + "node id %u\n", args->num_of_nodes); dev_dbg(kfd_device, "gpu id %u\n", pdd->dev->id); dev_dbg(kfd_device, @@ -534,80 +503,131 @@ static int kfd_ioctl_get_process_apertures(struct file *filp, dev_dbg(kfd_device, "scratch_limit %llX\n", pdd->scratch_limit); - args.num_of_nodes++; + args->num_of_nodes++; } while ((pdd = kfd_get_next_process_device_data(p, pdd)) != NULL && - (args.num_of_nodes < NUM_OF_SUPPORTED_GPUS)); + (args->num_of_nodes < 
NUM_OF_SUPPORTED_GPUS)); } mutex_unlock(&p->mutex); - if (copy_to_user(arg, &args, sizeof(args))) - return -EFAULT; - return 0; } +#define AMDKFD_IOCTL_DEF(ioctl, _func, _flags) \ + [_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func, .flags = _flags, .cmd_drv = 0, .name = #ioctl} + +/** Ioctl table */ +static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = { + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_VERSION, + kfd_ioctl_get_version, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_CREATE_QUEUE, + kfd_ioctl_create_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_DESTROY_QUEUE, + kfd_ioctl_destroy_queue, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_MEMORY_POLICY, + kfd_ioctl_set_memory_policy, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_CLOCK_COUNTERS, + kfd_ioctl_get_clock_counters, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_GET_PROCESS_APERTURES, + kfd_ioctl_get_process_apertures, 0), + + AMDKFD_IOCTL_DEF(AMDKFD_IOC_UPDATE_QUEUE, + kfd_ioctl_update_queue, 0), +}; + +#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls) + static long kfd_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) { struct kfd_process *process; - long err = -EINVAL; + amdkfd_ioctl_t *func; + const struct amdkfd_ioctl_desc *ioctl = NULL; + unsigned int nr = _IOC_NR(cmd); + char stack_kdata[128]; + char *kdata = NULL; + unsigned int usize, asize; + int retcode = -EINVAL; - dev_dbg(kfd_device, - "ioctl cmd 0x%x (#%d), arg 0x%lx\n", - cmd, _IOC_NR(cmd), arg); + if (nr >= AMDKFD_CORE_IOCTL_COUNT) + goto err_i1; + + if ((nr >= AMDKFD_COMMAND_START) && (nr < AMDKFD_COMMAND_END)) { + u32 amdkfd_size; + + ioctl = &amdkfd_ioctls[nr]; + + amdkfd_size = _IOC_SIZE(ioctl->cmd); + usize = asize = _IOC_SIZE(cmd); + if (amdkfd_size > asize) + asize = amdkfd_size; + + cmd = ioctl->cmd; + } else + goto err_i1; + + dev_dbg(kfd_device, "ioctl cmd 0x%x (#%d), arg 0x%lx\n", cmd, nr, arg); process = kfd_get_process(current); - if (IS_ERR(process)) - return PTR_ERR(process); + if (IS_ERR(process)) { + dev_dbg(kfd_device, "no process\n"); + goto err_i1; + } - switch (cmd) { - case KFD_IOC_GET_VERSION: - err = kfd_ioctl_get_version(filep, process, (void __user *)arg); - break; - case KFD_IOC_CREATE_QUEUE: - err = kfd_ioctl_create_queue(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_DESTROY_QUEUE: - err = kfd_ioctl_destroy_queue(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_SET_MEMORY_POLICY: - err = kfd_ioctl_set_memory_policy(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_GET_CLOCK_COUNTERS: - err = kfd_ioctl_get_clock_counters(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_GET_PROCESS_APERTURES: - err = kfd_ioctl_get_process_apertures(filep, process, - (void __user *)arg); - break; - - case KFD_IOC_UPDATE_QUEUE: - err = kfd_ioctl_update_queue(filep, process, - (void __user *)arg); - break; - - default: - dev_err(kfd_device, - "unknown ioctl cmd 0x%x, arg 0x%lx)\n", - cmd, arg); - err = -EINVAL; - break; + /* Do not trust userspace, use our own definition */ + func = ioctl->func; + + if (unlikely(!func)) { + dev_dbg(kfd_device, "no function\n"); + retcode = -EINVAL; + goto err_i1; } - if (err < 0) - dev_err(kfd_device, - "ioctl error %ld for ioctl cmd 0x%x (#%d)\n", - err, cmd, _IOC_NR(cmd)); + if (cmd & (IOC_IN | IOC_OUT)) { + if (asize <= sizeof(stack_kdata)) { + kdata = stack_kdata; + } else { + kdata = kmalloc(asize, GFP_KERNEL); + if (!kdata) { + retcode = -ENOMEM; + goto err_i1; + } + } + if (asize > usize) + memset(kdata + usize, 0, asize - usize); + } - return err; + if (cmd & IOC_IN) { + if 
(copy_from_user(kdata, (void __user *)arg, usize) != 0) { + retcode = -EFAULT; + goto err_i1; + } + } else if (cmd & IOC_OUT) { + memset(kdata, 0, usize); + } + + retcode = func(filep, process, kdata); + + if (cmd & IOC_OUT) + if (copy_to_user((void __user *)arg, kdata, usize) != 0) + retcode = -EFAULT; + +err_i1: + if (!ioctl) + dev_dbg(kfd_device, "invalid ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n", + task_pid_nr(current), cmd, nr); + + if (kdata != stack_kdata) + kfree(kdata); + + if (retcode) + dev_dbg(kfd_device, "ret = %d\n", retcode); + + return retcode; } static int kfd_mmap(struct file *filp, struct vm_area_struct *vma) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index a770ec6..1ba8332 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -229,13 +229,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto kfd_topology_add_device_error; } - if (kfd_interrupt_init(kfd)) { - dev_err(kfd_device, - "Error initializing interrupts for device (%x:%x)\n", - kfd->pdev->vendor, kfd->pdev->device); - goto kfd_interrupt_error; - } - if (!device_iommu_pasid_init(kfd)) { dev_err(kfd_device, "Error initializing iommuv2 for device (%x:%x)\n", @@ -274,8 +267,6 @@ dqm_start_error: device_queue_manager_error: amd_iommu_free_device(kfd->pdev); device_iommu_pasid_error: - kfd_interrupt_exit(kfd); -kfd_interrupt_error: kfd_topology_remove_device(kfd); kfd_topology_add_device_error: kfd_gtt_sa_fini(kfd); @@ -293,7 +284,6 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) if (kfd->init_complete) { device_queue_manager_uninit(kfd->dqm); amd_iommu_free_device(kfd->pdev); - kfd_interrupt_exit(kfd); kfd_topology_remove_device(kfd); kfd_gtt_sa_fini(kfd); kfd2kgd->free_gtt_mem(kfd->kgd, kfd->gtt_mem); @@ -337,15 +327,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd) /* This is called directly from KGD at ISR. 
*/ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { - if (kfd->init_complete) { - spin_lock(&kfd->interrupt_lock); - - if (kfd->interrupts_active - && enqueue_ih_ring_entry(kfd, ih_ring_entry)) - schedule_work(&kfd->interrupt_work); - - spin_unlock(&kfd->interrupt_lock); - } + /* Process interrupts / schedule work as necessary */ } static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index e804e87..a5c69e9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -138,6 +138,9 @@ static void deallocate_vmid(struct device_queue_manager *dqm, { int bit = qpd->vmid - KFD_VMID_START_OFFSET; + /* Release the vmid mapping */ + set_pasid_vmid_mapping(dqm, 0, qpd->vmid); + set_bit(bit, (unsigned long *)&dqm->vmid_bitmap); qpd->vmid = 0; q->properties.vmid = 0; @@ -253,6 +256,18 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm, return retval; } + pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n", + q->pipe, + q->queue); + + retval = mqd->load_mqd(mqd, q->mqd, q->pipe, + q->queue, (uint32_t __user *) q->properties.write_ptr); + if (retval != 0) { + deallocate_hqd(dqm, q); + mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj); + return retval; + } + return 0; } @@ -311,6 +326,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) { int retval; struct mqd_manager *mqd; + bool prev_active = false; BUG_ON(!dqm || !q || !q->mqd); @@ -321,10 +337,18 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q) return -ENOMEM; } - retval = mqd->update_mqd(mqd, q->mqd, &q->properties); if (q->properties.is_active == true) + prev_active = true; + + /* + * + * check active state vs. the previous state + * and modify counter accordingly + */ + retval = mqd->update_mqd(mqd, q->mqd, &q->properties); + if ((q->properties.is_active == true) && (prev_active == false)) dqm->queue_count++; - else + else if ((q->properties.is_active == false) && (prev_active == true)) dqm->queue_count--; if (sched_policy != KFD_SCHED_POLICY_NO_HWS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c deleted file mode 100644 index 5b99909..0000000 --- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * KFD Interrupts. - * - * AMD GPUs deliver interrupts by pushing an interrupt description onto the - * interrupt ring and then sending an interrupt. KGD receives the interrupt - * in ISR and sends us a pointer to each new entry on the interrupt ring. - * - * We generally can't process interrupt-signaled events from ISR, so we call - * out to each interrupt client module (currently only the scheduler) to ask if - * each interrupt is interesting. If they return true, then it requires further - * processing so we copy it to an internal interrupt ring and call each - * interrupt client again from a work-queue. - * - * There's no acknowledgment for the interrupts we use. The hardware simply - * queues a new interrupt each time without waiting. - * - * The fixed-size internal queue means that it's possible for us to lose - * interrupts because we have no back-pressure to the hardware. - */ - -#include <linux/slab.h> -#include <linux/device.h> -#include "kfd_priv.h" - -#define KFD_INTERRUPT_RING_SIZE 256 - -static void interrupt_wq(struct work_struct *); - -int kfd_interrupt_init(struct kfd_dev *kfd) -{ - void *interrupt_ring = kmalloc_array(KFD_INTERRUPT_RING_SIZE, - kfd->device_info->ih_ring_entry_size, - GFP_KERNEL); - if (!interrupt_ring) - return -ENOMEM; - - kfd->interrupt_ring = interrupt_ring; - kfd->interrupt_ring_size = - KFD_INTERRUPT_RING_SIZE * kfd->device_info->ih_ring_entry_size; - atomic_set(&kfd->interrupt_ring_wptr, 0); - atomic_set(&kfd->interrupt_ring_rptr, 0); - - spin_lock_init(&kfd->interrupt_lock); - - INIT_WORK(&kfd->interrupt_work, interrupt_wq); - - kfd->interrupts_active = true; - - /* - * After this function returns, the interrupt will be enabled. This - * barrier ensures that the interrupt running on a different processor - * sees all the above writes. - */ - smp_wmb(); - - return 0; -} - -void kfd_interrupt_exit(struct kfd_dev *kfd) -{ - /* - * Stop the interrupt handler from writing to the ring and scheduling - * workqueue items. The spinlock ensures that any interrupt running - * after we have unlocked sees interrupts_active = false. - */ - unsigned long flags; - - spin_lock_irqsave(&kfd->interrupt_lock, flags); - kfd->interrupts_active = false; - spin_unlock_irqrestore(&kfd->interrupt_lock, flags); - - /* - * Flush_scheduled_work ensures that there are no outstanding - * work-queue items that will access interrupt_ring. New work items - * can't be created because we stopped interrupt handling above. - */ - flush_scheduled_work(); - - kfree(kfd->interrupt_ring); -} - -/* - * This assumes that it can't be called concurrently with itself - * but only with dequeue_ih_ring_entry. - */ -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry) -{ - unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr); - unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr); - - if ((rptr - wptr) % kfd->interrupt_ring_size == - kfd->device_info->ih_ring_entry_size) { - /* This is very bad, the system is likely to hang. 
*/ - dev_err_ratelimited(kfd_chardev(), - "Interrupt ring overflow, dropping interrupt.\n"); - return false; - } - - memcpy(kfd->interrupt_ring + wptr, ih_ring_entry, - kfd->device_info->ih_ring_entry_size); - - wptr = (wptr + kfd->device_info->ih_ring_entry_size) % - kfd->interrupt_ring_size; - smp_wmb(); /* Ensure memcpy'd data is visible before wptr update. */ - atomic_set(&kfd->interrupt_ring_wptr, wptr); - - return true; -} - -/* - * This assumes that it can't be called concurrently with itself - * but only with enqueue_ih_ring_entry. - */ -static bool dequeue_ih_ring_entry(struct kfd_dev *kfd, void *ih_ring_entry) -{ - /* - * Assume that wait queues have an implicit barrier, i.e. anything that - * happened in the ISR before it queued work is visible. - */ - - unsigned int wptr = atomic_read(&kfd->interrupt_ring_wptr); - unsigned int rptr = atomic_read(&kfd->interrupt_ring_rptr); - - if (rptr == wptr) - return false; - - memcpy(ih_ring_entry, kfd->interrupt_ring + rptr, - kfd->device_info->ih_ring_entry_size); - - rptr = (rptr + kfd->device_info->ih_ring_entry_size) % - kfd->interrupt_ring_size; - - /* - * Ensure the rptr write update is not visible until - * memcpy has finished reading. - */ - smp_mb(); - atomic_set(&kfd->interrupt_ring_rptr, rptr); - - return true; -} - -static void interrupt_wq(struct work_struct *work) -{ - struct kfd_dev *dev = container_of(work, struct kfd_dev, - interrupt_work); - - uint32_t ih_ring_entry[DIV_ROUND_UP( - dev->device_info->ih_ring_entry_size, - sizeof(uint32_t))]; - - while (dequeue_ih_ring_entry(dev, ih_ring_entry)) - ; -} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 4e582de..a318743 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -264,7 +264,7 @@ static bool is_occupied(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { - return kfd2kgd->hqd_is_occupies(mm->dev->kgd, queue_address, + return kfd2kgd->hqd_is_occupied(mm->dev->kgd, queue_address, pipe_id, queue_id); } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c index 71699ad..4c25ef5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pasid.c @@ -32,7 +32,7 @@ int kfd_pasid_init(void) { pasid_limit = max_num_of_processes; - pasid_bitmap = kzalloc(BITS_TO_LONGS(pasid_limit), GFP_KERNEL); + pasid_bitmap = kcalloc(BITS_TO_LONGS(pasid_limit), sizeof(long), GFP_KERNEL); if (!pasid_bitmap) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index bfcf45f..1b35a9c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -157,22 +157,10 @@ struct kfd_dev { unsigned int gtt_sa_chunk_size; unsigned int gtt_sa_num_of_chunks; - void *interrupt_ring; - size_t interrupt_ring_size; - atomic_t interrupt_ring_rptr; - atomic_t interrupt_ring_wptr; - struct work_struct interrupt_work; - spinlock_t interrupt_lock; - /* QCM Device instance */ struct device_queue_manager *dqm; bool init_complete; - /* - * Interrupts of interest to KFD are copied - * from the HW ring into a SW ring. - */ - bool interrupts_active; }; /* KGD2KFD callbacks */ @@ -490,6 +478,24 @@ struct kfd_process { bool is_32bit_user_mode; }; +/** + * Ioctl function type. + * + * \param filep pointer to file structure. + * \param p amdkfd process pointer. + * \param data pointer to arg that was copied from user. 
+ */ +typedef int amdkfd_ioctl_t(struct file *filep, struct kfd_process *p, + void *data); + +struct amdkfd_ioctl_desc { + unsigned int cmd; + int flags; + amdkfd_ioctl_t *func; + unsigned int cmd_drv; + const char *name; +}; + void kfd_process_create_wq(void); void kfd_process_destroy_wq(void); struct kfd_process *kfd_create_process(const struct task_struct *); @@ -548,10 +554,7 @@ struct kfd_dev *kfd_device_by_pci_dev(const struct pci_dev *pdev); struct kfd_dev *kfd_topology_enum_kfd_devices(uint8_t idx); /* Interrupts */ -int kfd_interrupt_init(struct kfd_dev *dev); -void kfd_interrupt_exit(struct kfd_dev *dev); void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry); -bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry); /* Power Management */ void kgd2kfd_suspend(struct kfd_dev *kfd); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 4886dde..4983993 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -934,7 +934,7 @@ static int kfd_build_sysfs_node_tree(void) uint32_t i = 0; list_for_each_entry(dev, &topology_device_list, list) { - ret = kfd_build_sysfs_node_entry(dev, 0); + ret = kfd_build_sysfs_node_entry(dev, i); if (ret < 0) return ret; i++; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 8834997..239bc16 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -180,7 +180,7 @@ struct kfd2kgd_calls { int (*hqd_sdma_load)(struct kgd_dev *kgd, void *mqd); - bool (*hqd_is_occupies)(struct kgd_dev *kgd, uint64_t queue_address, + bool (*hqd_is_occupied)(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); int (*hqd_destroy)(struct kgd_dev *kgd, uint32_t reset_type, diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 52ce26d..cf775a4 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -741,7 +741,9 @@ int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info) int i, j, rc = 0; int start; - drm_modeset_lock_all(dev); + if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + return -EBUSY; + } if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; @@ -915,7 +917,9 @@ int drm_fb_helper_pan_display(struct fb_var_screeninfo *var, int ret = 0; int i; - drm_modeset_lock_all(dev); + if (__drm_modeset_lock_all(dev, !!oops_in_progress)) { + return -EBUSY; + } if (!drm_fb_helper_is_bound(fb_helper)) { drm_modeset_unlock_all(dev); return -EBUSY; diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index 121470a..1bcbe07 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -645,18 +645,6 @@ static int exynos_drm_init(void) if (!is_exynos) return -ENODEV; - /* - * Register device object only in case of Exynos SoC. - * - * Below codes resolves temporarily infinite loop issue incurred - * by Exynos drm driver when using multi-platform kernel. - * So these codes will be replaced with more generic way later. 
- */ - if (!of_machine_is_compatible("samsung,exynos3") && - !of_machine_is_compatible("samsung,exynos4") && - !of_machine_is_compatible("samsung,exynos5")) - return -ENODEV; - exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, NULL, 0); if (IS_ERR(exynos_drm_pdev)) diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c index 5765a16..98051e8 100644 --- a/drivers/gpu/drm/exynos/exynos_hdmi.c +++ b/drivers/gpu/drm/exynos/exynos_hdmi.c @@ -1669,7 +1669,6 @@ static void hdmi_mode_apply(struct hdmi_context *hdata) static void hdmiphy_conf_reset(struct hdmi_context *hdata) { - u8 buffer[2]; u32 reg; clk_disable_unprepare(hdata->res.sclk_hdmi); @@ -1677,11 +1676,8 @@ static void hdmiphy_conf_reset(struct hdmi_context *hdata) clk_prepare_enable(hdata->res.sclk_hdmi); /* operation mode */ - buffer[0] = 0x1f; - buffer[1] = 0x00; - - if (hdata->hdmiphy_port) - i2c_master_send(hdata->hdmiphy_port, buffer, 2); + hdmiphy_reg_writeb(hdata, HDMIPHY_MODE_SET_DONE, + HDMI_PHY_ENABLE_MODE_SET); if (hdata->type == HDMI_TYPE13) reg = HDMI_V13_PHY_RSTOUT; diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c index 820b762..064ed65 100644 --- a/drivers/gpu/drm/exynos/exynos_mixer.c +++ b/drivers/gpu/drm/exynos/exynos_mixer.c @@ -1026,6 +1026,7 @@ static void mixer_win_disable(struct exynos_drm_manager *mgr, int zpos) static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) { struct mixer_context *mixer_ctx = mgr_to_mixer(mgr); + int err; mutex_lock(&mixer_ctx->mixer_mutex); if (!mixer_ctx->powered) { @@ -1034,7 +1035,11 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr) } mutex_unlock(&mixer_ctx->mixer_mutex); - drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); + err = drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe); + if (err < 0) { + DRM_DEBUG_KMS("failed to acquire vblank counter\n"); + return; + } atomic_set(&mixer_ctx->wait_vsync_event, 1); @@ -1262,8 +1267,6 @@ static int mixer_bind(struct device *dev, struct device *manager, void *data) return ret; } - pm_runtime_enable(dev); - return 0; } @@ -1272,8 +1275,6 @@ static void mixer_unbind(struct device *dev, struct device *master, void *data) struct mixer_context *ctx = dev_get_drvdata(dev); mixer_mgr_remove(&ctx->manager); - - pm_runtime_disable(dev); } static const struct component_ops mixer_component_ops = { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index fd7a493..54f2a27 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1826,8 +1826,6 @@ struct drm_i915_private { */ struct workqueue_struct *dp_wq; - uint32_t bios_vgacntr; - /* Abstract the submission mechanism (legacy ringbuffer or execlists) away */ struct { int (*do_execbuf)(struct drm_device *dev, struct drm_file *file, diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 3044fb3..4e4d969 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -1048,6 +1048,7 @@ int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { + struct drm_i915_private *dev_priv = dev->dev_private; struct drm_i915_gem_pwrite *args = data; struct drm_i915_gem_object *obj; int ret; @@ -1067,9 +1068,11 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, return -EFAULT; } + intel_runtime_pm_get(dev_priv); + ret = i915_mutex_lock_interruptible(dev); if (ret) - return ret; + goto put_rpm; obj = to_intel_bo(drm_gem_object_lookup(dev, file, 
args->handle)); if (&obj->base == NULL) { @@ -1121,6 +1124,9 @@ out: drm_gem_object_unreference(&obj->base); unlock: mutex_unlock(&dev->struct_mutex); +put_rpm: + intel_runtime_pm_put(dev_priv); + return ret; } @@ -5090,7 +5096,7 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task) if (!mutex_is_locked(mutex)) return false; -#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_MUTEXES) +#if defined(CONFIG_SMP) && !defined(CONFIG_DEBUG_MUTEXES) return mutex->owner == task; #else /* Since UP may be pre-empted, we cannot assume that we own the lock */ diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 5d83773..818ab4e 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -296,6 +296,23 @@ void gen6_enable_rps_interrupts(struct drm_device *dev) spin_unlock_irq(&dev_priv->irq_lock); } +u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask) +{ + /* + * SNB,IVB can while VLV,CHV may hard hang on looping batchbuffer + * if GEN6_PM_UP_EI_EXPIRED is masked. + * + * TODO: verify if this can be reproduced on VLV,CHV. + */ + if (INTEL_INFO(dev_priv)->gen <= 7 && !IS_HASWELL(dev_priv)) + mask &= ~GEN6_PM_RP_UP_EI_EXPIRED; + + if (INTEL_INFO(dev_priv)->gen >= 8) + mask &= ~GEN8_PMINTR_REDIRECT_TO_NON_DISP; + + return mask; +} + void gen6_disable_rps_interrupts(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; @@ -308,8 +325,7 @@ void gen6_disable_rps_interrupts(struct drm_device *dev) spin_lock_irq(&dev_priv->irq_lock); - I915_WRITE(GEN6_PMINTRMSK, INTEL_INFO(dev_priv)->gen >= 8 ? - ~GEN8_PMINTR_REDIRECT_TO_NON_DISP : ~0); + I915_WRITE(GEN6_PMINTRMSK, gen6_sanitize_rps_pm_mask(dev_priv, ~0)); __gen6_disable_pm_irq(dev_priv, dev_priv->pm_rps_events); I915_WRITE(gen6_pm_ier(dev_priv), I915_READ(gen6_pm_ier(dev_priv)) & @@ -3731,8 +3747,6 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3742,6 +3756,7 @@ static bool i8xx_handle_vblank(struct drm_device *dev, if (I915_READ16(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; @@ -3913,8 +3928,6 @@ static bool i915_handle_vblank(struct drm_device *dev, if ((iir & flip_pending) == 0) goto check_page_flip; - intel_prepare_page_flip(dev, plane); - /* We detect FlipDone by looking for the change in PendingFlip from '1' * to '0' on the following vblank, i.e. 
IIR has the Pendingflip * asserted following the MI_DISPLAY_FLIP, but ISR is deasserted, hence @@ -3924,6 +3937,7 @@ static bool i915_handle_vblank(struct drm_device *dev, if (I915_READ(ISR) & flip_pending) goto check_page_flip; + intel_prepare_page_flip(dev, plane); intel_finish_page_flip(dev, pipe); return true; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index d01db1b..dc266e7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -9703,7 +9703,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, if (obj->tiling_mode != work->old_fb_obj->tiling_mode) /* vlv: DISPLAY_FLIP fails to change tiling */ ring = NULL; - } else if (IS_IVYBRIDGE(dev)) { + } else if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) { ring = &dev_priv->ring[BCS]; } else if (INTEL_INFO(dev)->gen >= 7) { ring = i915_gem_request_get_ring(obj->last_read_req); @@ -12956,11 +12956,7 @@ static void i915_disable_vga(struct drm_device *dev) vga_put(dev->pdev, VGA_RSRC_LEGACY_IO); udelay(300); - /* - * Fujitsu-Siemens Lifebook S6010 (830) has problems resuming - * from S3 without preserving (some of?) the other bits. - */ - I915_WRITE(vga_reg, dev_priv->bios_vgacntr | VGA_DISP_DISABLE); + I915_WRITE(vga_reg, VGA_DISP_DISABLE); POSTING_READ(vga_reg); } @@ -13045,8 +13041,6 @@ void intel_modeset_init(struct drm_device *dev) intel_shared_dpll_init(dev); - /* save the BIOS value before clobbering it */ - dev_priv->bios_vgacntr = I915_READ(i915_vgacntrl_reg(dev)); /* Just disable it once at startup */ i915_disable_vga(dev); intel_setup_outputs(dev); diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h index 588b618..bb871f3 100644 --- a/drivers/gpu/drm/i915/intel_drv.h +++ b/drivers/gpu/drm/i915/intel_drv.h @@ -795,6 +795,7 @@ void gen6_disable_pm_irq(struct drm_i915_private *dev_priv, uint32_t mask); void gen6_reset_rps_interrupts(struct drm_device *dev); void gen6_enable_rps_interrupts(struct drm_device *dev); void gen6_disable_rps_interrupts(struct drm_device *dev); +u32 gen6_sanitize_rps_pm_mask(struct drm_i915_private *dev_priv, u32 mask); void intel_runtime_pm_disable_interrupts(struct drm_i915_private *dev_priv); void intel_runtime_pm_enable_interrupts(struct drm_i915_private *dev_priv); static inline bool intel_irqs_enabled(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a3ebaa8..7d99a9c 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3745,16 +3745,7 @@ static u32 gen6_rps_pm_mask(struct drm_i915_private *dev_priv, u8 val) mask |= dev_priv->pm_rps_events & (GEN6_PM_RP_DOWN_EI_EXPIRED | GEN6_PM_RP_UP_EI_EXPIRED); mask &= dev_priv->pm_rps_events; - /* IVB and SNB hard hangs on looping batchbuffer - * if GEN6_PM_UP_EI_EXPIRED is masked. 
- */ - if (INTEL_INFO(dev_priv->dev)->gen <= 7 && !IS_HASWELL(dev_priv->dev)) - mask |= GEN6_PM_RP_UP_EI_EXPIRED; - - if (IS_GEN8(dev_priv->dev)) - mask |= GEN8_PMINTR_REDIRECT_TO_NON_DISP; - - return ~mask; + return gen6_sanitize_rps_pm_mask(dev_priv, ~mask); } /* gen6_set_rps is called to update the frequency request, but should also be @@ -3823,7 +3814,8 @@ static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) return; /* Mask turbo interrupt so that they will not come in between */ - I915_WRITE(GEN6_PMINTRMSK, 0xffffffff); + I915_WRITE(GEN6_PMINTRMSK, + gen6_sanitize_rps_pm_mask(dev_priv, ~0)); vlv_force_gfx_clock(dev_priv, true); diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 6aa3a81..39e1b07 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -615,29 +615,6 @@ static void chv_pipe_power_well_disable(struct drm_i915_private *dev_priv, vlv_power_sequencer_reset(dev_priv); } -static void check_power_well_state(struct drm_i915_private *dev_priv, - struct i915_power_well *power_well) -{ - bool enabled = power_well->ops->is_enabled(dev_priv, power_well); - - if (power_well->always_on || !i915.disable_power_well) { - if (!enabled) - goto mismatch; - - return; - } - - if (enabled != (power_well->count > 0)) - goto mismatch; - - return; - -mismatch: - I915_STATE_WARN(1, "state mismatch for '%s' (always_on %d hw state %d use-count %d disable_power_well %d\n", - power_well->name, power_well->always_on, enabled, - power_well->count, i915.disable_power_well); -} - /** * intel_display_power_get - grab a power domain reference * @dev_priv: i915 device instance @@ -669,8 +646,6 @@ void intel_display_power_get(struct drm_i915_private *dev_priv, power_well->ops->enable(dev_priv, power_well); power_well->hw_enabled = true; } - - check_power_well_state(dev_priv, power_well); } power_domains->domain_use_count[domain]++; @@ -709,8 +684,6 @@ void intel_display_power_put(struct drm_i915_private *dev_priv, power_well->hw_enabled = false; power_well->ops->disable(dev_priv, power_well); } - - check_power_well_state(dev_priv, power_well); } mutex_unlock(&power_domains->lock); diff --git a/drivers/gpu/drm/nouveau/core/core/event.c b/drivers/gpu/drm/nouveau/core/core/event.c index ff2b434..760947e 100644 --- a/drivers/gpu/drm/nouveau/core/core/event.c +++ b/drivers/gpu/drm/nouveau/core/core/event.c @@ -26,7 +26,7 @@ void nvkm_event_put(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (--event->refs[index * event->types_nr + type] == 0) { @@ -39,7 +39,7 @@ nvkm_event_put(struct nvkm_event *event, u32 types, int index) void nvkm_event_get(struct nvkm_event *event, u32 types, int index) { - BUG_ON(!spin_is_locked(&event->refs_lock)); + assert_spin_locked(&event->refs_lock); while (types) { int type = __ffs(types); types &= ~(1 << type); if (++event->refs[index * event->types_nr + type] == 1) { diff --git a/drivers/gpu/drm/nouveau/core/core/notify.c b/drivers/gpu/drm/nouveau/core/core/notify.c index d1bcde5..839a325 100644 --- a/drivers/gpu/drm/nouveau/core/core/notify.c +++ b/drivers/gpu/drm/nouveau/core/core/notify.c @@ -98,7 +98,7 @@ nvkm_notify_send(struct nvkm_notify *notify, void *data, u32 size) struct nvkm_event *event = notify->event; unsigned long flags; - BUG_ON(!spin_is_locked(&event->list_lock)); + assert_spin_locked(&event->list_lock); 
BUG_ON(size != notify->size); spin_lock_irqsave(&event->refs_lock, flags); diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c index 674da1f..7329226 100644 --- a/drivers/gpu/drm/nouveau/core/engine/device/nve0.c +++ b/drivers/gpu/drm/nouveau/core/engine/device/nve0.c @@ -249,6 +249,39 @@ nve0_identify(struct nouveau_device *device) device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; device->oclass[NVDEV_ENGINE_PERFMON] = &nvf0_perfmon_oclass; break; + case 0x106: + device->cname = "GK208B"; + device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; + device->oclass[NVDEV_SUBDEV_GPIO ] = nve0_gpio_oclass; + device->oclass[NVDEV_SUBDEV_I2C ] = nve0_i2c_oclass; + device->oclass[NVDEV_SUBDEV_FUSE ] = &gf100_fuse_oclass; + device->oclass[NVDEV_SUBDEV_CLOCK ] = &nve0_clock_oclass; + device->oclass[NVDEV_SUBDEV_THERM ] = &nvd0_therm_oclass; + device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass; + device->oclass[NVDEV_SUBDEV_DEVINIT] = nvc0_devinit_oclass; + device->oclass[NVDEV_SUBDEV_MC ] = gk20a_mc_oclass; + device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass; + device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass; + device->oclass[NVDEV_SUBDEV_FB ] = nve0_fb_oclass; + device->oclass[NVDEV_SUBDEV_LTC ] = gk104_ltc_oclass; + device->oclass[NVDEV_SUBDEV_IBUS ] = &nve0_ibus_oclass; + device->oclass[NVDEV_SUBDEV_INSTMEM] = nv50_instmem_oclass; + device->oclass[NVDEV_SUBDEV_VM ] = &nvc0_vmmgr_oclass; + device->oclass[NVDEV_SUBDEV_BAR ] = &nvc0_bar_oclass; + device->oclass[NVDEV_SUBDEV_PWR ] = nv108_pwr_oclass; + device->oclass[NVDEV_SUBDEV_VOLT ] = &nv40_volt_oclass; + device->oclass[NVDEV_ENGINE_DMAOBJ ] = nvd0_dmaeng_oclass; + device->oclass[NVDEV_ENGINE_FIFO ] = nv108_fifo_oclass; + device->oclass[NVDEV_ENGINE_SW ] = nvc0_software_oclass; + device->oclass[NVDEV_ENGINE_GR ] = nv108_graph_oclass; + device->oclass[NVDEV_ENGINE_DISP ] = nvf0_disp_oclass; + device->oclass[NVDEV_ENGINE_COPY0 ] = &nve0_copy0_oclass; + device->oclass[NVDEV_ENGINE_COPY1 ] = &nve0_copy1_oclass; + device->oclass[NVDEV_ENGINE_COPY2 ] = &nve0_copy2_oclass; + device->oclass[NVDEV_ENGINE_BSP ] = &nve0_bsp_oclass; + device->oclass[NVDEV_ENGINE_VP ] = &nve0_vp_oclass; + device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; + break; case 0x108: device->cname = "GK208"; device->oclass[NVDEV_SUBDEV_VBIOS ] = &nouveau_bios_oclass; diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c index 5e58bba..a7a890f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c +++ b/drivers/gpu/drm/nouveau/core/subdev/bios/shadowramin.c @@ -44,8 +44,10 @@ static void pramin_fini(void *data) { struct priv *priv = data; - nv_wr32(priv->bios, 0x001700, priv->bar0); - kfree(priv); + if (priv) { + nv_wr32(priv->bios, 0x001700, priv->bar0); + kfree(priv); + } } static void * diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c index 00f2ca7..033a8e9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c +++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnvaa.c @@ -24,34 +24,71 @@ #include "nv50.h" +struct nvaa_ram_priv { + struct nouveau_ram base; + u64 poller_base; +}; + static int nvaa_ram_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *data, u32 datasize, struct nouveau_object **pobject) { - const u32 rsvd_head = ( 256 * 1024) >> 12; /* vga memory */ - const u32 rsvd_tail = (1024 
* 1024) >> 12; /* vbios etc */ + u32 rsvd_head = ( 256 * 1024); /* vga memory */ + u32 rsvd_tail = (1024 * 1024); /* vbios etc */ struct nouveau_fb *pfb = nouveau_fb(parent); - struct nouveau_ram *ram; + struct nvaa_ram_priv *priv; int ret; - ret = nouveau_ram_create(parent, engine, oclass, &ram); - *pobject = nv_object(ram); + ret = nouveau_ram_create(parent, engine, oclass, &priv); + *pobject = nv_object(priv); if (ret) return ret; - ram->size = nv_rd32(pfb, 0x10020c); - ram->size = (ram->size & 0xffffff00) | ((ram->size & 0x000000ff) << 32); + priv->base.type = NV_MEM_TYPE_STOLEN; + priv->base.stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; + priv->base.size = (u64)nv_rd32(pfb, 0x100e14) << 12; - ret = nouveau_mm_init(&pfb->vram, rsvd_head, (ram->size >> 12) - - (rsvd_head + rsvd_tail), 1); + rsvd_tail += 0x1000; + priv->poller_base = priv->base.size - rsvd_tail; + + ret = nouveau_mm_init(&pfb->vram, rsvd_head >> 12, + (priv->base.size - (rsvd_head + rsvd_tail)) >> 12, + 1); if (ret) return ret; - ram->type = NV_MEM_TYPE_STOLEN; - ram->stolen = (u64)nv_rd32(pfb, 0x100e10) << 12; - ram->get = nv50_ram_get; - ram->put = nv50_ram_put; + priv->base.get = nv50_ram_get; + priv->base.put = nv50_ram_put; + return 0; +} + +static int +nvaa_ram_init(struct nouveau_object *object) +{ + struct nouveau_fb *pfb = nouveau_fb(object); + struct nvaa_ram_priv *priv = (void *)object; + int ret; + u64 dniso, hostnb, flush; + + ret = nouveau_ram_init(&priv->base); + if (ret) + return ret; + + dniso = ((priv->base.size - (priv->poller_base + 0x00)) >> 5) - 1; + hostnb = ((priv->base.size - (priv->poller_base + 0x20)) >> 5) - 1; + flush = ((priv->base.size - (priv->poller_base + 0x40)) >> 5) - 1; + + /* Enable NISO poller for various clients and set their associated + * read address, only for MCP77/78 and MCP79/7A. (fd#25701) + */ + nv_wr32(pfb, 0x100c18, dniso); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000001); + nv_wr32(pfb, 0x100c1c, hostnb); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00000002); + nv_wr32(pfb, 0x100c24, flush); + nv_mask(pfb, 0x100c14, 0x00000000, 0x00010000); + return 0; } @@ -60,7 +97,7 @@ nvaa_ram_oclass = { .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvaa_ram_ctor, .dtor = _nouveau_ram_dtor, - .init = _nouveau_ram_init, + .init = nvaa_ram_init, .fini = _nouveau_ram_fini, }, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c index a75c35c..165401c 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv4c.c @@ -24,13 +24,6 @@ #include "nv04.h" -static void -nv4c_mc_msi_rearm(struct nouveau_mc *pmc) -{ - struct nv04_mc_priv *priv = (void *)pmc; - nv_wr08(priv, 0x088050, 0xff); -} - struct nouveau_oclass * nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .base.handle = NV_SUBDEV(MC, 0x4c), @@ -41,5 +34,4 @@ nv4c_mc_oclass = &(struct nouveau_mc_oclass) { .fini = _nouveau_mc_fini, }, .intr = nv04_mc_intr, - .msi_rearm = nv4c_mc_msi_rearm, }.base; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 21ec561..bba2960 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1572,8 +1572,10 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) * so use the DMA API for them. 
*/ if (!nv_device_is_cpu_coherent(device) && - ttm->caching_state == tt_uncached) + ttm->caching_state == tt_uncached) { ttm_dma_unpopulate(ttm_dma, dev->dev); + return; + } #if __OS_HAS_AGP if (drm->agp.stat == ENABLED) { diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index 42c34ba..bf0f9e21d 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -36,7 +36,14 @@ void nouveau_gem_object_del(struct drm_gem_object *gem) { struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; + struct device *dev = drm->dev->dev; + int ret; + + ret = pm_runtime_get_sync(dev); + if (WARN_ON(ret < 0 && ret != -EACCES)) + return; if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); @@ -46,6 +53,9 @@ nouveau_gem_object_del(struct drm_gem_object *gem) /* reset filp so nouveau_bo_del_ttm() can test for it */ gem->filp = NULL; ttm_bo_unref(&bo); + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } int @@ -53,7 +63,9 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct nouveau_vma *vma; + struct device *dev = drm->dev->dev; int ret; if (!cli->vm) @@ -71,11 +83,16 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv) goto out; } + ret = pm_runtime_get_sync(dev); + if (ret < 0 && ret != -EACCES) + goto out; + ret = nouveau_bo_vma_add(nvbo, cli->vm, vma); - if (ret) { + if (ret) kfree(vma); - goto out; - } + + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); } else { vma->refcount++; } @@ -129,6 +146,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) { struct nouveau_cli *cli = nouveau_cli(file_priv); struct nouveau_bo *nvbo = nouveau_gem_object(gem); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct device *dev = drm->dev->dev; struct nouveau_vma *vma; int ret; @@ -141,8 +160,14 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv) vma = nouveau_bo_vma_find(nvbo, cli->vm); if (vma) { - if (--vma->refcount == 0) - nouveau_gem_object_unmap(nvbo, vma); + if (--vma->refcount == 0) { + ret = pm_runtime_get_sync(dev); + if (!WARN_ON(ret < 0 && ret != -EACCES)) { + nouveau_gem_object_unmap(nvbo, vma); + pm_runtime_mark_last_busy(dev); + pm_runtime_put_autosuspend(dev); + } + } } ttm_bo_unreserve(&nvbo->bo); } diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c index d59ec49..ed644a4 100644 --- a/drivers/gpu/drm/radeon/atombios_crtc.c +++ b/drivers/gpu/drm/radeon/atombios_crtc.c @@ -1851,10 +1851,9 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) return pll; } /* otherwise, pick one of the plls */ - if ((rdev->family == CHIP_KAVERI) || - (rdev->family == CHIP_KABINI) || + if ((rdev->family == CHIP_KABINI) || (rdev->family == CHIP_MULLINS)) { - /* KB/KV/ML has PPLL1 and PPLL2 */ + /* KB/ML has PPLL1 and PPLL2 */ pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -1863,7 +1862,7 @@ static int radeon_atom_pick_pll(struct drm_crtc *crtc) DRM_ERROR("unable to allocate a PPLL\n"); return ATOM_PPLL_INVALID; } else { - /* CI has PPLL0, PPLL1, and PPLL2 */ + /* CI/KV has PPLL0, PPLL1, and PPLL2 */ 
pll_in_use = radeon_get_pll_use_mask(crtc); if (!(pll_in_use & (1 << ATOM_PPLL2))) return ATOM_PPLL2; @@ -2155,6 +2154,7 @@ static void atombios_crtc_disable(struct drm_crtc *crtc) case ATOM_PPLL0: /* disable the ppll */ if ((rdev->family == CHIP_ARUBA) || + (rdev->family == CHIP_KAVERI) || (rdev->family == CHIP_BONAIRE) || (rdev->family == CHIP_HAWAII)) atombios_crtc_program_pll(crtc, radeon_crtc->crtc_id, radeon_crtc->pll_id, diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c index 11ba9d2..db42a67 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -492,6 +492,10 @@ int radeon_dp_mode_valid_helper(struct drm_connector *connector, struct radeon_connector_atom_dig *dig_connector; int dp_clock; + if ((mode->clock > 340000) && + (!radeon_connector_is_dp12_capable(connector))) + return MODE_CLOCK_HIGH; + if (!radeon_connector->con_priv) return MODE_CLOCK_HIGH; dig_connector = radeon_connector->con_priv; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 14d173e..ed336fb 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -6057,6 +6057,17 @@ void cik_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */ + WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* compute doesn't have PFP */ if (usepfp) { /* sync PFP to ME, otherwise we might get invalid PFP reads */ diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1f4ded1..479519c 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -932,6 +932,9 @@ void cik_sdma_vm_pad_ib(struct radeon_ib *ib) void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, unsigned vm_id, uint64_t pd_addr) { + u32 extra_bits = (SDMA_POLL_REG_MEM_EXTRA_OP(0) | + SDMA_POLL_REG_MEM_EXTRA_FUNC(0)); /* always */ + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); if (vm_id < 8) { radeon_ring_write(ring, (VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm_id << 2)) >> 2); @@ -972,5 +975,12 @@ void cik_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_SRBM_WRITE, 0, 0xf000)); radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); radeon_ring_write(ring, 1 << vm_id); + + radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_POLL_REG_MEM, 0, extra_bits)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* reference */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, (0xfff << 16) | 10); /* retry count, poll interval */ } diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h index ba85986..03003f8 100644 --- a/drivers/gpu/drm/radeon/cikd.h +++ b/drivers/gpu/drm/radeon/cikd.h @@ -2156,4 +2156,6 @@ #define ATC_VM_APERTURE1_HIGH_ADDR 0x330Cu #define ATC_VM_APERTURE1_LOW_ADDR 0x3304u +#define IH_VMID_0_LUT 0x3D40u + #endif diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c 
b/drivers/gpu/drm/radeon/dce3_1_afmt.c index 2fe8cfc..bafdf92 100644 --- a/drivers/gpu/drm/radeon/dce3_1_afmt.c +++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c @@ -103,7 +103,7 @@ static void dce3_2_afmt_write_sad_regs(struct drm_encoder *encoder) } sad_count = drm_edid_to_sad(radeon_connector->edid, &sads); - if (sad_count < 0) { + if (sad_count <= 0) { DRM_ERROR("Couldn't read SADs: %d\n", sad_count); return; } diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c index 9b42001..e3e9c10 100644 --- a/drivers/gpu/drm/radeon/kv_dpm.c +++ b/drivers/gpu/drm/radeon/kv_dpm.c @@ -2745,13 +2745,11 @@ int kv_dpm_init(struct radeon_device *rdev) pi->enable_auto_thermal_throttling = true; pi->disable_nb_ps3_in_battery = false; if (radeon_bapm == -1) { - /* There are stability issues reported on with - * bapm enabled on an asrock system. - */ - if (rdev->pdev->subsystem_vendor == 0x1849) - pi->bapm_enable = false; - else + /* only enable bapm on KB, ML by default */ + if (rdev->family == CHIP_KABINI || rdev->family == CHIP_MULLINS) pi->bapm_enable = true; + else + pi->bapm_enable = false; } else if (radeon_bapm == 0) { pi->bapm_enable = false; } else { diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 360de9f..aea48c8 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2516,6 +2516,16 @@ void cayman_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, PACKET0(VM_INVALIDATE_REQUEST, 0)); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); diff --git a/drivers/gpu/drm/radeon/ni_dma.c b/drivers/gpu/drm/radeon/ni_dma.c index 50f8861..4be2bb7 100644 --- a/drivers/gpu/drm/radeon/ni_dma.c +++ b/drivers/gpu/drm/radeon/ni_dma.c @@ -463,5 +463,11 @@ void cayman_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm_id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_SRBM_READ_PACKET); + radeon_ring_write(ring, (0xff << 20) | (VM_INVALIDATE_REQUEST >> 2)); + radeon_ring_write(ring, 0); /* mask */ + radeon_ring_write(ring, 0); /* value */ } diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h index 2e12e4d..ad71254 100644 --- a/drivers/gpu/drm/radeon/nid.h +++ b/drivers/gpu/drm/radeon/nid.h @@ -1133,6 +1133,23 @@ #define PACKET3_MEM_SEMAPHORE 0x39 #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + * 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_PFP_SYNC_ME 0x42 #define PACKET3_SURFACE_SYNC 0x43 @@ -1272,6 +1289,13 @@ (1 << 21) | \ (((n) & 
0xFFFFF) << 0)) +#define DMA_SRBM_POLL_PACKET ((9 << 28) | \ + (1 << 27) | \ + (1 << 26)) + +#define DMA_SRBM_READ_PACKET ((9 << 28) | \ + (1 << 27)) + /* async DMA Packet types */ #define DMA_PACKET_WRITE 0x2 #define DMA_PACKET_COPY 0x3 diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c index 850de57..121aff6 100644 --- a/drivers/gpu/drm/radeon/radeon_asic.c +++ b/drivers/gpu/drm/radeon/radeon_asic.c @@ -333,6 +333,20 @@ static struct radeon_asic_ring r300_gfx_ring = { .set_wptr = &r100_gfx_set_wptr, }; +static struct radeon_asic_ring rv515_gfx_ring = { + .ib_execute = &r100_ring_ib_execute, + .emit_fence = &r300_fence_ring_emit, + .emit_semaphore = &r100_semaphore_ring_emit, + .cs_parse = &r300_cs_parse, + .ring_start = &rv515_ring_start, + .ring_test = &r100_ring_test, + .ib_test = &r100_ib_test, + .is_lockup = &r100_gpu_is_lockup, + .get_rptr = &r100_gfx_get_rptr, + .get_wptr = &r100_gfx_get_wptr, + .set_wptr = &r100_gfx_set_wptr, +}; + static struct radeon_asic r300_asic = { .init = &r300_init, .fini = &r300_fini, @@ -748,7 +762,7 @@ static struct radeon_asic rv515_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, @@ -814,7 +828,7 @@ static struct radeon_asic r520_asic = { .set_page = &rv370_pcie_gart_set_page, }, .ring = { - [RADEON_RING_TYPE_GFX_INDEX] = &r300_gfx_ring + [RADEON_RING_TYPE_GFX_INDEX] = &rv515_gfx_ring }, .irq = { .set = &rs600_irq_set, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index a46f737..d0b4f7d 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -576,7 +576,7 @@ error_unreserve: error_free: drm_free_large(vm_bos); - if (r) + if (r && r != -ERESTARTSYS) DRM_ERROR("Couldn't update BO_VA (%d)\n", r); } diff --git a/drivers/gpu/drm/radeon/radeon_kfd.c b/drivers/gpu/drm/radeon/radeon_kfd.c index ddbabab..7b27420 100644 --- a/drivers/gpu/drm/radeon/radeon_kfd.c +++ b/drivers/gpu/drm/radeon/radeon_kfd.c @@ -70,7 +70,7 @@ static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id, static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr); static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd); -static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, @@ -91,7 +91,7 @@ static const struct kfd2kgd_calls kfd2kgd = { .init_pipeline = kgd_init_pipeline, .hqd_load = kgd_hqd_load, .hqd_sdma_load = kgd_hqd_sdma_load, - .hqd_is_occupies = kgd_hqd_is_occupies, + .hqd_is_occupied = kgd_hqd_is_occupied, .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied, .hqd_destroy = kgd_hqd_destroy, .hqd_sdma_destroy = kgd_hqd_sdma_destroy, @@ -102,6 +102,7 @@ static const struct kgd2kfd_calls *kgd2kfd; bool radeon_kfd_init(void) { +#if defined(CONFIG_HSA_AMD_MODULE) bool (*kgd2kfd_init_p)(unsigned, const struct kfd2kgd_calls*, const struct kgd2kfd_calls**); @@ -118,6 +119,17 @@ bool radeon_kfd_init(void) } return true; +#elif defined(CONFIG_HSA_AMD) + if (!kgd2kfd_init(KFD_INTERFACE_VERSION, &kfd2kgd, &kgd2kfd)) { + kgd2kfd = NULL; + + return false; + } + + return true; +#else + return false; +#endif } void radeon_kfd_fini(void) @@ -370,6 +382,10 @@ static int 
kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid, cpu_relax(); write_register(kgd, ATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid); + /* Mapping vmid to pasid also for IH block */ + write_register(kgd, IH_VMID_0_LUT + vmid * sizeof(uint32_t), + pasid_mapping); + return 0; } @@ -529,7 +545,7 @@ static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd) return 0; } -static bool kgd_hqd_is_occupies(struct kgd_dev *kgd, uint64_t queue_address, +static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id) { uint32_t act; @@ -586,6 +602,7 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, uint32_t reset_type, if (timeout == 0) { pr_err("kfd: cp queue preemption time out (%dms)\n", temp); + release_queue(kgd); return -ETIME; } msleep(20); diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 32522cc..f7da8fe 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -1287,8 +1287,39 @@ dpm_failed: return ret; } +struct radeon_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; +}; + +/* cards with dpm stability problems */ +static struct radeon_dpm_quirk radeon_dpm_quirk_list[] = { + /* TURKS - https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1386534 */ + { PCI_VENDOR_ID_ATI, 0x6759, 0x1682, 0x3195 }, + /* TURKS - https://bugzilla.kernel.org/show_bug.cgi?id=83731 */ + { PCI_VENDOR_ID_ATI, 0x6840, 0x1179, 0xfb81 }, + { 0, 0, 0, 0 }, +}; + int radeon_pm_init(struct radeon_device *rdev) { + struct radeon_dpm_quirk *p = radeon_dpm_quirk_list; + bool disable_dpm = false; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + disable_dpm = true; + break; + } + ++p; + } + /* enable dpm on rv6xx+ */ switch (rdev->family) { case CHIP_RV610: @@ -1344,6 +1375,8 @@ int radeon_pm_init(struct radeon_device *rdev) (!(rdev->flags & RADEON_IS_IGP)) && (!rdev->smc_fw)) rdev->pm.pm_method = PM_METHOD_PROFILE; + else if (disable_dpm && (radeon_dpm == -1)) + rdev->pm.pm_method = PM_METHOD_PROFILE; else if (radeon_dpm == 0) rdev->pm.pm_method = PM_METHOD_PROFILE; else diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c index 535403e..15aee72 100644 --- a/drivers/gpu/drm/radeon/radeon_state.c +++ b/drivers/gpu/drm/radeon/radeon_state.c @@ -1703,7 +1703,7 @@ static int radeon_cp_dispatch_texture(struct drm_device * dev, u32 format; u32 *buffer; const u8 __user *data; - int size, dwords, tex_width, blit_width, spitch; + unsigned int size, dwords, tex_width, blit_width, spitch; u32 height; int i; u32 texpitch, microtile; diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 60df444..5d89b87 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -5057,6 +5057,16 @@ void si_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, 0); radeon_ring_write(ring, 1 << vm_id); + /* wait for the invalidate to complete */ + radeon_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5)); + radeon_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) | /* always */ + WAIT_REG_MEM_ENGINE(0))); /* me */ + radeon_ring_write(ring, VM_INVALIDATE_REQUEST >> 2); + radeon_ring_write(ring, 0); + radeon_ring_write(ring, 0); /* ref */ + radeon_ring_write(ring, 
0); /* mask */ + radeon_ring_write(ring, 0x20); /* poll interval */ + /* sync PFP to ME, otherwise we might get invalid PFP reads */ radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0)); radeon_ring_write(ring, 0x0); diff --git a/drivers/gpu/drm/radeon/si_dma.c b/drivers/gpu/drm/radeon/si_dma.c index f5cc777..aa7b872 100644 --- a/drivers/gpu/drm/radeon/si_dma.c +++ b/drivers/gpu/drm/radeon/si_dma.c @@ -206,6 +206,14 @@ void si_dma_vm_flush(struct radeon_device *rdev, struct radeon_ring *ring, radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_SRBM_WRITE, 0, 0, 0, 0)); radeon_ring_write(ring, (0xf << 16) | (VM_INVALIDATE_REQUEST >> 2)); radeon_ring_write(ring, 1 << vm_id); + + /* wait for invalidate to complete */ + radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_POLL_REG_MEM, 0, 0, 0, 0)); + radeon_ring_write(ring, VM_INVALIDATE_REQUEST); + radeon_ring_write(ring, 0xff << 16); /* retry */ + radeon_ring_write(ring, 1 << vm_id); /* mask */ + radeon_ring_write(ring, 0); /* value */ + radeon_ring_write(ring, (0 << 28) | 0x20); /* func(always) | poll interval */ } /** diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c index 32e354b..eff8a64 100644 --- a/drivers/gpu/drm/radeon/si_dpm.c +++ b/drivers/gpu/drm/radeon/si_dpm.c @@ -2908,6 +2908,22 @@ static int si_init_smc_spll_table(struct radeon_device *rdev) return ret; } +struct si_dpm_quirk { + u32 chip_vendor; + u32 chip_device; + u32 subsys_vendor; + u32 subsys_device; + u32 max_sclk; + u32 max_mclk; +}; + +/* cards with dpm stability problems */ +static struct si_dpm_quirk si_dpm_quirk_list[] = { + /* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */ + { PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 }, + { 0, 0, 0, 0 }, +}; + static void si_apply_state_adjust_rules(struct radeon_device *rdev, struct radeon_ps *rps) { @@ -2918,7 +2934,22 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, u32 mclk, sclk; u16 vddc, vddci; u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; int i; + struct si_dpm_quirk *p = si_dpm_quirk_list; + + /* Apply dpm quirks */ + while (p && p->chip_device != 0) { + if (rdev->pdev->vendor == p->chip_vendor && + rdev->pdev->device == p->chip_device && + rdev->pdev->subsystem_vendor == p->subsys_vendor && + rdev->pdev->subsystem_device == p->subsys_device) { + max_sclk = p->max_sclk; + max_mclk = p->max_mclk; + break; + } + ++p; + } if ((rdev->pm.dpm.new_active_crtc_count > 1) || ni_dpm_vblank_too_short(rdev)) @@ -2972,6 +3003,14 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev, if (ps->performance_levels[i].mclk > max_mclk_vddc) ps->performance_levels[i].mclk = max_mclk_vddc; } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk = max_sclk; + } } /* XXX validate the min clocks required for display */ diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h index 4069be89..8499924 100644 --- a/drivers/gpu/drm/radeon/sid.h +++ b/drivers/gpu/drm/radeon/sid.h @@ -1632,6 +1632,23 @@ #define PACKET3_MPEG_INDEX 0x3A #define PACKET3_COPY_DW 0x3B #define PACKET3_WAIT_REG_MEM 0x3C +#define WAIT_REG_MEM_FUNCTION(x) ((x) << 0) + /* 0 - always + * 1 - < + * 2 - <= + * 3 - == + * 4 - != + * 5 - >= + * 6 - > + */ +#define WAIT_REG_MEM_MEM_SPACE(x) ((x) << 4) + /* 0 - reg + * 1 - mem + */ +#define WAIT_REG_MEM_ENGINE(x) ((x) << 8) + /* 0 - me + 
* 1 - pfp + */ #define PACKET3_MEM_WRITE 0x3D #define PACKET3_COPY_DATA 0x40 #define PACKET3_CP_DMA 0x41 @@ -1835,6 +1852,7 @@ #define DMA_PACKET_TRAP 0x7 #define DMA_PACKET_SRBM_WRITE 0x9 #define DMA_PACKET_CONSTANT_FILL 0xd +#define DMA_PACKET_POLL_REG_MEM 0xe #define DMA_PACKET_NOP 0xf #define VCE_STATUS 0x20004 diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig index 230b6f8..dfdc269 100644 --- a/drivers/hid/Kconfig +++ b/drivers/hid/Kconfig @@ -27,7 +27,8 @@ if HID config HID_BATTERY_STRENGTH bool "Battery level reporting for HID devices" - depends on HID && POWER_SUPPLY && HID = POWER_SUPPLY + depends on HID + select POWER_SUPPLY default n ---help--- This option adds support of reporting battery strength (for HID devices diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index c3d0ac1..8b63879 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -1805,6 +1805,7 @@ static const struct hid_device_id hid_have_special_driver[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_ERGO_525V) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_I405X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, + { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_LABTEC, USB_DEVICE_ID_LABTEC_WIRELESS_KEYBOARD) }, { HID_USB_DEVICE(USB_VENDOR_ID_LCPOWER, USB_DEVICE_ID_LCPOWER_LC1000 ) }, diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h index 7460f34..9243359 100644 --- a/drivers/hid/hid-ids.h +++ b/drivers/hid/hid-ids.h @@ -526,6 +526,7 @@ #define USB_DEVICE_ID_KYE_GPEN_560 0x5003 #define USB_DEVICE_ID_KYE_EASYPEN_I405X 0x5010 #define USB_DEVICE_ID_KYE_MOUSEPEN_I608X 0x5011 +#define USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2 0x501a #define USB_DEVICE_ID_KYE_EASYPEN_M610X 0x5013 #define USB_VENDOR_ID_LABTEC 0x1020 diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c index e0a0f06..9505605 100644 --- a/drivers/hid/hid-input.c +++ b/drivers/hid/hid-input.c @@ -312,6 +312,9 @@ static const struct hid_device_id hid_battery_quirks[] = { USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, + USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO), + HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, + { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_ANSI), HID_BATTERY_QUIRK_PERCENT | HID_BATTERY_QUIRK_FEATURE }, {} diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c index b92bf01..158fcf5 100644 --- a/drivers/hid/hid-kye.c +++ b/drivers/hid/hid-kye.c @@ -323,6 +323,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc, } break; case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: if (*rsize == MOUSEPEN_I608X_RDESC_ORIG_SIZE) { rdesc = mousepen_i608x_rdesc_fixed; *rsize = sizeof(mousepen_i608x_rdesc_fixed); @@ -415,6 +416,7 @@ static int kye_probe(struct hid_device *hdev, const struct hid_device_id *id) switch (id->product) { case USB_DEVICE_ID_KYE_EASYPEN_I405X: case USB_DEVICE_ID_KYE_MOUSEPEN_I608X: + case USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2: case USB_DEVICE_ID_KYE_EASYPEN_M610X: ret = kye_tablet_enable(hdev); if (ret) { @@ -446,6 +448,8 @@ static const struct hid_device_id kye_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, + USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2) }, 
+ { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X) }, { HID_USB_DEVICE(USB_VENDOR_ID_KYE, USB_DEVICE_ID_GENIUS_GILA_GAMING_MOUSE) }, diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c index c917ab6..5bc6d80 100644 --- a/drivers/hid/hid-logitech-dj.c +++ b/drivers/hid/hid-logitech-dj.c @@ -962,10 +962,24 @@ static int logi_dj_raw_event(struct hid_device *hdev, switch (data[0]) { case REPORT_ID_DJ_SHORT: + if (size != DJREPORT_SHORT_LENGTH) { + dev_err(&hdev->dev, "DJ report of bad size (%d)", size); + return false; + } return logi_dj_dj_event(hdev, report, data, size); case REPORT_ID_HIDPP_SHORT: - /* intentional fallthrough */ + if (size != HIDPP_REPORT_SHORT_LENGTH) { + dev_err(&hdev->dev, + "Short HID++ report of bad size (%d)", size); + return false; + } + return logi_dj_hidpp_event(hdev, report, data, size); case REPORT_ID_HIDPP_LONG: + if (size != HIDPP_REPORT_LONG_LENGTH) { + dev_err(&hdev->dev, + "Long HID++ report of bad size (%d)", size); + return false; + } return logi_dj_hidpp_event(hdev, report, data, size); } diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c index 2f420c0..a93cefe 100644 --- a/drivers/hid/hid-logitech-hidpp.c +++ b/drivers/hid/hid-logitech-hidpp.c @@ -282,6 +282,33 @@ static inline bool hidpp_report_is_connect_event(struct hidpp_report *report) (report->rap.sub_id == 0x41); } +/** + * hidpp_prefix_name() prefixes the current given name with "Logitech ". + */ +static void hidpp_prefix_name(char **name, int name_length) +{ +#define PREFIX_LENGTH 9 /* "Logitech " */ + + int new_length; + char *new_name; + + if (name_length > PREFIX_LENGTH && + strncmp(*name, "Logitech ", PREFIX_LENGTH) == 0) + /* The prefix has is already in the name */ + return; + + new_length = PREFIX_LENGTH + name_length; + new_name = kzalloc(new_length, GFP_KERNEL); + if (!new_name) + return; + + snprintf(new_name, new_length, "Logitech %s", *name); + + kfree(*name); + + *name = new_name; +} + /* -------------------------------------------------------------------------- */ /* HIDP++ 1.0 commands */ /* -------------------------------------------------------------------------- */ @@ -321,6 +348,10 @@ static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev) return NULL; memcpy(name, &response.rap.params[2], len); + + /* include the terminating '\0' */ + hidpp_prefix_name(&name, len + 1); + return name; } @@ -498,6 +529,9 @@ static char *hidpp_get_device_name(struct hidpp_device *hidpp) index += ret; } + /* include the terminating '\0' */ + hidpp_prefix_name(&name, __name_length + 1); + return name; } @@ -794,18 +828,25 @@ static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size) switch (data[0]) { case 0x02: + if (size < 2) { + hid_err(hdev, "Received HID report of bad size (%d)", + size); + return 1; + } if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) { input_event(wd->input, EV_KEY, BTN_LEFT, !!(data[1] & 0x01)); input_event(wd->input, EV_KEY, BTN_RIGHT, !!(data[1] & 0x02)); input_sync(wd->input); + return 0; } else { if (size < 21) return 1; return wtp_mouse_raw_xy_event(hidpp, &data[7]); } case REPORT_ID_HIDPP_LONG: + /* size is already checked in hidpp_raw_event. 
*/ if ((report->fap.feature_index != wd->mt_feature_index) || (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY)) return 1; diff --git a/drivers/hid/hid-roccat-pyra.c b/drivers/hid/hid-roccat-pyra.c index 1a07e07..47d7e74 100644 --- a/drivers/hid/hid-roccat-pyra.c +++ b/drivers/hid/hid-roccat-pyra.c @@ -35,6 +35,8 @@ static struct class *pyra_class; static void profile_activated(struct pyra_device *pyra, unsigned int new_profile) { + if (new_profile >= ARRAY_SIZE(pyra->profile_settings)) + return; pyra->actual_profile = new_profile; pyra->actual_cpi = pyra->profile_settings[pyra->actual_profile].y_cpi; } @@ -257,9 +259,11 @@ static ssize_t pyra_sysfs_write_settings(struct file *fp, if (off != 0 || count != PYRA_SIZE_SETTINGS) return -EINVAL; - mutex_lock(&pyra->pyra_lock); - settings = (struct pyra_settings const *)buf; + if (settings->startup_profile >= ARRAY_SIZE(pyra->profile_settings)) + return -EINVAL; + + mutex_lock(&pyra->pyra_lock); retval = pyra_set_settings(usb_dev, settings); if (retval) { diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c index d32037c..d43e967 100644 --- a/drivers/hid/i2c-hid/i2c-hid.c +++ b/drivers/hid/i2c-hid/i2c-hid.c @@ -706,12 +706,7 @@ static int i2c_hid_start(struct hid_device *hid) static void i2c_hid_stop(struct hid_device *hid) { - struct i2c_client *client = hid->driver_data; - struct i2c_hid *ihid = i2c_get_clientdata(client); - hid->claimed = 0; - - i2c_hid_free_buffers(ihid); } static int i2c_hid_open(struct hid_device *hid) diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c index dc89be9..b27b3d3 100644 --- a/drivers/hid/usbhid/hid-quirks.c +++ b/drivers/hid/usbhid/hid-quirks.c @@ -124,6 +124,7 @@ static const struct hid_blacklist { { USB_VENDOR_ID_CHICONY, USB_DEVICE_ID_CHICONY_WIRELESS, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_SIGMA_MICRO, USB_DEVICE_ID_SIGMA_MICRO_KEYBOARD, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X, HID_QUIRK_MULTI_INPUT }, + { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_MOUSEPEN_I608X_2, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_KYE, USB_DEVICE_ID_KYE_EASYPEN_M610X, HID_QUIRK_MULTI_INPUT }, { USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_DUOSENSE, HID_QUIRK_NO_INIT_REPORTS }, { USB_VENDOR_ID_SEMICO, USB_DEVICE_ID_SEMICO_USB_KEYKOARD, HID_QUIRK_NO_INIT_REPORTS }, diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c index e37412d..b99de00 100644 --- a/drivers/iio/adc/ad799x.c +++ b/drivers/iio/adc/ad799x.c @@ -143,9 +143,15 @@ static int ad799x_write_config(struct ad799x_state *st, u16 val) case ad7998: return i2c_smbus_write_word_swapped(st->client, AD7998_CONF_REG, val); - default: + case ad7992: + case ad7993: + case ad7994: return i2c_smbus_write_byte_data(st->client, AD7998_CONF_REG, val); + default: + /* Will be written when doing a conversion */ + st->config = val; + return 0; } } @@ -155,8 +161,13 @@ static int ad799x_read_config(struct ad799x_state *st) case ad7997: case ad7998: return i2c_smbus_read_word_swapped(st->client, AD7998_CONF_REG); - default: + case ad7992: + case ad7993: + case ad7994: return i2c_smbus_read_byte_data(st->client, AD7998_CONF_REG); + default: + /* No readback support */ + return st->config; } } diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c index 866fe90..90c8cb7 100644 --- a/drivers/iio/inkern.c +++ b/drivers/iio/inkern.c @@ -449,6 +449,9 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2, if (val2 == NULL) val2 = &unused; + 
if(!iio_channel_has_info(chan->channel, info)) + return -EINVAL; + if (chan->indio_dev->info->read_raw_multi) { ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev, chan->channel, INDIO_MAX_RAW_ELEMENTS, diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 57ecc5b..9117b7a 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -1114,7 +1114,8 @@ static int mlx4_ib_tunnel_steer_add(struct ib_qp *qp, struct ib_flow_attr *flow_ struct mlx4_dev *dev = to_mdev(qp->device)->dev; int err = 0; - if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ ib_flow = flow_attr + 1; diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c index f2b9780..77ecf6d 100644 --- a/drivers/input/mouse/elantech.c +++ b/drivers/input/mouse/elantech.c @@ -1520,6 +1520,8 @@ static int elantech_set_properties(struct elantech_data *etd) case 7: case 8: case 9: + case 10: + case 13: etd->hw_version = 4; break; default: diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index c66d1b5..764857b 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -415,6 +415,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = { }, }, { + /* Acer Aspire 7738 */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Acer"), + DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 7738"), + }, + }, + { /* Gericom Bellagio */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Gericom"), @@ -745,6 +752,35 @@ static const struct dmi_system_id __initconst i8042_dmi_dritek_table[] = { { } }; +/* + * Some laptops need keyboard reset before probing for the trackpad to get + * it detected, initialised & finally work. + */ +static const struct dmi_system_id __initconst i8042_dmi_kbdreset_table[] = { + { + /* Gigabyte P35 v2 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P35V2"), + }, + }, + { + /* Aorus branded Gigabyte X3 Plus - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "X3"), + }, + }, + { + /* Gigabyte P34 - Elantech touchpad */ + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "GIGABYTE"), + DMI_MATCH(DMI_PRODUCT_NAME, "P34"), + }, + }, + { } +}; + #endif /* CONFIG_X86 */ #ifdef CONFIG_PNP @@ -1040,6 +1076,9 @@ static int __init i8042_platform_init(void) if (dmi_check_system(i8042_dmi_dritek_table)) i8042_dritek = true; + if (dmi_check_system(i8042_dmi_kbdreset_table)) + i8042_kbdreset = true; + /* * A20 was already enabled during early kernel init. 
But some buggy * BIOSes (in MSI Laptops) require A20 to be enabled using 8042 to diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c index 924e4bf..986a71c 100644 --- a/drivers/input/serio/i8042.c +++ b/drivers/input/serio/i8042.c @@ -67,6 +67,10 @@ static bool i8042_notimeout; module_param_named(notimeout, i8042_notimeout, bool, 0); MODULE_PARM_DESC(notimeout, "Ignore timeouts signalled by i8042"); +static bool i8042_kbdreset; +module_param_named(kbdreset, i8042_kbdreset, bool, 0); +MODULE_PARM_DESC(kbdreset, "Reset device connected to KBD port"); + #ifdef CONFIG_X86 static bool i8042_dritek; module_param_named(dritek, i8042_dritek, bool, 0); @@ -790,6 +794,16 @@ static int __init i8042_check_aux(void) return -1; /* + * Reset keyboard (needed on some laptops to successfully detect + * touchpad, e.g., some Gigabyte laptop models with Elantech + * touchpads). + */ + if (i8042_kbdreset) { + pr_warn("Attempting to reset device connected to KBD port\n"); + i8042_kbd_write(NULL, (unsigned char) 0xff); + } + +/* * Test AUX IRQ delivery to make sure BIOS did not grab the IRQ and * used it for a PCI card or somethig else. */ diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c index 1232336..40dfbc0 100644 --- a/drivers/iommu/intel-iommu.c +++ b/drivers/iommu/intel-iommu.c @@ -4029,14 +4029,6 @@ static int device_notifier(struct notifier_block *nb, if (action != BUS_NOTIFY_REMOVED_DEVICE) return 0; - /* - * If the device is still attached to a device driver we can't - * tear down the domain yet as DMA mappings may still be in use. - * Wait for the BUS_NOTIFY_UNBOUND_DRIVER event to do that. - */ - if (action == BUS_NOTIFY_DEL_DEVICE && dev->driver != NULL) - return 0; - domain = find_domain(dev); if (!domain) return 0; @@ -4428,6 +4420,10 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, domain_remove_one_dev_info(old_domain, dev); else domain_remove_dev_info(old_domain); + + if (!domain_type_is_vm_or_si(old_domain) && + list_empty(&old_domain->devices)) + domain_exit(old_domain); } } diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c index 68dfb0f..7486931 100644 --- a/drivers/iommu/ipmmu-vmsa.c +++ b/drivers/iommu/ipmmu-vmsa.c @@ -558,7 +558,7 @@ static pmd_t *ipmmu_alloc_pmd(struct ipmmu_vmsa_device *mmu, pgd_t *pgd, static u64 ipmmu_page_prot(unsigned int prot, u64 type) { - u64 pgprot = ARM_VMSA_PTE_XN | ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF + u64 pgprot = ARM_VMSA_PTE_nG | ARM_VMSA_PTE_AF | ARM_VMSA_PTE_SH_IS | ARM_VMSA_PTE_AP_UNPRIV | ARM_VMSA_PTE_NS | type; @@ -568,8 +568,8 @@ static u64 ipmmu_page_prot(unsigned int prot, u64 type) if (prot & IOMMU_CACHE) pgprot |= IMMAIR_ATTR_IDX_WBRWA << ARM_VMSA_PTE_ATTRINDX_SHIFT; - if (prot & IOMMU_EXEC) - pgprot &= ~ARM_VMSA_PTE_XN; + if (prot & IOMMU_NOEXEC) + pgprot |= ARM_VMSA_PTE_XN; else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) /* If no access create a faulting entry to avoid TLB fills. 
*/ pgprot &= ~ARM_VMSA_PTE_PAGE; diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c index b2023af..6a8b1ec 100644 --- a/drivers/iommu/rockchip-iommu.c +++ b/drivers/iommu/rockchip-iommu.c @@ -1009,7 +1009,6 @@ static struct platform_driver rk_iommu_driver = { .remove = rk_iommu_remove, .driver = { .name = "rk_iommu", - .owner = THIS_MODULE, .of_match_table = of_match_ptr(rk_iommu_dt_ids), }, }; diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c index a82e542..0b38060 100644 --- a/drivers/isdn/hardware/eicon/message.c +++ b/drivers/isdn/hardware/eicon/message.c @@ -4880,7 +4880,7 @@ static void sig_ind(PLCI *plci) byte SS_Ind[] = "\x05\x02\x00\x02\x00\x00"; /* Hold_Ind struct*/ byte CF_Ind[] = "\x09\x02\x00\x06\x00\x00\x00\x00\x00\x00"; byte Interr_Err_Ind[] = "\x0a\x02\x00\x07\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"; - byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\0x00\0x00\0x00\0x00"; + byte CONF_Ind[] = "\x09\x16\x00\x06\x00\x00\x00\x00\x00\x00"; byte force_mt_info = false; byte dir; dword d; diff --git a/drivers/leds/leds-netxbig.c b/drivers/leds/leds-netxbig.c index 26515c2..25e4197 100644 --- a/drivers/leds/leds-netxbig.c +++ b/drivers/leds/leds-netxbig.c @@ -330,18 +330,18 @@ create_netxbig_led(struct platform_device *pdev, led_dat->sata = 0; led_dat->cdev.brightness = LED_OFF; led_dat->cdev.flags |= LED_CORE_SUSPENDRESUME; - /* - * If available, expose the SATA activity blink capability through - * a "sata" sysfs attribute. - */ - if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) - led_dat->cdev.groups = netxbig_led_groups; led_dat->mode_addr = template->mode_addr; led_dat->mode_val = template->mode_val; led_dat->bright_addr = template->bright_addr; led_dat->bright_max = (1 << pdata->gpio_ext->num_data) - 1; led_dat->timer = pdata->timer; led_dat->num_timer = pdata->num_timer; + /* + * If available, expose the SATA activity blink capability through + * a "sata" sysfs attribute. 
+ */ + if (led_dat->mode_val[NETXBIG_LED_SATA] != NETXBIG_LED_INVALID_MODE) + led_dat->cdev.groups = netxbig_led_groups; return led_classdev_register(&pdev->dev, &led_dat->cdev); } diff --git a/drivers/mcb/mcb-internal.h b/drivers/mcb/mcb-internal.h index f956ef2..fb7493d 100644 --- a/drivers/mcb/mcb-internal.h +++ b/drivers/mcb/mcb-internal.h @@ -7,6 +7,7 @@ #define PCI_DEVICE_ID_MEN_CHAMELEON 0x4d45 #define CHAMELEON_FILENAME_LEN 12 #define CHAMELEONV2_MAGIC 0xabce +#define CHAM_HEADER_SIZE 0x200 enum chameleon_descriptor_type { CHAMELEON_DTYPE_GENERAL = 0x0, diff --git a/drivers/mcb/mcb-pci.c b/drivers/mcb/mcb-pci.c index b591819..5e1bd5d 100644 --- a/drivers/mcb/mcb-pci.c +++ b/drivers/mcb/mcb-pci.c @@ -17,6 +17,7 @@ struct priv { struct mcb_bus *bus; + phys_addr_t mapbase; void __iomem *base; }; @@ -31,8 +32,8 @@ static int mcb_pci_get_irq(struct mcb_device *mdev) static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { + struct resource *res; struct priv *priv; - phys_addr_t mapbase; int ret; int num_cells; unsigned long flags; @@ -47,19 +48,21 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) return -ENODEV; } - mapbase = pci_resource_start(pdev, 0); - if (!mapbase) { + priv->mapbase = pci_resource_start(pdev, 0); + if (!priv->mapbase) { dev_err(&pdev->dev, "No PCI resource\n"); goto err_start; } - ret = pci_request_region(pdev, 0, KBUILD_MODNAME); - if (ret) { - dev_err(&pdev->dev, "Failed to request PCI BARs\n"); + res = request_mem_region(priv->mapbase, CHAM_HEADER_SIZE, + KBUILD_MODNAME); + if (IS_ERR(res)) { + dev_err(&pdev->dev, "Failed to request PCI memory\n"); + ret = PTR_ERR(res); goto err_start; } - priv->base = pci_iomap(pdev, 0, 0); + priv->base = ioremap(priv->mapbase, CHAM_HEADER_SIZE); if (!priv->base) { dev_err(&pdev->dev, "Cannot ioremap\n"); ret = -ENOMEM; @@ -84,7 +87,7 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) priv->bus->get_irq = mcb_pci_get_irq; - ret = chameleon_parse_cells(priv->bus, mapbase, priv->base); + ret = chameleon_parse_cells(priv->bus, priv->mapbase, priv->base); if (ret < 0) goto err_drvdata; num_cells = ret; @@ -93,8 +96,10 @@ static int mcb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) mcb_bus_add_devices(priv->bus); + return 0; + err_drvdata: - pci_iounmap(pdev, priv->base); + iounmap(priv->base); err_ioremap: pci_release_region(pdev, 0); err_start: @@ -107,6 +112,10 @@ static void mcb_pci_remove(struct pci_dev *pdev) struct priv *priv = pci_get_drvdata(pdev); mcb_release_bus(priv->bus); + + iounmap(priv->base); + release_region(priv->mapbase, CHAM_HEADER_SIZE); + pci_disable_device(pdev); } static const struct pci_device_id mcb_pci_tbl[] = { diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c index 52a0c2f..ae498b5 100644 --- a/drivers/mfd/da9052-core.c +++ b/drivers/mfd/da9052-core.c @@ -554,7 +554,8 @@ int da9052_device_init(struct da9052 *da9052, u8 chip_id) return ret; } - ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info, + ret = mfd_add_devices(da9052->dev, PLATFORM_DEVID_AUTO, + da9052_subdev_info, ARRAY_SIZE(da9052_subdev_info), NULL, 0, NULL); if (ret) { dev_err(da9052->dev, "mfd_add_devices failed: %d\n", ret); diff --git a/drivers/mfd/rtsx_usb.c b/drivers/mfd/rtsx_usb.c index dbdd0fa..210d1f8 100644 --- a/drivers/mfd/rtsx_usb.c +++ b/drivers/mfd/rtsx_usb.c @@ -681,21 +681,9 @@ static void rtsx_usb_disconnect(struct usb_interface *intf) #ifdef CONFIG_PM static int rtsx_usb_suspend(struct 
usb_interface *intf, pm_message_t message) { - struct rtsx_ucr *ucr = - (struct rtsx_ucr *)usb_get_intfdata(intf); - dev_dbg(&intf->dev, "%s called with pm message 0x%04x\n", __func__, message.event); - /* - * Call to make sure LED is off during suspend to save more power. - * It is NOT a permanent state and could be turned on anytime later. - * Thus no need to call turn_on when resunming. - */ - mutex_lock(&ucr->dev_mutex); - rtsx_usb_turn_off_led(ucr); - mutex_unlock(&ucr->dev_mutex); - return 0; } diff --git a/drivers/mfd/tps65218.c b/drivers/mfd/tps65218.c index 0d256cb..d6b7643 100644 --- a/drivers/mfd/tps65218.c +++ b/drivers/mfd/tps65218.c @@ -125,10 +125,21 @@ int tps65218_clear_bits(struct tps65218 *tps, unsigned int reg, } EXPORT_SYMBOL_GPL(tps65218_clear_bits); +static const struct regmap_range tps65218_yes_ranges[] = { + regmap_reg_range(TPS65218_REG_INT1, TPS65218_REG_INT2), + regmap_reg_range(TPS65218_REG_STATUS, TPS65218_REG_STATUS), +}; + +static const struct regmap_access_table tps65218_volatile_table = { + .yes_ranges = tps65218_yes_ranges, + .n_yes_ranges = ARRAY_SIZE(tps65218_yes_ranges), +}; + static struct regmap_config tps65218_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, + .volatile_table = &tps65218_volatile_table, }; static const struct regmap_irq tps65218_irqs[] = { @@ -193,6 +204,7 @@ static struct regmap_irq_chip tps65218_irq_chip = { .num_regs = 2, .mask_base = TPS65218_REG_INT_MASK1, + .status_base = TPS65218_REG_INT1, }; static const struct of_device_id of_tps65218_match_table[] = { diff --git a/drivers/misc/cxl/context.c b/drivers/misc/cxl/context.c index 51fd6b5..d1b55fe 100644 --- a/drivers/misc/cxl/context.c +++ b/drivers/misc/cxl/context.c @@ -100,6 +100,46 @@ int cxl_context_init(struct cxl_context *ctx, struct cxl_afu *afu, bool master, return 0; } +static int cxl_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct cxl_context *ctx = vma->vm_file->private_data; + unsigned long address = (unsigned long)vmf->virtual_address; + u64 area, offset; + + offset = vmf->pgoff << PAGE_SHIFT; + + pr_devel("%s: pe: %i address: 0x%lx offset: 0x%llx\n", + __func__, ctx->pe, address, offset); + + if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { + area = ctx->afu->psn_phys; + if (offset > ctx->afu->adapter->ps_size) + return VM_FAULT_SIGBUS; + } else { + area = ctx->psn_phys; + if (offset > ctx->psn_size) + return VM_FAULT_SIGBUS; + } + + mutex_lock(&ctx->status_mutex); + + if (ctx->status != STARTED) { + mutex_unlock(&ctx->status_mutex); + pr_devel("%s: Context not started, failing problem state access\n", __func__); + return VM_FAULT_SIGBUS; + } + + vm_insert_pfn(vma, address, (area + offset) >> PAGE_SHIFT); + + mutex_unlock(&ctx->status_mutex); + + return VM_FAULT_NOPAGE; +} + +static const struct vm_operations_struct cxl_mmap_vmops = { + .fault = cxl_mmap_fault, +}; + /* * Map a per-context mmio space into the given vma. 
*/ @@ -108,26 +148,25 @@ int cxl_context_iomap(struct cxl_context *ctx, struct vm_area_struct *vma) u64 len = vma->vm_end - vma->vm_start; len = min(len, ctx->psn_size); - if (ctx->afu->current_mode == CXL_MODE_DEDICATED) { - vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - return vm_iomap_memory(vma, ctx->afu->psn_phys, ctx->afu->adapter->ps_size); - } + if (ctx->afu->current_mode != CXL_MODE_DEDICATED) { + /* make sure there is a valid per process space for this AFU */ + if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { + pr_devel("AFU doesn't support mmio space\n"); + return -EINVAL; + } - /* make sure there is a valid per process space for this AFU */ - if ((ctx->master && !ctx->afu->psa) || (!ctx->afu->pp_psa)) { - pr_devel("AFU doesn't support mmio space\n"); - return -EINVAL; + /* Can't mmap until the AFU is enabled */ + if (!ctx->afu->enabled) + return -EBUSY; } - /* Can't mmap until the AFU is enabled */ - if (!ctx->afu->enabled) - return -EBUSY; - pr_devel("%s: mmio physical: %llx pe: %i master:%i\n", __func__, ctx->psn_phys, ctx->pe , ctx->master); + vma->vm_flags |= VM_IO | VM_PFNMAP; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - return vm_iomap_memory(vma, ctx->psn_phys, len); + vma->vm_ops = &cxl_mmap_vmops; + return 0; } /* @@ -150,12 +189,6 @@ static void __detach_context(struct cxl_context *ctx) afu_release_irqs(ctx); flush_work(&ctx->fault_work); /* Only needed for dedicated process */ wake_up_all(&ctx->wq); - - /* Release Problem State Area mapping */ - mutex_lock(&ctx->mapping_lock); - if (ctx->mapping) - unmap_mapping_range(ctx->mapping, 0, 0, 1); - mutex_unlock(&ctx->mapping_lock); } /* @@ -184,6 +217,17 @@ void cxl_context_detach_all(struct cxl_afu *afu) * created and torn down after the IDR removed */ __detach_context(ctx); + + /* + * We are force detaching - remove any active PSA mappings so + * userspace cannot interfere with the card if it comes back. + * Easiest way to exercise this is to unbind and rebind the + * driver via sysfs while it is in use. + */ + mutex_lock(&ctx->mapping_lock); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, 0, 0, 1); + mutex_unlock(&ctx->mapping_lock); } mutex_unlock(&afu->contexts_lock); } diff --git a/drivers/misc/cxl/file.c b/drivers/misc/cxl/file.c index e9f2f10..b15d811 100644 --- a/drivers/misc/cxl/file.c +++ b/drivers/misc/cxl/file.c @@ -140,18 +140,20 @@ static long afu_ioctl_start_work(struct cxl_context *ctx, pr_devel("%s: pe: %i\n", __func__, ctx->pe); - mutex_lock(&ctx->status_mutex); - if (ctx->status != OPENED) { - rc = -EIO; - goto out; - } - + /* Do this outside the status_mutex to avoid a circular dependency with + * the locking in cxl_mmap_fault() */ if (copy_from_user(&work, uwork, sizeof(struct cxl_ioctl_start_work))) { rc = -EFAULT; goto out; } + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { + rc = -EIO; + goto out; + } + /* * if any of the reserved fields are set or any of the unused * flags are set it's invalid diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c index ff27550..06ff0a2 100644 --- a/drivers/misc/mei/hw-me.c +++ b/drivers/misc/mei/hw-me.c @@ -234,6 +234,18 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable) struct mei_me_hw *hw = to_me_hw(dev); u32 hcsr = mei_hcsr_read(hw); + /* H_RST may be found lit before reset is started, + * for example if preceding reset flow hasn't completed. 
+ * In that case asserting H_RST will be ignored, therefore + * we need to clean H_RST bit to start a successful reset sequence. + */ + if ((hcsr & H_RST) == H_RST) { + dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr); + hcsr &= ~H_RST; + mei_me_reg_write(hw, H_CSR, hcsr); + hcsr = mei_hcsr_read(hw); + } + hcsr |= H_RST | H_IG | H_IS; if (intr_enable) diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c index e3e56d3..970314e 100644 --- a/drivers/mmc/host/sdhci-acpi.c +++ b/drivers/mmc/host/sdhci-acpi.c @@ -247,6 +247,7 @@ static const struct sdhci_acpi_uid_slot sdhci_acpi_uids[] = { { "INT33BB" , "3" , &sdhci_acpi_slot_int_sd }, { "INT33C6" , NULL, &sdhci_acpi_slot_int_sdio }, { "INT3436" , NULL, &sdhci_acpi_slot_int_sdio }, + { "INT344D" , NULL, &sdhci_acpi_slot_int_sdio }, { "PNP0D40" }, { }, }; @@ -257,6 +258,7 @@ static const struct acpi_device_id sdhci_acpi_ids[] = { { "INT33BB" }, { "INT33C6" }, { "INT3436" }, + { "INT344D" }, { "PNP0D40" }, { }, }; diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c index 0342775..4f38554 100644 --- a/drivers/mmc/host/sdhci-pci.c +++ b/drivers/mmc/host/sdhci-pci.c @@ -993,6 +993,31 @@ static const struct pci_device_id pci_ids[] = { .subdevice = PCI_ANY_ID, .driver_data = (kernel_ulong_t)&sdhci_intel_mrfl_mmc, }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_EMMC, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_emmc, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_SDIO, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sdio, + }, + + { + .vendor = PCI_VENDOR_ID_INTEL, + .device = PCI_DEVICE_ID_INTEL_SPT_SD, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .driver_data = (kernel_ulong_t)&sdhci_intel_byt_sd, + }, + { .vendor = PCI_VENDOR_ID_O2, .device = PCI_DEVICE_ID_O2_8120, diff --git a/drivers/mmc/host/sdhci-pci.h b/drivers/mmc/host/sdhci-pci.h index d57c3d1..1ec684d 100644 --- a/drivers/mmc/host/sdhci-pci.h +++ b/drivers/mmc/host/sdhci-pci.h @@ -21,6 +21,9 @@ #define PCI_DEVICE_ID_INTEL_CLV_EMMC0 0x08e5 #define PCI_DEVICE_ID_INTEL_CLV_EMMC1 0x08e6 #define PCI_DEVICE_ID_INTEL_QRK_SD 0x08A7 +#define PCI_DEVICE_ID_INTEL_SPT_EMMC 0x9d2b +#define PCI_DEVICE_ID_INTEL_SPT_SDIO 0x9d2c +#define PCI_DEVICE_ID_INTEL_SPT_SD 0x9d2d /* * PCI registers diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c index 4523887..ca3424e 100644 --- a/drivers/mmc/host/sdhci-pxav3.c +++ b/drivers/mmc/host/sdhci-pxav3.c @@ -300,13 +300,6 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (IS_ERR(host)) return PTR_ERR(host); - if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { - ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); - if (ret < 0) - goto err_mbus_win; - } - - pltfm_host = sdhci_priv(host); pltfm_host->priv = pxa; @@ -325,6 +318,12 @@ static int sdhci_pxav3_probe(struct platform_device *pdev) if (!IS_ERR(pxa->clk_core)) clk_prepare_enable(pxa->clk_core); + if (of_device_is_compatible(np, "marvell,armada-380-sdhci")) { + ret = mv_conf_mbus_windows(pdev, mv_mbus_dram_info()); + if (ret < 0) + goto err_mbus_win; + } + /* enable 1/8V DDR capable */ host->mmc->caps |= MMC_CAP_1_8V_DDR; @@ -396,11 +395,11 @@ err_add_host: pm_runtime_disable(&pdev->dev); err_of_parse: err_cd_req: +err_mbus_win: clk_disable_unprepare(pxa->clk_io); if (!IS_ERR(pxa->clk_core)) clk_disable_unprepare(pxa->clk_core); 
err_clk_get: -err_mbus_win: sdhci_pltfm_free(pdev); return ret; } diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c index cbb245b..f1a488e 100644 --- a/drivers/mmc/host/sdhci.c +++ b/drivers/mmc/host/sdhci.c @@ -259,8 +259,6 @@ static void sdhci_reinit(struct sdhci_host *host) del_timer_sync(&host->tuning_timer); host->flags &= ~SDHCI_NEEDS_RETUNING; - host->mmc->max_blk_count = - (host->quirks & SDHCI_QUIRK_NO_MULTIBLOCK) ? 1 : 65535; } sdhci_enable_card_detection(host); } @@ -1273,6 +1271,12 @@ static void sdhci_set_power(struct sdhci_host *host, unsigned char mode, spin_unlock_irq(&host->lock); mmc_regulator_set_ocr(mmc, mmc->supply.vmmc, vdd); spin_lock_irq(&host->lock); + + if (mode != MMC_POWER_OFF) + sdhci_writeb(host, SDHCI_POWER_ON, SDHCI_POWER_CONTROL); + else + sdhci_writeb(host, 0, SDHCI_POWER_CONTROL); + return; } @@ -1353,6 +1357,8 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) sdhci_runtime_pm_get(host); + present = mmc_gpio_get_cd(host->mmc); + spin_lock_irqsave(&host->lock, flags); WARN_ON(host->mrq != NULL); @@ -1381,7 +1387,6 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq) * zero: cd-gpio is used, and card is removed * one: cd-gpio is used, and card is present */ - present = mmc_gpio_get_cd(host->mmc); if (present < 0) { /* If polling, assume that the card is always present. */ if (host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION) @@ -1880,6 +1885,18 @@ static int sdhci_card_busy(struct mmc_host *mmc) return !(present_state & SDHCI_DATA_LVL_MASK); } +static int sdhci_prepare_hs400_tuning(struct mmc_host *mmc, struct mmc_ios *ios) +{ + struct sdhci_host *host = mmc_priv(mmc); + unsigned long flags; + + spin_lock_irqsave(&host->lock, flags); + host->flags |= SDHCI_HS400_TUNING; + spin_unlock_irqrestore(&host->lock, flags); + + return 0; +} + static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) { struct sdhci_host *host = mmc_priv(mmc); @@ -1887,10 +1904,18 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) int tuning_loop_counter = MAX_TUNING_LOOP; int err = 0; unsigned long flags; + unsigned int tuning_count = 0; + bool hs400_tuning; sdhci_runtime_pm_get(host); spin_lock_irqsave(&host->lock, flags); + hs400_tuning = host->flags & SDHCI_HS400_TUNING; + host->flags &= ~SDHCI_HS400_TUNING; + + if (host->tuning_mode == SDHCI_TUNING_MODE_1) + tuning_count = host->tuning_count; + /* * The Host Controller needs tuning only in case of SDR104 mode * and for SDR50 mode when Use Tuning for SDR50 is set in the @@ -1899,8 +1924,20 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) * tuning function has to be executed. */ switch (host->timing) { + /* HS400 tuning is done in HS200 mode */ case MMC_TIMING_MMC_HS400: + err = -EINVAL; + goto out_unlock; + case MMC_TIMING_MMC_HS200: + /* + * Periodic re-tuning for HS400 is not expected to be needed, so + * disable it here. + */ + if (hs400_tuning) + tuning_count = 0; + break; + case MMC_TIMING_UHS_SDR104: break; @@ -1911,9 +1948,7 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) /* FALLTHROUGH */ default: - spin_unlock_irqrestore(&host->lock, flags); - sdhci_runtime_pm_put(host); - return 0; + goto out_unlock; } if (host->ops->platform_execute_tuning) { @@ -2037,24 +2072,11 @@ static int sdhci_execute_tuning(struct mmc_host *mmc, u32 opcode) } out: - /* - * If this is the very first time we are here, we start the retuning - * timer. 
Since only during the first time, SDHCI_NEEDS_RETUNING - * flag won't be set, we check this condition before actually starting - * the timer. - */ - if (!(host->flags & SDHCI_NEEDS_RETUNING) && host->tuning_count && - (host->tuning_mode == SDHCI_TUNING_MODE_1)) { + host->flags &= ~SDHCI_NEEDS_RETUNING; + + if (tuning_count) { host->flags |= SDHCI_USING_RETUNING_TIMER; - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); - /* Tuning mode 1 limits the maximum data length to 4MB */ - mmc->max_blk_count = (4 * 1024 * 1024) / mmc->max_blk_size; - } else if (host->flags & SDHCI_USING_RETUNING_TIMER) { - host->flags &= ~SDHCI_NEEDS_RETUNING; - /* Reload the new initial value for timer */ - mod_timer(&host->tuning_timer, jiffies + - host->tuning_count * HZ); + mod_timer(&host->tuning_timer, jiffies + tuning_count * HZ); } /* @@ -2070,6 +2092,7 @@ out: sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); +out_unlock: spin_unlock_irqrestore(&host->lock, flags); sdhci_runtime_pm_put(host); @@ -2110,15 +2133,18 @@ static void sdhci_card_event(struct mmc_host *mmc) { struct sdhci_host *host = mmc_priv(mmc); unsigned long flags; + int present; /* First check if client has provided their own card event */ if (host->ops->card_event) host->ops->card_event(host); + present = sdhci_do_get_cd(host); + spin_lock_irqsave(&host->lock, flags); /* Check host->mrq first in case we are runtime suspended */ - if (host->mrq && !sdhci_do_get_cd(host)) { + if (host->mrq && !present) { pr_err("%s: Card removed during transfer!\n", mmc_hostname(host->mmc)); pr_err("%s: Resetting controller.\n", @@ -2142,6 +2168,7 @@ static const struct mmc_host_ops sdhci_ops = { .hw_reset = sdhci_hw_reset, .enable_sdio_irq = sdhci_enable_sdio_irq, .start_signal_voltage_switch = sdhci_start_signal_voltage_switch, + .prepare_hs400_tuning = sdhci_prepare_hs400_tuning, .execute_tuning = sdhci_execute_tuning, .card_event = sdhci_card_event, .card_busy = sdhci_card_busy, @@ -3260,8 +3287,9 @@ int sdhci_add_host(struct sdhci_host *host) mmc->max_segs = SDHCI_MAX_SEGS; /* - * Maximum number of sectors in one transfer. Limited by DMA boundary - * size (512KiB). + * Maximum number of sectors in one transfer. Limited by SDMA boundary + * size (512KiB). Note some tuning modes impose a 4MiB limit, but this + * is less anyway. */ mmc->max_req_size = 524288; diff --git a/drivers/net/can/c_can/c_can_platform.c b/drivers/net/can/c_can/c_can_platform.c index f363972..e36d105 100644 --- a/drivers/net/can/c_can/c_can_platform.c +++ b/drivers/net/can/c_can/c_can_platform.c @@ -103,27 +103,34 @@ static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) mask = 1 << raminit->bits.start | 1 << raminit->bits.done; regmap_read(raminit->syscon, raminit->reg, &ctrl); - /* We clear the done and start bit first. The start bit is + /* We clear the start bit first. The start bit is * looking at the 0 -> transition, but is not self clearing; - * And we clear the init done bit as well. * NOTE: DONE must be written with 1 to clear it. + * We can't clear the DONE bit here using regmap_update_bits() + * as it will bypass the write if initial condition is START:0 DONE:1 + * e.g. on DRA7 which needs START pulse. 
*/ - ctrl &= ~(1 << raminit->bits.start); - ctrl |= 1 << raminit->bits.done; - regmap_write(raminit->syscon, raminit->reg, ctrl); + ctrl &= ~mask; /* START = 0, DONE = 0 */ + regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); - ctrl &= ~(1 << raminit->bits.done); - c_can_hw_raminit_wait_syscon(priv, mask, ctrl); + /* check if START bit is 0. Ignore DONE bit for now + * as it can be either 0 or 1. + */ + c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl); if (enable) { - /* Set start bit and wait for the done bit. */ + /* Clear DONE bit & set START bit. */ ctrl |= 1 << raminit->bits.start; - regmap_write(raminit->syscon, raminit->reg, ctrl); - + /* DONE must be written with 1 to clear it */ + ctrl |= 1 << raminit->bits.done; + regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); + /* prevent further clearing of DONE bit */ + ctrl &= ~(1 << raminit->bits.done); /* clear START bit if start pulse is needed */ if (raminit->needs_pulse) { ctrl &= ~(1 << raminit->bits.start); - regmap_write(raminit->syscon, raminit->reg, ctrl); + regmap_update_bits(raminit->syscon, raminit->reg, + mask, ctrl); } ctrl |= 1 << raminit->bits.done; diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c index 3ec8f6f..847c1f8 100644 --- a/drivers/net/can/dev.c +++ b/drivers/net/can/dev.c @@ -807,10 +807,14 @@ static int can_changelink(struct net_device *dev, if (dev->flags & IFF_UP) return -EBUSY; cm = nla_data(data[IFLA_CAN_CTRLMODE]); - if (cm->flags & ~priv->ctrlmode_supported) + + /* check whether changed bits are allowed to be modified */ + if (cm->mask & ~priv->ctrlmode_supported) return -EOPNOTSUPP; + + /* clear bits to be modified and copy the flag values */ priv->ctrlmode &= ~cm->mask; - priv->ctrlmode |= cm->flags; + priv->ctrlmode |= (cm->flags & cm->mask); /* CAN_CTRLMODE_FD can only be set when driver supports FD */ if (priv->ctrlmode & CAN_CTRLMODE_FD) diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c index d7bc462..2445298 100644 --- a/drivers/net/can/m_can/m_can.c +++ b/drivers/net/can/m_can/m_can.c @@ -955,6 +955,11 @@ static struct net_device *alloc_m_can_dev(void) priv->can.data_bittiming_const = &m_can_data_bittiming_const; priv->can.do_set_mode = m_can_set_mode; priv->can.do_get_berr_counter = m_can_get_berr_counter; + + /* CAN_CTRLMODE_FD_NON_ISO is fixed with M_CAN IP v3.0.1 */ + priv->can.ctrlmode = CAN_CTRLMODE_FD_NON_ISO; + + /* CAN_CTRLMODE_FD_NON_ISO can not be changed with M_CAN IP v3.0.1 */ priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c index 541fb7a..c32cd61 100644 --- a/drivers/net/can/usb/kvaser_usb.c +++ b/drivers/net/can/usb/kvaser_usb.c @@ -520,10 +520,10 @@ static void kvaser_usb_tx_acknowledge(const struct kvaser_usb *dev, skb = alloc_can_err_skb(priv->netdev, &cf); if (skb) { cf->can_id |= CAN_ERR_RESTARTED; - netif_rx(skb); stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); @@ -770,10 +770,9 @@ static void kvaser_usb_rx_error(const struct kvaser_usb *dev, priv->can.state = new_state; - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, @@ -805,10 +804,9 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv, stats->rx_over_errors++; stats->rx_errors++; - 
netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } } @@ -887,10 +885,9 @@ static void kvaser_usb_rx_can_msg(const struct kvaser_usb *dev, cf->can_dlc); } - netif_rx(skb); - stats->rx_packets++; stats->rx_bytes += cf->can_dlc; + netif_rx(skb); } static void kvaser_usb_start_chip_reply(const struct kvaser_usb *dev, @@ -1246,6 +1243,9 @@ static int kvaser_usb_close(struct net_device *netdev) if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); + /* reset tx contexts */ + kvaser_usb_unlink_tx_urbs(priv); + priv->can.state = CAN_STATE_STOPPED; close_candev(priv->netdev); @@ -1294,12 +1294,14 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (!urb) { netdev_err(netdev, "No memory left for URBs\n"); stats->tx_dropped++; - goto nourbmem; + dev_kfree_skb(skb); + return NETDEV_TX_OK; } buf = kmalloc(sizeof(struct kvaser_msg), GFP_ATOMIC); if (!buf) { stats->tx_dropped++; + dev_kfree_skb(skb); goto nobufmem; } @@ -1334,6 +1336,7 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, } } + /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); ret = NETDEV_TX_BUSY; @@ -1364,9 +1367,6 @@ static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index); - skb = NULL; /* set to NULL to avoid double free in - * dev_kfree_skb(skb) */ - atomic_dec(&priv->active_tx_urbs); usb_unanchor_urb(urb); @@ -1388,8 +1388,6 @@ releasebuf: kfree(buf); nobufmem: usb_free_urb(urb); -nourbmem: - dev_kfree_skb(skb); return ret; } @@ -1502,6 +1500,10 @@ static int kvaser_usb_init_one(struct usb_interface *intf, struct kvaser_usb_net_priv *priv; int i, err; + err = kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, channel); + if (err) + return err; + netdev = alloc_candev(sizeof(*priv), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Cannot alloc candev\n"); @@ -1606,9 +1608,6 @@ static int kvaser_usb_probe(struct usb_interface *intf, usb_set_intfdata(intf, dev); - for (i = 0; i < MAX_NET_DEVICES; i++) - kvaser_usb_send_simple_msg(dev, CMD_RESET_CHIP, i); - err = kvaser_usb_get_software_info(dev); if (err) { dev_err(&intf->dev, diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c index 1fcd556..f3470d9 100644 --- a/drivers/net/ethernet/allwinner/sun4i-emac.c +++ b/drivers/net/ethernet/allwinner/sun4i-emac.c @@ -850,8 +850,10 @@ static int emac_probe(struct platform_device *pdev) } db->clk = devm_clk_get(&pdev->dev, NULL); - if (IS_ERR(db->clk)) + if (IS_ERR(db->clk)) { + ret = PTR_ERR(db->clk); goto out; + } clk_prepare_enable(db->clk); diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c index 3498760..760c72c 100644 --- a/drivers/net/ethernet/altera/altera_tse_main.c +++ b/drivers/net/ethernet/altera/altera_tse_main.c @@ -1170,10 +1170,6 @@ tx_request_irq_error: init_error: free_skbufs(dev); alloc_skbuf_error: - if (priv->phydev) { - phy_disconnect(priv->phydev); - priv->phydev = NULL; - } phy_error: return ret; } @@ -1186,12 +1182,9 @@ static int tse_shutdown(struct net_device *dev) int ret; unsigned long int flags; - /* Stop and disconnect the PHY */ - if (priv->phydev) { + /* Stop the PHY */ + if (priv->phydev) phy_stop(priv->phydev); - phy_disconnect(priv->phydev); - priv->phydev = NULL; - } netif_stop_queue(dev); napi_disable(&priv->napi); @@ -1525,6 +1518,10 @@ err_free_netdev: static int 
altera_tse_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); + struct altera_tse_private *priv = netdev_priv(ndev); + + if (priv->phydev) + phy_disconnect(priv->phydev); platform_set_drvdata(pdev, NULL); altera_tse_mdio_destroy(ndev); diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c index e398eda..c8af3ce 100644 --- a/drivers/net/ethernet/atheros/alx/main.c +++ b/drivers/net/ethernet/atheros/alx/main.c @@ -184,15 +184,16 @@ static void alx_schedule_reset(struct alx_priv *alx) schedule_work(&alx->reset_wk); } -static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) +static int alx_clean_rx_irq(struct alx_priv *alx, int budget) { struct alx_rx_queue *rxq = &alx->rxq; struct alx_rrd *rrd; struct alx_buffer *rxb; struct sk_buff *skb; u16 length, rfd_cleaned = 0; + int work = 0; - while (budget > 0) { + while (work < budget) { rrd = &rxq->rrd[rxq->rrd_read_idx]; if (!(rrd->word3 & cpu_to_le32(1 << RRD_UPDATED_SHIFT))) break; @@ -203,7 +204,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) ALX_GET_FIELD(le32_to_cpu(rrd->word0), RRD_NOR) != 1) { alx_schedule_reset(alx); - return 0; + return work; } rxb = &rxq->bufs[rxq->read_idx]; @@ -243,7 +244,7 @@ static bool alx_clean_rx_irq(struct alx_priv *alx, int budget) } napi_gro_receive(&alx->napi, skb); - budget--; + work++; next_pkt: if (++rxq->read_idx == alx->rx_ringsz) @@ -258,21 +259,22 @@ next_pkt: if (rfd_cleaned) alx_refill_rx_ring(alx, GFP_ATOMIC); - return budget > 0; + return work; } static int alx_poll(struct napi_struct *napi, int budget) { struct alx_priv *alx = container_of(napi, struct alx_priv, napi); struct alx_hw *hw = &alx->hw; - bool complete = true; unsigned long flags; + bool tx_complete; + int work; - complete = alx_clean_tx_irq(alx) && - alx_clean_rx_irq(alx, budget); + tx_complete = alx_clean_tx_irq(alx); + work = alx_clean_rx_irq(alx, budget); - if (!complete) - return 1; + if (!tx_complete || work == budget) + return budget; napi_complete(&alx->napi); @@ -284,7 +286,7 @@ static int alx_poll(struct napi_struct *napi, int budget) alx_post_write(hw); - return 0; + return work; } static irqreturn_t alx_intr_handle(struct alx_priv *alx, u32 intr) diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c index 05c6af6..3007d95 100644 --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c @@ -1167,10 +1167,10 @@ static int bgmac_poll(struct napi_struct *napi, int weight) bgmac->int_status = 0; } - if (handled < weight) + if (handled < weight) { napi_complete(napi); - - bgmac_chip_intrs_on(bgmac); + bgmac_chip_intrs_on(bgmac); + } return handled; } @@ -1515,6 +1515,8 @@ static int bgmac_probe(struct bcma_device *core) if (core->bus->sprom.boardflags_lo & BGMAC_BFL_ENETADM) bgmac_warn(bgmac, "Support for ADMtek ethernet switch not implemented\n"); + netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); + err = bgmac_mii_register(bgmac); if (err) { bgmac_err(bgmac, "Cannot register MDIO\n"); @@ -1529,8 +1531,6 @@ static int bgmac_probe(struct bcma_device *core) netif_carrier_off(net_dev); - netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); - return 0; err_mii_unregister: @@ -1549,9 +1549,9 @@ static void bgmac_remove(struct bcma_device *core) { struct bgmac *bgmac = bcma_get_drvdata(core); - netif_napi_del(&bgmac->napi); unregister_netdev(bgmac->net_dev); bgmac_mii_unregister(bgmac); + netif_napi_del(&bgmac->napi); 
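The alx and bgmac hunks above both converge on the standard NAPI contract: the poll callback reports how much work it actually did, and only calls napi_complete() and re-enables device interrupts when that work came in under the budget. A bare-bones sketch of that shape, where struct my_priv, my_clean_rx() and my_irq_enable() are stand-ins for driver-specific pieces:

#include <linux/netdevice.h>

struct my_priv {                                /* hypothetical driver state */
        struct napi_struct napi;
};

int my_clean_rx(struct my_priv *priv, int budget);   /* hypothetical: packets handled, <= budget */
void my_irq_enable(struct my_priv *priv);            /* hypothetical: unmask the device */

static int my_napi_poll(struct napi_struct *napi, int budget)
{
        struct my_priv *priv = container_of(napi, struct my_priv, napi);
        int work = my_clean_rx(priv, budget);

        if (work < budget) {
                napi_complete(napi);            /* leave polling mode first ... */
                my_irq_enable(priv);            /* ... then unmask the interrupt */
        }
        return work;                            /* returning budget keeps NAPI polling */
}

Returning the full budget while interrupts stay masked is what keeps the softirq polling; returning less, after napi_complete(), hands control back to the interrupt path.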
bgmac_dma_free(bgmac); bcma_set_drvdata(core, NULL); free_netdev(bgmac->net_dev); diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c index 553dcd8..96bf01b 100644 --- a/drivers/net/ethernet/broadcom/tg3.c +++ b/drivers/net/ethernet/broadcom/tg3.c @@ -7413,6 +7413,8 @@ static inline void tg3_netif_start(struct tg3 *tp) } static void tg3_irq_quiesce(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) { int i; @@ -7421,8 +7423,12 @@ static void tg3_irq_quiesce(struct tg3 *tp) tp->irq_sync = 1; smp_mb(); + spin_unlock_bh(&tp->lock); + for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); + + spin_lock_bh(&tp->lock); } /* Fully shutdown all tg3 driver activity elsewhere in the system. @@ -9018,6 +9024,8 @@ static void tg3_restore_clk(struct tg3 *tp) /* tp->lock is held. */ static int tg3_chip_reset(struct tg3 *tp) + __releases(tp->lock) + __acquires(tp->lock) { u32 val; void (*write_op)(struct tg3 *, u32, u32); @@ -9073,9 +9081,13 @@ static int tg3_chip_reset(struct tg3 *tp) } smp_mb(); + tg3_full_unlock(tp); + for (i = 0; i < tp->irq_cnt; i++) synchronize_irq(tp->napi[i].irq_vec); + tg3_full_lock(tp, 0); + if (tg3_asic_rev(tp) == ASIC_REV_57780) { val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN; tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS); @@ -10903,11 +10915,13 @@ static void tg3_timer(unsigned long __opaque) { struct tg3 *tp = (struct tg3 *) __opaque; - if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) - goto restart_timer; - spin_lock(&tp->lock); + if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING)) { + spin_unlock(&tp->lock); + goto restart_timer; + } + if (tg3_asic_rev(tp) == ASIC_REV_5717 || tg3_flag(tp, 57765_CLASS)) tg3_chk_missed_msi(tp); @@ -11101,11 +11115,13 @@ static void tg3_reset_task(struct work_struct *work) struct tg3 *tp = container_of(work, struct tg3, reset_task); int err; + rtnl_lock(); tg3_full_lock(tp, 0); if (!netif_running(tp->dev)) { tg3_flag_clear(tp, RESET_TASK_PENDING); tg3_full_unlock(tp); + rtnl_unlock(); return; } @@ -11138,6 +11154,7 @@ out: tg3_phy_start(tp); tg3_flag_clear(tp, RESET_TASK_PENDING); + rtnl_unlock(); } static int tg3_request_irq(struct tg3 *tp, int irq_num) diff --git a/drivers/net/ethernet/cadence/at91_ether.c b/drivers/net/ethernet/cadence/at91_ether.c index 55eb7f2..7ef55f5 100644 --- a/drivers/net/ethernet/cadence/at91_ether.c +++ b/drivers/net/ethernet/cadence/at91_ether.c @@ -340,7 +340,7 @@ static int __init at91ether_probe(struct platform_device *pdev) res = PTR_ERR(lp->pclk); goto err_free_dev; } - clk_enable(lp->pclk); + clk_prepare_enable(lp->pclk); lp->hclk = ERR_PTR(-ENOENT); lp->tx_clk = ERR_PTR(-ENOENT); @@ -406,7 +406,7 @@ static int __init at91ether_probe(struct platform_device *pdev) err_out_unregister_netdev: unregister_netdev(dev); err_disable_clock: - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); err_free_dev: free_netdev(dev); return res; @@ -424,7 +424,7 @@ static int at91ether_remove(struct platform_device *pdev) kfree(lp->mii_bus->irq); mdiobus_free(lp->mii_bus); unregister_netdev(dev); - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); free_netdev(dev); return 0; @@ -440,7 +440,7 @@ static int at91ether_suspend(struct platform_device *pdev, pm_message_t mesg) netif_stop_queue(net_dev); netif_device_detach(net_dev); - clk_disable(lp->pclk); + clk_disable_unprepare(lp->pclk); } return 0; } @@ -451,7 +451,7 @@ static int at91ether_resume(struct platform_device *pdev) struct macb *lp = netdev_priv(net_dev); 
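The tg3 hunks above drop tp->lock around synchronize_irq(), which may sleep, and record that fact with sparse lock annotations. __releases()/__acquires() generate no code; they only tell the static checker that the function is entered with the lock held, drops it across the sleeping call, and re-takes it before returning. A hedged sketch of the same pattern around an invented struct my_dev:

#include <linux/spinlock.h>
#include <linux/interrupt.h>

struct my_dev {                         /* hypothetical */
        spinlock_t lock;
        unsigned int irq;
};

static void my_irq_quiesce_sketch(struct my_dev *md)
        __releases(md->lock)
        __acquires(md->lock)
{
        spin_unlock_bh(&md->lock);
        synchronize_irq(md->irq);       /* may sleep: must not hold a spinlock */
        spin_lock_bh(&md->lock);
}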
if (netif_running(net_dev)) { - clk_enable(lp->pclk); + clk_prepare_enable(lp->pclk); netif_device_attach(net_dev); netif_start_queue(net_dev); diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c index 2215d43..a936ee8 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c @@ -2430,7 +2430,7 @@ static void cfg_queues(struct adapter *adapter) */ n10g = 0; for_each_port(adapter, pidx) - n10g += is_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); + n10g += is_x_10g_port(&adap2pinfo(adapter, pidx)->link_cfg); /* * We default to 1 queue per non-10G port and up to # of cores queues diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c index 21dc9a2..60426cf 100644 --- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c +++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c @@ -323,6 +323,8 @@ int t4vf_port_init(struct adapter *adapter, int pidx) return v; v = be32_to_cpu(port_rpl.u.info.lstatus_to_modtype); + pi->mdio_addr = (v & FW_PORT_CMD_MDIOCAP_F) ? + FW_PORT_CMD_MDIOADDR_G(v) : -1; pi->port_type = FW_PORT_CMD_PTYPE_G(v); pi->mod_type = FW_PORT_MOD_TYPE_NA; diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c index 705f334..b29e027 100644 --- a/drivers/net/ethernet/cisco/enic/enic_main.c +++ b/drivers/net/ethernet/cisco/enic/enic_main.c @@ -1616,7 +1616,7 @@ static int enic_open(struct net_device *netdev) if (vnic_rq_desc_used(&enic->rq[i]) == 0) { netdev_err(netdev, "Unable to alloc receive buffers\n"); err = -ENOMEM; - goto err_out_notify_unset; + goto err_out_free_rq; } } @@ -1649,7 +1649,9 @@ static int enic_open(struct net_device *netdev) return 0; -err_out_notify_unset: +err_out_free_rq: + for (i = 0; i < enic->rq_count; i++) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); enic_dev_notify_unset(enic); err_out_free_intr: enic_free_intr(enic); diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c index a379c3e..13d00a3 100644 --- a/drivers/net/ethernet/dnet.c +++ b/drivers/net/ethernet/dnet.c @@ -398,13 +398,8 @@ static int dnet_poll(struct napi_struct *napi, int budget) * break out of while loop if there are no more * packets waiting */ - if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { - napi_complete(napi); - int_enable = dnet_readl(bp, INTR_ENB); - int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; - dnet_writel(bp, int_enable, INTR_ENB); - return 0; - } + if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) + break; cmd_word = dnet_readl(bp, RX_LEN_FIFO); pkt_len = cmd_word & 0xFFFF; @@ -433,20 +428,17 @@ static int dnet_poll(struct napi_struct *napi, int budget) "size %u.\n", dev->name, pkt_len); } - budget -= npackets; - if (npackets < budget) { /* We processed all packets available. Tell NAPI it can - * stop polling then re-enable rx interrupts */ + * stop polling then re-enable rx interrupts. 
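The enic and sun4i-emac fixes in this stretch are both error-path hygiene: each label in the unwind ladder must undo exactly what was set up before the failure, and the error code has to be assigned before jumping. A generic sketch of the ladder, with alloc_a()/alloc_b()/free_a() as hypothetical setup steps:

#include <linux/device.h>

int alloc_a(struct device *dev);        /* hypothetical setup steps */
int alloc_b(struct device *dev);
void free_a(struct device *dev);

static int my_setup_sketch(struct device *dev)
{
        int err;

        err = alloc_a(dev);
        if (err)
                return err;             /* nothing to unwind yet */

        err = alloc_b(dev);
        if (err)
                goto err_free_a;        /* undo only what already succeeded */

        return 0;

err_free_a:
        free_a(dev);
        return err;                     /* err was assigned before the goto */
}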
+ */ napi_complete(napi); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); - return 0; } - /* There are still packets waiting */ - return 1; + return npackets; } static irqreturn_t dnet_interrupt(int irq, void *dev_id) diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index 41a0a54..d48806b 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -4383,8 +4383,9 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, * distinguish various types of transports (VxLAN, GRE, NVGRE ..). So, offload * is expected to work across all types of IP tunnels once exported. Skyhawk * supports offloads for either VxLAN or NVGRE, exclusively. So we export VxLAN - * offloads in hw_enc_features only when a VxLAN port is added. Note this only - * ensures that other tunnels work fine while VxLAN offloads are not enabled. + * offloads in hw_enc_features only when a VxLAN port is added. If other (non + * VxLAN) tunnels are configured while VxLAN offloads are enabled, offloads for + * those other tunnels are unexported on the fly through ndo_features_check(). * * Skyhawk supports VxLAN offloads only for one UDP dport. So, if the stack * adds more than one port, disable offloads and don't re-enable them again @@ -4463,7 +4464,41 @@ static netdev_features_t be_features_check(struct sk_buff *skb, struct net_device *dev, netdev_features_t features) { - return vxlan_features_check(skb, features); + struct be_adapter *adapter = netdev_priv(dev); + u8 l4_hdr = 0; + + /* The code below restricts offload features for some tunneled packets. + * Offload features for normal (non tunnel) packets are unchanged. + */ + if (!skb->encapsulation || + !(adapter->flags & BE_FLAGS_VXLAN_OFFLOADS)) + return features; + + /* It's an encapsulated packet and VxLAN offloads are enabled. We + * should disable tunnel offload features if it's not a VxLAN packet, + * as tunnel offloads have been enabled only for VxLAN. This is done to + * allow other tunneled traffic like GRE work fine while VxLAN + * offloads are configured in Skyhawk-R. + */ + switch (vlan_get_protocol(skb)) { + case htons(ETH_P_IP): + l4_hdr = ip_hdr(skb)->protocol; + break; + case htons(ETH_P_IPV6): + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; + default: + return features; + } + + if (l4_hdr != IPPROTO_UDP || + skb->inner_protocol_type != ENCAP_TYPE_ETHER || + skb->inner_protocol != htons(ETH_P_TEB) || + skb_inner_mac_header(skb) - skb_transport_header(skb) != + sizeof(struct udphdr) + sizeof(struct vxlanhdr)) + return features & ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + + return features; } #endif diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h index 469691a..4013292 100644 --- a/drivers/net/ethernet/freescale/fec.h +++ b/drivers/net/ethernet/freescale/fec.h @@ -424,6 +424,8 @@ struct bufdesc_ex { * (40ns * 6). 
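The new FEC_QUIRK_SINGLE_MDIO flag above follows the usual quirk-bit pattern: every erratum or board-level property gets one bit in the platform driver_data, and code paths test that bit instead of matching SoC names, so the shared MDIO bus on i.MX28 no longer piggybacks on the unrelated FEC_QUIRK_ENET_MAC flag. A tiny sketch with invented quirk names:

#include <linux/types.h>

/* Hypothetical quirk bits, one per hardware property. */
#define MY_QUIRK_SWAP_FRAME     (1 << 0)   /* controller byte-swaps frames  */
#define MY_QUIRK_SINGLE_MDIO    (1 << 1)   /* second MAC uses MDIO of MAC 0 */

struct my_fec_priv {
        u32 quirks;                        /* copied from driver_data at probe */
        int dev_id;
};

static bool my_uses_shared_mdio(const struct my_fec_priv *fep)
{
        return (fep->quirks & MY_QUIRK_SINGLE_MDIO) && fep->dev_id > 0;
}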
*/ #define FEC_QUIRK_BUG_CAPTURE (1 << 10) +/* Controller has only one MDIO bus */ +#define FEC_QUIRK_SINGLE_MDIO (1 << 11) struct fec_enet_priv_tx_q { int index; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 5ebdf8d..bba8777 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -91,7 +91,8 @@ static struct platform_device_id fec_devtype[] = { .driver_data = 0, }, { .name = "imx28-fec", - .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME, + .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME | + FEC_QUIRK_SINGLE_MDIO, }, { .name = "imx6q-fec", .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT | @@ -1937,7 +1938,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) int err = -ENXIO, i; /* - * The dual fec interfaces are not equivalent with enet-mac. + * The i.MX28 dual fec interfaces are not equal. * Here are the differences: * * - fec0 supports MII & RMII modes while fec1 only supports RMII @@ -1952,7 +1953,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) * mdio interface in board design, and need to be configured by * fec0 mii_bus. */ - if ((fep->quirks & FEC_QUIRK_ENET_MAC) && fep->dev_id > 0) { + if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) { /* fec1 uses fec0 mii_bus */ if (mii_cnt && fec0_mii_bus) { fep->mii_bus = fec0_mii_bus; @@ -2015,7 +2016,7 @@ static int fec_enet_mii_init(struct platform_device *pdev) mii_cnt++; /* save fec0 mii_bus */ - if (fep->quirks & FEC_QUIRK_ENET_MAC) + if (fep->quirks & FEC_QUIRK_SINGLE_MDIO) fec0_mii_bus = fep->mii_bus; return 0; @@ -3129,6 +3130,7 @@ fec_probe(struct platform_device *pdev) pdev->id_entry = of_id->data; fep->quirks = pdev->id_entry->driver_data; + fep->netdev = ndev; fep->num_rx_queues = num_rx_qs; fep->num_tx_queues = num_tx_qs; diff --git a/drivers/net/ethernet/intel/Kconfig b/drivers/net/ethernet/intel/Kconfig index 5b8300a..4d61ef5 100644 --- a/drivers/net/ethernet/intel/Kconfig +++ b/drivers/net/ethernet/intel/Kconfig @@ -281,6 +281,17 @@ config I40E_DCB If unsure, say N. +config I40E_FCOE + bool "Fibre Channel over Ethernet (FCoE)" + default n + depends on I40E && DCB && FCOE + ---help--- + Say Y here if you want to use Fibre Channel over Ethernet (FCoE) + in the driver. This will create new netdev for exclusive FCoE + use with XL710 FCoE offloads enabled. + + If unsure, say N. + config I40EVF tristate "Intel(R) XL710 X710 Virtual Function Ethernet support" depends on PCI_MSI diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c index 781065e..e9c3a87 100644 --- a/drivers/net/ethernet/intel/e100.c +++ b/drivers/net/ethernet/intel/e100.c @@ -1543,7 +1543,7 @@ static int e100_phy_init(struct nic *nic) mdio_write(netdev, nic->mii.phy_id, MII_BMCR, bmcr); } else if ((nic->mac >= mac_82550_D102) || ((nic->flags & ich) && (mdio_read(netdev, nic->mii.phy_id, MII_TPISTATUS) & 0x8000) && - !(nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { + (nic->eeprom[eeprom_cnfg_mdix] & eeprom_mdix_enabled))) { /* enable/disable MDI/MDI-X auto-switching. */ mdio_write(netdev, nic->mii.phy_id, MII_NCONFIG, nic->mii.force_media ? 
0 : NCONFIG_AUTO_SWITCH); diff --git a/drivers/net/ethernet/intel/i40e/Makefile b/drivers/net/ethernet/intel/i40e/Makefile index 4b94ddb..c405819 100644 --- a/drivers/net/ethernet/intel/i40e/Makefile +++ b/drivers/net/ethernet/intel/i40e/Makefile @@ -44,4 +44,4 @@ i40e-objs := i40e_main.o \ i40e_virtchnl_pf.o i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o -i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o +i40e-$(CONFIG_I40E_FCOE) += i40e_fcoe.o diff --git a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c index 433a558..cb0de45 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_debugfs.c +++ b/drivers/net/ethernet/intel/i40e/i40e_debugfs.c @@ -829,7 +829,7 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, if (desc_n >= ring->count || desc_n < 0) { dev_info(&pf->pdev->dev, "descriptor %d not found\n", desc_n); - return; + goto out; } if (!is_rx_ring) { txd = I40E_TX_DESC(ring, desc_n); @@ -855,6 +855,8 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n, } else { dev_info(&pf->pdev->dev, "dump desc rx/tx <vsi_seid> <ring_id> [<desc_n>]\n"); } + +out: kfree(ring); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_osdep.h b/drivers/net/ethernet/intel/i40e/i40e_osdep.h index 045b5c4..ad802dd 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_osdep.h +++ b/drivers/net/ethernet/intel/i40e/i40e_osdep.h @@ -78,7 +78,7 @@ do { \ } while (0) typedef enum i40e_status_code i40e_status; -#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +#ifdef CONFIG_I40E_FCOE #define I40E_FCOE -#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ +#endif #endif /* _I40E_OSDEP_H_ */ diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index 04b4414..cecb340 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -658,6 +658,8 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) return le32_to_cpu(*(volatile __le32 *)head); } +#define WB_STRIDE 0x3 + /** * i40e_clean_tx_irq - Reclaim resources after transmit completes * @tx_ring: tx ring to clean @@ -759,6 +761,18 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) tx_ring->q_vector->tx.total_bytes += total_bytes; tx_ring->q_vector->tx.total_packets += total_packets; + /* check to see if there are any non-cache aligned descriptors + * waiting to be written back, and kick the hardware to force + * them to be written back in case of napi polling + */ + if (budget && + !((i & WB_STRIDE) == WB_STRIDE) && + !test_bit(__I40E_DOWN, &tx_ring->vsi->state) && + (I40E_DESC_UNUSED(tx_ring) != tx_ring->count)) + tx_ring->arm_wb = true; + else + tx_ring->arm_wb = false; + if (check_for_tx_hang(tx_ring) && i40e_check_tx_hang(tx_ring)) { /* schedule immediate reset if we believe we hung */ dev_info(tx_ring->dev, "Detected Tx Unit Hang\n" @@ -777,13 +791,16 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); dev_info(tx_ring->dev, - "tx hang detected on queue %d, resetting adapter\n", + "tx hang detected on queue %d, reset requested\n", tx_ring->queue_index); - tx_ring->netdev->netdev_ops->ndo_tx_timeout(tx_ring->netdev); + /* do not fire the reset immediately, wait for the stack to + * decide we are truly stuck, also prevents every queue from + * simultaneously requesting a reset + */ - /* the adapter is about to reset, no point in enabling stuff */ - return true; + /* the 
adapter is about to reset, no point in enabling polling */ + budget = 1; } netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev, @@ -806,7 +823,25 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) } } - return budget > 0; + return !!budget; +} + +/** + * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors + * @vsi: the VSI we care about + * @q_vector: the vector on which to force writeback + * + **/ +static void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) +{ + u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK | + I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK | + I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK + /* allow 00 to be written to the index */; + + wr32(&vsi->back->hw, + I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1), + val); } /** @@ -1290,9 +1325,7 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, * so the total length of IPv4 header is IHL*4 bytes * The UDP_0 bit *may* bet set if the *inner* header is UDP */ - if (ipv4_tunnel && - (decoded.inner_prot != I40E_RX_PTYPE_INNER_PROT_UDP) && - !(rx_status & (1 << I40E_RX_DESC_STATUS_UDP_0_SHIFT))) { + if (ipv4_tunnel) { skb->transport_header = skb->mac_header + sizeof(struct ethhdr) + (ip_hdr(skb)->ihl * 4); @@ -1302,15 +1335,19 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi, skb->protocol == htons(ETH_P_8021AD)) ? VLAN_HLEN : 0; - rx_udp_csum = udp_csum(skb); - iph = ip_hdr(skb); - csum = csum_tcpudp_magic( - iph->saddr, iph->daddr, - (skb->len - skb_transport_offset(skb)), - IPPROTO_UDP, rx_udp_csum); + if ((ip_hdr(skb)->protocol == IPPROTO_UDP) && + (udp_hdr(skb)->check != 0)) { + rx_udp_csum = udp_csum(skb); + iph = ip_hdr(skb); + csum = csum_tcpudp_magic( + iph->saddr, iph->daddr, + (skb->len - skb_transport_offset(skb)), + IPPROTO_UDP, rx_udp_csum); - if (udp_hdr(skb)->check != csum) - goto checksum_fail; + if (udp_hdr(skb)->check != csum) + goto checksum_fail; + + } /* else its GRE and so no outer UDP header */ } skb->ip_summed = CHECKSUM_UNNECESSARY; @@ -1581,6 +1618,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) struct i40e_vsi *vsi = q_vector->vsi; struct i40e_ring *ring; bool clean_complete = true; + bool arm_wb = false; int budget_per_ring; if (test_bit(__I40E_DOWN, &vsi->state)) { @@ -1591,8 +1629,10 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) /* Since the actual Tx work is minimal, we can give the Tx a larger * budget and be more aggressive about cleaning up the Tx descriptors. */ - i40e_for_each_ring(ring, q_vector->tx) + i40e_for_each_ring(ring, q_vector->tx) { clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit); + arm_wb |= ring->arm_wb; + } /* We attempt to distribute budget to each Rx queue fairly, but don't * allow the budget to go below 1 because that would exit polling early. @@ -1603,8 +1643,11 @@ int i40e_napi_poll(struct napi_struct *napi, int budget) clean_complete &= i40e_clean_rx_irq(ring, budget_per_ring); /* If work not completed, return budget and polling will return */ - if (!clean_complete) + if (!clean_complete) { + if (arm_wb) + i40e_force_wb(vsi, q_vector); return budget; + } /* Work is done so exit the polling mode and re-enable the interrupt */ napi_complete(napi); @@ -1840,17 +1883,16 @@ static int i40e_tso(struct i40e_ring *tx_ring, struct sk_buff *skb, if (err < 0) return err; - if (protocol == htons(ETH_P_IP)) { - iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + iph = skb->encapsulation ? inner_ip_hdr(skb) : ip_hdr(skb); + ipv6h = skb->encapsulation ? 
inner_ipv6_hdr(skb) : ipv6_hdr(skb); + + if (iph->version == 4) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); iph->tot_len = 0; iph->check = 0; tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0, IPPROTO_TCP, 0); - } else if (skb_is_gso_v6(skb)) { - - ipv6h = skb->encapsulation ? inner_ipv6_hdr(skb) - : ipv6_hdr(skb); + } else if (ipv6h->version == 6) { tcph = skb->encapsulation ? inner_tcp_hdr(skb) : tcp_hdr(skb); ipv6h->payload_len = 0; tcph->check = ~csum_ipv6_magic(&ipv6h->saddr, &ipv6h->daddr, @@ -1946,13 +1988,9 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; } } else if (tx_flags & I40E_TX_FLAGS_IPV6) { - if (tx_flags & I40E_TX_FLAGS_TSO) { - *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + *cd_tunneling |= I40E_TX_CTX_EXT_IP_IPV6; + if (tx_flags & I40E_TX_FLAGS_TSO) ip_hdr(skb)->check = 0; - } else { - *cd_tunneling |= - I40E_TX_CTX_EXT_IP_IPV4_NO_CSUM; - } } /* Now set the ctx descriptor fields */ @@ -1962,7 +2000,10 @@ static void i40e_tx_enable_csum(struct sk_buff *skb, u32 tx_flags, ((skb_inner_network_offset(skb) - skb_transport_offset(skb)) >> 1) << I40E_TXD_CTX_QW0_NATLEN_SHIFT; - + if (this_ip_hdr->version == 6) { + tx_flags &= ~I40E_TX_FLAGS_IPV4; + tx_flags |= I40E_TX_FLAGS_IPV6; + } } else { network_hdr_len = skb_network_header_len(skb); this_ip_hdr = ip_hdr(skb); @@ -2198,7 +2239,6 @@ static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, /* Place RS bit on last descriptor of any packet that spans across the * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline. */ -#define WB_STRIDE 0x3 if (((i & WB_STRIDE) != WB_STRIDE) && (first <= &tx_ring->tx_bi[i]) && (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) { diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h index e60d3ac..18b0023 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h @@ -241,6 +241,7 @@ struct i40e_ring { unsigned long last_rx_timestamp; bool ring_active; /* is ring online or not */ + bool arm_wb; /* do something to arm write back */ /* stats structs */ struct i40e_queue_stats stats; diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c index 051ea94..0f69ef8 100644 --- a/drivers/net/ethernet/intel/igb/e1000_82575.c +++ b/drivers/net/ethernet/intel/igb/e1000_82575.c @@ -1125,7 +1125,7 @@ static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask) u32 swmask = mask; u32 fwmask = mask << 16; s32 ret_val = 0; - s32 i = 0, timeout = 200; /* FIXME: find real value to use here */ + s32 i = 0, timeout = 200; while (i < timeout) { if (igb_get_hw_semaphore(hw)) { diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index d0d6dc1..ac6a8f1 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -475,7 +475,8 @@ static int mlx4_en_tunnel_steer_add(struct mlx4_en_priv *priv, unsigned char *ad { int err; - if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) + if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN || + priv->mdev->dev->caps.dmfs_high_steer_mode == MLX4_STEERING_DMFS_A0_STATIC) return 0; /* do nothing */ err = mlx4_tunnel_steer_add(priv->mdev->dev, addr, priv->port, qpn, diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 
943cbd4..6e08352 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -1744,8 +1744,7 @@ static void choose_tunnel_offload_mode(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap) { if (dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED && - dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS && - dev->caps.dmfs_high_steer_mode != MLX4_STEERING_DMFS_A0_STATIC) + dev_cap->flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_VXLAN; else dev->caps.tunnel_offload_mode = MLX4_TUNNEL_OFFLOAD_MODE_NONE; @@ -1829,7 +1828,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_dev_cap(dev, &dev_cap); if (err) { mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting\n"); - goto err_stop_fw; + return err; } choose_steering_mode(dev, &dev_cap); @@ -1860,7 +1859,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) &init_hca); if ((long long) icm_size < 0) { err = icm_size; - goto err_stop_fw; + return err; } dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1; @@ -1874,7 +1873,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size); if (err) - goto err_stop_fw; + return err; err = mlx4_INIT_HCA(dev, &init_hca); if (err) { @@ -1886,7 +1885,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev) err = mlx4_query_func(dev, &dev_cap); if (err < 0) { mlx4_err(dev, "QUERY_FUNC command failed, aborting.\n"); - goto err_stop_fw; + goto err_close; } else if (err & MLX4_QUERY_FUNC_NUM_SYS_EQS) { dev->caps.num_eqs = dev_cap.max_eqs; dev->caps.reserved_eqs = dev_cap.reserved_eqs; @@ -2006,11 +2005,6 @@ err_free_icm: if (!mlx4_is_slave(dev)) mlx4_free_icms(dev); -err_stop_fw: - if (!mlx4_is_slave(dev)) { - mlx4_UNMAP_FA(dev); - mlx4_free_icm(dev, priv->fw.fw_icm, 0); - } return err; } diff --git a/drivers/net/ethernet/mellanox/mlx4/mr.c b/drivers/net/ethernet/mellanox/mlx4/mr.c index d6f5496..7094a9c 100644 --- a/drivers/net/ethernet/mellanox/mlx4/mr.c +++ b/drivers/net/ethernet/mellanox/mlx4/mr.c @@ -584,6 +584,7 @@ EXPORT_SYMBOL_GPL(mlx4_mr_free); void mlx4_mr_rereg_mem_cleanup(struct mlx4_dev *dev, struct mlx4_mr *mr) { mlx4_mtt_cleanup(dev, &mr->mtt); + mr->mtt.order = -1; } EXPORT_SYMBOL_GPL(mlx4_mr_rereg_mem_cleanup); @@ -593,14 +594,14 @@ int mlx4_mr_rereg_mem_write(struct mlx4_dev *dev, struct mlx4_mr *mr, { int err; - mpt_entry->start = cpu_to_be64(iova); - mpt_entry->length = cpu_to_be64(size); - mpt_entry->entity_size = cpu_to_be32(page_shift); - err = mlx4_mtt_init(dev, npages, page_shift, &mr->mtt); if (err) return err; + mpt_entry->start = cpu_to_be64(mr->iova); + mpt_entry->length = cpu_to_be64(mr->size); + mpt_entry->entity_size = cpu_to_be32(mr->mtt.page_shift); + mpt_entry->pd_flags &= cpu_to_be32(MLX4_MPT_PD_MASK | MLX4_MPT_PD_FLAG_EN_INV); mpt_entry->flags &= cpu_to_be32(MLX4_MPT_FLAG_FREE | diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c index af09905..71af98b 100644 --- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c +++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c @@ -4033,8 +4033,10 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent) (void)pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); mgp->cmd = dma_alloc_coherent(&pdev->dev, sizeof(*mgp->cmd), &mgp->cmd_bus, GFP_KERNEL); - if (mgp->cmd == NULL) + if (!mgp->cmd) { + status = -ENOMEM; goto abort_with_enabled; + } mgp->board_span = pci_resource_len(pdev, 
0); mgp->iomem_base = pci_resource_start(pdev, 0); diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c index f5e4b82..db0c7a9 100644 --- a/drivers/net/ethernet/neterion/s2io.c +++ b/drivers/net/ethernet/neterion/s2io.c @@ -6987,7 +6987,9 @@ static int s2io_add_isr(struct s2io_nic *sp) if (sp->s2io_entries[i].in_use == MSIX_FLG) { if (sp->s2io_entries[i].type == MSIX_RING_TYPE) { - sprintf(sp->desc[i], "%s:MSI-X-%d-RX", + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-RX", dev->name, i); err = request_irq(sp->entries[i].vector, s2io_msix_ring_handle, @@ -6996,7 +6998,9 @@ static int s2io_add_isr(struct s2io_nic *sp) sp->s2io_entries[i].arg); } else if (sp->s2io_entries[i].type == MSIX_ALARM_TYPE) { - sprintf(sp->desc[i], "%s:MSI-X-%d-TX", + snprintf(sp->desc[i], + sizeof(sp->desc[i]), + "%s:MSI-X-%d-TX", dev->name, i); err = request_irq(sp->entries[i].vector, s2io_msix_fifo_handle, @@ -8154,7 +8158,8 @@ s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre) "%s: UDP Fragmentation Offload(UFO) enabled\n", dev->name); /* Initialize device name */ - sprintf(sp->name, "%s Neterion %s", dev->name, sp->product_name); + snprintf(sp->name, sizeof(sp->name), "%s Neterion %s", dev->name, + sp->product_name); if (vlan_tag_strip) sp->vlan_strip_flag = 1; diff --git a/drivers/net/ethernet/qlogic/qla3xxx.c b/drivers/net/ethernet/qlogic/qla3xxx.c index c2f09af..4847713 100644 --- a/drivers/net/ethernet/qlogic/qla3xxx.c +++ b/drivers/net/ethernet/qlogic/qla3xxx.c @@ -146,10 +146,7 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) { int i = 0; - while (i < 10) { - if (i) - ssleep(1); - + do { if (ql_sem_lock(qdev, QL_DRVR_SEM_MASK, (QL_RESOURCE_BITS_BASE_CODE | (qdev->mac_index) @@ -158,7 +155,8 @@ static int ql_wait_for_drvr_lock(struct ql3_adapter *qdev) "driver lock acquired\n"); return 1; } - } + ssleep(1); + } while (++i < 10); netdev_err(qdev->ndev, "Timed out waiting for driver lock...\n"); return 0; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c index 9929b97..2528c3f 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c @@ -2605,6 +2605,7 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent) } else { dev_err(&pdev->dev, "%s: failed. 
Please Reboot\n", __func__); + err = -ENODEV; goto err_out_free_hw; } diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c index c29ba80..6576243 100644 --- a/drivers/net/ethernet/renesas/sh_eth.c +++ b/drivers/net/ethernet/renesas/sh_eth.c @@ -473,6 +473,7 @@ static struct sh_eth_cpu_data r8a777x_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x00000f0f, .apr = 1, .mpr = 1, @@ -495,6 +496,9 @@ static struct sh_eth_cpu_data r8a779x_data = { .eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE | EESR_RDE | EESR_RFRMER | EESR_TFE | EESR_TDE | EESR_ECI, + .fdr_value = 0x00000f0f, + + .trscer_err_mask = DESC_I_RINT8, .apr = 1, .mpr = 1, @@ -856,6 +860,9 @@ static void sh_eth_set_default_cpu_data(struct sh_eth_cpu_data *cd) if (!cd->eesr_err_check) cd->eesr_err_check = DEFAULT_EESR_ERR_CHECK; + + if (!cd->trscer_err_mask) + cd->trscer_err_mask = DEFAULT_TRSCER_ERR_MASK; } static int sh_eth_check_reset(struct net_device *ndev) @@ -1294,7 +1301,7 @@ static int sh_eth_dev_init(struct net_device *ndev, bool start) /* Frame recv control (enable multiple-packets per rx irq) */ sh_eth_write(ndev, RMCR_RNC, RMCR); - sh_eth_write(ndev, DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2, TRSCER); + sh_eth_write(ndev, mdp->cd->trscer_err_mask, TRSCER); if (mdp->cd->bculr) sh_eth_write(ndev, 0x800, BCULR); /* Burst sycle set */ @@ -1820,6 +1827,9 @@ static int sh_eth_get_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_ethtool_gset(mdp->phydev, ecmd); spin_unlock_irqrestore(&mdp->lock, flags); @@ -1834,6 +1844,9 @@ static int sh_eth_set_settings(struct net_device *ndev, unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); /* disable tx and rx */ @@ -1868,6 +1881,9 @@ static int sh_eth_nway_reset(struct net_device *ndev) unsigned long flags; int ret; + if (!mdp->phydev) + return -ENODEV; + spin_lock_irqsave(&mdp->lock, flags); ret = phy_start_aneg(mdp->phydev); spin_unlock_irqrestore(&mdp->lock, flags); @@ -2177,6 +2193,7 @@ static int sh_eth_close(struct net_device *ndev) if (mdp->phydev) { phy_stop(mdp->phydev); phy_disconnect(mdp->phydev); + mdp->phydev = NULL; } free_irq(ndev->irq, ndev); @@ -2410,7 +2427,7 @@ static int sh_eth_tsu_purge_all(struct net_device *ndev) struct sh_eth_private *mdp = netdev_priv(ndev); int i, ret; - if (unlikely(!mdp->cd->tsu)) + if (!mdp->cd->tsu) return 0; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++) { @@ -2433,7 +2450,7 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) void *reg_offset = sh_eth_tsu_get_offset(mdp, TSU_ADRH0); int i; - if (unlikely(!mdp->cd->tsu)) + if (!mdp->cd->tsu) return; for (i = 0; i < SH_ETH_TSU_CAM_ENTRIES; i++, reg_offset += 8) { @@ -2443,8 +2460,8 @@ static void sh_eth_tsu_purge_mcast(struct net_device *ndev) } } -/* Multicast reception directions set */ -static void sh_eth_set_multicast_list(struct net_device *ndev) +/* Update promiscuous flag and multicast filter */ +static void sh_eth_set_rx_mode(struct net_device *ndev) { struct sh_eth_private *mdp = netdev_priv(ndev); u32 ecmr_bits; @@ -2455,7 +2472,9 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) /* Initial condition is MCT = 1, PRM = 0. 
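The sh_eth ethtool fixes above all guard against mdp->phydev being NULL, which is now possible because the close path disconnects the PHY and clears the pointer. The shape of that guard, sketched with a hypothetical private structure (phy_ethtool_gset() is the real helper; everything prefixed my_ is illustrative):

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/spinlock.h>

struct my_eth_priv {                    /* hypothetical */
        spinlock_t lock;
        struct phy_device *phydev;
};

static int my_get_settings_sketch(struct net_device *ndev,
                                  struct ethtool_cmd *ecmd)
{
        struct my_eth_priv *priv = netdev_priv(ndev);
        unsigned long flags;
        int ret;

        if (!priv->phydev)              /* PHY already disconnected by close() */
                return -ENODEV;

        spin_lock_irqsave(&priv->lock, flags);
        ret = phy_ethtool_gset(priv->phydev, ecmd);
        spin_unlock_irqrestore(&priv->lock, flags);

        return ret;
}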
* Depending on ndev->flags, set PRM or clear MCT */ - ecmr_bits = (sh_eth_read(ndev, ECMR) & ~ECMR_PRM) | ECMR_MCT; + ecmr_bits = sh_eth_read(ndev, ECMR) & ~ECMR_PRM; + if (mdp->cd->tsu) + ecmr_bits |= ECMR_MCT; if (!(ndev->flags & IFF_MULTICAST)) { sh_eth_tsu_purge_mcast(ndev); @@ -2484,9 +2503,6 @@ static void sh_eth_set_multicast_list(struct net_device *ndev) } } } - } else { - /* Normal, unicast/broadcast-only mode. */ - ecmr_bits = (ecmr_bits & ~ECMR_PRM) | ECMR_MCT; } /* update the ethernet mode */ @@ -2694,6 +2710,7 @@ static const struct net_device_ops sh_eth_netdev_ops = { .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, + .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_tx_timeout = sh_eth_tx_timeout, .ndo_do_ioctl = sh_eth_do_ioctl, .ndo_validate_addr = eth_validate_addr, @@ -2706,7 +2723,7 @@ static const struct net_device_ops sh_eth_netdev_ops_tsu = { .ndo_stop = sh_eth_close, .ndo_start_xmit = sh_eth_start_xmit, .ndo_get_stats = sh_eth_get_stats, - .ndo_set_rx_mode = sh_eth_set_multicast_list, + .ndo_set_rx_mode = sh_eth_set_rx_mode, .ndo_vlan_rx_add_vid = sh_eth_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = sh_eth_vlan_rx_kill_vid, .ndo_tx_timeout = sh_eth_tx_timeout, diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h index 22301bf..71f5de1 100644 --- a/drivers/net/ethernet/renesas/sh_eth.h +++ b/drivers/net/ethernet/renesas/sh_eth.h @@ -369,6 +369,8 @@ enum DESC_I_BIT { DESC_I_RINT1 = 0x0001, }; +#define DEFAULT_TRSCER_ERR_MASK (DESC_I_RINT8 | DESC_I_RINT5 | DESC_I_TINT2) + /* RPADIR */ enum RPADIR_BIT { RPADIR_PADS1 = 0x20000, RPADIR_PADS0 = 0x10000, @@ -470,6 +472,9 @@ struct sh_eth_cpu_data { unsigned long tx_check; unsigned long eesr_err_check; + /* Error mask */ + unsigned long trscer_err_mask; + /* hardware features */ unsigned long irq_flags; /* IRQ configuration flags */ unsigned no_psr:1; /* EtherC DO NOT have PSR */ diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c index 6984944..b1a2718 100644 --- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c @@ -474,13 +474,19 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, /* allocate memory for RX skbuff array */ rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, sizeof(dma_addr_t), GFP_KERNEL); - if (rx_ring->rx_skbuff_dma == NULL) - goto dmamem_err; + if (!rx_ring->rx_skbuff_dma) { + dma_free_coherent(priv->device, + rx_rsize * sizeof(struct sxgbe_rx_norm_desc), + rx_ring->dma_rx, rx_ring->dma_rx_phy); + goto error; + } rx_ring->rx_skbuff = kmalloc_array(rx_rsize, sizeof(struct sk_buff *), GFP_KERNEL); - if (rx_ring->rx_skbuff == NULL) - goto rxbuff_err; + if (!rx_ring->rx_skbuff) { + kfree(rx_ring->rx_skbuff_dma); + goto error; + } /* initialise the buffers */ for (desc_index = 0; desc_index < rx_rsize; desc_index++) { @@ -502,13 +508,6 @@ static int init_rx_ring(struct net_device *dev, u8 queue_no, err_init_rx_buffers: while (--desc_index >= 0) free_rx_ring(priv->device, rx_ring, desc_index); - kfree(rx_ring->rx_skbuff); -rxbuff_err: - kfree(rx_ring->rx_skbuff_dma); -dmamem_err: - dma_free_coherent(priv->device, - rx_rsize * sizeof(struct sxgbe_rx_norm_desc), - rx_ring->dma_rx, rx_ring->dma_rx_phy); error: return -ENOMEM; } diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c index 866560e..b02eed1 100644 --- 
a/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c +++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_platform.c @@ -108,10 +108,6 @@ static int sxgbe_platform_probe(struct platform_device *pdev) } } - /* Get MAC address if available (DT) */ - if (mac) - ether_addr_copy(priv->dev->dev_addr, mac); - priv = sxgbe_drv_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed\n", __func__); @@ -125,6 +121,10 @@ static int sxgbe_platform_probe(struct platform_device *pdev) goto err_drv_remove; } + /* Get MAC address if available (DT) */ + if (mac) + ether_addr_copy(priv->dev->dev_addr, mac); + /* Get the TX/RX IRQ numbers */ for (i = 0, chan = 1; i < SXGBE_TX_QUEUES; i++) { priv->txq[i]->irq_no = irq_of_parse_and_map(node, chan++); diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c index c560f9a..e068d48 100644 --- a/drivers/net/ethernet/ti/cpsw.c +++ b/drivers/net/ethernet/ti/cpsw.c @@ -610,7 +610,7 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) /* Clear all mcast from ALE */ cpsw_ale_flush_multicast(ale, ALE_ALL_PORTS << - priv->host_port); + priv->host_port, -1); /* Flood All Unicast Packets to Host port */ cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); @@ -634,6 +634,12 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable) static void cpsw_ndo_set_rx_mode(struct net_device *ndev) { struct cpsw_priv *priv = netdev_priv(ndev); + int vid; + + if (priv->data.dual_emac) + vid = priv->slaves[priv->emac_port].port_vlan; + else + vid = priv->data.default_vlan; if (ndev->flags & IFF_PROMISC) { /* Enable promiscuous mode */ @@ -649,7 +655,8 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev) cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI); /* Clear all mcast from ALE */ - cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); + cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port, + vid); if (!netdev_mc_empty(ndev)) { struct netdev_hw_addr *ha; @@ -757,6 +764,14 @@ requeue: static irqreturn_t cpsw_interrupt(int irq, void *dev_id) { struct cpsw_priv *priv = dev_id; + int value = irq - priv->irqs_table[0]; + + /* NOTICE: Ending IRQ here. The trick with the 'value' variable above + * is to make sure we will always write the correct value to the EOI + * register. Namely 0 for RX_THRESH Interrupt, 1 for RX Interrupt, 2 + * for TX Interrupt and 3 for MISC Interrupt. 
+ */ + cpdma_ctlr_eoi(priv->dma, value); cpsw_intr_disable(priv); if (priv->irq_enabled == true) { @@ -786,8 +801,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) int num_tx, num_rx; num_tx = cpdma_chan_process(priv->txch, 128); - if (num_tx) - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); num_rx = cpdma_chan_process(priv->rxch, budget); if (num_rx < budget) { @@ -795,7 +808,6 @@ static int cpsw_poll(struct napi_struct *napi, int budget) napi_complete(napi); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { prim_cpsw->irq_enabled = true; @@ -1310,8 +1322,6 @@ static int cpsw_ndo_open(struct net_device *ndev) napi_enable(&priv->napi); cpdma_ctlr_start(priv->dma); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); prim_cpsw = cpsw_get_slave_priv(priv, 0); if (prim_cpsw->irq_enabled == false) { @@ -1578,9 +1588,6 @@ static void cpsw_ndo_tx_timeout(struct net_device *ndev) cpdma_chan_start(priv->txch); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } static int cpsw_ndo_set_mac_address(struct net_device *ndev, void *p) @@ -1620,9 +1627,6 @@ static void cpsw_ndo_poll_controller(struct net_device *ndev) cpsw_interrupt(ndev->irq, priv); cpdma_ctlr_int_ctrl(priv->dma, true); cpsw_intr_enable(priv); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_RX); - cpdma_ctlr_eoi(priv->dma, CPDMA_EOI_TX); - } #endif @@ -1630,16 +1634,24 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, unsigned short vid) { int ret; - int unreg_mcast_mask; + int unreg_mcast_mask = 0; + u32 port_mask; - if (priv->ndev->flags & IFF_ALLMULTI) - unreg_mcast_mask = ALE_ALL_PORTS; - else - unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + if (priv->data.dual_emac) { + port_mask = (1 << (priv->emac_port + 1)) | ALE_PORT_HOST; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = port_mask; + } else { + port_mask = ALE_ALL_PORTS; + + if (priv->ndev->flags & IFF_ALLMULTI) + unreg_mcast_mask = ALE_ALL_PORTS; + else + unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2; + } - ret = cpsw_ale_add_vlan(priv->ale, vid, - ALE_ALL_PORTS << priv->host_port, - 0, ALE_ALL_PORTS << priv->host_port, + ret = cpsw_ale_add_vlan(priv->ale, vid, port_mask, 0, port_mask, unreg_mcast_mask << priv->host_port); if (ret != 0) return ret; @@ -1650,8 +1662,7 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv, goto clean_vid; ret = cpsw_ale_add_mcast(priv->ale, priv->ndev->broadcast, - ALE_ALL_PORTS << priv->host_port, - ALE_VLAN, vid, 0); + port_mask, ALE_VLAN, vid, 0); if (ret != 0) goto clean_vlan_ucast; return 0; diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c index 097ebe7..5246b3a 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.c +++ b/drivers/net/ethernet/ti/cpsw_ale.c @@ -234,7 +234,7 @@ static void cpsw_ale_flush_mcast(struct cpsw_ale *ale, u32 *ale_entry, cpsw_ale_set_entry_type(ale_entry, ALE_TYPE_FREE); } -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid) { u32 ale_entry[ALE_ENTRY_WORDS]; int ret, idx; @@ -245,6 +245,14 @@ int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask) if (ret != ALE_TYPE_ADDR && ret != ALE_TYPE_VLAN_ADDR) continue; + /* if vid passed is -1 then remove all multicast entry from + * the table irrespective 
of vlan id, if a valid vlan id is + * passed then remove only multicast added to that vlan id. + * if vlan id doesn't match then move on to next entry. + */ + if (vid != -1 && cpsw_ale_get_vlan_id(ale_entry) != vid) + continue; + if (cpsw_ale_get_mcast(ale_entry)) { u8 addr[6]; diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h index c0d4127..af1e7ec 100644 --- a/drivers/net/ethernet/ti/cpsw_ale.h +++ b/drivers/net/ethernet/ti/cpsw_ale.h @@ -92,7 +92,7 @@ void cpsw_ale_stop(struct cpsw_ale *ale); int cpsw_ale_set_ageout(struct cpsw_ale *ale, int ageout); int cpsw_ale_flush(struct cpsw_ale *ale, int port_mask); -int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask); +int cpsw_ale_flush_multicast(struct cpsw_ale *ale, int port_mask, int vid); int cpsw_ale_add_ucast(struct cpsw_ale *ale, u8 *addr, int port, int flags, u16 vid); int cpsw_ale_del_ucast(struct cpsw_ale *ale, u8 *addr, int port, diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c index ea71251..5fae435 100644 --- a/drivers/net/ethernet/ti/davinci_emac.c +++ b/drivers/net/ethernet/ti/davinci_emac.c @@ -62,6 +62,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/of_mdio.h> #include <linux/of_irq.h> #include <linux/of_net.h> @@ -343,9 +344,7 @@ struct emac_priv { u32 multicast_hash_cnt[EMAC_NUM_MULTICAST_BITS]; u32 rx_addr_type; const char *phy_id; -#ifdef CONFIG_OF struct device_node *phy_node; -#endif struct phy_device *phydev; spinlock_t lock; /*platform specific members*/ @@ -922,6 +921,16 @@ static void emac_int_disable(struct emac_priv *priv) if (priv->int_disable) priv->int_disable(); + /* NOTE: Rx Threshold and Misc interrupts are not enabled */ + + /* ack rxen only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_RXEN); + + /* ack txen- only then a new pulse will be generated */ + emac_write(EMAC_DM646X_MACEOIVECTOR, + EMAC_DM646X_MAC_EOI_C0_TXEN); + local_irq_restore(flags); } else { @@ -951,15 +960,6 @@ static void emac_int_enable(struct emac_priv *priv) * register */ /* NOTE: Rx Threshold and Misc interrupts are not enabled */ - - /* ack rxen only then a new pulse will be generated */ - emac_write(EMAC_DM646X_MACEOIVECTOR, - EMAC_DM646X_MAC_EOI_C0_RXEN); - - /* ack txen- only then a new pulse will be generated */ - emac_write(EMAC_DM646X_MACEOIVECTOR, - EMAC_DM646X_MAC_EOI_C0_TXEN); - } else { /* Set DM644x control registers for interrupt control */ emac_ctrl_write(EMAC_CTRL_EWCTL, 0x1); @@ -1537,7 +1537,13 @@ static int emac_dev_open(struct net_device *ndev) int i = 0; struct emac_priv *priv = netdev_priv(ndev); - pm_runtime_get(&priv->pdev->dev); + ret = pm_runtime_get_sync(&priv->pdev->dev); + if (ret < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, ret); + return ret; + } netif_carrier_off(ndev); for (cnt = 0; cnt < ETH_ALEN; cnt++) @@ -1596,8 +1602,20 @@ static int emac_dev_open(struct net_device *ndev) cpdma_ctlr_start(priv->dma); priv->phydev = NULL; + + if (priv->phy_node) { + priv->phydev = of_phy_connect(ndev, priv->phy_node, + &emac_adjust_link, 0, 0); + if (!priv->phydev) { + dev_err(emac_dev, "could not connect to phy %s\n", + priv->phy_node->full_name); + ret = -ENODEV; + goto err; + } + } + /* use the first phy on the bus if pdata did not give us a phy id */ - if (!priv->phy_id) { + if (!priv->phydev && !priv->phy_id) { struct device *phy; phy = 
bus_find_device(&mdio_bus_type, NULL, NULL, @@ -1606,7 +1624,7 @@ static int emac_dev_open(struct net_device *ndev) priv->phy_id = dev_name(phy); } - if (priv->phy_id && *priv->phy_id) { + if (!priv->phydev && priv->phy_id && *priv->phy_id) { priv->phydev = phy_connect(ndev, priv->phy_id, &emac_adjust_link, PHY_INTERFACE_MODE_MII); @@ -1627,7 +1645,9 @@ static int emac_dev_open(struct net_device *ndev) "(mii_bus:phy_addr=%s, id=%x)\n", priv->phydev->drv->name, dev_name(&priv->phydev->dev), priv->phydev->phy_id); - } else { + } + + if (!priv->phydev) { /* No PHY , fix the link, speed and duplex settings */ dev_notice(emac_dev, "no phy, defaulting to 100/full\n"); priv->link = 1; @@ -1724,6 +1744,15 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) struct emac_priv *priv = netdev_priv(ndev); u32 mac_control; u32 stats_clear_mask; + int err; + + err = pm_runtime_get_sync(&priv->pdev->dev); + if (err < 0) { + pm_runtime_put_noidle(&priv->pdev->dev); + dev_err(&priv->pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, err); + return &ndev->stats; + } /* update emac hardware stats and reset the registers*/ @@ -1766,6 +1795,8 @@ static struct net_device_stats *emac_dev_getnetstats(struct net_device *ndev) ndev->stats.tx_fifo_errors += emac_read(EMAC_TXUNDERRUN); emac_write(EMAC_TXUNDERRUN, stats_clear_mask); + pm_runtime_put(&priv->pdev->dev); + return &ndev->stats; } @@ -1859,7 +1890,7 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv) static int davinci_emac_probe(struct platform_device *pdev) { int rc = 0; - struct resource *res; + struct resource *res, *res_ctrl; struct net_device *ndev; struct emac_priv *priv; unsigned long hw_ram_addr; @@ -1876,6 +1907,7 @@ static int davinci_emac_probe(struct platform_device *pdev) return -EBUSY; } emac_bus_frequency = clk_get_rate(emac_clk); + devm_clk_put(&pdev->dev, emac_clk); /* TODO: Probe PHY here if possible */ @@ -1917,11 +1949,20 @@ static int davinci_emac_probe(struct platform_device *pdev) rc = PTR_ERR(priv->remap_addr); goto no_pdata; } + + res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 1); + if (res_ctrl) { + priv->ctrl_base = + devm_ioremap_resource(&pdev->dev, res_ctrl); + if (IS_ERR(priv->ctrl_base)) + goto no_pdata; + } else { + priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; + } + priv->emac_base = priv->remap_addr + pdata->ctrl_reg_offset; ndev->base_addr = (unsigned long)priv->remap_addr; - priv->ctrl_base = priv->remap_addr + pdata->ctrl_mod_reg_offset; - hw_ram_addr = pdata->hw_ram_addr; if (!hw_ram_addr) hw_ram_addr = (u32 __force)res->start + pdata->ctrl_ram_offset; @@ -1980,12 +2021,22 @@ static int davinci_emac_probe(struct platform_device *pdev) ndev->ethtool_ops = ðtool_ops; netif_napi_add(ndev, &priv->napi, emac_poll, EMAC_POLL_WEIGHT); + pm_runtime_enable(&pdev->dev); + rc = pm_runtime_get_sync(&pdev->dev); + if (rc < 0) { + pm_runtime_put_noidle(&pdev->dev); + dev_err(&pdev->dev, "%s: failed to get_sync(%d)\n", + __func__, rc); + goto no_cpdma_chan; + } + /* register the network device */ SET_NETDEV_DEV(ndev, &pdev->dev); rc = register_netdev(ndev); if (rc) { dev_err(&pdev->dev, "error in register_netdev\n"); rc = -ENODEV; + pm_runtime_put(&pdev->dev); goto no_cpdma_chan; } @@ -1995,9 +2046,7 @@ static int davinci_emac_probe(struct platform_device *pdev) "(regs: %p, irq: %d)\n", (void *)priv->emac_base_phys, ndev->irq); } - - pm_runtime_enable(&pdev->dev); - pm_runtime_resume(&pdev->dev); + pm_runtime_put(&pdev->dev); return 0; @@ 
-2071,9 +2120,14 @@ static const struct emac_platform_data am3517_emac_data = { .hw_ram_addr = 0x01e20000, }; +static const struct emac_platform_data dm816_emac_data = { + .version = EMAC_VERSION_2, +}; + static const struct of_device_id davinci_emac_of_match[] = { {.compatible = "ti,davinci-dm6467-emac", }, {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, }, + {.compatible = "ti,dm816-emac", .data = &dm816_emac_data, }, {}, }; MODULE_DEVICE_TABLE(of, davinci_emac_of_match); diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c index 9c2d91e..dbcbf0c 100644 --- a/drivers/net/ethernet/xilinx/ll_temac_main.c +++ b/drivers/net/ethernet/xilinx/ll_temac_main.c @@ -1043,6 +1043,7 @@ static int temac_of_probe(struct platform_device *op) lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map temac regs.\n"); + rc = -ENOMEM; goto nodev; } @@ -1062,6 +1063,7 @@ static int temac_of_probe(struct platform_device *op) np = of_parse_phandle(op->dev.of_node, "llink-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); + rc = -ENODEV; goto err_iounmap; } diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c index c18a0c6..a6d2860 100644 --- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c +++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c @@ -1501,6 +1501,7 @@ static int axienet_of_probe(struct platform_device *op) lp->regs = of_iomap(op->dev.of_node, 0); if (!lp->regs) { dev_err(&op->dev, "could not map Axi Ethernet regs.\n"); + ret = -ENOMEM; goto nodev; } /* Setup checksum offload, but default to off if not specified */ @@ -1563,6 +1564,7 @@ static int axienet_of_probe(struct platform_device *op) np = of_parse_phandle(op->dev.of_node, "axistream-connected", 0); if (!np) { dev_err(&op->dev, "could not find DMA node\n"); + ret = -ENODEV; goto err_iounmap; } lp->dma_regs = of_iomap(np, 0); diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c index 2485879..9d4ce38 100644 --- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c +++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c @@ -1109,6 +1109,7 @@ static int xemaclite_of_probe(struct platform_device *ofdev) res = platform_get_resource(ofdev, IORESOURCE_IRQ, 0); if (!res) { dev_err(dev, "no IRQ found\n"); + rc = -ENXIO; goto error; } diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c index 93e2242..f7ff493 100644 --- a/drivers/net/team/team.c +++ b/drivers/net/team/team.c @@ -629,6 +629,7 @@ static int team_change_mode(struct team *team, const char *kind) static void team_notify_peers_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, notify_peers.dw.work); @@ -636,9 +637,14 @@ static void team_notify_peers_work(struct work_struct *work) schedule_delayed_work(&team->notify_peers.dw, 0); return; } + val = atomic_dec_if_positive(&team->notify_peers.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->notify_peers.count_pending)) + if (val) schedule_delayed_work(&team->notify_peers.dw, msecs_to_jiffies(team->notify_peers.interval)); } @@ -669,6 +675,7 @@ static void team_notify_peers_fini(struct team *team) static void team_mcast_rejoin_work(struct work_struct *work) { struct team *team; + int val; team = container_of(work, struct team, 
mcast_rejoin.dw.work); @@ -676,9 +683,14 @@ static void team_mcast_rejoin_work(struct work_struct *work) schedule_delayed_work(&team->mcast_rejoin.dw, 0); return; } + val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending); + if (val < 0) { + rtnl_unlock(); + return; + } call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev); rtnl_unlock(); - if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending)) + if (val) schedule_delayed_work(&team->mcast_rejoin.dw, msecs_to_jiffies(team->mcast_rejoin.interval)); } diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c index dcb6d33..1e9cdca 100644 --- a/drivers/net/usb/kaweth.c +++ b/drivers/net/usb/kaweth.c @@ -1276,7 +1276,7 @@ static int usb_start_wait_urb(struct urb *urb, int timeout, int* actual_length) awd.done = 0; urb->context = &awd; - status = usb_submit_urb(urb, GFP_NOIO); + status = usb_submit_urb(urb, GFP_ATOMIC); if (status) { // something went wrong usb_free_urb(urb); diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index b8a82b8..602dc66 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c @@ -56,6 +56,8 @@ struct qmi_wwan_state { /* default ethernet address used by the modem */ static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3}; +static const u8 buggy_fw_addr[ETH_ALEN] = {0x00, 0xa0, 0xc6, 0x00, 0x00, 0x00}; + /* Make up an ethernet header if the packet doesn't have one. * * A firmware bug common among several devices cause them to send raw @@ -332,10 +334,12 @@ next_desc: usb_driver_release_interface(driver, info->data); } - /* Never use the same address on both ends of the link, even - * if the buggy firmware told us to. + /* Never use the same address on both ends of the link, even if the + * buggy firmware told us to. 
Or, if device is assigned the well-known + * buggy firmware MAC address, replace it with a random address, */ - if (ether_addr_equal(dev->net->dev_addr, default_modem_addr)) + if (ether_addr_equal(dev->net->dev_addr, default_modem_addr) || + ether_addr_equal(dev->net->dev_addr, buggy_fw_addr)) eth_hw_addr_random(dev->net); /* make MAC addr easily distinguishable from an IP header */ diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index 2d1c77e..bf405f1 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -833,9 +833,6 @@ static void ocp_write_word(struct r8152 *tp, u16 type, u16 index, u32 data) index &= ~3; } - generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); - - data |= __le32_to_cpu(tmp) & ~mask; tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); @@ -874,9 +871,6 @@ static void ocp_write_byte(struct r8152 *tp, u16 type, u16 index, u32 data) index &= ~3; } - generic_ocp_read(tp, index, sizeof(tmp), &tmp, type); - - data |= __le32_to_cpu(tmp) & ~mask; tmp = __cpu_to_le32(data); generic_ocp_write(tp, index, byen, sizeof(tmp), &tmp, type); @@ -926,12 +920,6 @@ static void sram_write(struct r8152 *tp, u16 addr, u16 data) ocp_reg_write(tp, OCP_SRAM_DATA, data); } -static u16 sram_read(struct r8152 *tp, u16 addr) -{ - ocp_reg_write(tp, OCP_SRAM_ADDR, addr); - return ocp_reg_read(tp, OCP_SRAM_DATA); -} - static int read_mii_word(struct net_device *netdev, int phy_id, int reg) { struct r8152 *tp = netdev_priv(netdev); @@ -1897,6 +1885,22 @@ static void _rtl8152_set_rx_mode(struct net_device *netdev) netif_wake_queue(netdev); } +static netdev_features_t +rtl8152_features_check(struct sk_buff *skb, struct net_device *dev, + netdev_features_t features) +{ + u32 mss = skb_shinfo(skb)->gso_size; + int max_offset = mss ? 
GTTCPHO_MAX : TCPHO_MAX; + int offset = skb_transport_offset(skb); + + if ((mss || skb->ip_summed == CHECKSUM_PARTIAL) && offset > max_offset) + features &= ~(NETIF_F_ALL_CSUM | NETIF_F_GSO_MASK); + else if ((skb->len + sizeof(struct tx_desc)) > agg_buf_sz) + features &= ~NETIF_F_GSO_MASK; + + return features; +} + static netdev_tx_t rtl8152_start_xmit(struct sk_buff *skb, struct net_device *netdev) { @@ -2502,24 +2506,18 @@ static void r8153_hw_phy_cfg(struct r8152 *tp) data = ocp_reg_read(tp, OCP_POWER_CFG); data |= EN_10M_PLLOFF; ocp_reg_write(tp, OCP_POWER_CFG, data); - data = sram_read(tp, SRAM_IMPEDANCE); - data &= ~RX_DRIVING_MASK; - sram_write(tp, SRAM_IMPEDANCE, data); + sram_write(tp, SRAM_IMPEDANCE, 0x0b13); ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR); ocp_data |= PFM_PWM_SWITCH; ocp_write_word(tp, MCU_TYPE_PLA, PLA_PHY_PWR, ocp_data); - data = sram_read(tp, SRAM_LPF_CFG); - data |= LPF_AUTO_TUNE; - sram_write(tp, SRAM_LPF_CFG, data); + /* Enable LPF corner auto tune */ + sram_write(tp, SRAM_LPF_CFG, 0xf70f); - data = sram_read(tp, SRAM_10M_AMP1); - data |= GDAC_IB_UPALL; - sram_write(tp, SRAM_10M_AMP1, data); - data = sram_read(tp, SRAM_10M_AMP2); - data |= AMP_DN; - sram_write(tp, SRAM_10M_AMP2, data); + /* Adjust 10M Amplitude */ + sram_write(tp, SRAM_10M_AMP1, 0x00af); + sram_write(tp, SRAM_10M_AMP2, 0x0208); set_bit(PHY_RESET, &tp->flags); } @@ -3706,6 +3704,7 @@ static const struct net_device_ops rtl8152_netdev_ops = { .ndo_set_mac_address = rtl8152_set_mac_address, .ndo_change_mtu = rtl8152_change_mtu, .ndo_validate_addr = eth_validate_addr, + .ndo_features_check = rtl8152_features_check, }; static void r8152b_get_version(struct r8152 *tp) diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c index e5be2d2..a5f9198 100644 --- a/drivers/net/wireless/iwlwifi/iwl-7000.c +++ b/drivers/net/wireless/iwlwifi/iwl-7000.c @@ -69,8 +69,8 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL7260_UCODE_API_MAX 10 -#define IWL3160_UCODE_API_MAX 10 +#define IWL7260_UCODE_API_MAX 12 +#define IWL3160_UCODE_API_MAX 12 /* Oldest version we won't warn about */ #define IWL7260_UCODE_API_OK 10 @@ -105,7 +105,7 @@ #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" #define IWL7265D_FW_PRE "iwlwifi-7265D-" -#define IWL7265D_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode" +#define IWL7265D_MODULE_FIRMWARE(api) IWL7265D_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_7000 0 diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c index bf0a95c..3668fc5 100644 --- a/drivers/net/wireless/iwlwifi/iwl-8000.c +++ b/drivers/net/wireless/iwlwifi/iwl-8000.c @@ -69,7 +69,7 @@ #include "iwl-agn-hw.h" /* Highest firmware API version supported */ -#define IWL8000_UCODE_API_MAX 10 +#define IWL8000_UCODE_API_MAX 12 /* Oldest version we won't warn about */ #define IWL8000_UCODE_API_OK 10 diff --git a/drivers/net/wireless/iwlwifi/iwl-fw-file.h b/drivers/net/wireless/iwlwifi/iwl-fw-file.h index f2a047f..1bbe4fc 100644 --- a/drivers/net/wireless/iwlwifi/iwl-fw-file.h +++ b/drivers/net/wireless/iwlwifi/iwl-fw-file.h @@ -243,6 +243,9 @@ enum iwl_ucode_tlv_flag { * @IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF: ucode supports disabling dummy notif. * @IWL_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time * longer than the passive one, which is essential for fragmented scan. 
+ * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command, + * regardless of the band or the number of the probes. FW will calculate + * the actual dwell time. */ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0), @@ -253,6 +256,7 @@ enum iwl_ucode_tlv_api { IWL_UCODE_TLV_API_LMAC_SCAN = BIT(6), IWL_UCODE_TLV_API_SF_NO_DUMMY_NOTIF = BIT(7), IWL_UCODE_TLV_API_FRAGMENTED_SCAN = BIT(8), + IWL_UCODE_TLV_API_BASIC_DWELL = BIT(13), }; /** diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h index 1f2acf4..201846d 100644 --- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h +++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h @@ -672,6 +672,7 @@ struct iwl_scan_channel_opt { * @IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented * @IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report * and DS parameter set IEs into probe requests. + * @IWL_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches */ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL = BIT(0), @@ -681,6 +682,7 @@ enum iwl_mvm_lmac_scan_flags { IWL_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = BIT(4), IWL_MVM_LMAC_SCAN_FLAG_FRAGMENTED = BIT(5), IWL_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = BIT(6), + IWL_MVM_LMAC_SCAN_FLAG_MATCH = BIT(9), }; enum iwl_scan_priority { diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c index e5294d0..ec9a8e7 100644 --- a/drivers/net/wireless/iwlwifi/mvm/scan.c +++ b/drivers/net/wireless/iwlwifi/mvm/scan.c @@ -171,15 +171,21 @@ static void iwl_mvm_scan_fill_ssids(struct iwl_ssid_ie *cmd_ssid, * already included in the probe template, so we need to set only * req->n_ssids - 1 bits in addition to the first bit. */ -static u16 iwl_mvm_get_active_dwell(enum ieee80211_band band, int n_ssids) +static u16 iwl_mvm_get_active_dwell(struct iwl_mvm *mvm, + enum ieee80211_band band, int n_ssids) { + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + return 10; if (band == IEEE80211_BAND_2GHZ) return 20 + 3 * (n_ssids + 1); return 10 + 2 * (n_ssids + 1); } -static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band) +static u16 iwl_mvm_get_passive_dwell(struct iwl_mvm *mvm, + enum ieee80211_band band) { + if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_BASIC_DWELL) + return 110; return band == IEEE80211_BAND_2GHZ ? 
100 + 20 : 100 + 10; } @@ -331,7 +337,8 @@ static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm, */ if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { u32 passive_dwell = - iwl_mvm_get_passive_dwell(IEEE80211_BAND_2GHZ); + iwl_mvm_get_passive_dwell(mvm, + IEEE80211_BAND_2GHZ); params->max_out_time = passive_dwell; } else { params->passive_fragmented = true; @@ -348,8 +355,8 @@ not_bound: params->dwell[band].passive = frag_passive_dwell; else params->dwell[band].passive = - iwl_mvm_get_passive_dwell(band); - params->dwell[band].active = iwl_mvm_get_active_dwell(band, + iwl_mvm_get_passive_dwell(mvm, band); + params->dwell[band].active = iwl_mvm_get_active_dwell(mvm, band, n_ssids); } } @@ -1448,6 +1455,8 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm, if (iwl_mvm_scan_pass_all(mvm, req)) flags |= IWL_MVM_LMAC_SCAN_FLAG_PASS_ALL; + else + flags |= IWL_MVM_LMAC_SCAN_FLAG_MATCH; if (req->n_ssids == 1 && req->ssids[0].ssid_len != 0) flags |= IWL_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION; diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c index 4f15d9d..4333306 100644 --- a/drivers/net/wireless/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/iwlwifi/mvm/tx.c @@ -108,8 +108,12 @@ void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb, tx_flags &= ~TX_CMD_FLG_SEQ_CTL; } - /* tid_tspec will default to 0 = BE when QOS isn't enabled */ - ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; + /* Default to 0 (BE) when tid_spec is set to IWL_TID_NON_QOS */ + if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT) + ac = tid_to_mac80211_ac[tx_cmd->tid_tspec]; + else + ac = tid_to_mac80211_ac[0]; + tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) << TX_CMD_FLG_BT_PRIO_POS; diff --git a/drivers/net/wireless/iwlwifi/mvm/utils.c b/drivers/net/wireless/iwlwifi/mvm/utils.c index e56e77e..917431e 100644 --- a/drivers/net/wireless/iwlwifi/mvm/utils.c +++ b/drivers/net/wireless/iwlwifi/mvm/utils.c @@ -665,7 +665,7 @@ bool iwl_mvm_rx_diversity_allowed(struct iwl_mvm *mvm) if (num_of_ant(mvm->fw->valid_rx_ant) == 1) return false; - if (!mvm->cfg->rx_with_siso_diversity) + if (mvm->cfg->rx_with_siso_diversity) return false; ieee80211_iterate_active_interfaces_atomic( diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 2f0c4b1..d5aadb0 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -527,8 +527,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) else if (cfg == &iwl7265_n_cfg) cfg_7265d = &iwl7265d_n_cfg; if (cfg_7265d && - (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) + (iwl_trans->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_7265D) { cfg = cfg_7265d; + iwl_trans->cfg = cfg_7265d; + } #endif pci_set_drvdata(pdev, iwl_trans); diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c index 846a2e6..c70efb9 100644 --- a/drivers/net/wireless/rtlwifi/pci.c +++ b/drivers/net/wireless/rtlwifi/pci.c @@ -666,7 +666,8 @@ tx_status_ok: } static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, - u8 *entry, int rxring_idx, int desc_idx) + struct sk_buff *new_skb, u8 *entry, + int rxring_idx, int desc_idx) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw)); @@ -674,11 +675,15 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, u8 tmp_one = 1; struct sk_buff *skb; + if (likely(new_skb)) { + skb = new_skb; + goto remap; + } skb = 
dev_alloc_skb(rtlpci->rxbuffersize); if (!skb) return 0; - rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; +remap: /* just set skb->cb to mapping addr for pci_unmap_single use */ *((dma_addr_t *)skb->cb) = pci_map_single(rtlpci->pdev, skb_tail_pointer(skb), @@ -686,6 +691,7 @@ static int _rtl_pci_init_one_rxdesc(struct ieee80211_hw *hw, bufferaddress = *((dma_addr_t *)skb->cb); if (pci_dma_mapping_error(rtlpci->pdev, bufferaddress)) return 0; + rtlpci->rx_ring[rxring_idx].rx_buf[desc_idx] = skb; if (rtlpriv->use_new_trx_flow) { rtlpriv->cfg->ops->set_desc(hw, (u8 *)entry, false, HW_DESC_RX_PREPARE, @@ -781,6 +787,7 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) /*rx pkt */ struct sk_buff *skb = rtlpci->rx_ring[rxring_idx].rx_buf[ rtlpci->rx_ring[rxring_idx].idx]; + struct sk_buff *new_skb; if (rtlpriv->use_new_trx_flow) { rx_remained_cnt = @@ -807,6 +814,13 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) pci_unmap_single(rtlpci->pdev, *((dma_addr_t *)skb->cb), rtlpci->rxbuffersize, PCI_DMA_FROMDEVICE); + /* get a new skb - if fail, old one will be reused */ + new_skb = dev_alloc_skb(rtlpci->rxbuffersize); + if (unlikely(!new_skb)) { + pr_err("Allocation of new skb failed in %s\n", + __func__); + goto no_new; + } if (rtlpriv->use_new_trx_flow) { buffer_desc = &rtlpci->rx_ring[rxring_idx].buffer_desc @@ -911,14 +925,16 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) schedule_work(&rtlpriv->works.lps_change_work); } end: + skb = new_skb; +no_new: if (rtlpriv->use_new_trx_flow) { - _rtl_pci_init_one_rxdesc(hw, (u8 *)buffer_desc, + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)buffer_desc, rxring_idx, - rtlpci->rx_ring[rxring_idx].idx); + rtlpci->rx_ring[rxring_idx].idx); } else { - _rtl_pci_init_one_rxdesc(hw, (u8 *)pdesc, rxring_idx, + _rtl_pci_init_one_rxdesc(hw, skb, (u8 *)pdesc, + rxring_idx, rtlpci->rx_ring[rxring_idx].idx); - if (rtlpci->rx_ring[rxring_idx].idx == rtlpci->rxringcount - 1) rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, @@ -1307,7 +1323,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) rtlpci->rx_ring[rxring_idx].idx = 0; for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].buffer_desc[i]; - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } @@ -1332,7 +1348,7 @@ static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw, int rxring_idx) for (i = 0; i < rtlpci->rxringcount; i++) { entry = &rtlpci->rx_ring[rxring_idx].desc[i]; - if (!_rtl_pci_init_one_rxdesc(hw, (u8 *)entry, + if (!_rtl_pci_init_one_rxdesc(hw, NULL, (u8 *)entry, rxring_idx, i)) return -ENOMEM; } diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c index efbaf2a..794204e 100644 --- a/drivers/net/xen-netback/xenbus.c +++ b/drivers/net/xen-netback/xenbus.c @@ -737,6 +737,7 @@ static void connect(struct backend_info *be) } queue->remaining_credit = credit_bytes; + queue->credit_usec = credit_usec; err = connect_rings(be, queue); if (err) { diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 22bcb4e..d8c1076 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c @@ -88,10 +88,8 @@ struct netfront_cb { #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) struct netfront_stats { - u64 rx_packets; - u64 tx_packets; - u64 rx_bytes; - u64 tx_bytes; + u64 packets; + u64 bytes; struct u64_stats_sync syncp; }; @@ -160,7 +158,8 @@ struct netfront_info { struct netfront_queue *queues; /* 
Statistics */ - struct netfront_stats __percpu *stats; + struct netfront_stats __percpu *rx_stats; + struct netfront_stats __percpu *tx_stats; atomic_t rx_gso_checksum_fixup; }; @@ -565,7 +564,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned short id; struct netfront_info *np = netdev_priv(dev); - struct netfront_stats *stats = this_cpu_ptr(np->stats); + struct netfront_stats *tx_stats = this_cpu_ptr(np->tx_stats); struct xen_netif_tx_request *tx; char *data = skb->data; RING_IDX i; @@ -672,10 +671,10 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev) if (notify) notify_remote_via_irq(queue->tx_irq); - u64_stats_update_begin(&stats->syncp); - stats->tx_bytes += skb->len; - stats->tx_packets++; - u64_stats_update_end(&stats->syncp); + u64_stats_update_begin(&tx_stats->syncp); + tx_stats->bytes += skb->len; + tx_stats->packets++; + u64_stats_update_end(&tx_stats->syncp); /* Note: It is not safe to access skb after xennet_tx_buf_gc()! */ xennet_tx_buf_gc(queue); @@ -931,7 +930,7 @@ static int checksum_setup(struct net_device *dev, struct sk_buff *skb) static int handle_incoming_queue(struct netfront_queue *queue, struct sk_buff_head *rxq) { - struct netfront_stats *stats = this_cpu_ptr(queue->info->stats); + struct netfront_stats *rx_stats = this_cpu_ptr(queue->info->rx_stats); int packets_dropped = 0; struct sk_buff *skb; @@ -952,10 +951,10 @@ static int handle_incoming_queue(struct netfront_queue *queue, continue; } - u64_stats_update_begin(&stats->syncp); - stats->rx_packets++; - stats->rx_bytes += skb->len; - u64_stats_update_end(&stats->syncp); + u64_stats_update_begin(&rx_stats->syncp); + rx_stats->packets++; + rx_stats->bytes += skb->len; + u64_stats_update_end(&rx_stats->syncp); /* Pass it up. 
*/ napi_gro_receive(&queue->napi, skb); @@ -1079,18 +1078,22 @@ static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev, int cpu; for_each_possible_cpu(cpu) { - struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu); + struct netfront_stats *rx_stats = per_cpu_ptr(np->rx_stats, cpu); + struct netfront_stats *tx_stats = per_cpu_ptr(np->tx_stats, cpu); u64 rx_packets, rx_bytes, tx_packets, tx_bytes; unsigned int start; do { - start = u64_stats_fetch_begin_irq(&stats->syncp); + start = u64_stats_fetch_begin_irq(&tx_stats->syncp); + tx_packets = tx_stats->packets; + tx_bytes = tx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start)); - rx_packets = stats->rx_packets; - tx_packets = stats->tx_packets; - rx_bytes = stats->rx_bytes; - tx_bytes = stats->tx_bytes; - } while (u64_stats_fetch_retry_irq(&stats->syncp, start)); + do { + start = u64_stats_fetch_begin_irq(&rx_stats->syncp); + rx_packets = rx_stats->packets; + rx_bytes = rx_stats->bytes; + } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start)); tot->rx_packets += rx_packets; tot->tx_packets += tx_packets; @@ -1275,6 +1278,15 @@ static const struct net_device_ops xennet_netdev_ops = { #endif }; +static void xennet_free_netdev(struct net_device *netdev) +{ + struct netfront_info *np = netdev_priv(netdev); + + free_percpu(np->rx_stats); + free_percpu(np->tx_stats); + free_netdev(netdev); +} + static struct net_device *xennet_create_dev(struct xenbus_device *dev) { int err; @@ -1295,8 +1307,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) np->queues = NULL; err = -ENOMEM; - np->stats = netdev_alloc_pcpu_stats(struct netfront_stats); - if (np->stats == NULL) + np->rx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); + if (np->rx_stats == NULL) + goto exit; + np->tx_stats = netdev_alloc_pcpu_stats(struct netfront_stats); + if (np->tx_stats == NULL) goto exit; netdev->netdev_ops = &xennet_netdev_ops; @@ -1327,7 +1342,7 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) return netdev; exit: - free_netdev(netdev); + xennet_free_netdev(netdev); return ERR_PTR(err); } @@ -1369,7 +1384,7 @@ static int netfront_probe(struct xenbus_device *dev, return 0; fail: - free_netdev(netdev); + xennet_free_netdev(netdev); dev_set_drvdata(&dev->dev, NULL); return err; } @@ -2189,9 +2204,7 @@ static int xennet_remove(struct xenbus_device *dev) info->queues = NULL; } - free_percpu(info->stats); - - free_netdev(info->netdev); + xennet_free_netdev(info->netdev); return 0; } diff --git a/drivers/phy/phy-miphy28lp.c b/drivers/phy/phy-miphy28lp.c index e34da13..27fa62c 100644 --- a/drivers/phy/phy-miphy28lp.c +++ b/drivers/phy/phy-miphy28lp.c @@ -1050,7 +1050,8 @@ static int miphy28lp_init(struct phy *phy) ret = miphy28lp_init_usb3(miphy_phy); break; default: - return -EINVAL; + ret = -EINVAL; + break; } mutex_unlock(&miphy_dev->miphy_mutex); diff --git a/drivers/phy/phy-omap-control.c b/drivers/phy/phy-omap-control.c index c96e818..efe724f 100644 --- a/drivers/phy/phy-omap-control.c +++ b/drivers/phy/phy-omap-control.c @@ -29,10 +29,9 @@ /** * omap_control_pcie_pcs - set the PCS delay count * @dev: the control module device - * @id: index of the pcie PHY (should be 1 or 2) * @delay: 8 bit delay value */ -void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) +void omap_control_pcie_pcs(struct device *dev, u8 delay) { u32 val; struct omap_control_phy *control_phy; @@ -55,8 +54,8 @@ void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) val = 
readl(control_phy->pcie_pcs); val &= ~(OMAP_CTRL_PCIE_PCS_MASK << - (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT)); - val |= delay << (id * OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); + OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); + val |= (delay << OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT); writel(val, control_phy->pcie_pcs); } EXPORT_SYMBOL_GPL(omap_control_pcie_pcs); diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c index fb02a67..a2b08f3c 100644 --- a/drivers/phy/phy-sun4i-usb.c +++ b/drivers/phy/phy-sun4i-usb.c @@ -244,7 +244,8 @@ static int sun4i_usb_phy_probe(struct platform_device *pdev) else data->num_phys = 3; - if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy")) + if (of_device_is_compatible(np, "allwinner,sun4i-a10-usb-phy") || + of_device_is_compatible(np, "allwinner,sun6i-a31-usb-phy")) data->disc_thresh = 3; else data->disc_thresh = 2; diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c index 1387b4d..465de2c 100644 --- a/drivers/phy/phy-ti-pipe3.c +++ b/drivers/phy/phy-ti-pipe3.c @@ -82,7 +82,6 @@ struct ti_pipe3 { struct clk *refclk; struct clk *div_clk; struct pipe3_dpll_map *dpll_map; - u8 id; }; static struct pipe3_dpll_map dpll_map_usb[] = { @@ -217,8 +216,13 @@ static int ti_pipe3_init(struct phy *x) u32 val; int ret = 0; + /* + * Set pcie_pcs register to 0x96 for proper functioning of phy + * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table + * 18-1804. + */ if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) { - omap_control_pcie_pcs(phy->control_dev, phy->id, 0xF1); + omap_control_pcie_pcs(phy->control_dev, 0x96); return 0; } @@ -347,8 +351,6 @@ static int ti_pipe3_probe(struct platform_device *pdev) } if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { - if (of_property_read_u8(node, "id", &phy->id) < 0) - phy->id = 1; clk = devm_clk_get(phy->dev, "dpll_ref"); if (IS_ERR(clk)) { diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c index e4f6551..89dca77 100644 --- a/drivers/pinctrl/core.c +++ b/drivers/pinctrl/core.c @@ -1801,14 +1801,15 @@ void pinctrl_unregister(struct pinctrl_dev *pctldev) if (pctldev == NULL) return; - mutex_lock(&pinctrldev_list_mutex); mutex_lock(&pctldev->mutex); - pinctrl_remove_device_debugfs(pctldev); + mutex_unlock(&pctldev->mutex); if (!IS_ERR(pctldev->p)) pinctrl_put(pctldev->p); + mutex_lock(&pinctrldev_list_mutex); + mutex_lock(&pctldev->mutex); /* TODO: check that no pinmuxes are still active? */ list_del(&pctldev->node); /* Destroy descriptor tree */ diff --git a/drivers/pinctrl/pinctrl-rockchip.c b/drivers/pinctrl/pinctrl-rockchip.c index ba74f0a..43eacc9 100644 --- a/drivers/pinctrl/pinctrl-rockchip.c +++ b/drivers/pinctrl/pinctrl-rockchip.c @@ -89,6 +89,7 @@ struct rockchip_iomux { * @reg_pull: optional separate register for additional pull settings * @clk: clock of the gpio bank * @irq: interrupt of the gpio bank + * @saved_enables: Saved content of GPIO_INTEN at suspend time. 
* @pin_base: first pin number * @nr_pins: number of pins in this bank * @name: name of the bank @@ -107,6 +108,7 @@ struct rockchip_pin_bank { struct regmap *regmap_pull; struct clk *clk; int irq; + u32 saved_enables; u32 pin_base; u8 nr_pins; char *name; @@ -1396,10 +1398,7 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_get_chip(irq); struct rockchip_pin_bank *bank = irq_get_handler_data(irq); - u32 polarity = 0, data = 0; u32 pend; - bool edge_changed = false; - unsigned long flags; dev_dbg(bank->drvdata->dev, "got irq for bank %s\n", bank->name); @@ -1407,12 +1406,6 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) pend = readl_relaxed(bank->reg_base + GPIO_INT_STATUS); - if (bank->toggle_edge_mode) { - polarity = readl_relaxed(bank->reg_base + - GPIO_INT_POLARITY); - data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); - } - while (pend) { unsigned int virq; @@ -1432,27 +1425,31 @@ static void rockchip_irq_demux(unsigned int irq, struct irq_desc *desc) * needs manual intervention. */ if (bank->toggle_edge_mode & BIT(irq)) { - if (data & BIT(irq)) - polarity &= ~BIT(irq); - else - polarity |= BIT(irq); + u32 data, data_old, polarity; + unsigned long flags; - edge_changed = true; - } + data = readl_relaxed(bank->reg_base + GPIO_EXT_PORT); + do { + spin_lock_irqsave(&bank->slock, flags); - generic_handle_irq(virq); - } + polarity = readl_relaxed(bank->reg_base + + GPIO_INT_POLARITY); + if (data & BIT(irq)) + polarity &= ~BIT(irq); + else + polarity |= BIT(irq); + writel(polarity, + bank->reg_base + GPIO_INT_POLARITY); - if (bank->toggle_edge_mode && edge_changed) { - /* Interrupt params should only be set with ints disabled */ - spin_lock_irqsave(&bank->slock, flags); + spin_unlock_irqrestore(&bank->slock, flags); - data = readl_relaxed(bank->reg_base + GPIO_INTEN); - writel_relaxed(0, bank->reg_base + GPIO_INTEN); - writel(polarity, bank->reg_base + GPIO_INT_POLARITY); - writel(data, bank->reg_base + GPIO_INTEN); + data_old = data; + data = readl_relaxed(bank->reg_base + + GPIO_EXT_PORT); + } while ((data & BIT(irq)) != (data_old & BIT(irq))); + } - spin_unlock_irqrestore(&bank->slock, flags); + generic_handle_irq(virq); } chained_irq_exit(chip, desc); @@ -1543,6 +1540,51 @@ static int rockchip_irq_set_type(struct irq_data *d, unsigned int type) return 0; } +static void rockchip_irq_suspend(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct rockchip_pin_bank *bank = gc->private; + + bank->saved_enables = irq_reg_readl(gc, GPIO_INTEN); + irq_reg_writel(gc, gc->wake_active, GPIO_INTEN); +} + +static void rockchip_irq_resume(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + struct rockchip_pin_bank *bank = gc->private; + + irq_reg_writel(gc, bank->saved_enables, GPIO_INTEN); +} + +static void rockchip_irq_disable(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + u32 val; + + irq_gc_lock(gc); + + val = irq_reg_readl(gc, GPIO_INTEN); + val &= ~d->mask; + irq_reg_writel(gc, val, GPIO_INTEN); + + irq_gc_unlock(gc); +} + +static void rockchip_irq_enable(struct irq_data *d) +{ + struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); + u32 val; + + irq_gc_lock(gc); + + val = irq_reg_readl(gc, GPIO_INTEN); + val |= d->mask; + irq_reg_writel(gc, val, GPIO_INTEN); + + irq_gc_unlock(gc); +} + static int rockchip_interrupts_register(struct platform_device *pdev, struct rockchip_pinctrl *info) { 
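[Editor's note] The pinctrl-rockchip hunk above saves GPIO_INTEN into the new saved_enables field on suspend, arms only the wake-capable sources while suspended, and restores the full mask on resume. The sketch below is a minimal user-space illustration of that save/mask/restore pattern only; the struct, field names, and values are invented for the example and are not the driver's actual registers or API.

```c
/* Illustrative sketch (not kernel code) of the suspend/resume pattern added
 * in the hunk above: save the interrupt-enable mask, keep only wake sources
 * armed across suspend, restore on resume. All names here are made up. */
#include <stdint.h>
#include <stdio.h>

struct fake_bank {
	uint32_t inten;          /* stands in for GPIO_INTEN */
	uint32_t wake_active;    /* stands in for gc->wake_active */
	uint32_t saved_enables;  /* stands in for bank->saved_enables */
};

static void bank_suspend(struct fake_bank *b)
{
	b->saved_enables = b->inten;   /* remember the full enable mask */
	b->inten = b->wake_active;     /* leave only wake sources enabled */
}

static void bank_resume(struct fake_bank *b)
{
	b->inten = b->saved_enables;   /* restore the pre-suspend mask */
}

int main(void)
{
	struct fake_bank b = { .inten = 0x00ff, .wake_active = 0x0004 };

	bank_suspend(&b);
	printf("suspended: inten=%#x saved=%#x\n", b.inten, b.saved_enables);
	bank_resume(&b);
	printf("resumed:   inten=%#x\n", b.inten);
	return 0;
}
```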
@@ -1581,12 +1623,16 @@ static int rockchip_interrupts_register(struct platform_device *pdev, gc = irq_get_domain_generic_chip(bank->domain, 0); gc->reg_base = bank->reg_base; gc->private = bank; - gc->chip_types[0].regs.mask = GPIO_INTEN; + gc->chip_types[0].regs.mask = GPIO_INTMASK; gc->chip_types[0].regs.ack = GPIO_PORTS_EOI; gc->chip_types[0].chip.irq_ack = irq_gc_ack_set_bit; - gc->chip_types[0].chip.irq_mask = irq_gc_mask_clr_bit; - gc->chip_types[0].chip.irq_unmask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; + gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; + gc->chip_types[0].chip.irq_enable = rockchip_irq_enable; + gc->chip_types[0].chip.irq_disable = rockchip_irq_disable; gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; + gc->chip_types[0].chip.irq_suspend = rockchip_irq_suspend; + gc->chip_types[0].chip.irq_resume = rockchip_irq_resume; gc->chip_types[0].chip.irq_set_type = rockchip_irq_set_type; gc->wake_enabled = IRQ_MSK(bank->nr_pins); diff --git a/drivers/pinctrl/pinctrl-st.c b/drivers/pinctrl/pinctrl-st.c index 7c9d513..9e5ec00 100644 --- a/drivers/pinctrl/pinctrl-st.c +++ b/drivers/pinctrl/pinctrl-st.c @@ -1012,8 +1012,10 @@ static void st_pinconf_dbg_show(struct pinctrl_dev *pctldev, struct seq_file *s, unsigned pin_id) { unsigned long config; - st_pinconf_get(pctldev, pin_id, &config); + mutex_unlock(&pctldev->mutex); + st_pinconf_get(pctldev, pin_id, &config); + mutex_lock(&pctldev->mutex); seq_printf(s, "[OE:%ld,PU:%ld,OD:%ld]\n" "\t\t[retime:%ld,invclk:%ld,clknotdat:%ld," "de:%ld,rt-clk:%ld,rt-delay:%ld]", @@ -1443,6 +1445,7 @@ static struct gpio_chip st_gpio_template = { static struct irq_chip st_gpio_irqchip = { .name = "GPIO", + .irq_disable = st_gpio_irq_mask, .irq_mask = st_gpio_irq_mask, .irq_unmask = st_gpio_irq_unmask, .irq_set_type = st_gpio_irq_set_type, diff --git a/drivers/pinctrl/pinctrl-xway.c b/drivers/pinctrl/pinctrl-xway.c index c5cef59..779950c 100644 --- a/drivers/pinctrl/pinctrl-xway.c +++ b/drivers/pinctrl/pinctrl-xway.c @@ -798,10 +798,8 @@ static int pinmux_xway_probe(struct platform_device *pdev) /* load the gpio chip */ xway_chip.dev = &pdev->dev; - of_gpiochip_add(&xway_chip); ret = gpiochip_add(&xway_chip); if (ret) { - of_gpiochip_remove(&xway_chip); dev_err(&pdev->dev, "Failed to register gpio chip\n"); return ret; } diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c index e730935..ed7017d 100644 --- a/drivers/pinctrl/qcom/pinctrl-msm.c +++ b/drivers/pinctrl/qcom/pinctrl-msm.c @@ -865,10 +865,10 @@ static int msm_ps_hold_restart(struct notifier_block *nb, unsigned long action, static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl) { - int i = 0; + int i; const struct msm_function *func = pctrl->soc->functions; - for (; i <= pctrl->soc->nfunctions; i++) + for (i = 0; i < pctrl->soc->nfunctions; i++) if (!strcmp(func[i].name, "ps_hold")) { pctrl->restart_nb.notifier_call = msm_ps_hold_restart; pctrl->restart_nb.priority = 128; diff --git a/drivers/reset/reset-sunxi.c b/drivers/reset/reset-sunxi.c index eebc52c..3d95c87 100644 --- a/drivers/reset/reset-sunxi.c +++ b/drivers/reset/reset-sunxi.c @@ -102,6 +102,8 @@ static int sunxi_reset_init(struct device_node *np) goto err_alloc; } + spin_lock_init(&data->lock); + data->rcdev.owner = THIS_MODULE; data->rcdev.nr_resets = size * 32; data->rcdev.ops = &sunxi_reset_ops; @@ -157,6 +159,8 @@ static int sunxi_reset_probe(struct platform_device *pdev) if (IS_ERR(data->membase)) return 
PTR_ERR(data->membase); + spin_lock_init(&data->lock); + data->rcdev.owner = THIS_MODULE; data->rcdev.nr_resets = resource_size(res) * 32; data->rcdev.ops = &sunxi_reset_ops; diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index 91e97ec..4d41bf7 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c @@ -1163,9 +1163,13 @@ static inline int ap_test_config_card_id(unsigned int id) */ static inline int ap_test_config_domain(unsigned int domain) { - if (!ap_configuration) - return 1; - return ap_test_config(ap_configuration->aqm, domain); + if (!ap_configuration) /* QCI not supported */ + if (domain < 16) + return 1; /* then domains 0...15 are configured */ + else + return 0; + else + return ap_test_config(ap_configuration->aqm, domain); } /** diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c index 12ca291..cce1cbc 100644 --- a/drivers/scsi/qla2xxx/qla_os.c +++ b/drivers/scsi/qla2xxx/qla_os.c @@ -734,7 +734,9 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd) * Return target busy if we've received a non-zero retry_delay_timer * in a FCP_RSP. */ - if (time_after(jiffies, fcport->retry_delay_timestamp)) + if (fcport->retry_delay_timestamp == 0) { + /* retry delay not set */ + } else if (time_after(jiffies, fcport->retry_delay_timestamp)) fcport->retry_delay_timestamp = 0; else goto qc24_target_busy; diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c index 9ea95dd..6d5c0b8 100644 --- a/drivers/scsi/scsi_lib.c +++ b/drivers/scsi/scsi_lib.c @@ -591,7 +591,6 @@ static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) { struct scatterlist *first_chunk = NULL; - gfp_t gfp_mask = mq ? 
GFP_NOIO : GFP_ATOMIC; int ret; BUG_ON(!nents); @@ -606,7 +605,7 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq) } ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS, - first_chunk, gfp_mask, scsi_sg_alloc); + first_chunk, GFP_ATOMIC, scsi_sg_alloc); if (unlikely(ret)) scsi_free_sgtable(sdb, mq); return ret; diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c index 86c72ba..f8c5fc3 100644 --- a/drivers/staging/vt6655/baseband.c +++ b/drivers/staging/vt6655/baseband.c @@ -2177,7 +2177,7 @@ bool BBbVT3253Init(struct vnt_private *priv) /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */ /*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/ /* Select VC1/VC2, CR215 = 0x02->0x06 */ - bResult &= BBbWriteEmbedded(dwIoBase, 0xd7, 0x06); + bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06); /* }} */ for (ii = 0; ii < CB_VT3253B0_AGC; ii++) diff --git a/drivers/staging/vt6655/channel.c b/drivers/staging/vt6655/channel.c index c8f739d..70f8705 100644 --- a/drivers/staging/vt6655/channel.c +++ b/drivers/staging/vt6655/channel.c @@ -182,6 +182,14 @@ bool set_channel(void *pDeviceHandler, unsigned int uConnectionChannel) if (pDevice->byCurrentCh == uConnectionChannel) return bResult; + /* Set VGA to max sensitivity */ + if (pDevice->bUpdateBBVGA && + pDevice->byBBVGACurrent != pDevice->abyBBVGA[0]) { + pDevice->byBBVGACurrent = pDevice->abyBBVGA[0]; + + BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); + } + /* clear NAV */ MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MACCR, MACCR_CLRNAV); diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c index 83e4162..cd1a277 100644 --- a/drivers/staging/vt6655/device_main.c +++ b/drivers/staging/vt6655/device_main.c @@ -1232,7 +1232,7 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) head_td = priv->apCurrTD[dma_idx]; - head_td->m_td1TD1.byTCR = (TCR_EDP|TCR_STP); + head_td->m_td1TD1.byTCR = 0; head_td->pTDInfo->skb = skb; @@ -1257,6 +1257,11 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb) priv->bPWBitOn = false; + /* Set TSR1 & ReqCount in TxDescHead */ + head_td->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU); + head_td->m_td1TD1.wReqCount = + cpu_to_le16((u16)head_td->pTDInfo->dwReqCount); + head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB; if (dma_idx == TYPE_AC0DMA) @@ -1500,9 +1505,11 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw, if (conf->enable_beacon) { vnt_beacon_enable(priv, vif, conf); - MACvRegBitsOn(priv, MAC_REG_TCR, TCR_AUTOBCNTX); + MACvRegBitsOn(priv->PortOffset, MAC_REG_TCR, + TCR_AUTOBCNTX); } else { - MACvRegBitsOff(priv, MAC_REG_TCR, TCR_AUTOBCNTX); + MACvRegBitsOff(priv->PortOffset, MAC_REG_TCR, + TCR_AUTOBCNTX); } } diff --git a/drivers/staging/vt6655/rxtx.c b/drivers/staging/vt6655/rxtx.c index 61c39dd..b5b0155 100644 --- a/drivers/staging/vt6655/rxtx.c +++ b/drivers/staging/vt6655/rxtx.c @@ -1204,13 +1204,10 @@ s_cbFillTxBufHead(struct vnt_private *pDevice, unsigned char byPktType, ptdCurr = (PSTxDesc)pHeadTD; - ptdCurr->pTDInfo->dwReqCount = cbReqCount - uPadding; + ptdCurr->pTDInfo->dwReqCount = cbReqCount; ptdCurr->pTDInfo->dwHeaderLength = cbHeaderLength; ptdCurr->pTDInfo->skb_dma = ptdCurr->pTDInfo->buf_dma; ptdCurr->buff_addr = cpu_to_le32(ptdCurr->pTDInfo->skb_dma); - /* Set TSR1 & ReqCount in TxDescHead */ - ptdCurr->m_td1TD1.byTCR |= (TCR_STP | TCR_EDP | EDMSDU); - ptdCurr->m_td1TD1.wReqCount = 
cpu_to_le16((unsigned short)(cbReqCount)); return cbHeaderLength; } diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 55f6774..aebde32 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -2027,10 +2027,10 @@ iscsit_process_text_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd, goto reject; } if (!strncmp("=All", text_ptr, 4)) { - cmd->cmd_flags |= IFC_SENDTARGETS_ALL; + cmd->cmd_flags |= ICF_SENDTARGETS_ALL; } else if (!strncmp("=iqn.", text_ptr, 5) || !strncmp("=eui.", text_ptr, 5)) { - cmd->cmd_flags |= IFC_SENDTARGETS_SINGLE; + cmd->cmd_flags |= ICF_SENDTARGETS_SINGLE; } else { pr_err("Unable to locate valid SendTargets=%s value\n", text_ptr); goto reject; @@ -3415,10 +3415,10 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, return -ENOMEM; } /* - * Locate pointer to iqn./eui. string for IFC_SENDTARGETS_SINGLE + * Locate pointer to iqn./eui. string for ICF_SENDTARGETS_SINGLE * explicit case.. */ - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) { + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) { text_ptr = strchr(text_in, '='); if (!text_ptr) { pr_err("Unable to locate '=' string in text_in:" @@ -3434,7 +3434,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd, spin_lock(&tiqn_lock); list_for_each_entry(tiqn, &g_tiqn_list, tiqn_list) { - if ((cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) && + if ((cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) && strcmp(tiqn->tiqn, text_ptr)) { continue; } @@ -3512,7 +3512,7 @@ eob: if (end_of_buf) break; - if (cmd->cmd_flags & IFC_SENDTARGETS_SINGLE) + if (cmd->cmd_flags & ICF_SENDTARGETS_SINGLE) break; } spin_unlock(&tiqn_lock); diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 09a522b..cbcff38 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -135,8 +135,8 @@ enum cmd_flags_table { ICF_CONTIG_MEMORY = 0x00000020, ICF_ATTACHED_TO_RQUEUE = 0x00000040, ICF_OOO_CMDSN = 0x00000080, - IFC_SENDTARGETS_ALL = 0x00000100, - IFC_SENDTARGETS_SINGLE = 0x00000200, + ICF_SENDTARGETS_ALL = 0x00000100, + ICF_SENDTARGETS_SINGLE = 0x00000200, }; /* struct iscsi_cmd->i_state */ diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index 7653cfb..58f49ff 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -1103,51 +1103,6 @@ int se_dev_set_queue_depth(struct se_device *dev, u32 queue_depth) } EXPORT_SYMBOL(se_dev_set_queue_depth); -int se_dev_set_fabric_max_sectors(struct se_device *dev, u32 fabric_max_sectors) -{ - int block_size = dev->dev_attrib.block_size; - - if (dev->export_count) { - pr_err("dev[%p]: Unable to change SE Device" - " fabric_max_sectors while export_count is %d\n", - dev, dev->export_count); - return -EINVAL; - } - if (!fabric_max_sectors) { - pr_err("dev[%p]: Illegal ZERO value for" - " fabric_max_sectors\n", dev); - return -EINVAL; - } - if (fabric_max_sectors < DA_STATUS_MAX_SECTORS_MIN) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u less than" - " DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, fabric_max_sectors, - DA_STATUS_MAX_SECTORS_MIN); - return -EINVAL; - } - if (fabric_max_sectors > DA_STATUS_MAX_SECTORS_MAX) { - pr_err("dev[%p]: Passed fabric_max_sectors: %u" - " greater than DA_STATUS_MAX_SECTORS_MAX:" - " %u\n", dev, fabric_max_sectors, - DA_STATUS_MAX_SECTORS_MAX); - return -EINVAL; - } - /* - * Align max_sectors down to PAGE_SIZE to follow 
transport_allocate_data_tasks() - */ - if (!block_size) { - block_size = 512; - pr_warn("Defaulting to 512 for zero block_size\n"); - } - fabric_max_sectors = se_dev_align_max_sectors(fabric_max_sectors, - block_size); - - dev->dev_attrib.fabric_max_sectors = fabric_max_sectors; - pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", - dev, fabric_max_sectors); - return 0; -} -EXPORT_SYMBOL(se_dev_set_fabric_max_sectors); - int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) { if (dev->export_count) { @@ -1156,10 +1111,10 @@ int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors) dev, dev->export_count); return -EINVAL; } - if (optimal_sectors > dev->dev_attrib.fabric_max_sectors) { + if (optimal_sectors > dev->dev_attrib.hw_max_sectors) { pr_err("dev[%p]: Passed optimal_sectors %u cannot be" - " greater than fabric_max_sectors: %u\n", dev, - optimal_sectors, dev->dev_attrib.fabric_max_sectors); + " greater than hw_max_sectors: %u\n", dev, + optimal_sectors, dev->dev_attrib.hw_max_sectors); return -EINVAL; } @@ -1553,8 +1508,6 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name) dev->dev_attrib.unmap_granularity_alignment = DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT; dev->dev_attrib.max_write_same_len = DA_MAX_WRITE_SAME_LEN; - dev->dev_attrib.fabric_max_sectors = DA_FABRIC_MAX_SECTORS; - dev->dev_attrib.optimal_sectors = DA_FABRIC_MAX_SECTORS; xcopy_lun = &dev->xcopy_lun; xcopy_lun->lun_se_dev = dev; @@ -1595,6 +1548,7 @@ int target_configure_device(struct se_device *dev) dev->dev_attrib.hw_max_sectors = se_dev_align_max_sectors(dev->dev_attrib.hw_max_sectors, dev->dev_attrib.hw_block_size); + dev->dev_attrib.optimal_sectors = dev->dev_attrib.hw_max_sectors; dev->dev_index = scsi_get_new_index(SCSI_DEVICE_INDEX); dev->creation_time = get_jiffies_64(); diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index c2aea09..d836de2 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -621,7 +621,16 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents, struct fd_prot fd_prot; sense_reason_t rc; int ret = 0; - + /* + * We are currently limited by the number of iovecs (2048) per + * single vfs_[writev,readv] call. + */ + if (cmd->data_length > FD_MAX_BYTES) { + pr_err("FILEIO: Not able to process I/O of %u bytes due to" + "FD_MAX_BYTES: %u iovec count limitiation\n", + cmd->data_length, FD_MAX_BYTES); + return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + } /* * Call vectorized fileio functions to map struct scatterlist * physical memory addresses to struct iovec virtual memory. 
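[Editor's note] The target_core_file hunk above rejects commands whose data_length exceeds FD_MAX_BYTES because, per the added comment, a single vfs_readv/vfs_writev call is limited to 2048 iovecs. The stand-alone sketch below shows only the shape of that guard; IOV_BUDGET and PAGE_SZ are illustrative constants chosen for the example, not the kernel's actual FD_MAX_BYTES derivation.

```c
/* Hypothetical user-space sketch of the length guard added above: reject a
 * request that cannot be covered by the per-call iovec budget. The constants
 * are assumptions for illustration, not values taken from the kernel. */
#include <stdint.h>
#include <stdio.h>

#define IOV_BUDGET 2048u   /* iovecs available per vfs_readv/writev call */
#define PAGE_SZ    4096u   /* assume one page of payload per iovec entry */

static int check_io_length(uint32_t data_length)
{
	uint64_t max_bytes = (uint64_t)IOV_BUDGET * PAGE_SZ;

	if (data_length > max_bytes) {
		fprintf(stderr, "I/O of %u bytes exceeds iovec limit of %llu bytes\n",
			data_length, (unsigned long long)max_bytes);
		return -1;     /* caller would map this to a SCSI sense code */
	}
	return 0;
}

int main(void)
{
	printf("8 MiB request:  %s\n", check_io_length(8u << 20) ? "rejected" : "ok");
	printf("16 MiB request: %s\n", check_io_length(16u << 20) ? "rejected" : "ok");
	return 0;
}
```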
@@ -959,7 +968,6 @@ static struct configfs_attribute *fileio_backend_dev_attrs[] = { &fileio_dev_attrib_hw_block_size.attr, &fileio_dev_attrib_block_size.attr, &fileio_dev_attrib_hw_max_sectors.attr, - &fileio_dev_attrib_fabric_max_sectors.attr, &fileio_dev_attrib_optimal_sectors.attr, &fileio_dev_attrib_hw_queue_depth.attr, &fileio_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 3efff94..78346b8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -124,7 +124,7 @@ static int iblock_configure_device(struct se_device *dev) q = bdev_get_queue(bd); dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd); - dev->dev_attrib.hw_max_sectors = UINT_MAX; + dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q); dev->dev_attrib.hw_queue_depth = q->nr_requests; /* @@ -883,7 +883,6 @@ static struct configfs_attribute *iblock_backend_dev_attrs[] = { &iblock_dev_attrib_hw_block_size.attr, &iblock_dev_attrib_block_size.attr, &iblock_dev_attrib_hw_max_sectors.attr, - &iblock_dev_attrib_fabric_max_sectors.attr, &iblock_dev_attrib_optimal_sectors.attr, &iblock_dev_attrib_hw_queue_depth.attr, &iblock_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index d56f2aa..283cf78 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -528,6 +528,18 @@ static int core_scsi3_pr_seq_non_holder( return 0; } + } else if (we && registered_nexus) { + /* + * Reads are allowed for Write Exclusive locks + * from all registrants. + */ + if (cmd->data_direction == DMA_FROM_DEVICE) { + pr_debug("Allowing READ CDB: 0x%02x for %s" + " reservation\n", cdb[0], + core_scsi3_pr_dump_type(pr_reg_type)); + + return 0; + } } pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x" " for %s reservation\n", transport_dump_cmd_direction(cmd), diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 60ebd17..98e83ac 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -657,7 +657,6 @@ static struct configfs_attribute *rd_mcp_backend_dev_attrs[] = { &rd_mcp_dev_attrib_hw_block_size.attr, &rd_mcp_dev_attrib_block_size.attr, &rd_mcp_dev_attrib_hw_max_sectors.attr, - &rd_mcp_dev_attrib_fabric_max_sectors.attr, &rd_mcp_dev_attrib_optimal_sectors.attr, &rd_mcp_dev_attrib_hw_queue_depth.attr, &rd_mcp_dev_attrib_queue_depth.attr, diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 11bea19..cd4bed7 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -953,21 +953,6 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops) if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { unsigned long long end_lba; - - if (sectors > dev->dev_attrib.fabric_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds fabric_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.fabric_max_sectors); - return TCM_INVALID_CDB_FIELD; - } - if (sectors > dev->dev_attrib.hw_max_sectors) { - printk_ratelimited(KERN_ERR "SCSI OP %02xh with too" - " big sectors %u exceeds backend hw_max_sectors:" - " %u\n", cdb[0], sectors, - dev->dev_attrib.hw_max_sectors); - return TCM_INVALID_CDB_FIELD; - } check_lba: end_lba = dev->transport->get_blocks(dev) + 1; if (cmd->t_task_lba + sectors > end_lba) { diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c index 1307600..4c71657 100644 --- 
a/drivers/target/target_core_spc.c +++ b/drivers/target/target_core_spc.c @@ -505,7 +505,6 @@ static sense_reason_t spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) { struct se_device *dev = cmd->se_dev; - u32 max_sectors; int have_tp = 0; int opt, min; @@ -539,9 +538,7 @@ spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf) /* * Set MAXIMUM TRANSFER LENGTH */ - max_sectors = min(dev->dev_attrib.fabric_max_sectors, - dev->dev_attrib.hw_max_sectors); - put_unaligned_be32(max_sectors, &buf[8]); + put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]); /* * Set OPTIMAL TRANSFER LENGTH diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c index 8bfa61c..1157b55 100644 --- a/drivers/target/target_core_user.c +++ b/drivers/target/target_core_user.c @@ -1118,7 +1118,6 @@ static struct configfs_attribute *tcmu_backend_dev_attrs[] = { &tcmu_dev_attrib_hw_block_size.attr, &tcmu_dev_attrib_block_size.attr, &tcmu_dev_attrib_hw_max_sectors.attr, - &tcmu_dev_attrib_fabric_max_sectors.attr, &tcmu_dev_attrib_optimal_sectors.attr, &tcmu_dev_attrib_hw_queue_depth.attr, &tcmu_dev_attrib_queue_depth.attr, diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c index c1188ac..2ccbc07 100644 --- a/drivers/thermal/imx_thermal.c +++ b/drivers/thermal/imx_thermal.c @@ -608,6 +608,7 @@ static int imx_thermal_suspend(struct device *dev) regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_MEASURE_TEMP); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_POWER_DOWN); data->mode = THERMAL_DEVICE_DISABLED; + clk_disable_unprepare(data->thermal_clk); return 0; } @@ -617,6 +618,7 @@ static int imx_thermal_resume(struct device *dev) struct imx_thermal_data *data = dev_get_drvdata(dev); struct regmap *map = data->tempmon; + clk_prepare_enable(data->thermal_clk); /* Enabled thermal sensor after resume */ regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c index 231cabc..2c2ec76 100644 --- a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c +++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c @@ -119,15 +119,11 @@ int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp, continue; result = acpi_bus_get_device(trt->source, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get source ACPI device\n"); result = acpi_bus_get_device(trt->target, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get target ACPI device\n"); } @@ -206,16 +202,12 @@ int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp, if (art->source) { result = acpi_bus_get_device(art->source, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get source ACPI device\n"); } if (art->target) { result = acpi_bus_get_device(art->target, &adev); - if (!result) - acpi_create_platform_device(adev); - else + if (result) pr_warn("Failed to get source ACPI device\n"); } } diff --git a/drivers/thermal/int340x_thermal/processor_thermal_device.c b/drivers/thermal/int340x_thermal/processor_thermal_device.c index 31bb553..0fe5dbb 100644 --- a/drivers/thermal/int340x_thermal/processor_thermal_device.c +++ b/drivers/thermal/int340x_thermal/processor_thermal_device.c @@ -130,6 +130,8 @@ static int proc_thermal_add(struct device *dev, int 
ret; adev = ACPI_COMPANION(dev); + if (!adev) + return -ENODEV; status = acpi_evaluate_object(adev->handle, "PPCC", NULL, &buf); if (ACPI_FAILURE(status)) diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c index e145b66..d717f3d 100644 --- a/drivers/thermal/of-thermal.c +++ b/drivers/thermal/of-thermal.c @@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(of_thermal_is_trip_valid); * * Return: pointer to trip points table, NULL otherwise */ -const struct thermal_trip * const +const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) { struct __thermal_zone *data = tz->devdata; diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c index 8803e69..2580a48 100644 --- a/drivers/thermal/rcar_thermal.c +++ b/drivers/thermal/rcar_thermal.c @@ -63,7 +63,7 @@ struct rcar_thermal_priv { struct mutex lock; struct list_head list; int id; - int ctemp; + u32 ctemp; }; #define rcar_thermal_for_each_priv(pos, common) \ @@ -145,7 +145,7 @@ static int rcar_thermal_update_temp(struct rcar_thermal_priv *priv) { struct device *dev = rcar_priv_to_dev(priv); int i; - int ctemp, old, new; + u32 ctemp, old, new; int ret = -EINVAL; mutex_lock(&priv->lock); @@ -372,6 +372,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) int i; int ret = -ENODEV; int idle = IDLE_INTERVAL; + u32 enr_bits = 0; common = devm_kzalloc(dev, sizeof(*common), GFP_KERNEL); if (!common) @@ -390,7 +391,7 @@ static int rcar_thermal_probe(struct platform_device *pdev) /* * platform has IRQ support. - * Then, drier use common register + * Then, driver uses common registers */ ret = devm_request_irq(dev, irq->start, rcar_thermal_irq, 0, @@ -408,9 +409,6 @@ static int rcar_thermal_probe(struct platform_device *pdev) if (IS_ERR(common->base)) return PTR_ERR(common->base); - /* enable temperature comparation */ - rcar_thermal_common_write(common, ENR, 0x00030303); - idle = 0; /* polling delay is not needed */ } @@ -452,8 +450,15 @@ static int rcar_thermal_probe(struct platform_device *pdev) rcar_thermal_irq_enable(priv); list_move_tail(&priv->list, &common->head); + + /* update ENR bits */ + enr_bits |= 3 << (i * 8); } + /* enable temperature comparation */ + if (irq) + rcar_thermal_common_write(common, ENR, enr_bits); + platform_set_drvdata(pdev, common); dev_info(dev, "%d sensor probed\n", i); diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h index 9083e75..0531c75 100644 --- a/drivers/thermal/thermal_core.h +++ b/drivers/thermal/thermal_core.h @@ -91,7 +91,7 @@ int of_parse_thermal_zones(void); void of_thermal_destroy_zones(void); int of_thermal_get_ntrips(struct thermal_zone_device *); bool of_thermal_is_trip_valid(struct thermal_zone_device *, int); -const struct thermal_trip * const +const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *); #else static inline int of_parse_thermal_zones(void) { return 0; } @@ -105,7 +105,7 @@ static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz, { return 0; } -static inline const struct thermal_trip * const +static inline const struct thermal_trip * of_thermal_get_trip_points(struct thermal_zone_device *tz) { return NULL; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index d2b4967..4ddfa60 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -2399,17 +2399,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file, poll_wait(file, &tty->read_wait, wait); poll_wait(file, &tty->write_wait, wait); - if 
(test_bit(TTY_OTHER_CLOSED, &tty->flags)) - mask |= POLLHUP; if (input_available_p(tty, 1)) mask |= POLLIN | POLLRDNORM; - else if (mask & POLLHUP) { - tty_flush_to_ldisc(tty); - if (input_available_p(tty, 1)) - mask |= POLLIN | POLLRDNORM; - } if (tty->packet && tty->link->ctrl_status) mask |= POLLPRI | POLLIN | POLLRDNORM; + if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) + mask |= POLLHUP; if (tty_hung_up_p(file)) mask |= POLLHUP; if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c index 31feeb2..d1f8dc6 100644 --- a/drivers/tty/serial/8250/8250_pci.c +++ b/drivers/tty/serial/8250/8250_pci.c @@ -1815,7 +1815,7 @@ pci_wch_ch353_setup(struct serial_private *priv, } static int -pci_wch_ch382_setup(struct serial_private *priv, +pci_wch_ch38x_setup(struct serial_private *priv, const struct pciserial_board *board, struct uart_8250_port *port, int idx) { @@ -1880,6 +1880,7 @@ pci_wch_ch382_setup(struct serial_private *priv, #define PCIE_VENDOR_ID_WCH 0x1c00 #define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250 +#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470 /* Unknown vendors/cards - this should not be in linux/pci_ids.h */ #define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584 @@ -2571,13 +2572,21 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = { .subdevice = PCI_ANY_ID, .setup = pci_wch_ch353_setup, }, - /* WCH CH382 2S1P card (16750 clone) */ + /* WCH CH382 2S1P card (16850 clone) */ { .vendor = PCIE_VENDOR_ID_WCH, .device = PCIE_DEVICE_ID_WCH_CH382_2S1P, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, - .setup = pci_wch_ch382_setup, + .setup = pci_wch_ch38x_setup, + }, + /* WCH CH384 4S card (16850 clone) */ + { + .vendor = PCIE_VENDOR_ID_WCH, + .device = PCIE_DEVICE_ID_WCH_CH384_4S, + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + .setup = pci_wch_ch38x_setup, }, /* * ASIX devices with FIFO bug @@ -2876,6 +2885,7 @@ enum pci_board_num_t { pbn_fintek_4, pbn_fintek_8, pbn_fintek_12, + pbn_wch384_4, }; /* @@ -3675,6 +3685,14 @@ static struct pciserial_board pci_boards[] = { .base_baud = 115200, .first_offset = 0x40, }, + + [pbn_wch384_4] = { + .flags = FL_BASE0, + .num_ports = 4, + .base_baud = 115200, + .uart_offset = 8, + .first_offset = 0xC0, + }, }; static const struct pci_device_id blacklist[] = { @@ -3687,6 +3705,7 @@ static const struct pci_device_id blacklist[] = { { PCI_DEVICE(0x4348, 0x7053), }, /* WCH CH353 2S1P */ { PCI_DEVICE(0x4348, 0x5053), }, /* WCH CH353 1S1P */ { PCI_DEVICE(0x1c00, 0x3250), }, /* WCH CH382 2S1P */ + { PCI_DEVICE(0x1c00, 0x3470), }, /* WCH CH384 4S */ }; /* @@ -5400,6 +5419,10 @@ static struct pci_device_id serial_pci_tbl[] = { PCI_ANY_ID, PCI_ANY_ID, 0, 0, pbn_b0_bt_2_115200 }, + { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S, + PCI_ANY_ID, PCI_ANY_ID, + 0, 0, pbn_wch384_4 }, + /* * Commtech, Inc. 
Fastcom adapters */ diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c index 19273e3..107e807 100644 --- a/drivers/tty/serial/samsung.c +++ b/drivers/tty/serial/samsung.c @@ -1757,32 +1757,43 @@ static struct s3c24xx_serial_drv_data s5pv210_serial_drv_data = { #endif #if defined(CONFIG_ARCH_EXYNOS) +#define EXYNOS_COMMON_SERIAL_DRV_DATA \ + .info = &(struct s3c24xx_uart_info) { \ + .name = "Samsung Exynos UART", \ + .type = PORT_S3C6400, \ + .has_divslot = 1, \ + .rx_fifomask = S5PV210_UFSTAT_RXMASK, \ + .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, \ + .rx_fifofull = S5PV210_UFSTAT_RXFULL, \ + .tx_fifofull = S5PV210_UFSTAT_TXFULL, \ + .tx_fifomask = S5PV210_UFSTAT_TXMASK, \ + .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, \ + .def_clk_sel = S3C2410_UCON_CLKSEL0, \ + .num_clks = 1, \ + .clksel_mask = 0, \ + .clksel_shift = 0, \ + }, \ + .def_cfg = &(struct s3c2410_uartcfg) { \ + .ucon = S5PV210_UCON_DEFAULT, \ + .ufcon = S5PV210_UFCON_DEFAULT, \ + .has_fracval = 1, \ + } \ + static struct s3c24xx_serial_drv_data exynos4210_serial_drv_data = { - .info = &(struct s3c24xx_uart_info) { - .name = "Samsung Exynos4 UART", - .type = PORT_S3C6400, - .has_divslot = 1, - .rx_fifomask = S5PV210_UFSTAT_RXMASK, - .rx_fifoshift = S5PV210_UFSTAT_RXSHIFT, - .rx_fifofull = S5PV210_UFSTAT_RXFULL, - .tx_fifofull = S5PV210_UFSTAT_TXFULL, - .tx_fifomask = S5PV210_UFSTAT_TXMASK, - .tx_fifoshift = S5PV210_UFSTAT_TXSHIFT, - .def_clk_sel = S3C2410_UCON_CLKSEL0, - .num_clks = 1, - .clksel_mask = 0, - .clksel_shift = 0, - }, - .def_cfg = &(struct s3c2410_uartcfg) { - .ucon = S5PV210_UCON_DEFAULT, - .ufcon = S5PV210_UFCON_DEFAULT, - .has_fracval = 1, - }, + EXYNOS_COMMON_SERIAL_DRV_DATA, .fifosize = { 256, 64, 16, 16 }, }; + +static struct s3c24xx_serial_drv_data exynos5433_serial_drv_data = { + EXYNOS_COMMON_SERIAL_DRV_DATA, + .fifosize = { 64, 256, 16, 256 }, +}; + #define EXYNOS4210_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos4210_serial_drv_data) +#define EXYNOS5433_SERIAL_DRV_DATA ((kernel_ulong_t)&exynos5433_serial_drv_data) #else #define EXYNOS4210_SERIAL_DRV_DATA (kernel_ulong_t)NULL +#define EXYNOS5433_SERIAL_DRV_DATA (kernel_ulong_t)NULL #endif static struct platform_device_id s3c24xx_serial_driver_ids[] = { @@ -1804,6 +1815,9 @@ static struct platform_device_id s3c24xx_serial_driver_ids[] = { }, { .name = "exynos4210-uart", .driver_data = EXYNOS4210_SERIAL_DRV_DATA, + }, { + .name = "exynos5433-uart", + .driver_data = EXYNOS5433_SERIAL_DRV_DATA, }, { }, }; @@ -1823,6 +1837,8 @@ static const struct of_device_id s3c24xx_uart_dt_match[] = { .data = (void *)S5PV210_SERIAL_DRV_DATA }, { .compatible = "samsung,exynos4210-uart", .data = (void *)EXYNOS4210_SERIAL_DRV_DATA }, + { .compatible = "samsung,exynos5433-uart", + .data = (void *)EXYNOS5433_SERIAL_DRV_DATA }, {}, }; MODULE_DEVICE_TABLE(of, s3c24xx_uart_dt_match); diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index 57ca61b..984605b 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -2164,7 +2164,9 @@ uart_report_port(struct uart_driver *drv, struct uart_port *port) break; } - dev_info(port->dev, "%s%d at %s (irq = %d, base_baud = %d) is a %s\n", + printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n", + port->dev ? dev_name(port->dev) : "", + port->dev ? 
": " : "", drv->dev_name, drv->tty_driver->name_base + port->line, address, port->irq, port->uartclk / 16, uart_type(port)); diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index 4f35b43..51f066a 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -1464,6 +1464,9 @@ static int tty_reopen(struct tty_struct *tty) driver->subtype == PTY_TYPE_MASTER) return -EIO; + if (test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) + return -EBUSY; + tty->count++; WARN_ON(!tty->ldisc); @@ -2106,10 +2109,6 @@ retry_open: retval = -ENODEV; filp->f_flags = saved_flags; - if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && - !capable(CAP_SYS_ADMIN)) - retval = -EBUSY; - if (retval) { #ifdef TTY_DEBUG_HANGUP printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__, diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c index 5b9825a..a57dc88 100644 --- a/drivers/usb/chipidea/core.c +++ b/drivers/usb/chipidea/core.c @@ -669,7 +669,6 @@ static int ci_hdrc_probe(struct platform_device *pdev) if (!ci) return -ENOMEM; - platform_set_drvdata(pdev, ci); ci->dev = dev; ci->platdata = dev_get_platdata(dev); ci->imx28_write_fix = !!(ci->platdata->flags & @@ -783,6 +782,7 @@ static int ci_hdrc_probe(struct platform_device *pdev) } } + platform_set_drvdata(pdev, ci); ret = devm_request_irq(dev, ci->irq, ci_irq, IRQF_SHARED, ci->platdata->name, ci); if (ret) diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c index c1694cf..48731d0 100644 --- a/drivers/usb/chipidea/host.c +++ b/drivers/usb/chipidea/host.c @@ -91,6 +91,7 @@ static int host_start(struct ci_hdrc *ci) if (!hcd) return -ENOMEM; + dev_set_drvdata(ci->dev, ci); hcd->rsrc_start = ci->hw_bank.phys; hcd->rsrc_len = ci->hw_bank.size; hcd->regs = ci->hw_bank.abs; diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 200168e..7924200 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -2567,7 +2567,7 @@ error: * s3c_hsotg_ep_disable - disable given endpoint * @ep: The endpoint to disable. */ -static int s3c_hsotg_ep_disable(struct usb_ep *ep) +static int s3c_hsotg_ep_disable_force(struct usb_ep *ep, bool force) { struct s3c_hsotg_ep *hs_ep = our_ep(ep); struct dwc2_hsotg *hsotg = hs_ep->parent; @@ -2588,7 +2588,7 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep) spin_lock_irqsave(&hsotg->lock, flags); /* terminate all requests with shutdown */ - kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, false); + kill_all_requests(hsotg, hs_ep, -ESHUTDOWN, force); hsotg->fifo_map &= ~(1<<hs_ep->fifo_index); hs_ep->fifo_index = 0; @@ -2609,6 +2609,10 @@ static int s3c_hsotg_ep_disable(struct usb_ep *ep) return 0; } +static int s3c_hsotg_ep_disable(struct usb_ep *ep) +{ + return s3c_hsotg_ep_disable_force(ep, false); +} /** * on_list - check request is on the given endpoint * @ep: The endpoint to check. 
@@ -2924,7 +2928,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget) /* all endpoints should be shutdown */ for (ep = 1; ep < hsotg->num_of_eps; ep++) - s3c_hsotg_ep_disable(&hsotg->eps[ep].ep); + s3c_hsotg_ep_disable_force(&hsotg->eps[ep].ep, true); spin_lock_irqsave(&hsotg->lock, flags); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index 7c4faf7..b642a2f 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -33,6 +33,8 @@ #define PCI_DEVICE_ID_INTEL_BYT 0x0f37 #define PCI_DEVICE_ID_INTEL_MRFLD 0x119e #define PCI_DEVICE_ID_INTEL_BSW 0x22B7 +#define PCI_DEVICE_ID_INTEL_SPTLP 0x9d30 +#define PCI_DEVICE_ID_INTEL_SPTH 0xa130 struct dwc3_pci { struct device *dev; @@ -219,6 +221,8 @@ static const struct pci_device_id dwc3_pci_id_table[] = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTLP), }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SPTH), }, { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_NL_USB), }, { } /* Terminating Entry */ }; diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index f03b136..8f65ab3 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -882,8 +882,7 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) if (i == (request->num_mapped_sgs - 1) || sg_is_last(s)) { - if (list_is_last(&req->list, - &dep->request_list)) + if (list_empty(&dep->request_list)) last_one = true; chain = false; } @@ -901,6 +900,9 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting) if (last_one) break; } + + if (last_one) + break; } else { dma = req->request.dma; length = req->request.length; diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c index 6e04e30..a1bc3e3 100644 --- a/drivers/usb/gadget/function/f_hid.c +++ b/drivers/usb/gadget/function/f_hid.c @@ -399,8 +399,9 @@ static int hidg_setup(struct usb_function *f, value = __le16_to_cpu(ctrl->wValue); length = __le16_to_cpu(ctrl->wLength); - VDBG(cdev, "hid_setup crtl_request : bRequestType:0x%x bRequest:0x%x " - "Value:0x%x\n", ctrl->bRequestType, ctrl->bRequest, value); + VDBG(cdev, + "%s crtl_request : bRequestType:0x%x bRequest:0x%x Value:0x%x\n", + __func__, ctrl->bRequestType, ctrl->bRequest, value); switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index a904403..259b656 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -520,7 +520,7 @@ static void f_midi_transmit(struct f_midi *midi, struct usb_request *req) req = midi_alloc_ep_req(ep, midi->buflen); if (!req) { - ERROR(midi, "gmidi_transmit: alloc_ep_request failed\n"); + ERROR(midi, "%s: alloc_ep_request failed\n", __func__); return; } req->length = 0; diff --git a/drivers/usb/gadget/function/f_uac1.c b/drivers/usb/gadget/function/f_uac1.c index f7b2032..e971584 100644 --- a/drivers/usb/gadget/function/f_uac1.c +++ b/drivers/usb/gadget/function/f_uac1.c @@ -897,7 +897,6 @@ static void f_audio_free_inst(struct usb_function_instance *f) struct f_uac1_opts *opts; opts = container_of(f, struct f_uac1_opts, func_inst); - gaudio_cleanup(opts->card); if (opts->fn_play_alloc) kfree(opts->fn_play); if (opts->fn_cap_alloc) 
@@ -935,6 +934,7 @@ static void f_audio_free(struct usb_function *f) struct f_audio *audio = func_to_audio(f); struct f_uac1_opts *opts; + gaudio_cleanup(&audio->card); opts = container_of(f->fi, struct f_uac1_opts, func_inst); kfree(audio); mutex_lock(&opts->lock); diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c index c744e49..db49ec4 100644 --- a/drivers/usb/gadget/legacy/inode.c +++ b/drivers/usb/gadget/legacy/inode.c @@ -441,6 +441,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) kbuf = memdup_user(buf, len); if (IS_ERR(kbuf)) { value = PTR_ERR(kbuf); + kbuf = NULL; goto free1; } @@ -449,6 +450,7 @@ ep_write (struct file *fd, const char __user *buf, size_t len, loff_t *ptr) data->name, len, (int) value); free1: mutex_unlock(&data->lock); + kfree (kbuf); return value; } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index ce88237..9f93bed 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -716,10 +716,10 @@ static int queue_dma(struct usba_udc *udc, struct usba_ep *ep, req->using_dma = 1; req->ctrl = USBA_BF(DMA_BUF_LEN, req->req.length) | USBA_DMA_CH_EN | USBA_DMA_END_BUF_IE - | USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; + | USBA_DMA_END_BUF_EN; - if (ep->is_in) - req->ctrl |= USBA_DMA_END_BUF_EN; + if (!ep->is_in) + req->ctrl |= USBA_DMA_END_TR_EN | USBA_DMA_END_TR_IE; /* * Add this request to the queue and submit for DMA if @@ -828,7 +828,7 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct usba_ep *ep = to_usba_ep(_ep); struct usba_udc *udc = ep->udc; - struct usba_request *req = to_usba_req(_req); + struct usba_request *req; unsigned long flags; u32 status; @@ -837,6 +837,16 @@ static int usba_ep_dequeue(struct usb_ep *_ep, struct usb_request *_req) spin_lock_irqsave(&udc->lock, flags); + list_for_each_entry(req, &ep->queue, queue) { + if (&req->req == _req) + break; + } + + if (&req->req != _req) { + spin_unlock_irqrestore(&udc->lock, flags); + return -EINVAL; + } + if (req->using_dma) { /* * If this request is currently being transferred, @@ -1563,7 +1573,6 @@ static void usba_ep_irq(struct usba_udc *udc, struct usba_ep *ep) if ((epstatus & epctrl) & USBA_RX_BK_RDY) { DBG(DBG_BUS, "%s: RX data ready\n", ep->ep.name); receive_data(ep); - usba_ep_writel(ep, CLR_STA, USBA_RX_BK_RDY); } } diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c index ff67cea..d4fe8d7 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_ep.c +++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c @@ -718,10 +718,11 @@ static int ep_queue(struct bdc_ep *ep, struct bdc_req *req) struct bdc *bdc; int ret = 0; - bdc = ep->bdc; if (!req || !ep || !ep->usb_ep.desc) return -EINVAL; + bdc = ep->bdc; + req->usb_req.actual = 0; req->usb_req.status = -EINPROGRESS; req->epnum = ep->ep_num; diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index e113fd7..f9a3327 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -1581,6 +1581,10 @@ iso_stream_schedule ( else next = (now + 2 + 7) & ~0x07; /* full frame cache */ + /* If needed, initialize last_iso_frame so that this URB will be seen */ + if (ehci->isoc_count == 0) + ehci->last_iso_frame = now >> 3; + /* * Use ehci->last_iso_frame as the base. There can't be any * TDs scheduled for earlier than that. 
@@ -1600,11 +1604,11 @@ iso_stream_schedule ( */ now2 = (now - base) & (mod - 1); - /* Is the schedule already full? */ + /* Is the schedule about to wrap around? */ if (unlikely(!empty && start < period)) { - ehci_dbg(ehci, "iso sched full %p (%u-%u < %u mod %u)\n", + ehci_dbg(ehci, "request %p would overflow (%u-%u < %u mod %u)\n", urb, stream->next_uframe, base, period, mod); - status = -ENOSPC; + status = -EFBIG; goto fail; } @@ -1671,10 +1675,6 @@ iso_stream_schedule ( urb->start_frame = start & (mod - 1); if (!stream->highspeed) urb->start_frame >>= 3; - - /* Make sure scan_isoc() sees these */ - if (ehci->isoc_count == 0) - ehci->last_iso_frame = now >> 3; return status; fail: diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c index 19a9af1..ff9af29 100644 --- a/drivers/usb/host/ehci-tegra.c +++ b/drivers/usb/host/ehci-tegra.c @@ -451,7 +451,7 @@ static int tegra_ehci_probe(struct platform_device *pdev) u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0); if (IS_ERR(u_phy)) { - err = PTR_ERR(u_phy); + err = -EPROBE_DEFER; goto cleanup_clk_en; } hcd->usb_phy = u_phy; diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index dd483c1..ce63646 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -567,7 +567,8 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) { void __iomem *base; u32 control; - u32 fminterval; + u32 fminterval = 0; + bool no_fminterval = false; int cnt; if (!mmio_resource_enabled(pdev, 0)) @@ -577,6 +578,13 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) if (base == NULL) return; + /* + * ULi M5237 OHCI controller locks the whole system when accessing + * the OHCI_FMINTERVAL offset. + */ + if (pdev->vendor == PCI_VENDOR_ID_AL && pdev->device == 0x5237) + no_fminterval = true; + control = readl(base + OHCI_CONTROL); /* On PA-RISC, PDC can leave IR set incorrectly; ignore it there. */ @@ -615,7 +623,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) } /* software reset of the controller, preserving HcFmInterval */ - fminterval = readl(base + OHCI_FMINTERVAL); + if (!no_fminterval) + fminterval = readl(base + OHCI_FMINTERVAL); + writel(OHCI_HCR, base + OHCI_CMDSTATUS); /* reset requires max 10 us delay */ @@ -624,7 +634,9 @@ static void quirk_usb_handoff_ohci(struct pci_dev *pdev) break; udelay(1); } - writel(fminterval, base + OHCI_FMINTERVAL); + + if (!no_fminterval) + writel(fminterval, base + OHCI_FMINTERVAL); /* Now the controller is safely in SUSPEND and nothing can wake it up */ iounmap(base); diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 142b601..7f76c8a 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -82,6 +82,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) "must be suspended extra slowly", pdev->revision); } + if (pdev->device == PCI_DEVICE_ID_FRESCO_LOGIC_PDK) + xhci->quirks |= XHCI_BROKEN_STREAMS; /* Fresco Logic confirms: all revisions of this chip do not * support MSI, even though some of them claim to in their PCI * capabilities. 
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 01fcbb5..c50d8d2 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -3803,6 +3803,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, return -EINVAL; } + if (setup == SETUP_CONTEXT_ONLY) { + slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx); + if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) == + SLOT_STATE_DEFAULT) { + xhci_dbg(xhci, "Slot already in default state\n"); + return 0; + } + } + command = xhci_alloc_command(xhci, false, false, GFP_KERNEL); if (!command) return -ENOMEM; diff --git a/drivers/usb/musb/Kconfig b/drivers/usb/musb/Kconfig index 9d68372..b005010 100644 --- a/drivers/usb/musb/Kconfig +++ b/drivers/usb/musb/Kconfig @@ -72,6 +72,8 @@ config USB_MUSB_DA8XX config USB_MUSB_TUSB6010 tristate "TUSB6010" + depends on ARCH_OMAP2PLUS || COMPILE_TEST + depends on NOP_USB_XCEIV = USB_MUSB_HDRC # both built-in or both modules config USB_MUSB_OMAP2PLUS tristate "OMAP2430 and onwards" @@ -85,6 +87,7 @@ config USB_MUSB_AM35X config USB_MUSB_DSPS tristate "TI DSPS platforms" select USB_MUSB_AM335X_CHILD + depends on ARCH_OMAP2PLUS || COMPILE_TEST depends on OF_IRQ config USB_MUSB_BLACKFIN @@ -93,6 +96,7 @@ config USB_MUSB_BLACKFIN config USB_MUSB_UX500 tristate "Ux500 platforms" + depends on ARCH_U8500 || COMPILE_TEST config USB_MUSB_JZ4740 tristate "JZ4740" diff --git a/drivers/usb/musb/blackfin.c b/drivers/usb/musb/blackfin.c index a441a2d..1782501 100644 --- a/drivers/usb/musb/blackfin.c +++ b/drivers/usb/musb/blackfin.c @@ -63,7 +63,7 @@ static void bfin_writew(void __iomem *addr, unsigned offset, u16 data) bfin_write16(addr + offset, data); } -static void binf_writel(void __iomem *addr, unsigned offset, u32 data) +static void bfin_writel(void __iomem *addr, unsigned offset, u32 data) { bfin_write16(addr + offset, (u16)data); } diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c index f64fd96..c39a16a 100644 --- a/drivers/usb/musb/musb_cppi41.c +++ b/drivers/usb/musb/musb_cppi41.c @@ -628,9 +628,9 @@ static int cppi41_dma_controller_start(struct cppi41_dma_controller *controller) ret = of_property_read_string_index(np, "dma-names", i, &str); if (ret) goto err; - if (!strncmp(str, "tx", 2)) + if (strstarts(str, "tx")) is_tx = 1; - else if (!strncmp(str, "rx", 2)) + else if (strstarts(str, "rx")) is_tx = 0; else { dev_err(dev, "Wrong dmatype %s\n", str); diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c index ad3701a..48131aa 100644 --- a/drivers/usb/musb/musb_debugfs.c +++ b/drivers/usb/musb/musb_debugfs.c @@ -59,20 +59,12 @@ static const struct musb_register_map musb_regmap[] = { { "RxMaxPp", MUSB_RXMAXP, 16 }, { "RxCSR", MUSB_RXCSR, 16 }, { "RxCount", MUSB_RXCOUNT, 16 }, - { "ConfigData", MUSB_CONFIGDATA,8 }, { "IntrRxE", MUSB_INTRRXE, 16 }, { "IntrTxE", MUSB_INTRTXE, 16 }, { "IntrUsbE", MUSB_INTRUSBE, 8 }, { "DevCtl", MUSB_DEVCTL, 8 }, - { "BabbleCtl", MUSB_BABBLE_CTL,8 }, - { "TxFIFOsz", MUSB_TXFIFOSZ, 8 }, - { "RxFIFOsz", MUSB_RXFIFOSZ, 8 }, - { "TxFIFOadd", MUSB_TXFIFOADD, 16 }, - { "RxFIFOadd", MUSB_RXFIFOADD, 16 }, { "VControl", 0x68, 32 }, { "HWVers", 0x69, 16 }, - { "EPInfo", MUSB_EPINFO, 8 }, - { "RAMInfo", MUSB_RAMINFO, 8 }, { "LinkInfo", MUSB_LINKINFO, 8 }, { "VPLen", MUSB_VPLEN, 8 }, { "HS_EOF1", MUSB_HS_EOF1, 8 }, @@ -103,6 +95,16 @@ static const struct musb_register_map musb_regmap[] = { { "DMA_CNTLch7", 0x274, 16 }, { "DMA_ADDRch7", 0x278, 32 }, { "DMA_COUNTch7", 0x27C, 32 }, +#ifndef 
CONFIG_BLACKFIN + { "ConfigData", MUSB_CONFIGDATA,8 }, + { "BabbleCtl", MUSB_BABBLE_CTL,8 }, + { "TxFIFOsz", MUSB_TXFIFOSZ, 8 }, + { "RxFIFOsz", MUSB_RXFIFOSZ, 8 }, + { "TxFIFOadd", MUSB_TXFIFOADD, 16 }, + { "RxFIFOadd", MUSB_RXFIFOADD, 16 }, + { "EPInfo", MUSB_EPINFO, 8 }, + { "RAMInfo", MUSB_RAMINFO, 8 }, +#endif { } /* Terminating Entry */ }; @@ -197,30 +199,30 @@ static ssize_t musb_test_mode_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; - if (!strncmp(buf, "force host", 9)) + if (strstarts(buf, "force host")) test = MUSB_TEST_FORCE_HOST; - if (!strncmp(buf, "fifo access", 11)) + if (strstarts(buf, "fifo access")) test = MUSB_TEST_FIFO_ACCESS; - if (!strncmp(buf, "force full-speed", 15)) + if (strstarts(buf, "force full-speed")) test = MUSB_TEST_FORCE_FS; - if (!strncmp(buf, "force high-speed", 15)) + if (strstarts(buf, "force high-speed")) test = MUSB_TEST_FORCE_HS; - if (!strncmp(buf, "test packet", 10)) { + if (strstarts(buf, "test packet")) { test = MUSB_TEST_PACKET; musb_load_testpacket(musb); } - if (!strncmp(buf, "test K", 6)) + if (strstarts(buf, "test K")) test = MUSB_TEST_K; - if (!strncmp(buf, "test J", 6)) + if (strstarts(buf, "test J")) test = MUSB_TEST_J; - if (!strncmp(buf, "test SE0 NAK", 12)) + if (strstarts(buf, "test SE0 NAK")) test = MUSB_TEST_SE0_NAK; musb_writeb(musb->mregs, MUSB_TESTMODE, test); diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c index 23d474d..883a9ad 100644 --- a/drivers/usb/musb/musb_host.c +++ b/drivers/usb/musb/musb_host.c @@ -2663,7 +2663,6 @@ void musb_host_cleanup(struct musb *musb) if (musb->port_mode == MUSB_PORT_MODE_GADGET) return; usb_remove_hcd(musb->hcd); - musb->hcd = NULL; } void musb_host_free(struct musb *musb) diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c index 699e38c..697a741 100644 --- a/drivers/usb/phy/phy-mv-usb.c +++ b/drivers/usb/phy/phy-mv-usb.c @@ -338,7 +338,6 @@ static void mv_otg_update_inputs(struct mv_otg *mvotg) static void mv_otg_update_state(struct mv_otg *mvotg) { struct mv_otg_ctrl *otg_ctrl = &mvotg->otg_ctrl; - struct usb_phy *phy = &mvotg->phy; int old_state = mvotg->phy.otg->state; switch (old_state) { @@ -858,10 +857,10 @@ static int mv_otg_suspend(struct platform_device *pdev, pm_message_t state) { struct mv_otg *mvotg = platform_get_drvdata(pdev); - if (mvotg->phy.state != OTG_STATE_B_IDLE) { + if (mvotg->phy.otg->state != OTG_STATE_B_IDLE) { dev_info(&pdev->dev, "OTG state is not B_IDLE, it is %d!\n", - mvotg->phy.state); + mvotg->phy.otg->state); return -EAGAIN; } diff --git a/drivers/usb/phy/phy.c b/drivers/usb/phy/phy.c index b4066a0..ccfdfb2 100644 --- a/drivers/usb/phy/phy.c +++ b/drivers/usb/phy/phy.c @@ -34,7 +34,7 @@ static struct usb_phy *__usb_find_phy(struct list_head *list, return phy; } - return ERR_PTR(-ENODEV); + return ERR_PTR(-EPROBE_DEFER); } static struct usb_phy *__usb_find_phy_dev(struct device *dev, @@ -59,6 +59,9 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node) { struct usb_phy *phy; + if (!of_device_is_available(node)) + return ERR_PTR(-ENODEV); + list_for_each_entry(phy, &phy_list, head) { if (node != phy->dev->of_node) continue; @@ -66,7 +69,7 @@ static struct usb_phy *__of_usb_find_phy(struct device_node *node) return phy; } - return ERR_PTR(-ENODEV); + return ERR_PTR(-EPROBE_DEFER); } static void devm_usb_phy_release(struct device *dev, void *res) @@ -190,10 +193,13 @@ struct usb_phy *devm_usb_get_phy_by_phandle(struct device *dev, 
spin_lock_irqsave(&phy_lock, flags); phy = __of_usb_find_phy(node); - if (IS_ERR(phy) || !try_module_get(phy->dev->driver->owner)) { - if (!IS_ERR(phy)) - phy = ERR_PTR(-EPROBE_DEFER); + if (IS_ERR(phy)) { + devres_free(ptr); + goto err1; + } + if (!try_module_get(phy->dev->driver->owner)) { + phy = ERR_PTR(-ENODEV); devres_free(ptr); goto err1; } diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c index 8d7fc48..29fa1c3 100644 --- a/drivers/usb/serial/console.c +++ b/drivers/usb/serial/console.c @@ -46,6 +46,8 @@ static struct console usbcons; * ------------------------------------------------------------ */ +static const struct tty_operations usb_console_fake_tty_ops = { +}; /* * The parsing of the command line works exactly like the @@ -137,13 +139,17 @@ static int usb_console_setup(struct console *co, char *options) goto reset_open_count; } kref_init(&tty->kref); - tty_port_tty_set(&port->port, tty); tty->driver = usb_serial_tty_driver; tty->index = co->index; + init_ldsem(&tty->ldisc_sem); + INIT_LIST_HEAD(&tty->tty_files); + kref_get(&tty->driver->kref); + tty->ops = &usb_console_fake_tty_ops; if (tty_init_termios(tty)) { retval = -ENOMEM; - goto free_tty; + goto put_tty; } + tty_port_tty_set(&port->port, tty); } /* only call the device specific open if this @@ -161,7 +167,7 @@ static int usb_console_setup(struct console *co, char *options) serial->type->set_termios(tty, port, &dummy); tty_port_tty_set(&port->port, NULL); - kfree(tty); + tty_kref_put(tty); } set_bit(ASYNCB_INITIALIZED, &port->port.flags); } @@ -177,8 +183,8 @@ static int usb_console_setup(struct console *co, char *options) fail: tty_port_tty_set(&port->port, NULL); - free_tty: - kfree(tty); + put_tty: + tty_kref_put(tty); reset_open_count: port->port.count = 0; usb_autopm_put_interface(serial->interface); diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 6c4eb3c..f4c56fc 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -120,10 +120,12 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ - { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */ + { USB_DEVICE(0x10C4, 0x8856) }, /* CEL EM357 ZigBee USB Stick - LR */ + { USB_DEVICE(0x10C4, 0x8857) }, /* CEL EM357 ZigBee USB Stick */ { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ + { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c index 1bd1922..ccf1df7 100644 --- a/drivers/usb/serial/generic.c +++ b/drivers/usb/serial/generic.c @@ -286,7 +286,7 @@ static int usb_serial_generic_submit_read_urb(struct usb_serial_port *port, res = usb_submit_urb(port->read_urbs[index], mem_flags); if (res) { - if (res != -EPERM) { + if (res != -EPERM && res != -ENODEV) { dev_err(&port->dev, "%s - usb_submit_urb failed: %d\n", __func__, res); @@ -373,7 +373,7 @@ void usb_serial_generic_read_bulk_callback(struct urb *urb) __func__, urb->status); return; default: - dev_err(&port->dev, "%s - 
nonzero urb status: %d\n", + dev_dbg(&port->dev, "%s - nonzero urb status: %d\n", __func__, urb->status); goto resubmit; } diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 077c714..e07b15e 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -410,6 +410,8 @@ static void usa26_instat_callback(struct urb *urb) } port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -420,7 +422,7 @@ static void usa26_instat_callback(struct urb *urb) if (old_dcd_state != p_priv->dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -527,6 +529,8 @@ static void usa28_instat_callback(struct urb *urb) } port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -537,7 +541,7 @@ static void usa28_instat_callback(struct urb *urb) if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -607,6 +611,8 @@ static void usa49_instat_callback(struct urb *urb) } port = serial->port[msg->portNumber]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -617,7 +623,7 @@ static void usa49_instat_callback(struct urb *urb) if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -855,6 +861,8 @@ static void usa90_instat_callback(struct urb *urb) port = serial->port[0]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -865,7 +873,7 @@ static void usa90_instat_callback(struct urb *urb) if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) @@ -926,6 +934,8 @@ static void usa67_instat_callback(struct urb *urb) port = serial->port[msg->port]; p_priv = usb_get_serial_port_data(port); + if (!p_priv) + goto resubmit; /* Update handshaking pin state information */ old_dcd_state = p_priv->dcd_state; @@ -934,7 +944,7 @@ static void usa67_instat_callback(struct urb *urb) if (old_dcd_state != p_priv->dcd_state && old_dcd_state) tty_port_tty_hangup(&port->port, true); - +resubmit: /* Resubmit urb so we continue receiving */ err = usb_submit_urb(urb, GFP_ATOMIC); if (err != 0) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 7a4c21b..efdcee1 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -234,6 +234,8 @@ static void option_instat_callback(struct urb *urb); #define QUALCOMM_VENDOR_ID 0x05C6 +#define SIERRA_VENDOR_ID 0x1199 + #define CMOTECH_VENDOR_ID 0x16d8 #define CMOTECH_PRODUCT_6001 0x6001 #define CMOTECH_PRODUCT_CMU_300 0x6002 @@ -512,7 +514,7 @@ enum option_blacklist_reason { OPTION_BLACKLIST_RESERVED_IF = 2 }; -#define MAX_BL_NUM 8 +#define MAX_BL_NUM 11 struct option_blacklist_info { /* 
bitfield of interface numbers for OPTION_BLACKLIST_SENDSETUP */ const unsigned long sendsetup; @@ -601,6 +603,11 @@ static const struct option_blacklist_info telit_le920_blacklist = { .reserved = BIT(1) | BIT(5), }; +static const struct option_blacklist_info sierra_mc73xx_blacklist = { + .sendsetup = BIT(0) | BIT(2), + .reserved = BIT(8) | BIT(10) | BIT(11), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -1098,6 +1105,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ + { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), + .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index cb3e147..9c63897 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -142,7 +142,6 @@ static const struct usb_device_id id_table[] = { {DEVICE_SWI(0x0f3d, 0x68a2)}, /* Sierra Wireless MC7700 */ {DEVICE_SWI(0x114f, 0x68a2)}, /* Sierra Wireless MC7750 */ {DEVICE_SWI(0x1199, 0x68a2)}, /* Sierra Wireless MC7710 */ - {DEVICE_SWI(0x1199, 0x68c0)}, /* Sierra Wireless MC73xx */ {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h index 8a6f371..9893d69 100644 --- a/drivers/usb/storage/uas-detect.h +++ b/drivers/usb/storage/uas-detect.h @@ -69,16 +69,39 @@ static int uas_use_uas_driver(struct usb_interface *intf, return 0; /* - * ASM1051 and older ASM1053 devices have the same usb-id, and UAS is - * broken on the ASM1051, use the number of streams to differentiate. - * New ASM1053-s also support 32 streams, but have a different prod-id. + * ASMedia has a number of usb3 to sata bridge chips, at the time of + * this writing the following versions exist: + * ASM1051 - no uas support version + * ASM1051 - with broken (*) uas support + * ASM1053 - with working uas support + * ASM1153 - with working uas support + * + * Devices with these chips re-use a number of device-ids over the + * entire line, so the device-id is useless to determine if we're + * dealing with an ASM1051 (which we want to avoid). + * + * The ASM1153 can be identified by config.MaxPower == 0, + * where as the ASM105x models have config.MaxPower == 36. + * + * Differentiating between the ASM1053 and ASM1051 is trickier, when + * connected over USB-3 we can look at the number of streams supported, + * ASM1051 supports 32 streams, where as early ASM1053 versions support + * 16 streams, newer ASM1053-s also support 32 streams, but have a + * different prod-id. 
+ * + * (*) ASM1051 chips do work with UAS with some disks (with the + * US_FL_NO_REPORT_OPCODES quirk), but are broken with other disks */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x174c && - le16_to_cpu(udev->descriptor.idProduct) == 0x55aa) { - if (udev->speed < USB_SPEED_SUPER) { + (le16_to_cpu(udev->descriptor.idProduct) == 0x5106 || + le16_to_cpu(udev->descriptor.idProduct) == 0x55aa)) { + if (udev->actconfig->desc.bMaxPower == 0) { + /* ASM1153, do nothing */ + } else if (udev->speed < USB_SPEED_SUPER) { /* No streams info, assume ASM1051 */ flags |= US_FL_IGNORE_UAS; } else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) { + /* Possibly an ASM1051, disable uas */ flags |= US_FL_IGNORE_UAS; } } diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h index 18a283d..6df4357 100644 --- a/drivers/usb/storage/unusual_uas.h +++ b/drivers/usb/storage/unusual_uas.h @@ -40,6 +40,16 @@ * and don't forget to CC: the USB development list <linux-usb@vger.kernel.org> */ +/* + * Apricorn USB3 dongle sometimes returns "USBSUSBSUSBS" in response to SCSI + * commands in UAS mode. Observed with the 1.28 firmware; are there others? + */ +UNUSUAL_DEV(0x0984, 0x0301, 0x0128, 0x0128, + "Apricorn", + "", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_IGNORE_UAS), + /* https://bugzilla.kernel.org/show_bug.cgi?id=79511 */ UNUSUAL_DEV(0x0bc2, 0x2312, 0x0000, 0x9999, "Seagate", @@ -68,6 +78,20 @@ UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Marcin Zajączkowski <mszpak@wp.pl> */ +UNUSUAL_DEV(0x0bc2, 0xa013, 0x0000, 0x9999, + "Seagate", + "Backup Plus", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + +/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ +UNUSUAL_DEV(0x0bc2, 0xa0a4, 0x0000, 0x9999, + "Seagate", + "Backup Plus Desk", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* https://bbs.archlinux.org/viewtopic.php?id=183190 */ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, "Seagate", @@ -82,6 +106,13 @@ UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: G.
Richard Bellamy <rbellamy@pteradigm.com> */ +UNUSUAL_DEV(0x0bc2, 0xab2a, 0x0000, 0x9999, + "Seagate", + "BUP Fast HDD", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_ATA_1X), + /* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, "JMicron", @@ -89,14 +120,6 @@ UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_REPORT_OPCODES), -/* Most ASM1051 based devices have issues with uas, blacklist them all */ -/* Reported-by: Hans de Goede <hdegoede@redhat.com> */ -UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999, - "ASMedia", - "ASM1051", - USB_SC_DEVICE, USB_PR_DEVICE, NULL, - US_FL_IGNORE_UAS), - /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, "VIA", @@ -104,6 +127,13 @@ UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_NO_ATA_1X), +/* Reported-by: Takeo Nakayama <javhera@gmx.com> */ +UNUSUAL_DEV(0x357d, 0x7788, 0x0000, 0x9999, + "JMicron", + "JMS566", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_REPORT_OPCODES), + /* Reported-by: Hans de Goede <hdegoede@redhat.com> */ UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999, "Hitachi", diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c index 255201f..7cc0122 100644 --- a/drivers/vfio/pci/vfio_pci.c +++ b/drivers/vfio/pci/vfio_pci.c @@ -840,13 +840,11 @@ static const struct vfio_device_ops vfio_pci_ops = { static int vfio_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { - u8 type; struct vfio_pci_device *vdev; struct iommu_group *group; int ret; - pci_read_config_byte(pdev, PCI_HEADER_TYPE, &type); - if ((type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) + if (pdev->hdr_type != PCI_HEADER_TYPE_NORMAL) return -EINVAL; group = iommu_group_get(&pdev->dev); diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index 14419a8..d415d69 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -538,7 +538,7 @@ static int get_rx_bufs(struct vhost_virtqueue *vq, ++headcount; seg += in; } - heads[headcount - 1].len = cpu_to_vhost32(vq, len - datalen); + heads[headcount - 1].len = cpu_to_vhost32(vq, len + datalen); *iovcount = seg; if (unlikely(log)) *log_num = nlogs; diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c index 01c01cb..d695b16 100644 --- a/drivers/vhost/scsi.c +++ b/drivers/vhost/scsi.c @@ -911,6 +911,23 @@ vhost_scsi_map_iov_to_prot(struct tcm_vhost_cmd *cmd, return 0; } +static int vhost_scsi_to_tcm_attr(int attr) +{ + switch (attr) { + case VIRTIO_SCSI_S_SIMPLE: + return TCM_SIMPLE_TAG; + case VIRTIO_SCSI_S_ORDERED: + return TCM_ORDERED_TAG; + case VIRTIO_SCSI_S_HEAD: + return TCM_HEAD_TAG; + case VIRTIO_SCSI_S_ACA: + return TCM_ACA_TAG; + default: + break; + } + return TCM_SIMPLE_TAG; +} + static void tcm_vhost_submission_work(struct work_struct *work) { struct tcm_vhost_cmd *cmd = @@ -936,9 +953,10 @@ static void tcm_vhost_submission_work(struct work_struct *work) rc = target_submit_cmd_map_sgls(se_cmd, tv_nexus->tvn_se_sess, cmd->tvc_cdb, &cmd->tvc_sense_buf[0], cmd->tvc_lun, cmd->tvc_exp_data_len, - cmd->tvc_task_attr, cmd->tvc_data_direction, - TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count, - NULL, 0, sg_prot_ptr, cmd->tvc_prot_sgl_count); + vhost_scsi_to_tcm_attr(cmd->tvc_task_attr), + cmd->tvc_data_direction, TARGET_SCF_ACK_KREF, + sg_ptr, cmd->tvc_sgl_count, NULL, 0, sg_prot_ptr, + cmd->tvc_prot_sgl_count); if (rc < 0) { transport_send_check_condition_and_sense(se_cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); diff 
--git a/drivers/video/fbdev/broadsheetfb.c b/drivers/video/fbdev/broadsheetfb.c index 1c29bd1..0e5fde1 100644 --- a/drivers/video/fbdev/broadsheetfb.c +++ b/drivers/video/fbdev/broadsheetfb.c @@ -636,7 +636,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, err = broadsheet_spiflash_read_range(par, start_sector_addr, data_start_addr, sector_buffer); if (err) - return err; + goto out; } /* now we copy our data into the right place in the sector buffer */ @@ -657,7 +657,7 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, err = broadsheet_spiflash_read_range(par, tail_start_addr, tail_len, sector_buffer + tail_start_addr); if (err) - return err; + goto out; } /* if we got here we have the full sector that we want to rewrite. */ @@ -665,11 +665,13 @@ static int broadsheet_spiflash_rewrite_sector(struct broadsheetfb_par *par, /* first erase the sector */ err = broadsheet_spiflash_erase_sector(par, start_sector_addr); if (err) - return err; + goto out; /* now write it */ err = broadsheet_spiflash_write_sector(par, start_sector_addr, sector_buffer, sector_size); +out: + kfree(sector_buffer); return err; } diff --git a/drivers/video/fbdev/simplefb.c b/drivers/video/fbdev/simplefb.c index 92cac80..1085c04 100644 --- a/drivers/video/fbdev/simplefb.c +++ b/drivers/video/fbdev/simplefb.c @@ -402,7 +402,7 @@ static int __init simplefb_init(void) if (ret) return ret; - if (IS_ENABLED(CONFIG_OF) && of_chosen) { + if (IS_ENABLED(CONFIG_OF_ADDRESS) && of_chosen) { for_each_child_of_node(of_chosen, np) { if (of_device_is_compatible(np, "simple-framebuffer")) of_platform_device_create(np, NULL, NULL); diff --git a/drivers/virtio/virtio_pci_common.c b/drivers/virtio/virtio_pci_common.c index 2ef9529..9756f21 100644 --- a/drivers/virtio/virtio_pci_common.c +++ b/drivers/virtio/virtio_pci_common.c @@ -282,6 +282,7 @@ void vp_del_vqs(struct virtio_device *vdev) vp_free_vectors(vdev); kfree(vp_dev->vqs); + vp_dev->vqs = NULL; } static int vp_try_to_find_vqs(struct virtio_device *vdev, unsigned nvqs, @@ -421,15 +422,6 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu) return 0; } -void virtio_pci_release_dev(struct device *_d) -{ - /* - * No need for a release method as we allocate/free - * all devices together with the pci devices. - * Provide an empty one to avoid getting a warning from core. 
- */ -} - #ifdef CONFIG_PM_SLEEP static int virtio_pci_freeze(struct device *dev) { diff --git a/drivers/virtio/virtio_pci_common.h b/drivers/virtio/virtio_pci_common.h index adddb64..5a49728 100644 --- a/drivers/virtio/virtio_pci_common.h +++ b/drivers/virtio/virtio_pci_common.h @@ -126,7 +126,6 @@ const char *vp_bus_name(struct virtio_device *vdev); * - ignore the affinity request if we're using INTX */ int vp_set_vq_affinity(struct virtqueue *vq, int cpu); -void virtio_pci_release_dev(struct device *); int virtio_pci_legacy_probe(struct pci_dev *pci_dev, const struct pci_device_id *id); diff --git a/drivers/virtio/virtio_pci_legacy.c b/drivers/virtio/virtio_pci_legacy.c index 6c76f0f..a5486e6 100644 --- a/drivers/virtio/virtio_pci_legacy.c +++ b/drivers/virtio/virtio_pci_legacy.c @@ -211,6 +211,17 @@ static const struct virtio_config_ops virtio_pci_config_ops = { .set_vq_affinity = vp_set_vq_affinity, }; +static void virtio_pci_release_dev(struct device *_d) +{ + struct virtio_device *vdev = dev_to_virtio(_d); + struct virtio_pci_device *vp_dev = to_vp_device(vdev); + + /* As struct device is a kobject, it's not safe to + * free the memory (including the reference counter itself) + * until it's release callback. */ + kfree(vp_dev); +} + /* the PCI probing function */ int virtio_pci_legacy_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) @@ -302,5 +313,4 @@ void virtio_pci_legacy_remove(struct pci_dev *pci_dev) pci_iounmap(pci_dev, vp_dev->ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); - kfree(vp_dev); } diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c index 2d3e32e..8729cf6 100644 --- a/fs/btrfs/backref.c +++ b/fs/btrfs/backref.c @@ -1552,7 +1552,6 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, { int ret; int type; - struct btrfs_tree_block_info *info; struct btrfs_extent_inline_ref *eiref; if (*ptr == (unsigned long)-1) @@ -1573,9 +1572,17 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb, } /* we can treat both ref types equally here */ - info = (struct btrfs_tree_block_info *)(ei + 1); *out_root = btrfs_extent_inline_ref_offset(eb, eiref); - *out_level = btrfs_tree_block_level(eb, info); + + if (key->type == BTRFS_EXTENT_ITEM_KEY) { + struct btrfs_tree_block_info *info; + + info = (struct btrfs_tree_block_info *)(ei + 1); + *out_level = btrfs_tree_block_level(eb, info); + } else { + ASSERT(key->type == BTRFS_METADATA_ITEM_KEY); + *out_level = (u8)key->offset; + } if (ret == 1) *ptr = (unsigned long)-1; diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c index 054577b..de4e70f 100644 --- a/fs/btrfs/delayed-inode.c +++ b/fs/btrfs/delayed-inode.c @@ -1857,6 +1857,14 @@ int btrfs_delayed_delete_inode_ref(struct inode *inode) { struct btrfs_delayed_node *delayed_node; + /* + * we don't do delayed inode updates during log recovery because it + * leads to enospc problems. 
This means we also can't do + * delayed inode refs + */ + if (BTRFS_I(inode)->root->fs_info->log_root_recovering) + return -EAGAIN; + delayed_node = btrfs_get_or_create_delayed_node(inode); if (IS_ERR(delayed_node)) return PTR_ERR(delayed_node); diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index a80b971..1511658 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c @@ -3139,9 +3139,11 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, struct extent_buffer *leaf; ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1); - if (ret < 0) + if (ret) { + if (ret > 0) + ret = -ENOENT; goto fail; - BUG_ON(ret); /* Corruption */ + } leaf = path->nodes[0]; bi = btrfs_item_ptr_offset(leaf, path->slots[0]); @@ -3149,11 +3151,9 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, btrfs_mark_buffer_dirty(leaf); btrfs_release_path(path); fail: - if (ret) { + if (ret) btrfs_abort_transaction(trans, root, ret); - return ret; - } - return 0; + return ret; } diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index e687bb0..8bf326a 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c @@ -6255,8 +6255,10 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) out_fail: btrfs_end_transaction(trans, root); - if (drop_on_err) + if (drop_on_err) { + inode_dec_link_count(inode); iput(inode); + } btrfs_balance_delayed_items(root); btrfs_btree_balance_dirty(root); return err; diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c index f2bb13a..9e1569f 100644 --- a/fs/btrfs/scrub.c +++ b/fs/btrfs/scrub.c @@ -2607,9 +2607,9 @@ static int scrub_extent_for_parity(struct scrub_parity *sparity, ret = scrub_pages_for_parity(sparity, logical, l, physical, dev, flags, gen, mirror_num, have_csum ? csum : NULL); -skip: if (ret) return ret; +skip: len -= l; logical += l; physical += l; diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c index f5013d9..c81c0e00 100644 --- a/fs/ceph/addr.c +++ b/fs/ceph/addr.c @@ -1416,7 +1416,7 @@ void ceph_fill_inline_data(struct inode *inode, struct page *locked_page, } } - dout("fill_inline_data %p %llx.%llx len %lu locked_page %p\n", + dout("fill_inline_data %p %llx.%llx len %zu locked_page %p\n", inode, ceph_vinop(inode), len, locked_page); if (len > 0) { diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index e5d3ead..bed4308 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -5166,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, /* fallback to generic here if not in extents fmt */ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) - return __generic_block_fiemap(inode, fieinfo, start, len, - ext4_get_block); + return generic_block_fiemap(inode, fieinfo, start, len, + ext4_get_block); if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) return -EBADR; diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 513c12c..8131be8 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c @@ -273,19 +273,24 @@ static int ext4_file_open(struct inode * inode, struct file * filp) * we determine this extent as a data or a hole according to whether the * page cache has data or not. 
*/ -static int ext4_find_unwritten_pgoff(struct inode *inode, int whence, - loff_t endoff, loff_t *offset) +static int ext4_find_unwritten_pgoff(struct inode *inode, + int whence, + struct ext4_map_blocks *map, + loff_t *offset) { struct pagevec pvec; + unsigned int blkbits; pgoff_t index; pgoff_t end; + loff_t endoff; loff_t startoff; loff_t lastoff; int found = 0; + blkbits = inode->i_sb->s_blocksize_bits; startoff = *offset; lastoff = startoff; - + endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; index = startoff >> PAGE_CACHE_SHIFT; end = endoff >> PAGE_CACHE_SHIFT; @@ -403,144 +408,147 @@ out: static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; - struct fiemap_extent_info fie; - struct fiemap_extent ext[2]; - loff_t next; - int i, ret = 0; + struct ext4_map_blocks map; + struct extent_status es; + ext4_lblk_t start, last, end; + loff_t dataoff, isize; + int blkbits; + int ret = 0; mutex_lock(&inode->i_mutex); - if (offset >= inode->i_size) { + + isize = i_size_read(inode); + if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } - fie.fi_flags = 0; - fie.fi_extents_max = 2; - fie.fi_extents_start = (struct fiemap_extent __user *) &ext; - while (1) { - mm_segment_t old_fs = get_fs(); - - fie.fi_extents_mapped = 0; - memset(ext, 0, sizeof(*ext) * fie.fi_extents_max); - - set_fs(get_ds()); - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); - set_fs(old_fs); - if (ret) + + blkbits = inode->i_sb->s_blocksize_bits; + start = offset >> blkbits; + last = start; + end = isize >> blkbits; + dataoff = offset; + + do { + map.m_lblk = last; + map.m_len = end - last + 1; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { + if (last != start) + dataoff = (loff_t)last << blkbits; break; + } - /* No extents found, EOF */ - if (!fie.fi_extents_mapped) { - ret = -ENXIO; + /* + * If there is a delay extent at this offset, + * it will be as a data. + */ + ext4_es_find_delayed_extent_range(inode, last, last, &es); + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { + if (last != start) + dataoff = (loff_t)last << blkbits; break; } - for (i = 0; i < fie.fi_extents_mapped; i++) { - next = (loff_t)(ext[i].fe_length + ext[i].fe_logical); - if (offset < (loff_t)ext[i].fe_logical) - offset = (loff_t)ext[i].fe_logical; - /* - * If extent is not unwritten, then it contains valid - * data, mapped or delayed. - */ - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) - goto out; + /* + * If there is a unwritten extent at this offset, + * it will be as a data or a hole according to page + * cache that has data or not. + */ + if (map.m_flags & EXT4_MAP_UNWRITTEN) { + int unwritten; + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, + &map, &dataoff); + if (unwritten) + break; + } - /* - * If there is a unwritten extent at this offset, - * it will be as a data or a hole according to page - * cache that has data or not. 
- */ - if (ext4_find_unwritten_pgoff(inode, SEEK_DATA, - next, &offset)) - goto out; + last++; + dataoff = (loff_t)last << blkbits; + } while (last <= end); - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) { - ret = -ENXIO; - goto out; - } - offset = next; - } - } - if (offset > inode->i_size) - offset = inode->i_size; -out: mutex_unlock(&inode->i_mutex); - if (ret) - return ret; - return vfs_setpos(file, offset, maxsize); + if (dataoff > isize) + return -ENXIO; + + return vfs_setpos(file, dataoff, maxsize); } /* - * ext4_seek_hole() retrieves the offset for SEEK_HOLE + * ext4_seek_hole() retrieves the offset for SEEK_HOLE. */ static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) { struct inode *inode = file->f_mapping->host; - struct fiemap_extent_info fie; - struct fiemap_extent ext[2]; - loff_t next; - int i, ret = 0; + struct ext4_map_blocks map; + struct extent_status es; + ext4_lblk_t start, last, end; + loff_t holeoff, isize; + int blkbits; + int ret = 0; mutex_lock(&inode->i_mutex); - if (offset >= inode->i_size) { + + isize = i_size_read(inode); + if (offset >= isize) { mutex_unlock(&inode->i_mutex); return -ENXIO; } - fie.fi_flags = 0; - fie.fi_extents_max = 2; - fie.fi_extents_start = (struct fiemap_extent __user *)&ext; - while (1) { - mm_segment_t old_fs = get_fs(); - - fie.fi_extents_mapped = 0; - memset(ext, 0, sizeof(*ext)); + blkbits = inode->i_sb->s_blocksize_bits; + start = offset >> blkbits; + last = start; + end = isize >> blkbits; + holeoff = offset; - set_fs(get_ds()); - ret = ext4_fiemap(inode, &fie, offset, maxsize - offset); - set_fs(old_fs); - if (ret) - break; + do { + map.m_lblk = last; + map.m_len = end - last + 1; + ret = ext4_map_blocks(NULL, inode, &map, 0); + if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { + last += ret; + holeoff = (loff_t)last << blkbits; + continue; + } - /* No extents found */ - if (!fie.fi_extents_mapped) - break; + /* + * If there is a delay extent at this offset, + * we will skip this extent. + */ + ext4_es_find_delayed_extent_range(inode, last, last, &es); + if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) { + last = es.es_lblk + es.es_len; + holeoff = (loff_t)last << blkbits; + continue; + } - for (i = 0; i < fie.fi_extents_mapped; i++) { - next = (loff_t)(ext[i].fe_logical + ext[i].fe_length); - /* - * If extent is not unwritten, then it contains valid - * data, mapped or delayed. - */ - if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) { - if (offset < (loff_t)ext[i].fe_logical) - goto out; - offset = next; + /* + * If there is a unwritten extent at this offset, + * it will be as a data or a hole according to page + * cache that has data or not. + */ + if (map.m_flags & EXT4_MAP_UNWRITTEN) { + int unwritten; + unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE, + &map, &holeoff); + if (!unwritten) { + last += ret; + holeoff = (loff_t)last << blkbits; continue; } - /* - * If there is a unwritten extent at this offset, - * it will be as a data or a hole according to page - * cache that has data or not. 
- */ - if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE, - next, &offset)) - goto out; - - offset = next; - if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) - goto out; } - } - if (offset > inode->i_size) - offset = inode->i_size; -out: + + /* find a hole */ + break; + } while (last <= end); + mutex_unlock(&inode->i_mutex); - if (ret) - return ret; - return vfs_setpos(file, offset, maxsize); + if (holeoff > isize) + holeoff = isize; + + return vfs_setpos(file, holeoff, maxsize); } /* diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c index bf76f40..8a8ec62 100644 --- a/fs/ext4/resize.c +++ b/fs/ext4/resize.c @@ -24,6 +24,18 @@ int ext4_resize_begin(struct super_block *sb) return -EPERM; /* + * If we are not using the primary superblock/GDT copy don't resize, + * because the user tools have no way of handling this. Probably a + * bad time to do it anyways. + */ + if (EXT4_SB(sb)->s_sbh->b_blocknr != + le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { + ext4_warning(sb, "won't resize using backup superblock at %llu", + (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); + return -EPERM; + } + + /* * We are not allowed to do online-resizing on a filesystem mounted * with error, because it can destroy the filesystem easily. */ @@ -758,18 +770,6 @@ static int add_new_gdb(handle_t *handle, struct inode *inode, "EXT4-fs: ext4_add_new_gdb: adding group block %lu\n", gdb_num); - /* - * If we are not using the primary superblock/GDT copy don't resize, - * because the user tools have no way of handling this. Probably a - * bad time to do it anyways. - */ - if (EXT4_SB(sb)->s_sbh->b_blocknr != - le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) { - ext4_warning(sb, "won't resize using backup superblock at %llu", - (unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr); - return -EPERM; - } - gdb_bh = sb_bread(sb, gdblock); if (!gdb_bh) return -EIO; diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 43c92b1..74c5f53 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -3482,7 +3482,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) && EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) - ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are " + ext4_warning(sb, "metadata_csum and uninit_bg are " "redundant flags; please run fsck."); /* Check for a known checksum algorithm */ @@ -740,14 +740,15 @@ static int __init fcntl_init(void) * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY * is defined as O_NONBLOCK on some platforms and not on others. 
*/ - BUILD_BUG_ON(20 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( + BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ != HWEIGHT32( O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | O_APPEND | /* O_NONBLOCK | */ __O_SYNC | O_DSYNC | FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | O_NOATIME | O_CLOEXEC | - __FMODE_EXEC | O_PATH | __O_TMPFILE + __FMODE_EXEC | O_PATH | __O_TMPFILE | + __FMODE_NONOTIFY )); fasync_cache = kmem_cache_create("fasync_cache", diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c index ba11079..ed19a7d 100644 --- a/fs/fuse/dev.c +++ b/fs/fuse/dev.c @@ -131,6 +131,13 @@ static void fuse_req_init_context(struct fuse_req *req) req->in.h.pid = current->pid; } +void fuse_set_initialized(struct fuse_conn *fc) +{ + /* Make sure stores before this are seen on another CPU */ + smp_wmb(); + fc->initialized = 1; +} + static bool fuse_block_alloc(struct fuse_conn *fc, bool for_background) { return !fc->initialized || (for_background && fc->blocked); @@ -155,6 +162,8 @@ static struct fuse_req *__fuse_get_req(struct fuse_conn *fc, unsigned npages, if (intr) goto out; } + /* Matches smp_wmb() in fuse_set_initialized() */ + smp_rmb(); err = -ENOTCONN; if (!fc->connected) @@ -253,6 +262,8 @@ struct fuse_req *fuse_get_req_nofail_nopages(struct fuse_conn *fc, atomic_inc(&fc->num_waiting); wait_event(fc->blocked_waitq, fc->initialized); + /* Matches smp_wmb() in fuse_set_initialized() */ + smp_rmb(); req = fuse_request_alloc(0); if (!req) req = get_reserved_req(fc, file); @@ -511,6 +522,39 @@ void fuse_request_send(struct fuse_conn *fc, struct fuse_req *req) } EXPORT_SYMBOL_GPL(fuse_request_send); +static void fuse_adjust_compat(struct fuse_conn *fc, struct fuse_args *args) +{ + if (fc->minor < 4 && args->in.h.opcode == FUSE_STATFS) + args->out.args[0].size = FUSE_COMPAT_STATFS_SIZE; + + if (fc->minor < 9) { + switch (args->in.h.opcode) { + case FUSE_LOOKUP: + case FUSE_CREATE: + case FUSE_MKNOD: + case FUSE_MKDIR: + case FUSE_SYMLINK: + case FUSE_LINK: + args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; + break; + case FUSE_GETATTR: + case FUSE_SETATTR: + args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; + break; + } + } + if (fc->minor < 12) { + switch (args->in.h.opcode) { + case FUSE_CREATE: + args->in.args[0].size = sizeof(struct fuse_open_in); + break; + case FUSE_MKNOD: + args->in.args[0].size = FUSE_COMPAT_MKNOD_IN_SIZE; + break; + } + } +} + ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) { struct fuse_req *req; @@ -520,6 +564,9 @@ ssize_t fuse_simple_request(struct fuse_conn *fc, struct fuse_args *args) if (IS_ERR(req)) return PTR_ERR(req); + /* Needs to be done after fuse_get_req() so that fc->minor is valid */ + fuse_adjust_compat(fc, args); + req->in.h.opcode = args->in.h.opcode; req->in.h.nodeid = args->in.h.nodeid; req->in.numargs = args->in.numargs; @@ -2127,7 +2174,7 @@ void fuse_abort_conn(struct fuse_conn *fc) if (fc->connected) { fc->connected = 0; fc->blocked = 0; - fc->initialized = 1; + fuse_set_initialized(fc); end_io_requests(fc); end_queued_requests(fc); end_polls(fc); @@ -2146,7 +2193,7 @@ int fuse_dev_release(struct inode *inode, struct file *file) spin_lock(&fc->lock); fc->connected = 0; fc->blocked = 0; - fc->initialized = 1; + fuse_set_initialized(fc); end_queued_requests(fc); end_polls(fc); wake_up_all(&fc->blocked_waitq); diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index 252b8a5..08e7b1a 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -156,10 +156,7 @@ static void fuse_lookup_init(struct fuse_conn *fc, 
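In the fuse hunks above, fuse_set_initialized() issues an smp_wmb() before setting fc->initialized, and the readers in __fuse_get_req() and fuse_get_req_nofail_nopages() add the matching smp_rmb(), so everything stored before the flag flip is visible to a CPU that observes the flag. A minimal userspace analogue of that publish pattern, using C11 release/acquire atomics in place of the kernel barriers; names and values are invented for illustration, compile with -pthread.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int config_value;		/* data that must be published first */
static atomic_int initialized;		/* plays the role of fc->initialized */

static void set_initialized(void)
{
	config_value = 42;		/* stores that must be visible to readers */
	/* release store: the analogue of smp_wmb() before "fc->initialized = 1" */
	atomic_store_explicit(&initialized, 1, memory_order_release);
}

static void *reader(void *arg)
{
	(void)arg;
	/* acquire load: the analogue of smp_rmb() after testing the flag */
	while (!atomic_load_explicit(&initialized, memory_order_acquire))
		;			/* spin until the flag is published */
	printf("reader sees config_value=%d\n", config_value);	/* always 42 */
	return NULL;
}

int main(void)
{
	pthread_t t;

	if (pthread_create(&t, NULL, reader, NULL) != 0)
		return 1;
	set_initialized();
	pthread_join(t, NULL);
	return 0;
}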
struct fuse_args *args, args->in.args[0].size = name->len + 1; args->in.args[0].value = name->name; args->out.numargs = 1; - if (fc->minor < 9) - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; - else - args->out.args[0].size = sizeof(struct fuse_entry_out); + args->out.args[0].size = sizeof(struct fuse_entry_out); args->out.args[0].value = outarg; } @@ -422,16 +419,12 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, args.in.h.opcode = FUSE_CREATE; args.in.h.nodeid = get_node_id(dir); args.in.numargs = 2; - args.in.args[0].size = fc->minor < 12 ? sizeof(struct fuse_open_in) : - sizeof(inarg); + args.in.args[0].size = sizeof(inarg); args.in.args[0].value = &inarg; args.in.args[1].size = entry->d_name.len + 1; args.in.args[1].value = entry->d_name.name; args.out.numargs = 2; - if (fc->minor < 9) - args.out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; - else - args.out.args[0].size = sizeof(outentry); + args.out.args[0].size = sizeof(outentry); args.out.args[0].value = &outentry; args.out.args[1].size = sizeof(outopen); args.out.args[1].value = &outopen; @@ -539,10 +532,7 @@ static int create_new_entry(struct fuse_conn *fc, struct fuse_args *args, memset(&outarg, 0, sizeof(outarg)); args->in.h.nodeid = get_node_id(dir); args->out.numargs = 1; - if (fc->minor < 9) - args->out.args[0].size = FUSE_COMPAT_ENTRY_OUT_SIZE; - else - args->out.args[0].size = sizeof(outarg); + args->out.args[0].size = sizeof(outarg); args->out.args[0].value = &outarg; err = fuse_simple_request(fc, args); if (err) @@ -592,8 +582,7 @@ static int fuse_mknod(struct inode *dir, struct dentry *entry, umode_t mode, inarg.umask = current_umask(); args.in.h.opcode = FUSE_MKNOD; args.in.numargs = 2; - args.in.args[0].size = fc->minor < 12 ? FUSE_COMPAT_MKNOD_IN_SIZE : - sizeof(inarg); + args.in.args[0].size = sizeof(inarg); args.in.args[0].value = &inarg; args.in.args[1].size = entry->d_name.len + 1; args.in.args[1].value = entry->d_name.name; @@ -899,10 +888,7 @@ static int fuse_do_getattr(struct inode *inode, struct kstat *stat, args.in.args[0].size = sizeof(inarg); args.in.args[0].value = &inarg; args.out.numargs = 1; - if (fc->minor < 9) - args.out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; - else - args.out.args[0].size = sizeof(outarg); + args.out.args[0].size = sizeof(outarg); args.out.args[0].value = &outarg; err = fuse_simple_request(fc, &args); if (!err) { @@ -1574,10 +1560,7 @@ static void fuse_setattr_fill(struct fuse_conn *fc, struct fuse_args *args, args->in.args[0].size = sizeof(*inarg_p); args->in.args[0].value = inarg_p; args->out.numargs = 1; - if (fc->minor < 9) - args->out.args[0].size = FUSE_COMPAT_ATTR_OUT_SIZE; - else - args->out.args[0].size = sizeof(*outarg_p); + args->out.args[0].size = sizeof(*outarg_p); args->out.args[0].value = outarg_p; } diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h index e0fc672..1cdfb07 100644 --- a/fs/fuse/fuse_i.h +++ b/fs/fuse/fuse_i.h @@ -906,4 +906,6 @@ int fuse_write_inode(struct inode *inode, struct writeback_control *wbc); int fuse_do_setattr(struct inode *inode, struct iattr *attr, struct file *file); +void fuse_set_initialized(struct fuse_conn *fc); + #endif /* _FS_FUSE_I_H */ diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c index 6749109..f38256e 100644 --- a/fs/fuse/inode.c +++ b/fs/fuse/inode.c @@ -424,8 +424,7 @@ static int fuse_statfs(struct dentry *dentry, struct kstatfs *buf) args.in.h.opcode = FUSE_STATFS; args.in.h.nodeid = get_node_id(dentry->d_inode); args.out.numargs = 1; - args.out.args[0].size = - fc->minor < 4 ? 
FUSE_COMPAT_STATFS_SIZE : sizeof(outarg); + args.out.args[0].size = sizeof(outarg); args.out.args[0].value = &outarg; err = fuse_simple_request(fc, &args); if (!err) @@ -898,7 +897,7 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req) fc->max_write = max_t(unsigned, 4096, fc->max_write); fc->conn_init = 1; } - fc->initialized = 1; + fuse_set_initialized(fc); wake_up_all(&fc->blocked_waitq); } diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c index 37989f0..2d881b3 100644 --- a/fs/kernfs/dir.c +++ b/fs/kernfs/dir.c @@ -201,10 +201,14 @@ static unsigned int kernfs_name_hash(const char *name, const void *ns) static int kernfs_name_compare(unsigned int hash, const char *name, const void *ns, const struct kernfs_node *kn) { - if (hash != kn->hash) - return hash - kn->hash; - if (ns != kn->ns) - return ns - kn->ns; + if (hash < kn->hash) + return -1; + if (hash > kn->hash) + return 1; + if (ns < kn->ns) + return -1; + if (ns > kn->ns) + return 1; return strcmp(name, kn->name); } diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c index e94c887..55505cb 100644 --- a/fs/lockd/svc.c +++ b/fs/lockd/svc.c @@ -138,10 +138,6 @@ lockd(void *vrqstp) dprintk("NFS locking service started (ver " LOCKD_VERSION ").\n"); - if (!nlm_timeout) - nlm_timeout = LOCKD_DFLT_TIMEO; - nlmsvc_timeout = nlm_timeout * HZ; - /* * The main request loop. We don't terminate until the last * NFS mount or NFS daemon has gone away. @@ -350,6 +346,10 @@ static struct svc_serv *lockd_create_svc(void) printk(KERN_WARNING "lockd_up: no pid, %d users??\n", nlmsvc_users); + if (!nlm_timeout) + nlm_timeout = LOCKD_DFLT_TIMEO; + nlmsvc_timeout = nlm_timeout * HZ; + serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, svc_rpcb_cleanup); if (!serv) { printk(KERN_WARNING "lockd_up: create service failed\n"); @@ -1702,7 +1702,7 @@ static int generic_delete_lease(struct file *filp) break; } trace_generic_delete_lease(inode, fl); - if (fl) + if (fl && IS_LEASE(fl)) error = fl->fl_lmops->lm_change(before, F_UNLCK, &dispose); spin_unlock(&inode->i_lock); locks_dispose_list(&dispose); diff --git a/fs/nfs/nfs4client.c b/fs/nfs/nfs4client.c index 0331125..953daa4 100644 --- a/fs/nfs/nfs4client.c +++ b/fs/nfs/nfs4client.c @@ -228,6 +228,7 @@ static void nfs4_shutdown_client(struct nfs_client *clp) kfree(clp->cl_serverowner); kfree(clp->cl_serverscope); kfree(clp->cl_implid); + kfree(clp->cl_owner_id); } void nfs4_free_client(struct nfs_client *clp) @@ -452,6 +453,14 @@ static void nfs4_swap_callback_idents(struct nfs_client *keep, spin_unlock(&nn->nfs_client_lock); } +static bool nfs4_match_client_owner_id(const struct nfs_client *clp1, + const struct nfs_client *clp2) +{ + if (clp1->cl_owner_id == NULL || clp2->cl_owner_id == NULL) + return true; + return strcmp(clp1->cl_owner_id, clp2->cl_owner_id) == 0; +} + /** * nfs40_walk_client_list - Find server that recognizes a client ID * @@ -483,9 +492,6 @@ int nfs40_walk_client_list(struct nfs_client *new, if (pos->rpc_ops != new->rpc_ops) continue; - if (pos->cl_proto != new->cl_proto) - continue; - if (pos->cl_minorversion != new->cl_minorversion) continue; @@ -510,6 +516,9 @@ int nfs40_walk_client_list(struct nfs_client *new, if (pos->cl_clientid != new->cl_clientid) continue; + if (!nfs4_match_client_owner_id(pos, new)) + continue; + atomic_inc(&pos->cl_count); spin_unlock(&nn->nfs_client_lock); @@ -566,20 +575,14 @@ static bool nfs4_match_clientids(struct nfs_client *a, struct nfs_client *b) } /* - * Returns true if the server owners match + * Returns true if the server major ids 
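The kernfs_name_compare() change above drops the "hash - kn->hash" / "ns - kn->ns" idiom in favour of explicit -1/0/+1 results: the difference of two unsigned 32-bit hashes does not fit in an int, so the returned sign can be wrong, and subtracting unrelated pointers is not well defined either. The toy program below (not kernel code) shows the hash case going wrong on a typical two's-complement target.

#include <stdio.h>

/* Broken: mimics the old "return hash - kn->hash;" */
static int cmp_by_subtraction(unsigned int a, unsigned int b)
{
	return a - b;		/* unsigned wrap, then reinterpreted as int */
}

/* Fixed: mimics the new explicit -1/0/+1 comparison */
static int cmp_explicit(unsigned int a, unsigned int b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	unsigned int a = 0x80000001u, b = 1u;	/* clearly a > b */

	/* a - b == 0x80000000, which read back as an int is negative on
	 * common two's-complement targets, i.e. the wrong ordering. */
	printf("subtraction says %d, explicit comparison says %d\n",
	       cmp_by_subtraction(a, b), cmp_explicit(a, b));
	return 0;
}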
match */ static bool -nfs4_match_serverowners(struct nfs_client *a, struct nfs_client *b) +nfs4_check_clientid_trunking(struct nfs_client *a, struct nfs_client *b) { struct nfs41_server_owner *o1 = a->cl_serverowner; struct nfs41_server_owner *o2 = b->cl_serverowner; - if (o1->minor_id != o2->minor_id) { - dprintk("NFS: --> %s server owner minor IDs do not match\n", - __func__); - return false; - } - if (o1->major_id_sz != o2->major_id_sz) goto out_major_mismatch; if (memcmp(o1->major_id, o2->major_id, o1->major_id_sz) != 0) @@ -621,9 +624,6 @@ int nfs41_walk_client_list(struct nfs_client *new, if (pos->rpc_ops != new->rpc_ops) continue; - if (pos->cl_proto != new->cl_proto) - continue; - if (pos->cl_minorversion != new->cl_minorversion) continue; @@ -654,7 +654,19 @@ int nfs41_walk_client_list(struct nfs_client *new, if (!nfs4_match_clientids(pos, new)) continue; - if (!nfs4_match_serverowners(pos, new)) + /* + * Note that session trunking is just a special subcase of + * client id trunking. In either case, we want to fall back + * to using the existing nfs_client. + */ + if (!nfs4_check_clientid_trunking(pos, new)) + continue; + + /* Unlike NFSv4.0, we know that NFSv4.1 always uses the + * uniform string, however someone might switch the + * uniquifier string on us. + */ + if (!nfs4_match_client_owner_id(pos, new)) continue; atomic_inc(&pos->cl_count); diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index e7f8d5f..c347705 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1117,8 +1117,6 @@ static int can_open_delegated(struct nfs_delegation *delegation, fmode_t fmode) return 0; if ((delegation->type & fmode) != fmode) return 0; - if (test_bit(NFS_DELEGATION_NEED_RECLAIM, &delegation->flags)) - return 0; if (test_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) return 0; nfs_mark_delegation_referenced(delegation); @@ -4917,11 +4915,14 @@ static void nfs4_init_boot_verifier(const struct nfs_client *clp, } static unsigned int -nfs4_init_nonuniform_client_string(const struct nfs_client *clp, +nfs4_init_nonuniform_client_string(struct nfs_client *clp, char *buf, size_t len) { unsigned int result; + if (clp->cl_owner_id != NULL) + return strlcpy(buf, clp->cl_owner_id, len); + rcu_read_lock(); result = scnprintf(buf, len, "Linux NFSv4.0 %s/%s %s", clp->cl_ipaddr, @@ -4930,24 +4931,32 @@ nfs4_init_nonuniform_client_string(const struct nfs_client *clp, rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_PROTO)); rcu_read_unlock(); + clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); return result; } static unsigned int -nfs4_init_uniform_client_string(const struct nfs_client *clp, +nfs4_init_uniform_client_string(struct nfs_client *clp, char *buf, size_t len) { const char *nodename = clp->cl_rpcclient->cl_nodename; + unsigned int result; + + if (clp->cl_owner_id != NULL) + return strlcpy(buf, clp->cl_owner_id, len); if (nfs4_client_id_uniquifier[0] != '\0') - return scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", + result = scnprintf(buf, len, "Linux NFSv%u.%u %s/%s", clp->rpc_ops->version, clp->cl_minorversion, nfs4_client_id_uniquifier, nodename); - return scnprintf(buf, len, "Linux NFSv%u.%u %s", + else + result = scnprintf(buf, len, "Linux NFSv%u.%u %s", clp->rpc_ops->version, clp->cl_minorversion, nodename); + clp->cl_owner_id = kstrdup(buf, GFP_KERNEL); + return result; } /* diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 3550a9c..c06a1ba 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -3897,11 +3897,11 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh 
*fh, status = nfs4_setlease(dp); goto out; } - atomic_inc(&fp->fi_delegees); if (fp->fi_had_conflict) { status = -EAGAIN; goto out_unlock; } + atomic_inc(&fp->fi_delegees); hash_delegation_locked(dp, fp); status = 0; out_unlock: diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index c991616..bff8567 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -259,16 +259,15 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, struct fsnotify_event *kevent; char __user *start; int ret; - DEFINE_WAIT(wait); + DEFINE_WAIT_FUNC(wait, woken_wake_function); start = buf; group = file->private_data; pr_debug("%s: group=%p\n", __func__, group); + add_wait_queue(&group->notification_waitq, &wait); while (1) { - prepare_to_wait(&group->notification_waitq, &wait, TASK_INTERRUPTIBLE); - mutex_lock(&group->notification_mutex); kevent = get_one_event(group, count); mutex_unlock(&group->notification_mutex); @@ -289,7 +288,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, if (start != buf) break; - schedule(); + + wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT); continue; } @@ -318,8 +318,8 @@ static ssize_t fanotify_read(struct file *file, char __user *buf, buf += ret; count -= ret; } + remove_wait_queue(&group->notification_waitq, &wait); - finish_wait(&group->notification_waitq, &wait); if (start != buf && ret != -EFAULT) ret = buf - start; return ret; diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index 79b5af5..cecd875 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c @@ -2023,11 +2023,8 @@ leave: dlm_lockres_drop_inflight_ref(dlm, res); spin_unlock(&res->spinlock); - if (ret < 0) { + if (ret < 0) mlog_errno(ret); - if (newlock) - dlm_lock_put(newlock); - } return ret; } diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c index b931e04..914c121 100644 --- a/fs/ocfs2/namei.c +++ b/fs/ocfs2/namei.c @@ -94,6 +94,14 @@ static int ocfs2_create_symlink_data(struct ocfs2_super *osb, struct inode *inode, const char *symname); +static int ocfs2_double_lock(struct ocfs2_super *osb, + struct buffer_head **bh1, + struct inode *inode1, + struct buffer_head **bh2, + struct inode *inode2, + int rename); + +static void ocfs2_double_unlock(struct inode *inode1, struct inode *inode2); /* An orphan dir name is an 8 byte value, printed as a hex string */ #define OCFS2_ORPHAN_NAMELEN ((int)(2 * sizeof(u64))) @@ -678,8 +686,10 @@ static int ocfs2_link(struct dentry *old_dentry, { handle_t *handle; struct inode *inode = old_dentry->d_inode; + struct inode *old_dir = old_dentry->d_parent->d_inode; int err; struct buffer_head *fe_bh = NULL; + struct buffer_head *old_dir_bh = NULL; struct buffer_head *parent_fe_bh = NULL; struct ocfs2_dinode *fe = NULL; struct ocfs2_super *osb = OCFS2_SB(dir->i_sb); @@ -696,19 +706,33 @@ static int ocfs2_link(struct dentry *old_dentry, dquot_initialize(dir); - err = ocfs2_inode_lock_nested(dir, &parent_fe_bh, 1, OI_LS_PARENT); + err = ocfs2_double_lock(osb, &old_dir_bh, old_dir, + &parent_fe_bh, dir, 0); if (err < 0) { if (err != -ENOENT) mlog_errno(err); return err; } + /* make sure both dirs have bhs + * get an extra ref on old_dir_bh if old==new */ + if (!parent_fe_bh) { + if (old_dir_bh) { + parent_fe_bh = old_dir_bh; + get_bh(parent_fe_bh); + } else { + mlog(ML_ERROR, "%s: no old_dir_bh!\n", osb->uuid_str); + err = -EIO; + goto out; + } + } + if (!dir->i_nlink) { err = -ENOENT; goto out; } - err = ocfs2_lookup_ino_from_name(dir, 
old_dentry->d_name.name, + err = ocfs2_lookup_ino_from_name(old_dir, old_dentry->d_name.name, old_dentry->d_name.len, &old_de_ino); if (err) { err = -ENOENT; @@ -801,10 +825,11 @@ out_unlock_inode: ocfs2_inode_unlock(inode, 1); out: - ocfs2_inode_unlock(dir, 1); + ocfs2_double_unlock(old_dir, dir); brelse(fe_bh); brelse(parent_fe_bh); + brelse(old_dir_bh); ocfs2_free_dir_lookup_result(&lookup); @@ -1072,14 +1097,15 @@ static int ocfs2_check_if_ancestor(struct ocfs2_super *osb, } /* - * The only place this should be used is rename! + * The only place this should be used is rename and link! * if they have the same id, then the 1st one is the only one locked. */ static int ocfs2_double_lock(struct ocfs2_super *osb, struct buffer_head **bh1, struct inode *inode1, struct buffer_head **bh2, - struct inode *inode2) + struct inode *inode2, + int rename) { int status; int inode1_is_ancestor, inode2_is_ancestor; @@ -1127,7 +1153,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, } /* lock id2 */ status = ocfs2_inode_lock_nested(inode2, bh2, 1, - OI_LS_RENAME1); + rename == 1 ? OI_LS_RENAME1 : OI_LS_PARENT); if (status < 0) { if (status != -ENOENT) mlog_errno(status); @@ -1136,7 +1162,8 @@ static int ocfs2_double_lock(struct ocfs2_super *osb, } /* lock id1 */ - status = ocfs2_inode_lock_nested(inode1, bh1, 1, OI_LS_RENAME2); + status = ocfs2_inode_lock_nested(inode1, bh1, 1, + rename == 1 ? OI_LS_RENAME2 : OI_LS_PARENT); if (status < 0) { /* * An error return must mean that no cluster locks @@ -1252,7 +1279,7 @@ static int ocfs2_rename(struct inode *old_dir, /* if old and new are the same, this'll just do one lock. */ status = ocfs2_double_lock(osb, &old_dir_bh, old_dir, - &new_dir_bh, new_dir); + &new_dir_bh, new_dir, 1); if (status < 0) { mlog_errno(status); goto bail; diff --git a/include/acpi/processor.h b/include/acpi/processor.h index 3ca9b75..b95dc32 100644 --- a/include/acpi/processor.h +++ b/include/acpi/processor.h @@ -196,8 +196,8 @@ struct acpi_processor_flags { struct acpi_processor { acpi_handle handle; u32 acpi_id; - u32 apic_id; - u32 id; + u32 phys_id; /* CPU hardware ID such as APIC ID for x86 */ + u32 id; /* CPU logical ID allocated by OS */ u32 pblk; int performance_platform_limit; int throttling_platform_limit; @@ -310,8 +310,8 @@ static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit) #endif /* CONFIG_CPU_FREQ */ /* in processor_core.c */ -int acpi_get_apicid(acpi_handle, int type, u32 acpi_id); -int acpi_map_cpuid(int apic_id, u32 acpi_id); +int acpi_get_phys_id(acpi_handle, int type, u32 acpi_id); +int acpi_map_cpuid(int phys_id, u32 acpi_id); int acpi_get_cpuid(acpi_handle, int type, u32 acpi_id); /* in processor_pdc.c */ diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h index 0884805..db284bf 100644 --- a/include/asm-generic/tlb.h +++ b/include/asm-generic/tlb.h @@ -136,8 +136,12 @@ static inline void __tlb_adjust_range(struct mmu_gather *tlb, static inline void __tlb_reset_range(struct mmu_gather *tlb) { - tlb->start = TASK_SIZE; - tlb->end = 0; + if (tlb->fullmm) { + tlb->start = tlb->end = ~0; + } else { + tlb->start = TASK_SIZE; + tlb->end = 0; + } } /* diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 856d381..d459cd1 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -147,8 +147,8 @@ void acpi_numa_arch_fixup(void); #ifdef CONFIG_ACPI_HOTPLUG_CPU /* Arch dependent functions for cpu hotplug support */ -int acpi_map_lsapic(acpi_handle handle, int physid, int *pcpu); -int acpi_unmap_lsapic(int cpu); 
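The ocfs2 changes above extend ocfs2_double_lock() so that ocfs2_link(), like ocfs2_rename(), takes both directory inode locks through one helper that acquires them in a consistent order and only once when the two inodes are the same. The deadlock-avoidance idea generalizes; below is a userspace sketch with pthread mutexes (function and variable names are made up, this is not ocfs2 code).

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Take two locks in a single, globally consistent order (here: by address),
 * and only once when both arguments name the same object. */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (a == b) {
		pthread_mutex_lock(a);
		return;
	}
	if ((uintptr_t)a > (uintptr_t)b) {	/* normalize the locking order */
		pthread_mutex_t *tmp = a;
		a = b;
		b = tmp;
	}
	pthread_mutex_lock(a);
	pthread_mutex_lock(b);
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
	pthread_mutex_unlock(a);
	if (a != b)
		pthread_mutex_unlock(b);
}

static pthread_mutex_t old_dir = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t new_dir = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	/* Every caller that needs both locks goes through the same helper,
	 * so no two callers can hold them in opposite orders. */
	double_lock(&old_dir, &new_dir);
	puts("both directory locks held");
	double_unlock(&old_dir, &new_dir);
	return 0;
}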
+int acpi_map_cpu(acpi_handle handle, int physid, int *pcpu); +int acpi_unmap_cpu(int cpu); #endif /* CONFIG_ACPI_HOTPLUG_CPU */ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base); diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 8aded9a..5735e71 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -34,7 +34,6 @@ struct blk_mq_hw_ctx { unsigned long flags; /* BLK_MQ_F_* flags */ struct request_queue *queue; - unsigned int queue_num; struct blk_flush_queue *fq; void *driver_data; @@ -54,7 +53,7 @@ struct blk_mq_hw_ctx { unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER]; unsigned int numa_node; - unsigned int cmd_size; /* per-request extra data */ + unsigned int queue_num; atomic_t nr_active; @@ -195,13 +194,16 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag) struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index); struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int); +int blk_mq_request_started(struct request *rq); void blk_mq_start_request(struct request *rq); void blk_mq_end_request(struct request *rq, int error); void __blk_mq_end_request(struct request *rq, int error); void blk_mq_requeue_request(struct request *rq); void blk_mq_add_to_requeue_list(struct request *rq, bool at_head); +void blk_mq_cancel_requeue_work(struct request_queue *q); void blk_mq_kick_requeue_list(struct request_queue *q); +void blk_mq_abort_requeue_list(struct request_queue *q); void blk_mq_complete_request(struct request *rq); void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx); @@ -212,6 +214,8 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async); void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs); void blk_mq_tag_busy_iter(struct blk_mq_hw_ctx *hctx, busy_iter_fn *fn, void *priv); +void blk_mq_unfreeze_queue(struct request_queue *q); +void blk_mq_freeze_queue_start(struct request_queue *q); /* * Driver command data is immediately after the request. 
So subtract request diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 445d592..c294e3e 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -190,6 +190,7 @@ enum rq_flag_bits { __REQ_PM, /* runtime pm request */ __REQ_HASHED, /* on IO scheduler merge hash */ __REQ_MQ_INFLIGHT, /* track inflight for MQ */ + __REQ_NO_TIMEOUT, /* requests may never expire */ __REQ_NR_BITS, /* stops here */ }; @@ -243,5 +244,6 @@ enum rq_flag_bits { #define REQ_PM (1ULL << __REQ_PM) #define REQ_HASHED (1ULL << __REQ_HASHED) #define REQ_MQ_INFLIGHT (1ULL << __REQ_MQ_INFLIGHT) +#define REQ_NO_TIMEOUT (1ULL << __REQ_NO_TIMEOUT) #endif /* __LINUX_BLK_TYPES_H */ diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h index 5d86416..61b19c4 100644 --- a/include/linux/ceph/osd_client.h +++ b/include/linux/ceph/osd_client.h @@ -87,8 +87,8 @@ struct ceph_osd_req_op { struct ceph_osd_data osd_data; } extent; struct { - __le32 name_len; - __le32 value_len; + u32 name_len; + u32 value_len; __u8 cmp_op; /* CEPH_OSD_CMPXATTR_OP_* */ __u8 cmp_mode; /* CEPH_OSD_CMPXATTR_MODE_* */ struct ceph_osd_data osd_data; diff --git a/include/linux/compiler.h b/include/linux/compiler.h index a1c81f8..33063f8 100644 --- a/include/linux/compiler.h +++ b/include/linux/compiler.h @@ -215,7 +215,7 @@ static __always_inline void __read_once_size(volatile void *p, void *res, int si } } -static __always_inline void __assign_once_size(volatile void *p, void *res, int size) +static __always_inline void __write_once_size(volatile void *p, void *res, int size) { switch (size) { case 1: *(volatile __u8 *)p = *(__u8 *)res; break; @@ -235,15 +235,15 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int /* * Prevent the compiler from merging or refetching reads or writes. The * compiler is also forbidden from reordering successive instances of - * READ_ONCE, ASSIGN_ONCE and ACCESS_ONCE (see below), but only when the + * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the * compiler is aware of some particular ordering. One way to make the * compiler aware of ordering is to put the two invocations of READ_ONCE, - * ASSIGN_ONCE or ACCESS_ONCE() in different C statements. + * WRITE_ONCE or ACCESS_ONCE() in different C statements. * * In contrast to ACCESS_ONCE these two macros will also work on aggregate * data types like structs or unions. If the size of the accessed data * type exceeds the word size of the machine (e.g., 32 bits or 64 bits) - * READ_ONCE() and ASSIGN_ONCE() will fall back to memcpy and print a + * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a * compile-time warning. 
* * Their two major use cases are: (1) Mediating communication between @@ -257,8 +257,8 @@ static __always_inline void __assign_once_size(volatile void *p, void *res, int #define READ_ONCE(x) \ ({ typeof(x) __val; __read_once_size(&x, &__val, sizeof(__val)); __val; }) -#define ASSIGN_ONCE(val, x) \ - ({ typeof(x) __val; __val = val; __assign_once_size(&x, &__val, sizeof(__val)); __val; }) +#define WRITE_ONCE(x, val) \ + ({ typeof(x) __val; __val = val; __write_once_size(&x, &__val, sizeof(__val)); __val; }) #endif /* __KERNEL__ */ diff --git a/include/linux/fs.h b/include/linux/fs.h index f90c028..42efe13 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -135,7 +135,7 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, #define FMODE_CAN_WRITE ((__force fmode_t)0x40000) /* File was opened by fanotify and shouldn't generate fanotify events */ -#define FMODE_NONOTIFY ((__force fmode_t)0x1000000) +#define FMODE_NONOTIFY ((__force fmode_t)0x4000000) /* * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h index 55b6857..09460d6 100644 --- a/include/linux/genetlink.h +++ b/include/linux/genetlink.h @@ -11,6 +11,10 @@ extern void genl_unlock(void); extern int lockdep_genl_is_held(void); #endif +/* for synchronisation between af_netlink and genetlink */ +extern atomic_t genl_sk_destructing_cnt; +extern wait_queue_head_t genl_sk_destructing_waitq; + /** * rcu_dereference_genl - rcu_dereference with debug checking * @p: The pointer to read, prior to dereferencing diff --git a/include/linux/kdb.h b/include/linux/kdb.h index 290db12..75ae2e2 100644 --- a/include/linux/kdb.h +++ b/include/linux/kdb.h @@ -13,11 +13,54 @@ * Copyright (C) 2009 Jason Wessel <jason.wessel@windriver.com> */ +/* Shifted versions of the command enable bits are be used if the command + * has no arguments (see kdb_check_flags). This allows commands, such as + * go, to have different permissions depending upon whether it is called + * with an argument. + */ +#define KDB_ENABLE_NO_ARGS_SHIFT 10 + typedef enum { - KDB_REPEAT_NONE = 0, /* Do not repeat this command */ - KDB_REPEAT_NO_ARGS, /* Repeat the command without arguments */ - KDB_REPEAT_WITH_ARGS, /* Repeat the command including its arguments */ -} kdb_repeat_t; + KDB_ENABLE_ALL = (1 << 0), /* Enable everything */ + KDB_ENABLE_MEM_READ = (1 << 1), + KDB_ENABLE_MEM_WRITE = (1 << 2), + KDB_ENABLE_REG_READ = (1 << 3), + KDB_ENABLE_REG_WRITE = (1 << 4), + KDB_ENABLE_INSPECT = (1 << 5), + KDB_ENABLE_FLOW_CTRL = (1 << 6), + KDB_ENABLE_SIGNAL = (1 << 7), + KDB_ENABLE_REBOOT = (1 << 8), + /* User exposed values stop here, all remaining flags are + * exclusively used to describe a commands behaviour. 
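The compiler.h hunk above renames ASSIGN_ONCE(val, x) to WRITE_ONCE(x, val), pairing it with READ_ONCE() for accesses the compiler must neither merge, cache nor refetch. For word-sized objects the effect can be approximated in plain C with a volatile cast, as in the self-contained sketch below; the kernel macros additionally fall back to memcpy for aggregates, and __typeof__ is a GCC/Clang extension.

#include <stdio.h>

/* Word-sized approximation of the kernel's READ_ONCE()/WRITE_ONCE():
 * the volatile access forces exactly one load or store and prevents the
 * compiler from caching, re-reading or eliding the access. */
#define READ_ONCE(x)		(*(volatile __typeof__(x) *)&(x))
#define WRITE_ONCE(x, val)	(*(volatile __typeof__(x) *)&(x) = (val))

static int shared_flag;

int main(void)
{
	WRITE_ONCE(shared_flag, 1);		/* one store, never optimized away */

	/* Every READ_ONCE() is a fresh load; a plain read inside a loop could
	 * legally be hoisted by the compiler if another thread were writing. */
	printf("flag=%d\n", READ_ONCE(shared_flag));
	return 0;
}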
+ */ + + KDB_ENABLE_ALWAYS_SAFE = (1 << 9), + KDB_ENABLE_MASK = (1 << KDB_ENABLE_NO_ARGS_SHIFT) - 1, + + KDB_ENABLE_ALL_NO_ARGS = KDB_ENABLE_ALL << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_READ_NO_ARGS = KDB_ENABLE_MEM_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MEM_WRITE_NO_ARGS = KDB_ENABLE_MEM_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_READ_NO_ARGS = KDB_ENABLE_REG_READ + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REG_WRITE_NO_ARGS = KDB_ENABLE_REG_WRITE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_INSPECT_NO_ARGS = KDB_ENABLE_INSPECT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_FLOW_CTRL_NO_ARGS = KDB_ENABLE_FLOW_CTRL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_SIGNAL_NO_ARGS = KDB_ENABLE_SIGNAL + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_REBOOT_NO_ARGS = KDB_ENABLE_REBOOT + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS = KDB_ENABLE_ALWAYS_SAFE + << KDB_ENABLE_NO_ARGS_SHIFT, + KDB_ENABLE_MASK_NO_ARGS = KDB_ENABLE_MASK << KDB_ENABLE_NO_ARGS_SHIFT, + + KDB_REPEAT_NO_ARGS = 0x40000000, /* Repeat the command w/o arguments */ + KDB_REPEAT_WITH_ARGS = 0x80000000, /* Repeat the command with args */ +} kdb_cmdflags_t; typedef int (*kdb_func_t)(int, const char **); @@ -62,6 +105,7 @@ extern atomic_t kdb_event; #define KDB_BADLENGTH (-19) #define KDB_NOBP (-20) #define KDB_BADADDR (-21) +#define KDB_NOPERM (-22) /* * kdb_diemsg @@ -146,17 +190,17 @@ static inline const char *kdb_walk_kallsyms(loff_t *pos) /* Dynamic kdb shell command registration */ extern int kdb_register(char *, kdb_func_t, char *, char *, short); -extern int kdb_register_repeat(char *, kdb_func_t, char *, char *, - short, kdb_repeat_t); +extern int kdb_register_flags(char *, kdb_func_t, char *, char *, + short, kdb_cmdflags_t); extern int kdb_unregister(char *); #else /* ! CONFIG_KGDB_KDB */ static inline __printf(1, 2) int kdb_printf(const char *fmt, ...) 
{ return 0; } static inline void kdb_init(int level) {} static inline int kdb_register(char *cmd, kdb_func_t func, char *usage, char *help, short minlen) { return 0; } -static inline int kdb_register_repeat(char *cmd, kdb_func_t func, char *usage, - char *help, short minlen, - kdb_repeat_t repeat) { return 0; } +static inline int kdb_register_flags(char *cmd, kdb_func_t func, char *usage, + char *help, short minlen, + kdb_cmdflags_t flags) { return 0; } static inline int kdb_unregister(char *cmd) { return 0; } #endif /* CONFIG_KGDB_KDB */ enum { diff --git a/include/linux/libata.h b/include/linux/libata.h index 2d18241..91f705d 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -231,6 +231,7 @@ enum { ATA_FLAG_SW_ACTIVITY = (1 << 22), /* driver supports sw activity * led */ ATA_FLAG_NO_DIPM = (1 << 23), /* host not happy with DIPM */ + ATA_FLAG_LOWTAG = (1 << 24), /* host wants lowest available tag */ /* bits 24:31 of ap->flags are reserved for LLD specific flags */ @@ -422,6 +423,7 @@ enum { ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */ ATA_HORKAGE_NOLPM = (1 << 20), /* don't use LPM */ ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ + ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/mm.h b/include/linux/mm.h index f80d019..80fc92a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1952,7 +1952,7 @@ extern int expand_downwards(struct vm_area_struct *vma, #if VM_GROWSUP extern int expand_upwards(struct vm_area_struct *vma, unsigned long address); #else - #define expand_upwards(vma, address) do { } while (0) + #define expand_upwards(vma, address) (0) #endif /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */ diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h index 375af80..f767a0d 100644 --- a/include/linux/mmc/sdhci.h +++ b/include/linux/mmc/sdhci.h @@ -137,6 +137,7 @@ struct sdhci_host { #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ #define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ +#define SDHCI_HS400_TUNING (1<<13) /* Tuning for HS400 */ unsigned int version; /* SDHCI spec. version */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index 679e6e90..52fd8e8 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -852,11 +852,11 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev, * 3. Update dev->stats asynchronously and atomically, and define * neither operation. * - * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16t vid); + * int (*ndo_vlan_rx_add_vid)(struct net_device *dev, __be16 proto, u16 vid); * If device support VLAN filtering this function is called when a * VLAN id is registered. * - * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, unsigned short vid); + * int (*ndo_vlan_rx_kill_vid)(struct net_device *dev, __be16 proto, u16 vid); * If device support VLAN filtering this function is called when a * VLAN id is unregistered. 
* @@ -2085,7 +2085,7 @@ extern rwlock_t dev_base_lock; /* Device list lock */ list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list) #define for_each_netdev_in_bond_rcu(bond, slave) \ for_each_netdev_rcu(&init_net, slave) \ - if (netdev_master_upper_dev_get_rcu(slave) == bond) + if (netdev_master_upper_dev_get_rcu(slave) == (bond)) #define net_device_entry(lh) list_entry(lh, struct net_device, dev_list) static inline struct net_device *next_net_device(struct net_device *dev) diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h index 1e37fbb..ddea982 100644 --- a/include/linux/nfs_fs_sb.h +++ b/include/linux/nfs_fs_sb.h @@ -74,6 +74,9 @@ struct nfs_client { /* idmapper */ struct idmap * cl_idmap; + /* Client owner identifier */ + const char * cl_owner_id; + /* Our own IP address, as a null-terminated string. * This is used to generate the mv0 callback address. */ diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 486e84c..4f7a61c 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -79,11 +79,6 @@ struct perf_branch_stack { struct perf_branch_entry entries[0]; }; -struct perf_regs { - __u64 abi; - struct pt_regs *regs; -}; - struct task_struct; /* @@ -610,7 +605,14 @@ struct perf_sample_data { u32 reserved; } cpu_entry; struct perf_callchain_entry *callchain; + + /* + * regs_user may point to task_pt_regs or to regs_user_copy, depending + * on arch details. + */ struct perf_regs regs_user; + struct pt_regs regs_user_copy; + struct perf_regs regs_intr; u64 stack_user_size; } ____cacheline_aligned; diff --git a/include/linux/perf_regs.h b/include/linux/perf_regs.h index 3c73d5f..a5f98d5 100644 --- a/include/linux/perf_regs.h +++ b/include/linux/perf_regs.h @@ -1,11 +1,19 @@ #ifndef _LINUX_PERF_REGS_H #define _LINUX_PERF_REGS_H +struct perf_regs { + __u64 abi; + struct pt_regs *regs; +}; + #ifdef CONFIG_HAVE_PERF_REGS #include <asm/perf_regs.h> u64 perf_reg_value(struct pt_regs *regs, int idx); int perf_reg_validate(u64 mask); u64 perf_reg_abi(struct task_struct *task); +void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy); #else static inline u64 perf_reg_value(struct pt_regs *regs, int idx) { @@ -21,5 +29,13 @@ static inline u64 perf_reg_abi(struct task_struct *task) { return PERF_SAMPLE_REGS_ABI_NONE; } + +static inline void perf_get_regs_user(struct perf_regs *regs_user, + struct pt_regs *regs, + struct pt_regs *regs_user_copy) +{ + regs_user->regs = task_pt_regs(current); + regs_user->abi = perf_reg_abi(current); +} #endif /* CONFIG_HAVE_PERF_REGS */ #endif /* _LINUX_PERF_REGS_H */ diff --git a/include/linux/phy/omap_control_phy.h b/include/linux/phy/omap_control_phy.h index e9e6cfb..eb7d4a1 100644 --- a/include/linux/phy/omap_control_phy.h +++ b/include/linux/phy/omap_control_phy.h @@ -66,7 +66,7 @@ enum omap_control_usb_mode { #define OMAP_CTRL_PIPE3_PHY_TX_RX_POWEROFF 0x0 #define OMAP_CTRL_PCIE_PCS_MASK 0xff -#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 0x8 +#define OMAP_CTRL_PCIE_PCS_DELAY_COUNT_SHIFT 16 #define OMAP_CTRL_USB2_PHY_PD BIT(28) @@ -79,7 +79,7 @@ enum omap_control_usb_mode { void omap_control_phy_power(struct device *dev, int on); void omap_control_usb_set_mode(struct device *dev, enum omap_control_usb_mode mode); -void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay); +void omap_control_pcie_pcs(struct device *dev, u8 delay); #else static inline void omap_control_phy_power(struct device *dev, int on) @@ -91,7 +91,7 @@ static 
inline void omap_control_usb_set_mode(struct device *dev, { } -static inline void omap_control_pcie_pcs(struct device *dev, u8 id, u8 delay) +static inline void omap_control_pcie_pcs(struct device *dev, u8 delay) { } #endif diff --git a/include/linux/rmap.h b/include/linux/rmap.h index c0c2bce..d9d7e7e 100644 --- a/include/linux/rmap.h +++ b/include/linux/rmap.h @@ -37,6 +37,16 @@ struct anon_vma { atomic_t refcount; /* + * Count of child anon_vmas and VMAs which points to this anon_vma. + * + * This counter is used for making decision about reusing anon_vma + * instead of forking new one. See comments in function anon_vma_clone. + */ + unsigned degree; + + struct anon_vma *parent; /* Parent of this anon_vma */ + + /* * NOTE: the LSB of the rb_root.rb_node is set by * mm_take_all_locks() _after_ taking the above lock. So the * rb_root must only be read/written after taking the above lock diff --git a/include/linux/writeback.h b/include/linux/writeback.h index a219be9..0004833 100644 --- a/include/linux/writeback.h +++ b/include/linux/writeback.h @@ -177,7 +177,6 @@ int write_cache_pages(struct address_space *mapping, struct writeback_control *wbc, writepage_t writepage, void *data); int do_writepages(struct address_space *mapping, struct writeback_control *wbc); -void set_page_dirty_balance(struct page *page); void writeback_set_ratelimit(void); void tag_pages_for_writeback(struct address_space *mapping, pgoff_t start, pgoff_t end); diff --git a/include/net/genetlink.h b/include/net/genetlink.h index 8412508..6c92415 100644 --- a/include/net/genetlink.h +++ b/include/net/genetlink.h @@ -27,13 +27,18 @@ struct genl_info; * @maxattr: maximum number of attributes supported * @netnsok: set to true if the family can handle network * namespaces and should be presented in all of them + * @parallel_ops: operations can be called in parallel and aren't + * synchronized by the core genetlink code * @pre_doit: called before an operation's doit callback, it may * do additional, common, filtering and return an error * @post_doit: called after an operation's doit callback, it may * undo operations done by pre_doit, for example release locks * @mcast_bind: a socket bound to the given multicast group (which * is given as the offset into the groups array) - * @mcast_unbind: a socket was unbound from the given multicast group + * @mcast_unbind: a socket was unbound from the given multicast group. + * Note that unbind() will not be called symmetrically if the + * generic netlink family is removed while there are still open + * sockets. * @attrbuf: buffer to store parsed attributes * @family_list: family list * @mcgrps: multicast groups used by this family (private) diff --git a/include/net/mac80211.h b/include/net/mac80211.h index 58d719d..29c7be8 100644 --- a/include/net/mac80211.h +++ b/include/net/mac80211.h @@ -1270,8 +1270,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * * @IEEE80211_KEY_FLAG_GENERATE_IV: This flag should be set by the * driver to indicate that it requires IV generation for this - * particular key. Setting this flag does not necessarily mean that SKBs - * will have sufficient tailroom for ICV or MIC. + * particular key. * @IEEE80211_KEY_FLAG_GENERATE_MMIC: This flag should be set by * the driver for a TKIP key if it requires Michael MIC * generation in software. 
@@ -1283,9 +1282,7 @@ struct ieee80211_vif *wdev_to_ieee80211_vif(struct wireless_dev *wdev); * @IEEE80211_KEY_FLAG_PUT_IV_SPACE: This flag should be set by the driver * if space should be prepared for the IV, but the IV * itself should not be generated. Do not set together with - * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. Setting this flag does - * not necessarily mean that SKBs will have sufficient tailroom for ICV or - * MIC. + * @IEEE80211_KEY_FLAG_GENERATE_IV on the same key. * @IEEE80211_KEY_FLAG_RX_MGMT: This key will be used to decrypt received * management frames. The flag can help drivers that have a hardware * crypto implementation that doesn't deal with management frames diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h index 430cfaf..db81c65 100644 --- a/include/target/target_core_backend.h +++ b/include/target/target_core_backend.h @@ -135,7 +135,6 @@ int se_dev_set_is_nonrot(struct se_device *, int); int se_dev_set_emulate_rest_reord(struct se_device *dev, int); int se_dev_set_queue_depth(struct se_device *, u32); int se_dev_set_max_sectors(struct se_device *, u32); -int se_dev_set_fabric_max_sectors(struct se_device *, u32); int se_dev_set_optimal_sectors(struct se_device *, u32); int se_dev_set_block_size(struct se_device *, u32); diff --git a/include/target/target_core_backend_configfs.h b/include/target/target_core_backend_configfs.h index 3247d75..186f7a9 100644 --- a/include/target/target_core_backend_configfs.h +++ b/include/target/target_core_backend_configfs.h @@ -98,8 +98,6 @@ static struct target_backend_dev_attrib_attribute _backend##_dev_attrib_##_name TB_DEV_ATTR(_backend, block_size, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB_RO(_backend, hw_max_sectors); \ TB_DEV_ATTR_RO(_backend, hw_max_sectors); \ - DEF_TB_DEV_ATTRIB(_backend, fabric_max_sectors); \ - TB_DEV_ATTR(_backend, fabric_max_sectors, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB(_backend, optimal_sectors); \ TB_DEV_ATTR(_backend, optimal_sectors, S_IRUGO | S_IWUSR); \ DEF_TB_DEV_ATTRIB_RO(_backend, hw_queue_depth); \ diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h index 397fb63..4a8795a 100644 --- a/include/target/target_core_base.h +++ b/include/target/target_core_base.h @@ -77,8 +77,6 @@ #define DA_UNMAP_GRANULARITY_ALIGNMENT_DEFAULT 0 /* Default max_write_same_len, disabled by default */ #define DA_MAX_WRITE_SAME_LEN 0 -/* Default max transfer length */ -#define DA_FABRIC_MAX_SECTORS 8192 /* Use a model alias based on the configfs backend device name */ #define DA_EMULATE_MODEL_ALIAS 0 /* Emulation for Direct Page Out */ @@ -694,7 +692,6 @@ struct se_dev_attrib { u32 hw_block_size; u32 block_size; u32 hw_max_sectors; - u32 fabric_max_sectors; u32 optimal_sectors; u32 hw_queue_depth; u32 queue_depth; diff --git a/include/uapi/asm-generic/fcntl.h b/include/uapi/asm-generic/fcntl.h index 7543b3e..e063eff 100644 --- a/include/uapi/asm-generic/fcntl.h +++ b/include/uapi/asm-generic/fcntl.h @@ -5,7 +5,7 @@ /* * FMODE_EXEC is 0x20 - * FMODE_NONOTIFY is 0x1000000 + * FMODE_NONOTIFY is 0x4000000 * These cannot be used by userspace O_* until internal and external open * flags are split. 
* -Eric Paris diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h index 3e4323a..94ffe0c 100644 --- a/include/uapi/linux/can/netlink.h +++ b/include/uapi/linux/can/netlink.h @@ -98,6 +98,7 @@ struct can_ctrlmode { #define CAN_CTRLMODE_BERR_REPORTING 0x10 /* Bus-error reporting */ #define CAN_CTRLMODE_FD 0x20 /* CAN FD mode */ #define CAN_CTRLMODE_PRESUME_ACK 0x40 /* Ignore missing CAN ACKs */ +#define CAN_CTRLMODE_FD_NON_ISO 0x80 /* CAN FD in non-ISO mode */ /* * CAN device statistics diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h index 7acef41..af94f31 100644 --- a/include/uapi/linux/kfd_ioctl.h +++ b/include/uapi/linux/kfd_ioctl.h @@ -128,27 +128,34 @@ struct kfd_ioctl_get_process_apertures_args { uint32_t pad; }; -#define KFD_IOC_MAGIC 'K' +#define AMDKFD_IOCTL_BASE 'K' +#define AMDKFD_IO(nr) _IO(AMDKFD_IOCTL_BASE, nr) +#define AMDKFD_IOR(nr, type) _IOR(AMDKFD_IOCTL_BASE, nr, type) +#define AMDKFD_IOW(nr, type) _IOW(AMDKFD_IOCTL_BASE, nr, type) +#define AMDKFD_IOWR(nr, type) _IOWR(AMDKFD_IOCTL_BASE, nr, type) -#define KFD_IOC_GET_VERSION \ - _IOR(KFD_IOC_MAGIC, 1, struct kfd_ioctl_get_version_args) +#define AMDKFD_IOC_GET_VERSION \ + AMDKFD_IOR(0x01, struct kfd_ioctl_get_version_args) -#define KFD_IOC_CREATE_QUEUE \ - _IOWR(KFD_IOC_MAGIC, 2, struct kfd_ioctl_create_queue_args) +#define AMDKFD_IOC_CREATE_QUEUE \ + AMDKFD_IOWR(0x02, struct kfd_ioctl_create_queue_args) -#define KFD_IOC_DESTROY_QUEUE \ - _IOWR(KFD_IOC_MAGIC, 3, struct kfd_ioctl_destroy_queue_args) +#define AMDKFD_IOC_DESTROY_QUEUE \ + AMDKFD_IOWR(0x03, struct kfd_ioctl_destroy_queue_args) -#define KFD_IOC_SET_MEMORY_POLICY \ - _IOW(KFD_IOC_MAGIC, 4, struct kfd_ioctl_set_memory_policy_args) +#define AMDKFD_IOC_SET_MEMORY_POLICY \ + AMDKFD_IOW(0x04, struct kfd_ioctl_set_memory_policy_args) -#define KFD_IOC_GET_CLOCK_COUNTERS \ - _IOWR(KFD_IOC_MAGIC, 5, struct kfd_ioctl_get_clock_counters_args) +#define AMDKFD_IOC_GET_CLOCK_COUNTERS \ + AMDKFD_IOWR(0x05, struct kfd_ioctl_get_clock_counters_args) -#define KFD_IOC_GET_PROCESS_APERTURES \ - _IOR(KFD_IOC_MAGIC, 6, struct kfd_ioctl_get_process_apertures_args) +#define AMDKFD_IOC_GET_PROCESS_APERTURES \ + AMDKFD_IOR(0x06, struct kfd_ioctl_get_process_apertures_args) -#define KFD_IOC_UPDATE_QUEUE \ - _IOW(KFD_IOC_MAGIC, 7, struct kfd_ioctl_update_queue_args) +#define AMDKFD_IOC_UPDATE_QUEUE \ + AMDKFD_IOW(0x07, struct kfd_ioctl_update_queue_args) + +#define AMDKFD_COMMAND_START 0x01 +#define AMDKFD_COMMAND_END 0x08 #endif diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h index 3a6dcaa..f714e86 100644 --- a/include/uapi/linux/openvswitch.h +++ b/include/uapi/linux/openvswitch.h @@ -174,6 +174,10 @@ enum ovs_packet_attr { OVS_PACKET_ATTR_USERDATA, /* OVS_ACTION_ATTR_USERSPACE arg. */ OVS_PACKET_ATTR_EGRESS_TUN_KEY, /* Nested OVS_TUNNEL_KEY_ATTR_* attributes. */ + OVS_PACKET_ATTR_UNUSED1, + OVS_PACKET_ATTR_UNUSED2, + OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe, + error logging should be suppressed. 
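The kfd_ioctl.h hunk above rebuilds the amdkfd ioctl numbers on top of the standard _IO/_IOR/_IOW/_IOWR helpers, so that direction, magic ('K'), command number and argument size are all encoded in a single value. The small userspace program below decodes such a number with the generic _IOC_* macros; the struct and names are stand-ins that merely mirror the header above, not driver code.

#include <stdio.h>
#include <stdint.h>
#include <linux/ioctl.h>	/* _IOR/_IOW/... and the _IOC_* decode macros */

/* Stand-in for one of the argument structs in kfd_ioctl.h. */
struct demo_get_version_args {
	uint32_t major_version;
	uint32_t minor_version;
};

#define DEMO_IOCTL_BASE		'K'
#define DEMO_IOC_GET_VERSION	_IOR(DEMO_IOCTL_BASE, 0x01, struct demo_get_version_args)

int main(void)
{
	unsigned int cmd = DEMO_IOC_GET_VERSION;

	/* Direction, magic, number and argument size are all recoverable
	 * from the single command value. */
	printf("cmd=0x%08x dir=%u type='%c' nr=0x%02x size=%u\n",
	       cmd, _IOC_DIR(cmd), _IOC_TYPE(cmd), _IOC_NR(cmd), _IOC_SIZE(cmd));
	return 0;
}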
*/ __OVS_PACKET_ATTR_MAX }; diff --git a/include/uapi/linux/uinput.h b/include/uapi/linux/uinput.h index baeab83..013c9d8 100644 --- a/include/uapi/linux/uinput.h +++ b/include/uapi/linux/uinput.h @@ -82,7 +82,7 @@ struct uinput_ff_erase { * The complete sysfs path is then /sys/devices/virtual/input/--NAME-- * Usually, it is in the form "inputN" */ -#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 300, len) +#define UI_GET_SYSNAME(len) _IOC(_IOC_READ, UINPUT_IOCTL_BASE, 44, len) /** * UI_GET_VERSION - Return version of uinput protocol @@ -91,7 +91,7 @@ struct uinput_ff_erase { * the integer pointed to by the ioctl argument. The protocol version * is hard-coded in the kernel and is independent of the uinput device. */ -#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 301, unsigned int) +#define UI_GET_VERSION _IOR(UINPUT_IOCTL_BASE, 45, unsigned int) /* * To write a force-feedback-capable driver, the upload_effect diff --git a/include/xen/interface/nmi.h b/include/xen/interface/nmi.h new file mode 100644 index 0000000..b47d9d0 --- /dev/null +++ b/include/xen/interface/nmi.h @@ -0,0 +1,51 @@ +/****************************************************************************** + * nmi.h + * + * NMI callback registration and reason codes. + * + * Copyright (c) 2005, Keir Fraser <keir@xensource.com> + */ + +#ifndef __XEN_PUBLIC_NMI_H__ +#define __XEN_PUBLIC_NMI_H__ + +#include <xen/interface/xen.h> + +/* + * NMI reason codes: + * Currently these are x86-specific, stored in arch_shared_info.nmi_reason. + */ + /* I/O-check error reported via ISA port 0x61, bit 6. */ +#define _XEN_NMIREASON_io_error 0 +#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error) + /* PCI SERR reported via ISA port 0x61, bit 7. */ +#define _XEN_NMIREASON_pci_serr 1 +#define XEN_NMIREASON_pci_serr (1UL << _XEN_NMIREASON_pci_serr) + /* Unknown hardware-generated NMI. */ +#define _XEN_NMIREASON_unknown 2 +#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown) + +/* + * long nmi_op(unsigned int cmd, void *arg) + * NB. All ops return zero on success, else a negative error code. + */ + +/* + * Register NMI callback for this (calling) VCPU. Currently this only makes + * sense for domain 0, vcpu 0. All other callers will be returned EINVAL. + * arg == pointer to xennmi_callback structure. + */ +#define XENNMI_register_callback 0 +struct xennmi_callback { + unsigned long handler_address; + unsigned long pad; +}; +DEFINE_GUEST_HANDLE_STRUCT(xennmi_callback); + +/* + * Deregister NMI callback for this (calling) VCPU. + * arg == NULL. + */ +#define XENNMI_unregister_callback 1 + +#endif /* __XEN_PUBLIC_NMI_H__ */ diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c index 1adf62b..07ce18c 100644 --- a/kernel/debug/debug_core.c +++ b/kernel/debug/debug_core.c @@ -27,6 +27,9 @@ * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. 
*/ + +#define pr_fmt(fmt) "KGDB: " fmt + #include <linux/pid_namespace.h> #include <linux/clocksource.h> #include <linux/serial_core.h> @@ -196,8 +199,8 @@ int __weak kgdb_validate_break_address(unsigned long addr) return err; err = kgdb_arch_remove_breakpoint(&tmp); if (err) - printk(KERN_ERR "KGDB: Critical breakpoint error, kernel " - "memory destroyed at: %lx", addr); + pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", + addr); return err; } @@ -256,8 +259,8 @@ int dbg_activate_sw_breakpoints(void) error = kgdb_arch_set_breakpoint(&kgdb_break[i]); if (error) { ret = error; - printk(KERN_INFO "KGDB: BP install failed: %lx", - kgdb_break[i].bpt_addr); + pr_info("BP install failed: %lx\n", + kgdb_break[i].bpt_addr); continue; } @@ -319,8 +322,8 @@ int dbg_deactivate_sw_breakpoints(void) continue; error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); if (error) { - printk(KERN_INFO "KGDB: BP remove failed: %lx\n", - kgdb_break[i].bpt_addr); + pr_info("BP remove failed: %lx\n", + kgdb_break[i].bpt_addr); ret = error; } @@ -367,7 +370,7 @@ int dbg_remove_all_break(void) goto setundefined; error = kgdb_arch_remove_breakpoint(&kgdb_break[i]); if (error) - printk(KERN_ERR "KGDB: breakpoint remove failed: %lx\n", + pr_err("breakpoint remove failed: %lx\n", kgdb_break[i].bpt_addr); setundefined: kgdb_break[i].state = BP_UNDEFINED; @@ -400,9 +403,9 @@ static int kgdb_io_ready(int print_wait) if (print_wait) { #ifdef CONFIG_KGDB_KDB if (!dbg_kdb_mode) - printk(KERN_CRIT "KGDB: waiting... or $3#33 for KDB\n"); + pr_crit("waiting... or $3#33 for KDB\n"); #else - printk(KERN_CRIT "KGDB: Waiting for remote debugger\n"); + pr_crit("Waiting for remote debugger\n"); #endif } return 1; @@ -430,8 +433,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) exception_level = 0; kgdb_skipexception(ks->ex_vector, ks->linux_regs); dbg_activate_sw_breakpoints(); - printk(KERN_CRIT "KGDB: re-enter error: breakpoint removed %lx\n", - addr); + pr_crit("re-enter error: breakpoint removed %lx\n", addr); WARN_ON_ONCE(1); return 1; @@ -444,7 +446,7 @@ static int kgdb_reenter_check(struct kgdb_state *ks) panic("Recursive entry to debugger"); } - printk(KERN_CRIT "KGDB: re-enter exception: ALL breakpoints killed\n"); + pr_crit("re-enter exception: ALL breakpoints killed\n"); #ifdef CONFIG_KGDB_KDB /* Allow kdb to debug itself one level */ return 0; @@ -471,6 +473,7 @@ static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs, int cpu; int trace_on = 0; int online_cpus = num_online_cpus(); + u64 time_left; kgdb_info[ks->cpu].enter_kgdb++; kgdb_info[ks->cpu].exception_state |= exception_state; @@ -595,9 +598,13 @@ return_normal: /* * Wait for the other CPUs to be notified and be waiting for us: */ - while (kgdb_do_roundup && (atomic_read(&masters_in_kgdb) + - atomic_read(&slaves_in_kgdb)) != online_cpus) + time_left = loops_per_jiffy * HZ; + while (kgdb_do_roundup && --time_left && + (atomic_read(&masters_in_kgdb) + atomic_read(&slaves_in_kgdb)) != + online_cpus) cpu_relax(); + if (!time_left) + pr_crit("KGDB: Timed out waiting for secondary CPUs.\n"); /* * At this point the primary processor is completely @@ -795,15 +802,15 @@ static struct console kgdbcons = { static void sysrq_handle_dbg(int key) { if (!dbg_io_ops) { - printk(KERN_CRIT "ERROR: No KGDB I/O module available\n"); + pr_crit("ERROR: No KGDB I/O module available\n"); return; } if (!kgdb_connected) { #ifdef CONFIG_KGDB_KDB if (!dbg_kdb_mode) - printk(KERN_CRIT "KGDB or $3#33 for KDB\n"); + pr_crit("KGDB or $3#33 for KDB\n"); 
#else - printk(KERN_CRIT "Entering KGDB\n"); + pr_crit("Entering KGDB\n"); #endif } @@ -945,7 +952,7 @@ static void kgdb_initial_breakpoint(void) { kgdb_break_asap = 0; - printk(KERN_CRIT "kgdb: Waiting for connection from remote gdb...\n"); + pr_crit("Waiting for connection from remote gdb...\n"); kgdb_breakpoint(); } @@ -964,8 +971,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) if (dbg_io_ops) { spin_unlock(&kgdb_registration_lock); - printk(KERN_ERR "kgdb: Another I/O driver is already " - "registered with KGDB.\n"); + pr_err("Another I/O driver is already registered with KGDB\n"); return -EBUSY; } @@ -981,8 +987,7 @@ int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) spin_unlock(&kgdb_registration_lock); - printk(KERN_INFO "kgdb: Registered I/O driver %s.\n", - new_dbg_io_ops->name); + pr_info("Registered I/O driver %s\n", new_dbg_io_ops->name); /* Arm KGDB now. */ kgdb_register_callbacks(); @@ -1017,8 +1022,7 @@ void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) spin_unlock(&kgdb_registration_lock); - printk(KERN_INFO - "kgdb: Unregistered I/O driver %s, debugger disabled.\n", + pr_info("Unregistered I/O driver %s, debugger disabled\n", old_dbg_io_ops->name); } EXPORT_SYMBOL_GPL(kgdb_unregister_io_module); diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c index b20d544..e1dbf4a 100644 --- a/kernel/debug/kdb/kdb_bp.c +++ b/kernel/debug/kdb/kdb_bp.c @@ -531,22 +531,29 @@ void __init kdb_initbptab(void) for (i = 0, bp = kdb_breakpoints; i < KDB_MAXBPT; i++, bp++) bp->bp_free = 1; - kdb_register_repeat("bp", kdb_bp, "[<vaddr>]", - "Set/Display breakpoints", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("bl", kdb_bp, "[<vaddr>]", - "Display breakpoints", 0, KDB_REPEAT_NO_ARGS); + kdb_register_flags("bp", kdb_bp, "[<vaddr>]", + "Set/Display breakpoints", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); + kdb_register_flags("bl", kdb_bp, "[<vaddr>]", + "Display breakpoints", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); if (arch_kgdb_ops.flags & KGDB_HW_BREAKPOINT) - kdb_register_repeat("bph", kdb_bp, "[<vaddr>]", - "[datar [length]|dataw [length]] Set hw brk", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("bc", kdb_bc, "<bpnum>", - "Clear Breakpoint", 0, KDB_REPEAT_NONE); - kdb_register_repeat("be", kdb_bc, "<bpnum>", - "Enable Breakpoint", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bd", kdb_bc, "<bpnum>", - "Disable Breakpoint", 0, KDB_REPEAT_NONE); - - kdb_register_repeat("ss", kdb_ss, "", - "Single Step", 1, KDB_REPEAT_NO_ARGS); + kdb_register_flags("bph", kdb_bp, "[<vaddr>]", + "[datar [length]|dataw [length]] Set hw brk", 0, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); + kdb_register_flags("bc", kdb_bc, "<bpnum>", + "Clear Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + kdb_register_flags("be", kdb_bc, "<bpnum>", + "Enable Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + kdb_register_flags("bd", kdb_bc, "<bpnum>", + "Disable Breakpoint", 0, + KDB_ENABLE_FLOW_CTRL); + + kdb_register_flags("ss", kdb_ss, "", + "Single Step", 1, + KDB_ENABLE_FLOW_CTRL | KDB_REPEAT_NO_ARGS); /* * Architecture dependent initialization. 
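The debug_core.c conversion above replaces open-coded printk(KERN_ERR "KGDB: ...") calls with pr_err()/pr_info()/pr_crit() and a single pr_fmt() definition at the top of the file, so the "KGDB: " prefix is applied in one place. Below is a stripped-down userspace imitation of that arrangement; the real pr_* macros do considerably more, and ##__VA_ARGS__ is a GNU extension.

#include <stdio.h>

/* Define the prefix once, before the pr_* helpers, in the same spirit as the
 * pr_fmt() definition added at the top of debug_core.c. */
#define pr_fmt(fmt)		"KGDB: " fmt

#define pr_err(fmt, ...)	fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...)	fprintf(stdout, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	unsigned long addr = 0xdeadbeefUL;

	/* Both lines come out with the "KGDB: " prefix prepended. */
	pr_err("Critical breakpoint error, kernel memory destroyed at: %lx\n", addr);
	pr_info("Registered I/O driver %s\n", "kgdboc");
	return 0;
}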
*/ diff --git a/kernel/debug/kdb/kdb_debugger.c b/kernel/debug/kdb/kdb_debugger.c index 8859ca3..15e1a7a 100644 --- a/kernel/debug/kdb/kdb_debugger.c +++ b/kernel/debug/kdb/kdb_debugger.c @@ -129,6 +129,10 @@ int kdb_stub(struct kgdb_state *ks) ks->pass_exception = 1; KDB_FLAG_SET(CATASTROPHIC); } + /* set CATASTROPHIC if the system contains unresponsive processors */ + for_each_online_cpu(i) + if (!kgdb_info[i].enter_kgdb) + KDB_FLAG_SET(CATASTROPHIC); if (KDB_STATE(SSBPT) && reason == KDB_REASON_SSTEP) { KDB_STATE_CLEAR(SSBPT); KDB_STATE_CLEAR(DOING_SS); diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c index 379650b..f191bdd 100644 --- a/kernel/debug/kdb/kdb_main.c +++ b/kernel/debug/kdb/kdb_main.c @@ -12,6 +12,7 @@ */ #include <linux/ctype.h> +#include <linux/types.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/kmsg_dump.h> @@ -23,6 +24,7 @@ #include <linux/vmalloc.h> #include <linux/atomic.h> #include <linux/module.h> +#include <linux/moduleparam.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/kallsyms.h> @@ -42,6 +44,12 @@ #include <linux/slab.h> #include "kdb_private.h" +#undef MODULE_PARAM_PREFIX +#define MODULE_PARAM_PREFIX "kdb." + +static int kdb_cmd_enabled = CONFIG_KDB_DEFAULT_ENABLE; +module_param_named(cmd_enable, kdb_cmd_enabled, int, 0600); + #define GREP_LEN 256 char kdb_grep_string[GREP_LEN]; int kdb_grepping_flag; @@ -121,6 +129,7 @@ static kdbmsg_t kdbmsgs[] = { KDBMSG(BADLENGTH, "Invalid length field"), KDBMSG(NOBP, "No Breakpoint exists"), KDBMSG(BADADDR, "Invalid address"), + KDBMSG(NOPERM, "Permission denied"), }; #undef KDBMSG @@ -188,6 +197,26 @@ struct task_struct *kdb_curr_task(int cpu) } /* + * Check whether the flags of the current command and the permissions + * of the kdb console has allow a command to be run. + */ +static inline bool kdb_check_flags(kdb_cmdflags_t flags, int permissions, + bool no_args) +{ + /* permissions comes from userspace so needs massaging slightly */ + permissions &= KDB_ENABLE_MASK; + permissions |= KDB_ENABLE_ALWAYS_SAFE; + + /* some commands change group when launched with no arguments */ + if (no_args) + permissions |= permissions << KDB_ENABLE_NO_ARGS_SHIFT; + + flags |= KDB_ENABLE_ALL; + + return permissions & flags; +} + +/* * kdbgetenv - This function will return the character string value of * an environment variable. * Parameters: @@ -476,6 +505,15 @@ int kdbgetaddrarg(int argc, const char **argv, int *nextarg, kdb_symtab_t symtab; /* + * If the enable flags prohibit both arbitrary memory access + * and flow control then there are no reasonable grounds to + * provide symbol lookup. + */ + if (!kdb_check_flags(KDB_ENABLE_MEM_READ | KDB_ENABLE_FLOW_CTRL, + kdb_cmd_enabled, false)) + return KDB_NOPERM; + + /* * Process arguments which follow the following syntax: * * symbol | numeric-address [+/- numeric-offset] @@ -641,8 +679,13 @@ static int kdb_defcmd2(const char *cmdstr, const char *argv0) if (!s->count) s->usable = 0; if (s->usable) - kdb_register(s->name, kdb_exec_defcmd, - s->usage, s->help, 0); + /* macros are always safe because when executed each + * internal command re-enters kdb_parse() and is + * safety checked individually. 
+ */ + kdb_register_flags(s->name, kdb_exec_defcmd, s->usage, + s->help, 0, + KDB_ENABLE_ALWAYS_SAFE); return 0; } if (!s->usable) @@ -1003,25 +1046,22 @@ int kdb_parse(const char *cmdstr) if (i < kdb_max_commands) { int result; + + if (!kdb_check_flags(tp->cmd_flags, kdb_cmd_enabled, argc <= 1)) + return KDB_NOPERM; + KDB_STATE_SET(CMD); result = (*tp->cmd_func)(argc-1, (const char **)argv); if (result && ignore_errors && result > KDB_CMD_GO) result = 0; KDB_STATE_CLEAR(CMD); - switch (tp->cmd_repeat) { - case KDB_REPEAT_NONE: - argc = 0; - if (argv[0]) - *(argv[0]) = '\0'; - break; - case KDB_REPEAT_NO_ARGS: - argc = 1; - if (argv[1]) - *(argv[1]) = '\0'; - break; - case KDB_REPEAT_WITH_ARGS: - break; - } + + if (tp->cmd_flags & KDB_REPEAT_WITH_ARGS) + return result; + + argc = tp->cmd_flags & KDB_REPEAT_NO_ARGS ? 1 : 0; + if (argv[argc]) + *(argv[argc]) = '\0'; return result; } @@ -1921,10 +1961,14 @@ static int kdb_rm(int argc, const char **argv) */ static int kdb_sr(int argc, const char **argv) { + bool check_mask = + !kdb_check_flags(KDB_ENABLE_ALL, kdb_cmd_enabled, false); + if (argc != 1) return KDB_ARGCOUNT; + kdb_trap_printk++; - __handle_sysrq(*argv[1], false); + __handle_sysrq(*argv[1], check_mask); kdb_trap_printk--; return 0; @@ -2157,6 +2201,8 @@ static void kdb_cpu_status(void) for (start_cpu = -1, i = 0; i < NR_CPUS; i++) { if (!cpu_online(i)) { state = 'F'; /* cpu is offline */ + } else if (!kgdb_info[i].enter_kgdb) { + state = 'D'; /* cpu is online but unresponsive */ } else { state = ' '; /* cpu is responding to kdb */ if (kdb_task_state_char(KDB_TSK(i)) == 'I') @@ -2210,7 +2256,7 @@ static int kdb_cpu(int argc, const char **argv) /* * Validate cpunum */ - if ((cpunum > NR_CPUS) || !cpu_online(cpunum)) + if ((cpunum > NR_CPUS) || !kgdb_info[cpunum].enter_kgdb) return KDB_BADCPUNUM; dbg_switch_cpu = cpunum; @@ -2375,6 +2421,8 @@ static int kdb_help(int argc, const char **argv) return 0; if (!kt->cmd_name) continue; + if (!kdb_check_flags(kt->cmd_flags, kdb_cmd_enabled, true)) + continue; if (strlen(kt->cmd_usage) > 20) space = "\n "; kdb_printf("%-15.15s %-20s%s%s\n", kt->cmd_name, @@ -2629,7 +2677,7 @@ static int kdb_grep_help(int argc, const char **argv) } /* - * kdb_register_repeat - This function is used to register a kernel + * kdb_register_flags - This function is used to register a kernel * debugger command. * Inputs: * cmd Command name @@ -2641,12 +2689,12 @@ static int kdb_grep_help(int argc, const char **argv) * zero for success, one if a duplicate command. */ #define kdb_command_extend 50 /* arbitrary */ -int kdb_register_repeat(char *cmd, - kdb_func_t func, - char *usage, - char *help, - short minlen, - kdb_repeat_t repeat) +int kdb_register_flags(char *cmd, + kdb_func_t func, + char *usage, + char *help, + short minlen, + kdb_cmdflags_t flags) { int i; kdbtab_t *kp; @@ -2694,19 +2742,18 @@ int kdb_register_repeat(char *cmd, kp->cmd_func = func; kp->cmd_usage = usage; kp->cmd_help = help; - kp->cmd_flags = 0; kp->cmd_minlen = minlen; - kp->cmd_repeat = repeat; + kp->cmd_flags = flags; return 0; } -EXPORT_SYMBOL_GPL(kdb_register_repeat); +EXPORT_SYMBOL_GPL(kdb_register_flags); /* * kdb_register - Compatibility register function for commands that do * not need to specify a repeat state. Equivalent to - * kdb_register_repeat with KDB_REPEAT_NONE. + * kdb_register_flags with flags set to 0. 
* Inputs: * cmd Command name * func Function to execute the command @@ -2721,8 +2768,7 @@ int kdb_register(char *cmd, char *help, short minlen) { - return kdb_register_repeat(cmd, func, usage, help, minlen, - KDB_REPEAT_NONE); + return kdb_register_flags(cmd, func, usage, help, minlen, 0); } EXPORT_SYMBOL_GPL(kdb_register); @@ -2764,80 +2810,109 @@ static void __init kdb_inittab(void) for_each_kdbcmd(kp, i) kp->cmd_name = NULL; - kdb_register_repeat("md", kdb_md, "<vaddr>", + kdb_register_flags("md", kdb_md, "<vaddr>", "Display Memory Contents, also mdWcN, e.g. md8c1", 1, - KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mdr", kdb_md, "<vaddr> <bytes>", - "Display Raw Memory", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mdp", kdb_md, "<paddr> <bytes>", - "Display Physical Memory", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mds", kdb_md, "<vaddr>", - "Display Memory Symbolically", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("mm", kdb_mm, "<vaddr> <contents>", - "Modify Memory Contents", 0, KDB_REPEAT_NO_ARGS); - kdb_register_repeat("go", kdb_go, "[<vaddr>]", - "Continue Execution", 1, KDB_REPEAT_NONE); - kdb_register_repeat("rd", kdb_rd, "", - "Display Registers", 0, KDB_REPEAT_NONE); - kdb_register_repeat("rm", kdb_rm, "<reg> <contents>", - "Modify Registers", 0, KDB_REPEAT_NONE); - kdb_register_repeat("ef", kdb_ef, "<vaddr>", - "Display exception frame", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bt", kdb_bt, "[<vaddr>]", - "Stack traceback", 1, KDB_REPEAT_NONE); - kdb_register_repeat("btp", kdb_bt, "<pid>", - "Display stack for process <pid>", 0, KDB_REPEAT_NONE); - kdb_register_repeat("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", - "Backtrace all processes matching state flag", 0, KDB_REPEAT_NONE); - kdb_register_repeat("btc", kdb_bt, "", - "Backtrace current process on each cpu", 0, KDB_REPEAT_NONE); - kdb_register_repeat("btt", kdb_bt, "<vaddr>", + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mdr", kdb_md, "<vaddr> <bytes>", + "Display Raw Memory", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mdp", kdb_md, "<paddr> <bytes>", + "Display Physical Memory", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mds", kdb_md, "<vaddr>", + "Display Memory Symbolically", 0, + KDB_ENABLE_MEM_READ | KDB_REPEAT_NO_ARGS); + kdb_register_flags("mm", kdb_mm, "<vaddr> <contents>", + "Modify Memory Contents", 0, + KDB_ENABLE_MEM_WRITE | KDB_REPEAT_NO_ARGS); + kdb_register_flags("go", kdb_go, "[<vaddr>]", + "Continue Execution", 1, + KDB_ENABLE_REG_WRITE | KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); + kdb_register_flags("rd", kdb_rd, "", + "Display Registers", 0, + KDB_ENABLE_REG_READ); + kdb_register_flags("rm", kdb_rm, "<reg> <contents>", + "Modify Registers", 0, + KDB_ENABLE_REG_WRITE); + kdb_register_flags("ef", kdb_ef, "<vaddr>", + "Display exception frame", 0, + KDB_ENABLE_MEM_READ); + kdb_register_flags("bt", kdb_bt, "[<vaddr>]", + "Stack traceback", 1, + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); + kdb_register_flags("btp", kdb_bt, "<pid>", + "Display stack for process <pid>", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("bta", kdb_bt, "[D|R|S|T|C|Z|E|U|I|M|A]", + "Backtrace all processes matching state flag", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("btc", kdb_bt, "", + "Backtrace current process on each cpu", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("btt", kdb_bt, "<vaddr>", "Backtrace process given its struct task address", 0, - KDB_REPEAT_NONE); - kdb_register_repeat("env", kdb_env, "", - "Show environment 
variables", 0, KDB_REPEAT_NONE); - kdb_register_repeat("set", kdb_set, "", - "Set environment variables", 0, KDB_REPEAT_NONE); - kdb_register_repeat("help", kdb_help, "", - "Display Help Message", 1, KDB_REPEAT_NONE); - kdb_register_repeat("?", kdb_help, "", - "Display Help Message", 0, KDB_REPEAT_NONE); - kdb_register_repeat("cpu", kdb_cpu, "<cpunum>", - "Switch to new cpu", 0, KDB_REPEAT_NONE); - kdb_register_repeat("kgdb", kdb_kgdb, "", - "Enter kgdb mode", 0, KDB_REPEAT_NONE); - kdb_register_repeat("ps", kdb_ps, "[<flags>|A]", - "Display active task list", 0, KDB_REPEAT_NONE); - kdb_register_repeat("pid", kdb_pid, "<pidnum>", - "Switch to another task", 0, KDB_REPEAT_NONE); - kdb_register_repeat("reboot", kdb_reboot, "", - "Reboot the machine immediately", 0, KDB_REPEAT_NONE); + KDB_ENABLE_MEM_READ | KDB_ENABLE_INSPECT_NO_ARGS); + kdb_register_flags("env", kdb_env, "", + "Show environment variables", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("set", kdb_set, "", + "Set environment variables", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("help", kdb_help, "", + "Display Help Message", 1, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("?", kdb_help, "", + "Display Help Message", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("cpu", kdb_cpu, "<cpunum>", + "Switch to new cpu", 0, + KDB_ENABLE_ALWAYS_SAFE_NO_ARGS); + kdb_register_flags("kgdb", kdb_kgdb, "", + "Enter kgdb mode", 0, 0); + kdb_register_flags("ps", kdb_ps, "[<flags>|A]", + "Display active task list", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("pid", kdb_pid, "<pidnum>", + "Switch to another task", 0, + KDB_ENABLE_INSPECT); + kdb_register_flags("reboot", kdb_reboot, "", + "Reboot the machine immediately", 0, + KDB_ENABLE_REBOOT); #if defined(CONFIG_MODULES) - kdb_register_repeat("lsmod", kdb_lsmod, "", - "List loaded kernel modules", 0, KDB_REPEAT_NONE); + kdb_register_flags("lsmod", kdb_lsmod, "", + "List loaded kernel modules", 0, + KDB_ENABLE_INSPECT); #endif #if defined(CONFIG_MAGIC_SYSRQ) - kdb_register_repeat("sr", kdb_sr, "<key>", - "Magic SysRq key", 0, KDB_REPEAT_NONE); + kdb_register_flags("sr", kdb_sr, "<key>", + "Magic SysRq key", 0, + KDB_ENABLE_ALWAYS_SAFE); #endif #if defined(CONFIG_PRINTK) - kdb_register_repeat("dmesg", kdb_dmesg, "[lines]", - "Display syslog buffer", 0, KDB_REPEAT_NONE); + kdb_register_flags("dmesg", kdb_dmesg, "[lines]", + "Display syslog buffer", 0, + KDB_ENABLE_ALWAYS_SAFE); #endif if (arch_kgdb_ops.enable_nmi) { - kdb_register_repeat("disable_nmi", kdb_disable_nmi, "", - "Disable NMI entry to KDB", 0, KDB_REPEAT_NONE); - } - kdb_register_repeat("defcmd", kdb_defcmd, "name \"usage\" \"help\"", - "Define a set of commands, down to endefcmd", 0, KDB_REPEAT_NONE); - kdb_register_repeat("kill", kdb_kill, "<-signal> <pid>", - "Send a signal to a process", 0, KDB_REPEAT_NONE); - kdb_register_repeat("summary", kdb_summary, "", - "Summarize the system", 4, KDB_REPEAT_NONE); - kdb_register_repeat("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", - "Display per_cpu variables", 3, KDB_REPEAT_NONE); - kdb_register_repeat("grephelp", kdb_grep_help, "", - "Display help on | grep", 0, KDB_REPEAT_NONE); + kdb_register_flags("disable_nmi", kdb_disable_nmi, "", + "Disable NMI entry to KDB", 0, + KDB_ENABLE_ALWAYS_SAFE); + } + kdb_register_flags("defcmd", kdb_defcmd, "name \"usage\" \"help\"", + "Define a set of commands, down to endefcmd", 0, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("kill", kdb_kill, "<-signal> <pid>", + "Send a signal to a process", 0, + KDB_ENABLE_SIGNAL); + 
kdb_register_flags("summary", kdb_summary, "", + "Summarize the system", 4, + KDB_ENABLE_ALWAYS_SAFE); + kdb_register_flags("per_cpu", kdb_per_cpu, "<sym> [<bytes>] [<cpu>]", + "Display per_cpu variables", 3, + KDB_ENABLE_MEM_READ); + kdb_register_flags("grephelp", kdb_grep_help, "", + "Display help on | grep", 0, + KDB_ENABLE_ALWAYS_SAFE); } /* Execute any commands defined in kdb_cmds. */ diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h index 7afd3c8..eaacd16 100644 --- a/kernel/debug/kdb/kdb_private.h +++ b/kernel/debug/kdb/kdb_private.h @@ -172,10 +172,9 @@ typedef struct _kdbtab { kdb_func_t cmd_func; /* Function to execute command */ char *cmd_usage; /* Usage String for this command */ char *cmd_help; /* Help message for this command */ - short cmd_flags; /* Parsing flags */ short cmd_minlen; /* Minimum legal # command * chars required */ - kdb_repeat_t cmd_repeat; /* Does command auto repeat on enter? */ + kdb_cmdflags_t cmd_flags; /* Command behaviour flags */ } kdbtab_t; extern int kdb_bt(int, const char **); /* KDB display back trace */ diff --git a/kernel/events/core.c b/kernel/events/core.c index 4c1ee7f..882f835 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -4461,18 +4461,14 @@ perf_output_sample_regs(struct perf_output_handle *handle, } static void perf_sample_regs_user(struct perf_regs *regs_user, - struct pt_regs *regs) + struct pt_regs *regs, + struct pt_regs *regs_user_copy) { - if (!user_mode(regs)) { - if (current->mm) - regs = task_pt_regs(current); - else - regs = NULL; - } - - if (regs) { - regs_user->abi = perf_reg_abi(current); + if (user_mode(regs)) { + regs_user->abi = perf_reg_abi(current); regs_user->regs = regs; + } else if (current->mm) { + perf_get_regs_user(regs_user, regs, regs_user_copy); } else { regs_user->abi = PERF_SAMPLE_REGS_ABI_NONE; regs_user->regs = NULL; @@ -4951,7 +4947,8 @@ void perf_prepare_sample(struct perf_event_header *header, } if (sample_type & (PERF_SAMPLE_REGS_USER | PERF_SAMPLE_STACK_USER)) - perf_sample_regs_user(&data->regs_user, regs); + perf_sample_regs_user(&data->regs_user, regs, + &data->regs_user_copy); if (sample_type & PERF_SAMPLE_REGS_USER) { /* regs dump ABI info */ diff --git a/kernel/exit.c b/kernel/exit.c index 1ea4369..6806c55 100644 --- a/kernel/exit.c +++ b/kernel/exit.c @@ -1287,9 +1287,15 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p) static int wait_consider_task(struct wait_opts *wo, int ptrace, struct task_struct *p) { + /* + * We can race with wait_task_zombie() from another thread. + * Ensure that EXIT_ZOMBIE -> EXIT_DEAD/EXIT_TRACE transition + * can't confuse the checks below. + */ + int exit_state = ACCESS_ONCE(p->exit_state); int ret; - if (unlikely(p->exit_state == EXIT_DEAD)) + if (unlikely(exit_state == EXIT_DEAD)) return 0; ret = eligible_child(wo, p); @@ -1310,7 +1316,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, return 0; } - if (unlikely(p->exit_state == EXIT_TRACE)) { + if (unlikely(exit_state == EXIT_TRACE)) { /* * ptrace == 0 means we are the natural parent. In this case * we should clear notask_error, debugger will notify us. @@ -1337,7 +1343,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace, } /* slay zombie? 
*/ - if (p->exit_state == EXIT_ZOMBIE) { + if (exit_state == EXIT_ZOMBIE) { /* we don't reap group leaders with subthreads */ if (!delay_group_leader(p)) { /* diff --git a/kernel/locking/mutex-debug.c b/kernel/locking/mutex-debug.c index 5cf6731..3ef3736 100644 --- a/kernel/locking/mutex-debug.c +++ b/kernel/locking/mutex-debug.c @@ -80,13 +80,13 @@ void debug_mutex_unlock(struct mutex *lock) DEBUG_LOCKS_WARN_ON(lock->owner != current); DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next); - mutex_clear_owner(lock); } /* * __mutex_slowpath_needs_to_unlock() is explicitly 0 for debug * mutexes so that we can do it here after we've verified state. */ + mutex_clear_owner(lock); atomic_set(&lock->count, 1); } diff --git a/kernel/range.c b/kernel/range.c index 322ea8e..82cfc28 100644 --- a/kernel/range.c +++ b/kernel/range.c @@ -113,12 +113,12 @@ static int cmp_range(const void *x1, const void *x2) { const struct range *r1 = x1; const struct range *r2 = x2; - s64 start1, start2; - start1 = r1->start; - start2 = r2->start; - - return start1 - start2; + if (r1->start < r2->start) + return -1; + if (r1->start > r2->start) + return 1; + return 0; } int clean_sort_range(struct range *range, int az) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index b5797b7..c0accc0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -7113,9 +7113,6 @@ void __init sched_init(void) #ifdef CONFIG_RT_GROUP_SCHED alloc_size += 2 * nr_cpu_ids * sizeof(void **); #endif -#ifdef CONFIG_CPUMASK_OFFSTACK - alloc_size += num_possible_cpus() * cpumask_size(); -#endif if (alloc_size) { ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT); @@ -7135,13 +7132,13 @@ void __init sched_init(void) ptr += nr_cpu_ids * sizeof(void **); #endif /* CONFIG_RT_GROUP_SCHED */ + } #ifdef CONFIG_CPUMASK_OFFSTACK - for_each_possible_cpu(i) { - per_cpu(load_balance_mask, i) = (void *)ptr; - ptr += cpumask_size(); - } -#endif /* CONFIG_CPUMASK_OFFSTACK */ + for_each_possible_cpu(i) { + per_cpu(load_balance_mask, i) = (cpumask_var_t)kzalloc_node( + cpumask_size(), GFP_KERNEL, cpu_to_node(i)); } +#endif /* CONFIG_CPUMASK_OFFSTACK */ init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime()); diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index e5db8c6..b52092f 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -570,24 +570,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se) static int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se) { - int dmiss = dl_time_before(dl_se->deadline, rq_clock(rq)); - int rorun = dl_se->runtime <= 0; - - if (!rorun && !dmiss) - return 0; - - /* - * If we are beyond our current deadline and we are still - * executing, then we have already used some of the runtime of - * the next instance. Thus, if we do not account that, we are - * stealing bandwidth from the system at each deadline miss! - */ - if (dmiss) { - dl_se->runtime = rorun ? dl_se->runtime : 0; - dl_se->runtime -= rq_clock(rq) - dl_se->deadline; - } - - return 1; + return (dl_se->runtime <= 0); } extern bool sched_rt_bandwidth_account(struct rt_rq *rt_rq); @@ -826,10 +809,10 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se, * parameters of the task might need updating. Otherwise, * we want a replenishment of its runtime. 
*/ - if (!dl_se->dl_new && flags & ENQUEUE_REPLENISH) - replenish_dl_entity(dl_se, pi_se); - else + if (dl_se->dl_new || flags & ENQUEUE_WAKEUP) update_dl_entity(dl_se, pi_se); + else if (flags & ENQUEUE_REPLENISH) + replenish_dl_entity(dl_se, pi_se); __enqueue_dl_entity(dl_se); } diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c index df2cdf7..40667cb 100644 --- a/kernel/sched/fair.c +++ b/kernel/sched/fair.c @@ -4005,6 +4005,10 @@ void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force) static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { + /* init_cfs_bandwidth() was not called */ + if (!cfs_b->throttled_cfs_rq.next) + return; + hrtimer_cancel(&cfs_b->period_timer); hrtimer_cancel(&cfs_b->slack_timer); } @@ -4424,7 +4428,7 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) * wl = S * s'_i; see (2) */ if (W > 0 && w < W) - wl = (w * tg->shares) / W; + wl = (w * (long)tg->shares) / W; else wl = tg->shares; diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index 929a733d..224e768 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2497,12 +2497,14 @@ static void ftrace_run_update_code(int command) } static void ftrace_run_modify_code(struct ftrace_ops *ops, int command, - struct ftrace_hash *old_hash) + struct ftrace_ops_hash *old_hash) { ops->flags |= FTRACE_OPS_FL_MODIFYING; - ops->old_hash.filter_hash = old_hash; + ops->old_hash.filter_hash = old_hash->filter_hash; + ops->old_hash.notrace_hash = old_hash->notrace_hash; ftrace_run_update_code(command); ops->old_hash.filter_hash = NULL; + ops->old_hash.notrace_hash = NULL; ops->flags &= ~FTRACE_OPS_FL_MODIFYING; } @@ -3579,7 +3581,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly = static int ftrace_probe_registered; -static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash) +static void __enable_ftrace_function_probe(struct ftrace_ops_hash *old_hash) { int ret; int i; @@ -3637,6 +3639,7 @@ int register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, void *data) { + struct ftrace_ops_hash old_hash_ops; struct ftrace_func_probe *entry; struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash; struct ftrace_hash *old_hash = *orig_hash; @@ -3658,6 +3661,10 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, mutex_lock(&trace_probe_ops.func_hash->regex_lock); + old_hash_ops.filter_hash = old_hash; + /* Probes only have filters */ + old_hash_ops.notrace_hash = NULL; + hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash); if (!hash) { count = -ENOMEM; @@ -3718,7 +3725,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); - __enable_ftrace_function_probe(old_hash); + __enable_ftrace_function_probe(&old_hash_ops); if (!ret) free_ftrace_hash_rcu(old_hash); @@ -4006,10 +4013,34 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove) } static void ftrace_ops_update_code(struct ftrace_ops *ops, - struct ftrace_hash *old_hash) + struct ftrace_ops_hash *old_hash) { - if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) + struct ftrace_ops *op; + + if (!ftrace_enabled) + return; + + if (ops->flags & FTRACE_OPS_FL_ENABLED) { ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash); + return; + } + + /* + * If this is the shared global_ops filter, then we need to + * check if there is another ops that shares it, is enabled. 
+ * If so, we still need to run the modify code. + */ + if (ops->func_hash != &global_ops.local_hash) + return; + + do_for_each_ftrace_op(op, ftrace_ops_list) { + if (op->func_hash == &global_ops.local_hash && + op->flags & FTRACE_OPS_FL_ENABLED) { + ftrace_run_modify_code(op, FTRACE_UPDATE_CALLS, old_hash); + /* Only need to do this once */ + return; + } + } while_for_each_ftrace_op(op); } static int @@ -4017,6 +4048,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, unsigned long ip, int remove, int reset, int enable) { struct ftrace_hash **orig_hash; + struct ftrace_ops_hash old_hash_ops; struct ftrace_hash *old_hash; struct ftrace_hash *hash; int ret; @@ -4053,9 +4085,11 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len, mutex_lock(&ftrace_lock); old_hash = *orig_hash; + old_hash_ops.filter_hash = ops->func_hash->filter_hash; + old_hash_ops.notrace_hash = ops->func_hash->notrace_hash; ret = ftrace_hash_move(ops, enable, orig_hash, hash); if (!ret) { - ftrace_ops_update_code(ops, old_hash); + ftrace_ops_update_code(ops, &old_hash_ops); free_ftrace_hash_rcu(old_hash); } mutex_unlock(&ftrace_lock); @@ -4267,6 +4301,7 @@ static void __init set_ftrace_early_filters(void) int ftrace_regex_release(struct inode *inode, struct file *file) { struct seq_file *m = (struct seq_file *)file->private_data; + struct ftrace_ops_hash old_hash_ops; struct ftrace_iterator *iter; struct ftrace_hash **orig_hash; struct ftrace_hash *old_hash; @@ -4300,10 +4335,12 @@ int ftrace_regex_release(struct inode *inode, struct file *file) mutex_lock(&ftrace_lock); old_hash = *orig_hash; + old_hash_ops.filter_hash = iter->ops->func_hash->filter_hash; + old_hash_ops.notrace_hash = iter->ops->func_hash->notrace_hash; ret = ftrace_hash_move(iter->ops, filter_hash, orig_hash, iter->hash); if (!ret) { - ftrace_ops_update_code(iter->ops, old_hash); + ftrace_ops_update_code(iter->ops, &old_hash_ops); free_ftrace_hash_rcu(old_hash); } mutex_unlock(&ftrace_lock); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 2e76797..4a9079b 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -6918,7 +6918,6 @@ void __init trace_init(void) tracepoint_printk = 0; } tracer_alloc_buffers(); - init_ftrace_syscalls(); trace_event_init(); } diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 366a78a..b03a0ea 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -2429,12 +2429,39 @@ static __init int event_trace_memsetup(void) return 0; } +static __init void +early_enable_events(struct trace_array *tr, bool disable_first) +{ + char *buf = bootup_event_buf; + char *token; + int ret; + + while (true) { + token = strsep(&buf, ","); + + if (!token) + break; + if (!*token) + continue; + + /* Restarting syscalls requires that we stop them first */ + if (disable_first) + ftrace_set_clr_event(tr, token, 0); + + ret = ftrace_set_clr_event(tr, token, 1); + if (ret) + pr_warn("Failed to enable trace event: %s\n", token); + + /* Put back the comma to allow this to be called again */ + if (buf) + *(buf - 1) = ','; + } +} + static __init int event_trace_enable(void) { struct trace_array *tr = top_trace_array(); struct ftrace_event_call **iter, *call; - char *buf = bootup_event_buf; - char *token; int ret; if (!tr) @@ -2456,18 +2483,7 @@ static __init int event_trace_enable(void) */ __trace_early_add_events(tr); - while (true) { - token = strsep(&buf, ","); - - if (!token) - break; - if (!*token) - continue; - - ret = ftrace_set_clr_event(tr, 
token, 1); - if (ret) - pr_warn("Failed to enable trace event: %s\n", token); - } + early_enable_events(tr, false); trace_printk_start_comm(); @@ -2478,6 +2494,31 @@ static __init int event_trace_enable(void) return 0; } +/* + * event_trace_enable() is called from trace_event_init() first to + * initialize events and perhaps start any events that are on the + * command line. Unfortunately, there are some events that will not + * start this early, like the system call tracepoints that need + * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable() + * is called before pid 1 starts, and this flag is never set, making + * the syscall tracepoint never get reached, but the event is enabled + * regardless (and not doing anything). + */ +static __init int event_trace_enable_again(void) +{ + struct trace_array *tr; + + tr = top_trace_array(); + if (!tr) + return -ENODEV; + + early_enable_events(tr, true); + + return 0; +} + +early_initcall(event_trace_enable_again); + static __init int event_trace_init(void) { struct trace_array *tr; diff --git a/kernel/trace/trace_kdb.c b/kernel/trace/trace_kdb.c index b0b1c44..3ccf5c2 100644 --- a/kernel/trace/trace_kdb.c +++ b/kernel/trace/trace_kdb.c @@ -132,8 +132,8 @@ static int kdb_ftdump(int argc, const char **argv) static __init int kdb_ftrace_register(void) { - kdb_register_repeat("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", - "Dump ftrace log", 0, KDB_REPEAT_NONE); + kdb_register_flags("ftdump", kdb_ftdump, "[skip_#lines] [cpu]", + "Dump ftrace log", 0, KDB_ENABLE_ALWAYS_SAFE); return 0; } diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 6202b08..beeeac9 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c @@ -1841,17 +1841,11 @@ static void pool_mayday_timeout(unsigned long __pool) * spin_lock_irq(pool->lock) which may be released and regrabbed * multiple times. Does GFP_KERNEL allocations. Called only from * manager. - * - * Return: - * %false if no action was taken and pool->lock stayed locked, %true - * otherwise. */ -static bool maybe_create_worker(struct worker_pool *pool) +static void maybe_create_worker(struct worker_pool *pool) __releases(&pool->lock) __acquires(&pool->lock) { - if (!need_to_create_worker(pool)) - return false; restart: spin_unlock_irq(&pool->lock); @@ -1877,7 +1871,6 @@ restart: */ if (need_to_create_worker(pool)) goto restart; - return true; } /** @@ -1897,16 +1890,14 @@ restart: * multiple times. Does GFP_KERNEL allocations. * * Return: - * %false if the pool don't need management and the caller can safely start - * processing works, %true indicates that the function released pool->lock - * and reacquired it to perform some management function and that the - * conditions that the caller verified while holding the lock before - * calling the function might no longer be true. + * %false if the pool doesn't need management and the caller can safely + * start processing works, %true if management function was performed and + * the conditions that the caller verified before calling the function may + * no longer be true. */ static bool manage_workers(struct worker *worker) { struct worker_pool *pool = worker->pool; - bool ret = false; /* * Anyone who successfully grabs manager_arb wins the arbitration @@ -1919,12 +1910,12 @@ static bool manage_workers(struct worker *worker) * actual management, the pool may stall indefinitely. 
*/ if (!mutex_trylock(&pool->manager_arb)) - return ret; + return false; - ret |= maybe_create_worker(pool); + maybe_create_worker(pool); mutex_unlock(&pool->manager_arb); - return ret; + return true; } /** diff --git a/lib/Kconfig.kgdb b/lib/Kconfig.kgdb index 358eb81..c635a10 100644 --- a/lib/Kconfig.kgdb +++ b/lib/Kconfig.kgdb @@ -73,6 +73,31 @@ config KGDB_KDB help KDB frontend for kernel +config KDB_DEFAULT_ENABLE + hex "KDB: Select kdb command functions to be enabled by default" + depends on KGDB_KDB + default 0x1 + help + Specifies which kdb commands are enabled by default. This may + be set to 1 or 0 to enable all commands or disable almost all + commands. + + Alternatively the following bitmask applies: + + 0x0002 - allow arbitrary reads from memory and symbol lookup + 0x0004 - allow arbitrary writes to memory + 0x0008 - allow current register state to be inspected + 0x0010 - allow current register state to be modified + 0x0020 - allow passive inspection (backtrace, process list, lsmod) + 0x0040 - allow flow control management (breakpoint, single step) + 0x0080 - enable signalling of processes + 0x0100 - allow machine to be rebooted + + The config option merely sets the default at boot time. Either + issuing 'echo X > /sys/module/kdb/parameters/cmd_enable' or + setting the kdb.cmd_enable=X kernel command line option will + override the default settings. + config KDB_KEYBOARD bool "KGDB_KDB: keyboard as input device" depends on VT && KGDB_KDB diff --git a/lib/assoc_array.c b/lib/assoc_array.c index 2404d03..03dd576 100644 --- a/lib/assoc_array.c +++ b/lib/assoc_array.c @@ -11,6 +11,7 @@ * 2 of the Licence, or (at your option) any later version. */ //#define DEBUG +#include <linux/rcupdate.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/assoc_array_priv.h> diff --git a/mm/Kconfig.debug b/mm/Kconfig.debug index 56badfc..957d3da 100644 --- a/mm/Kconfig.debug +++ b/mm/Kconfig.debug @@ -14,7 +14,6 @@ config DEBUG_PAGEALLOC depends on !KMEMCHECK select PAGE_EXTENSION select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC - select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC ---help--- Unmap pages from the kernel linear mapping after free_pages(). This results in a large slowdown, but helps to find certain types @@ -27,13 +26,5 @@ that would result in incorrect warnings of memory corruption after a resume because free pages are not saved to the suspend image. -config WANT_PAGE_DEBUG_FLAGS - bool - config PAGE_POISONING bool - select WANT_PAGE_DEBUG_FLAGS - -config PAGE_GUARD - bool - select WANT_PAGE_DEBUG_FLAGS diff --git a/mm/memcontrol.c b/mm/memcontrol.c index ef91e85..851924f 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -3043,18 +3043,6 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry, if (swap_cgroup_cmpxchg(entry, old_id, new_id) == old_id) { mem_cgroup_swap_statistics(from, false); mem_cgroup_swap_statistics(to, true); - /* - * This function is only called from task migration context now. - * It postpones page_counter and refcount handling till the end - * of task migration(mem_cgroup_clear_mc()) for performance - * improvement. But we cannot postpone css_get(to) because if - * the process that has been moved to @to does swap-in, the - * refcount of @to might be decreased to 0. - * - * We are in attach() phase, so the cgroup is guaranteed to be - * alive, so we can just call css_get().
- */ - css_get(&to->css); return 0; } return -EINVAL; @@ -4679,6 +4667,7 @@ mem_cgroup_css_alloc(struct cgroup_subsys_state *parent_css) if (parent_css == NULL) { root_mem_cgroup = memcg; page_counter_init(&memcg->memory, NULL); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, NULL); page_counter_init(&memcg->kmem, NULL); } @@ -4724,6 +4713,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) if (parent->use_hierarchy) { page_counter_init(&memcg->memory, &parent->memory); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, &parent->memsw); page_counter_init(&memcg->kmem, &parent->kmem); @@ -4733,6 +4723,7 @@ mem_cgroup_css_online(struct cgroup_subsys_state *css) */ } else { page_counter_init(&memcg->memory, NULL); + memcg->soft_limit = PAGE_COUNTER_MAX; page_counter_init(&memcg->memsw, NULL); page_counter_init(&memcg->kmem, NULL); /* @@ -4807,7 +4798,7 @@ static void mem_cgroup_css_reset(struct cgroup_subsys_state *css) mem_cgroup_resize_limit(memcg, PAGE_COUNTER_MAX); mem_cgroup_resize_memsw_limit(memcg, PAGE_COUNTER_MAX); memcg_update_kmem_limit(memcg, PAGE_COUNTER_MAX); - memcg->soft_limit = 0; + memcg->soft_limit = PAGE_COUNTER_MAX; } #ifdef CONFIG_MMU diff --git a/mm/memory.c b/mm/memory.c index ca920d1..54f3a9b 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -235,6 +235,9 @@ void tlb_gather_mmu(struct mmu_gather *tlb, struct mm_struct *mm, unsigned long static void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb) { + if (!tlb->end) + return; + tlb_flush(tlb); mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end); #ifdef CONFIG_HAVE_RCU_TABLE_FREE @@ -247,7 +250,7 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) { struct mmu_gather_batch *batch; - for (batch = &tlb->local; batch; batch = batch->next) { + for (batch = &tlb->local; batch && batch->nr; batch = batch->next) { free_pages_and_swap_cache(batch->pages, batch->nr); batch->nr = 0; } @@ -256,9 +259,6 @@ static void tlb_flush_mmu_free(struct mmu_gather *tlb) void tlb_flush_mmu(struct mmu_gather *tlb) { - if (!tlb->end) - return; - tlb_flush_mmu_tlbonly(tlb); tlb_flush_mmu_free(tlb); } @@ -2137,17 +2137,24 @@ reuse: if (!dirty_page) return ret; - /* - * Yes, Virginia, this is actually required to prevent a race - * with clear_page_dirty_for_io() from clearing the page dirty - * bit after it clear all dirty ptes, but before a racing - * do_wp_page installs a dirty pte. - * - * do_shared_fault is protected similarly. - */ if (!page_mkwrite) { - wait_on_page_locked(dirty_page); - set_page_dirty_balance(dirty_page); + struct address_space *mapping; + int dirtied; + + lock_page(dirty_page); + dirtied = set_page_dirty(dirty_page); + VM_BUG_ON_PAGE(PageAnon(dirty_page), dirty_page); + mapping = dirty_page->mapping; + unlock_page(dirty_page); + + if (dirtied && mapping) { + /* + * Some device drivers do not set page.mapping + * but still dirty their pages + */ + balance_dirty_pages_ratelimited(mapping); + } + /* file_update_time outside page_lock */ if (vma->vm_file) file_update_time(vma->vm_file); @@ -2593,7 +2600,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo if (prev && prev->vm_end == address) return prev->vm_flags & VM_GROWSDOWN ? 
0 : -ENOMEM; - expand_downwards(vma, address - PAGE_SIZE); + return expand_downwards(vma, address - PAGE_SIZE); } if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) { struct vm_area_struct *next = vma->vm_next; @@ -2602,7 +2609,7 @@ static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned lo if (next && next->vm_start == address + PAGE_SIZE) return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM; - expand_upwards(vma, address + PAGE_SIZE); + return expand_upwards(vma, address + PAGE_SIZE); } return 0; } @@ -778,10 +778,12 @@ again: remove_next = 1 + (end > next->vm_end); if (exporter && exporter->anon_vma && !importer->anon_vma) { int error; + importer->anon_vma = exporter->anon_vma; error = anon_vma_clone(importer, exporter); - if (error) + if (error) { + importer->anon_vma = NULL; return error; - importer->anon_vma = exporter->anon_vma; + } } } @@ -2099,14 +2101,17 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns { struct mm_struct *mm = vma->vm_mm; struct rlimit *rlim = current->signal->rlim; - unsigned long new_start; + unsigned long new_start, actual_size; /* address space limit tests */ if (!may_expand_vm(mm, grow)) return -ENOMEM; /* Stack limit test */ - if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + actual_size = size; + if (size && (vma->vm_flags & (VM_GROWSUP | VM_GROWSDOWN))) + actual_size -= PAGE_SIZE; + if (actual_size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) return -ENOMEM; /* mlock limit tests */ diff --git a/mm/page-writeback.c b/mm/page-writeback.c index d5d81f5..6f43352 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1541,16 +1541,6 @@ pause: bdi_start_background_writeback(bdi); } -void set_page_dirty_balance(struct page *page) -{ - if (set_page_dirty(page)) { - struct address_space *mapping = page_mapping(page); - - if (mapping) - balance_dirty_pages_ratelimited(mapping); - } -} - static DEFINE_PER_CPU(int, bdp_ratelimits); /* @@ -2123,32 +2113,25 @@ EXPORT_SYMBOL(account_page_dirtied); * page dirty in that case, but not all the buffers. This is a "bottom-up" * dirtying, whereas __set_page_dirty_buffers() is a "top-down" dirtying. * - * Most callers have locked the page, which pins the address_space in memory. - * But zap_pte_range() does not lock the page, however in that case the - * mapping is pinned by the vma's ->vm_file reference. - * - * We take care to handle the case where the page was truncated from the - * mapping by re-checking page_mapping() inside tree_lock. + * The caller must ensure this doesn't race with truncation. Most will simply + * hold the page lock, but e.g. zap_pte_range() calls with the page mapped and + * the pte lock held, which also locks out truncation. */ int __set_page_dirty_nobuffers(struct page *page) { if (!TestSetPageDirty(page)) { struct address_space *mapping = page_mapping(page); - struct address_space *mapping2; unsigned long flags; if (!mapping) return 1; spin_lock_irqsave(&mapping->tree_lock, flags); - mapping2 = page_mapping(page); - if (mapping2) { /* Race with truncate? 
*/ - BUG_ON(mapping2 != mapping); - WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); - account_page_dirtied(page, mapping); - radix_tree_tag_set(&mapping->page_tree, - page_index(page), PAGECACHE_TAG_DIRTY); - } + BUG_ON(page_mapping(page) != mapping); + WARN_ON_ONCE(!PagePrivate(page) && !PageUptodate(page)); + account_page_dirtied(page, mapping); + radix_tree_tag_set(&mapping->page_tree, page_index(page), + PAGECACHE_TAG_DIRTY); spin_unlock_irqrestore(&mapping->tree_lock, flags); if (mapping->host) { /* !PageAnon && !swapper_space */ @@ -2305,12 +2288,10 @@ int clear_page_dirty_for_io(struct page *page) /* * We carefully synchronise fault handlers against * installing a dirty pte and marking the page dirty - * at this point. We do this by having them hold the - * page lock at some point after installing their - * pte, but before marking the page dirty. - * Pages are always locked coming in here, so we get - * the desired exclusion. See mm/memory.c:do_wp_page() - * for more comments. + * at this point. We do this by having them hold the + * page lock while dirtying the page, and pages are + * always locked coming in here, so we get the desired + * exclusion. */ if (TestClearPageDirty(page)) { dec_zone_page_state(page, NR_FILE_DIRTY); @@ -72,6 +72,8 @@ static inline struct anon_vma *anon_vma_alloc(void) anon_vma = kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL); if (anon_vma) { atomic_set(&anon_vma->refcount, 1); + anon_vma->degree = 1; /* Reference for first vma */ + anon_vma->parent = anon_vma; /* * Initialise the anon_vma root to point to itself. If called * from fork, the root will be reset to the parents anon_vma. @@ -188,6 +190,8 @@ int anon_vma_prepare(struct vm_area_struct *vma) if (likely(!vma->anon_vma)) { vma->anon_vma = anon_vma; anon_vma_chain_link(vma, avc, anon_vma); + /* vma reference or self-parent link for new root */ + anon_vma->degree++; allocated = NULL; avc = NULL; } @@ -236,6 +240,14 @@ static inline void unlock_anon_vma_root(struct anon_vma *root) /* * Attach the anon_vmas from src to dst. * Returns 0 on success, -ENOMEM on failure. + * + * If dst->anon_vma is NULL this function tries to find and reuse existing + * anon_vma which has no vmas and only one child anon_vma. This prevents + * degradation of anon_vma hierarchy to endless linear chain in case of + * constantly forking task. On the other hand, an anon_vma with more than one + * child isn't reused even if there was no alive vma, thus rmap walker has a + * good chance of avoiding scanning the whole hierarchy when it searches where + * page is mapped. */ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) { @@ -256,7 +268,21 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) anon_vma = pavc->anon_vma; root = lock_anon_vma_root(root, anon_vma); anon_vma_chain_link(dst, avc, anon_vma); + + /* + * Reuse existing anon_vma if its degree lower than two, + * that means it has no vma and only one anon_vma child. + * + * Do not chose parent anon_vma, otherwise first child + * will always reuse it. Root anon_vma is never reused: + * it has self-parent reference and at least one child. 
+ */ + if (!dst->anon_vma && anon_vma != src->anon_vma && + anon_vma->degree < 2) + dst->anon_vma = anon_vma; } + if (dst->anon_vma) + dst->anon_vma->degree++; unlock_anon_vma_root(root); return 0; @@ -280,6 +306,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) if (!pvma->anon_vma) return 0; + /* Drop inherited anon_vma, we'll reuse existing or allocate new. */ + vma->anon_vma = NULL; + /* * First, attach the new VMA to the parent VMA's anon_vmas, * so rmap can find non-COWed pages in child processes. @@ -288,6 +317,10 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) if (error) return error; + /* An existing anon_vma has been reused, all done then. */ + if (vma->anon_vma) + return 0; + /* Then add our own anon_vma. */ anon_vma = anon_vma_alloc(); if (!anon_vma) @@ -301,6 +334,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) * lock any of the anon_vmas in this anon_vma tree. */ anon_vma->root = pvma->anon_vma->root; + anon_vma->parent = pvma->anon_vma; /* * With refcounts, an anon_vma can stay around longer than the * process it belongs to. The root anon_vma needs to be pinned until @@ -311,6 +345,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) vma->anon_vma = anon_vma; anon_vma_lock_write(anon_vma); anon_vma_chain_link(vma, avc, anon_vma); + anon_vma->parent->degree++; anon_vma_unlock_write(anon_vma); return 0; @@ -341,12 +376,16 @@ void unlink_anon_vmas(struct vm_area_struct *vma) * Leave empty anon_vmas on the list - we'll need * to free them outside the lock. */ - if (RB_EMPTY_ROOT(&anon_vma->rb_root)) + if (RB_EMPTY_ROOT(&anon_vma->rb_root)) { + anon_vma->parent->degree--; continue; + } list_del(&avc->same_vma); anon_vma_chain_free(avc); } + if (vma->anon_vma) + vma->anon_vma->degree--; unlock_anon_vma_root(root); /* @@ -357,6 +396,7 @@ void unlink_anon_vmas(struct vm_area_struct *vma) list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) { struct anon_vma *anon_vma = avc->anon_vma; + BUG_ON(anon_vma->degree); put_anon_vma(anon_vma); list_del(&avc->same_vma); diff --git a/mm/vmscan.c b/mm/vmscan.c index bd9a72b..ab2505c 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c @@ -2921,18 +2921,20 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining, return false; /* - * There is a potential race between when kswapd checks its watermarks - * and a process gets throttled. There is also a potential race if - * processes get throttled, kswapd wakes, a large process exits therby - * balancing the zones that causes kswapd to miss a wakeup. If kswapd - * is going to sleep, no process should be sleeping on pfmemalloc_wait - * so wake them now if necessary. If necessary, processes will wake - * kswapd and get throttled again + * The throttled processes are normally woken up in balance_pgdat() as + * soon as pfmemalloc_watermark_ok() is true. But there is a potential + * race between when kswapd checks the watermarks and a process gets + * throttled. There is also a potential race if processes get + * throttled, kswapd wakes, a large process exits thereby balancing the + * zones, which causes kswapd to exit balance_pgdat() before reaching + * the wake up checks. If kswapd is going to sleep, no process should + * be sleeping on pfmemalloc_wait, so wake them now if necessary. If + * the wake up is premature, processes will wake kswapd and get + * throttled again. 
The difference from wake ups in balance_pgdat() is + * that here we are under prepare_to_wait(). */ - if (waitqueue_active(&pgdat->pfmemalloc_wait)) { - wake_up(&pgdat->pfmemalloc_wait); - return false; - } + if (waitqueue_active(&pgdat->pfmemalloc_wait)) + wake_up_all(&pgdat->pfmemalloc_wait); return pgdat_balanced(pgdat, order, classzone_idx); } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index ab6bb2a..b24e4bb 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -685,11 +685,13 @@ static void batadv_mcast_tvlv_ogm_handler_v1(struct batadv_priv *bat_priv, if (orig_initialized) atomic_dec(&bat_priv->mcast.num_disabled); orig->capabilities |= BATADV_ORIG_CAPA_HAS_MCAST; - /* If mcast support is being switched off increase the disabled - * mcast node counter. + /* If mcast support is being switched off or if this is an initial + * OGM without mcast support then increase the disabled mcast + * node counter. */ } else if (!orig_mcast_enabled && - orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) { + (orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST || + !orig_initialized)) { atomic_inc(&bat_priv->mcast.num_disabled); orig->capabilities &= ~BATADV_ORIG_CAPA_HAS_MCAST; } @@ -738,7 +740,8 @@ void batadv_mcast_purge_orig(struct batadv_orig_node *orig) { struct batadv_priv *bat_priv = orig->bat_priv; - if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST)) + if (!(orig->capabilities & BATADV_ORIG_CAPA_HAS_MCAST) && + orig->capa_initialized & BATADV_ORIG_CAPA_HAS_MCAST) atomic_dec(&bat_priv->mcast.num_disabled); batadv_mcast_want_unsnoop_update(bat_priv, orig, BATADV_NO_FLAGS); diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c index 8d04d17..fab47f1 100644 --- a/net/batman-adv/network-coding.c +++ b/net/batman-adv/network-coding.c @@ -133,7 +133,7 @@ int batadv_nc_mesh_init(struct batadv_priv *bat_priv) if (!bat_priv->nc.decoding_hash) goto err; - batadv_hash_set_lock_class(bat_priv->nc.coding_hash, + batadv_hash_set_lock_class(bat_priv->nc.decoding_hash, &batadv_nc_decoding_hash_lock_class_key); INIT_DELAYED_WORK(&bat_priv->nc.work, batadv_nc_worker); diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c index 6a48451..bea8198 100644 --- a/net/batman-adv/originator.c +++ b/net/batman-adv/originator.c @@ -570,9 +570,6 @@ static void batadv_orig_node_free_rcu(struct rcu_head *rcu) batadv_frag_purge_orig(orig_node, NULL); - batadv_tt_global_del_orig(orig_node->bat_priv, orig_node, -1, - "originator timed out"); - if (orig_node->bat_priv->bat_algo_ops->bat_orig_free) orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node); @@ -678,6 +675,7 @@ struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv, atomic_set(&orig_node->last_ttvn, 0); orig_node->tt_buff = NULL; orig_node->tt_buff_len = 0; + orig_node->last_seen = jiffies; reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS); orig_node->bcast_seqno_reset = reset_time; #ifdef CONFIG_BATMAN_ADV_MCAST @@ -977,6 +975,9 @@ static void _batadv_purge_orig(struct batadv_priv *bat_priv) if (batadv_purge_orig_node(bat_priv, orig_node)) { batadv_gw_node_delete(bat_priv, orig_node); hlist_del_rcu(&orig_node->hash_entry); + batadv_tt_global_del_orig(orig_node->bat_priv, + orig_node, -1, + "originator timed out"); batadv_orig_node_free_ref(orig_node); continue; } diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 35f76f2..6648f32 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -443,11 
+443,13 @@ batadv_find_router(struct batadv_priv *bat_priv, router = batadv_orig_router_get(orig_node, recv_if); + if (!router) + return router; + /* only consider bonding for recv_if == BATADV_IF_DEFAULT (first hop) * and if activated. */ - if (recv_if == BATADV_IF_DEFAULT || !atomic_read(&bat_priv->bonding) || - !router) + if (!(recv_if == BATADV_IF_DEFAULT && atomic_read(&bat_priv->bonding))) return router; /* bonding: loop through the list of possible routers found diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 1f1de71..e2aa7be 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -154,7 +154,8 @@ int br_handle_frame_finish(struct sk_buff *skb) dst = NULL; if (is_broadcast_ether_addr(dest)) { - if (p->flags & BR_PROXYARP && + if (IS_ENABLED(CONFIG_INET) && + p->flags & BR_PROXYARP && skb->protocol == htons(ETH_P_ARP)) br_do_proxy_arp(skb, br, vid); diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c index 1584581..ba6eb17 100644 --- a/net/ceph/auth_x.c +++ b/net/ceph/auth_x.c @@ -676,7 +676,7 @@ static int calcu_signature(struct ceph_x_authorizer *au, int ret; char tmp_enc[40]; __le32 tmp[5] = { - 16u, msg->hdr.crc, msg->footer.front_crc, + cpu_to_le32(16), msg->hdr.crc, msg->footer.front_crc, msg->footer.middle_crc, msg->footer.data_crc, }; ret = ceph_x_encrypt(&au->session_key, &tmp, sizeof(tmp), diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c index a83062c..f2148e2 100644 --- a/net/ceph/mon_client.c +++ b/net/ceph/mon_client.c @@ -717,7 +717,7 @@ static int get_poolop_reply_buf(const char *src, size_t src_len, if (src_len != sizeof(u32) + dst_len) return -EINVAL; - buf_len = le32_to_cpu(*(u32 *)src); + buf_len = le32_to_cpu(*(__le32 *)src); if (buf_len != dst_len) return -EINVAL; diff --git a/net/core/dev.c b/net/core/dev.c index 683d493..171420e 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -7072,10 +7072,20 @@ static int dev_cpu_callback(struct notifier_block *nfb, oldsd->output_queue = NULL; oldsd->output_queue_tailp = &oldsd->output_queue; } - /* Append NAPI poll list from offline CPU. */ - if (!list_empty(&oldsd->poll_list)) { - list_splice_init(&oldsd->poll_list, &sd->poll_list); - raise_softirq_irqoff(NET_RX_SOFTIRQ); + /* Append NAPI poll list from offline CPU, with one exception : + * process_backlog() must be called by cpu owning percpu backlog. + * We properly handle process_queue & input_pkt_queue later. 
+ */ + while (!list_empty(&oldsd->poll_list)) { + struct napi_struct *napi = list_first_entry(&oldsd->poll_list, + struct napi_struct, + poll_list); + + list_del_init(&napi->poll_list); + if (napi->poll == process_backlog) + napi->state = 0; + else + ____napi_schedule(sd, napi); } raise_softirq_irqoff(NET_TX_SOFTIRQ); @@ -7086,7 +7096,7 @@ static int dev_cpu_callback(struct notifier_block *nfb, netif_rx_internal(skb); input_queue_head_incr(oldsd); } - while ((skb = __skb_dequeue(&oldsd->input_pkt_queue))) { + while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) { netif_rx_internal(skb); input_queue_head_incr(oldsd); } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 8e38f17..8d614c9 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -2043,6 +2043,12 @@ static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh) case NDTPA_BASE_REACHABLE_TIME: NEIGH_VAR_SET(p, BASE_REACHABLE_TIME, nla_get_msecs(tbp[i])); + /* update reachable_time as well, otherwise, the change will + * only be effective after the next time neigh_periodic_work + * decides to recompute it (can be multiple minutes) + */ + p->reachable_time = + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); break; case NDTPA_GC_STALETIME: NEIGH_VAR_SET(p, GC_STALETIME, @@ -2921,6 +2927,31 @@ static int neigh_proc_dointvec_unres_qlen(struct ctl_table *ctl, int write, return ret; } +static int neigh_proc_base_reachable_time(struct ctl_table *ctl, int write, + void __user *buffer, + size_t *lenp, loff_t *ppos) +{ + struct neigh_parms *p = ctl->extra2; + int ret; + + if (strcmp(ctl->procname, "base_reachable_time") == 0) + ret = neigh_proc_dointvec_jiffies(ctl, write, buffer, lenp, ppos); + else if (strcmp(ctl->procname, "base_reachable_time_ms") == 0) + ret = neigh_proc_dointvec_ms_jiffies(ctl, write, buffer, lenp, ppos); + else + ret = -1; + + if (write && ret == 0) { + /* update reachable_time as well, otherwise, the change will + * only be effective after the next time neigh_periodic_work + * decides to recompute it + */ + p->reachable_time = + neigh_rand_reach_time(NEIGH_VAR(p, BASE_REACHABLE_TIME)); + } + return ret; +} + #define NEIGH_PARMS_DATA_OFFSET(index) \ (&((struct neigh_parms *) 0)->data[index]) @@ -3047,6 +3078,19 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p, t->neigh_vars[NEIGH_VAR_RETRANS_TIME_MS].proc_handler = handler; /* ReachableTime (in milliseconds) */ t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = handler; + } else { + /* Those handlers will update p->reachable_time after + * base_reachable_time(_ms) is set to ensure the new timer starts being + * applied after the next neighbour update instead of waiting for + * neigh_periodic_work to update its value (can be multiple minutes) + * So any handler that replaces them should do this as well + */ + /* ReachableTime */ + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME].proc_handler = + neigh_proc_base_reachable_time; + /* ReachableTime (in milliseconds) */ + t->neigh_vars[NEIGH_VAR_BASE_REACHABLE_TIME_MS].proc_handler = + neigh_proc_base_reachable_time; } /* Don't export sysctls to unprivileged users */ diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c index 8a89c73..6b85adb 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c @@ -461,17 +461,13 @@ int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; - sin->sin_family = AF_UNSPEC; + memset(sin, 0, 
sizeof(*sin)); if (serr->ee.ee_origin == SO_EE_ORIGIN_ICMP || ipv4_pktinfo_prepare_errqueue(sk, skb, serr->ee.ee_origin)) { - struct inet_sock *inet = inet_sk(sk); - sin->sin_family = AF_INET; sin->sin_addr.s_addr = ip_hdr(skb)->saddr; - sin->sin_port = 0; - memset(&sin->sin_zero, 0, sizeof(sin->sin_zero)); - if (inet->cmsg_flags) + if (inet_sk(sk)->cmsg_flags) ip_cmsg_recv(msg, skb); } diff --git a/net/ipv4/netfilter/nft_redir_ipv4.c b/net/ipv4/netfilter/nft_redir_ipv4.c index ff2d23d..6ecfce6 100644 --- a/net/ipv4/netfilter/nft_redir_ipv4.c +++ b/net/ipv4/netfilter/nft_redir_ipv4.c @@ -27,10 +27,10 @@ static void nft_redir_ipv4_eval(const struct nft_expr *expr, memset(&mr, 0, sizeof(mr)); if (priv->sreg_proto_min) { - mr.range[0].min.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - mr.range[0].max.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + mr.range[0].min.all = + *(__be16 *)&data[priv->sreg_proto_min].data[0]; + mr.range[0].max.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; mr.range[0].flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 7f18262..65caf8b 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -2019,7 +2019,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) break; - if (tso_segs == 1) { + if (tso_segs == 1 || !max_segs) { if (unlikely(!tcp_nagle_test(tp, skb, mss_now, (tcp_skb_is_last(sk, skb) ? nonagle : TCP_NAGLE_PUSH)))) @@ -2032,7 +2032,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle, } limit = mss_now; - if (tso_segs > 1 && !tcp_urg_mode(tp)) + if (tso_segs > 1 && max_segs && !tcp_urg_mode(tp)) limit = tcp_mss_split_point(sk, skb, mss_now, min_t(unsigned int, cwnd_quota, diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c index 100c589..49f5e73 100644 --- a/net/ipv6/datagram.c +++ b/net/ipv6/datagram.c @@ -393,11 +393,10 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) memcpy(&errhdr.ee, &serr->ee, sizeof(struct sock_extended_err)); sin = &errhdr.offender; - sin->sin6_family = AF_UNSPEC; + memset(sin, 0, sizeof(*sin)); + if (serr->ee.ee_origin != SO_EE_ORIGIN_LOCAL) { sin->sin6_family = AF_INET6; - sin->sin6_flowinfo = 0; - sin->sin6_port = 0; if (np->rxopt.all) { if (serr->ee.ee_origin != SO_EE_ORIGIN_ICMP && serr->ee.ee_origin != SO_EE_ORIGIN_ICMP6) @@ -412,12 +411,9 @@ int ipv6_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len) ipv6_iface_scope_id(&sin->sin6_addr, IP6CB(skb)->iif); } else { - struct inet_sock *inet = inet_sk(sk); - ipv6_addr_set_v4mapped(ip_hdr(skb)->saddr, &sin->sin6_addr); - sin->sin6_scope_id = 0; - if (inet->cmsg_flags) + if (inet_sk(sk)->cmsg_flags) ip_cmsg_recv(msg, skb); } } diff --git a/net/ipv6/netfilter/nft_redir_ipv6.c b/net/ipv6/netfilter/nft_redir_ipv6.c index 2433a6b..11820b6 100644 --- a/net/ipv6/netfilter/nft_redir_ipv6.c +++ b/net/ipv6/netfilter/nft_redir_ipv6.c @@ -27,10 +27,10 @@ static void nft_redir_ipv6_eval(const struct nft_expr *expr, memset(&range, 0, sizeof(range)); if (priv->sreg_proto_min) { - range.min_proto.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - range.max_proto.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + range.min_proto.all = + *(__be16 *)&data[priv->sreg_proto_min].data[0]; + range.max_proto.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } 
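The ip_recv_error() and ipv6_recv_error() hunks above replace piecemeal clearing of the "offender" sockaddr (family, port, flowinfo, scope id and sin_zero padding set or zeroed one field at a time) with a single memset() before any field is filled in, so padding and any member a later branch forgets to touch can never carry stale kernel stack bytes back to userspace. A minimal sketch of the same pattern, assuming a hypothetical fill_offender() helper rather than the kernel functions themselves:

#include <string.h>
#include <netinet/in.h>

/* Hypothetical helper (not from the patch): report an error originator.
 * Zeroing the whole structure up front means sin6_port, sin6_flowinfo and
 * sin6_scope_id stay 0 unless a branch explicitly sets them, instead of
 * relying on every branch to clear them individually. */
static void fill_offender(struct sockaddr_in6 *sin, const struct in6_addr *src)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin6_family = AF_INET6;
	sin->sin6_addr = *src;
}

The IPv4 hunk follows the same reasoning: the explicit memset of sin->sin_zero disappears because the full-structure memset already covers it.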
diff --git a/net/ipv6/route.c b/net/ipv6/route.c index c910831..166e33b 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1160,12 +1160,9 @@ static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk, struct net *net = dev_net(dst->dev); rt6->rt6i_flags |= RTF_MODIFIED; - if (mtu < IPV6_MIN_MTU) { - u32 features = dst_metric(dst, RTAX_FEATURES); + if (mtu < IPV6_MIN_MTU) mtu = IPV6_MIN_MTU; - features |= RTAX_FEATURE_ALLFRAG; - dst_metric_set(dst, RTAX_FEATURES, features); - } + dst_metric_set(dst, RTAX_MTU, mtu); rt6_update_expires(rt6, net->ipv6.sysctl.ip6_rt_mtu_expires); } diff --git a/net/mac80211/key.c b/net/mac80211/key.c index 0bb7038..bd4e46e 100644 --- a/net/mac80211/key.c +++ b/net/mac80211/key.c @@ -140,7 +140,9 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key) if (!ret) { key->flags |= KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) sdata->crypto_tx_tailroom_needed_cnt--; WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) && @@ -188,7 +190,9 @@ static void ieee80211_key_disable_hw_accel(struct ieee80211_key *key) sta = key->sta; sdata = key->sdata; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) increment_tailroom_need_count(sdata); ret = drv_set_key(key->local, DISABLE_KEY, sdata, @@ -884,7 +888,9 @@ void ieee80211_remove_key(struct ieee80211_key_conf *keyconf) if (key->flags & KEY_FLAG_UPLOADED_TO_HARDWARE) { key->flags &= ~KEY_FLAG_UPLOADED_TO_HARDWARE; - if (!(key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) + if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) || + (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV) || + (key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE))) increment_tailroom_need_count(key->sdata); } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 2c36c47..837a406 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -1643,7 +1643,7 @@ __ieee80211_sta_handle_tspec_ac_params(struct ieee80211_sub_if_data *sdata) { struct ieee80211_local *local = sdata->local; struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; - bool ret; + bool ret = false; int ac; if (local->hw.queues < IEEE80211_NUM_ACS) diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 1d5341f..5d3daae 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c @@ -183,6 +183,8 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, struct nf_conn *ct; struct net *net; + *diff = 0; + #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets @@ -191,8 +193,6 @@ static int ip_vs_ftp_out(struct ip_vs_app *app, struct ip_vs_conn *cp, return 1; #endif - *diff = 0; - /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; @@ -322,6 +322,9 @@ static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, struct ip_vs_conn *n_cp; struct net *net; + /* no diff required for incoming packets */ + *diff = 0; + #ifdef CONFIG_IP_VS_IPV6 /* This application helper doesn't work with IPv6 yet, * so turn this into a no-op for IPv6 packets @@ -330,9 +333,6 @@ 
static int ip_vs_ftp_in(struct ip_vs_app *app, struct ip_vs_conn *cp, return 1; #endif - /* no diff required for incoming packets */ - *diff = 0; - /* Only useful for established sessions */ if (cp->state != IP_VS_TCP_S_ESTABLISHED) return 1; diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index a116748..46d1b26 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -611,16 +611,15 @@ __nf_conntrack_confirm(struct sk_buff *skb) */ NF_CT_ASSERT(!nf_ct_is_confirmed(ct)); pr_debug("Confirming conntrack %p\n", ct); - /* We have to check the DYING flag inside the lock to prevent - a race against nf_ct_get_next_corpse() possibly called from - user context, else we insert an already 'dead' hash, blocking - further use of that particular connection -JM */ + /* We have to check the DYING flag after unlink to prevent + * a race against nf_ct_get_next_corpse() possibly called from + * user context, else we insert an already 'dead' hash, blocking + * further use of that particular connection -JM. + */ + nf_ct_del_from_dying_or_unconfirmed_list(ct); - if (unlikely(nf_ct_is_dying(ct))) { - nf_conntrack_double_unlock(hash, reply_hash); - local_bh_enable(); - return NF_ACCEPT; - } + if (unlikely(nf_ct_is_dying(ct))) + goto out; /* See if there's one in the list already, including reverse: NAT could have grabbed it without realizing, since we're @@ -636,8 +635,6 @@ __nf_conntrack_confirm(struct sk_buff *skb) zone == nf_ct_zone(nf_ct_tuplehash_to_ctrack(h))) goto out; - nf_ct_del_from_dying_or_unconfirmed_list(ct); - /* Timer relative to confirmation time, not original setting time, otherwise we'd get timer wrap in weird delay cases. */ @@ -673,6 +670,7 @@ __nf_conntrack_confirm(struct sk_buff *skb) return NF_ACCEPT; out: + nf_ct_add_to_dying_list(ct); nf_conntrack_double_unlock(hash, reply_hash); NF_CT_STAT_INC(net, insert_failed); local_bh_enable(); diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index 129a8da..3b3ddb4 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -713,16 +713,12 @@ static int nft_flush_table(struct nft_ctx *ctx) struct nft_chain *chain, *nc; struct nft_set *set, *ns; - list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + list_for_each_entry(chain, &ctx->table->chains, list) { ctx->chain = chain; err = nft_delrule_by_chain(ctx); if (err < 0) goto out; - - err = nft_delchain(ctx); - if (err < 0) - goto out; } list_for_each_entry_safe(set, ns, &ctx->table->sets, list) { @@ -735,6 +731,14 @@ static int nft_flush_table(struct nft_ctx *ctx) goto out; } + list_for_each_entry_safe(chain, nc, &ctx->table->chains, list) { + ctx->chain = chain; + + err = nft_delchain(ctx); + if (err < 0) + goto out; + } + err = nft_deltable(ctx); out: return err; diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index cde4a67..c421d94 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -321,7 +321,8 @@ replay: nlh = nlmsg_hdr(skb); err = 0; - if (nlh->nlmsg_len < NLMSG_HDRLEN) { + if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) || + skb->len < nlh->nlmsg_len) { err = -EINVAL; goto ack; } @@ -469,7 +470,7 @@ static int nfnetlink_bind(struct net *net, int group) int type; if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX) - return -EINVAL; + return 0; type = nfnl_group2type[group]; diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c index afe2b0b..aff54fb1 100644 --- a/net/netfilter/nft_nat.c +++ b/net/netfilter/nft_nat.c @@ 
-65,10 +65,10 @@ static void nft_nat_eval(const struct nft_expr *expr, } if (priv->sreg_proto_min) { - range.min_proto.all = (__force __be16) - data[priv->sreg_proto_min].data[0]; - range.max_proto.all = (__force __be16) - data[priv->sreg_proto_max].data[0]; + range.min_proto.all = + *(__be16 *)&data[priv->sreg_proto_min].data[0]; + range.max_proto.all = + *(__be16 *)&data[priv->sreg_proto_max].data[0]; range.flags |= NF_NAT_RANGE_PROTO_SPECIFIED; } diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c index 84ea76c..02fdde2 100644 --- a/net/netlink/af_netlink.c +++ b/net/netlink/af_netlink.c @@ -61,6 +61,7 @@ #include <linux/rhashtable.h> #include <asm/cacheflush.h> #include <linux/hash.h> +#include <linux/genetlink.h> #include <net/net_namespace.h> #include <net/sock.h> @@ -1095,6 +1096,8 @@ static void netlink_remove(struct sock *sk) __sk_del_bind_node(sk); netlink_update_listeners(sk); } + if (sk->sk_protocol == NETLINK_GENERIC) + atomic_inc(&genl_sk_destructing_cnt); netlink_table_ungrab(); } @@ -1211,6 +1214,20 @@ static int netlink_release(struct socket *sock) * will be purged. */ + /* must not acquire netlink_table_lock in any way again before unbind + * and notifying genetlink is done as otherwise it might deadlock + */ + if (nlk->netlink_unbind) { + int i; + + for (i = 0; i < nlk->ngroups; i++) + if (test_bit(i, nlk->groups)) + nlk->netlink_unbind(sock_net(sk), i + 1); + } + if (sk->sk_protocol == NETLINK_GENERIC && + atomic_dec_return(&genl_sk_destructing_cnt) == 0) + wake_up(&genl_sk_destructing_waitq); + sock->sk = NULL; wake_up_interruptible_all(&nlk->wait); @@ -1246,13 +1263,6 @@ static int netlink_release(struct socket *sock) netlink_table_ungrab(); } - if (nlk->netlink_unbind) { - int i; - - for (i = 0; i < nlk->ngroups; i++) - if (test_bit(i, nlk->groups)) - nlk->netlink_unbind(sock_net(sk), i + 1); - } kfree(nlk->groups); nlk->groups = NULL; diff --git a/net/netlink/af_netlink.h b/net/netlink/af_netlink.h index f123a88..f1c31b3 100644 --- a/net/netlink/af_netlink.h +++ b/net/netlink/af_netlink.h @@ -2,6 +2,7 @@ #define _AF_NETLINK_H #include <linux/rhashtable.h> +#include <linux/atomic.h> #include <net/sock.h> #define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8) diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index 2e11061e..ee57459 100644 --- a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -23,6 +23,9 @@ static DEFINE_MUTEX(genl_mutex); /* serialization of message processing */ static DECLARE_RWSEM(cb_lock); +atomic_t genl_sk_destructing_cnt = ATOMIC_INIT(0); +DECLARE_WAIT_QUEUE_HEAD(genl_sk_destructing_waitq); + void genl_lock(void) { mutex_lock(&genl_mutex); @@ -435,15 +438,18 @@ int genl_unregister_family(struct genl_family *family) genl_lock_all(); - genl_unregister_mc_groups(family); - list_for_each_entry(rc, genl_family_chain(family->id), family_list) { if (family->id != rc->id || strcmp(rc->name, family->name)) continue; + genl_unregister_mc_groups(family); + list_del(&rc->family_list); family->n_ops = 0; - genl_unlock_all(); + up_write(&cb_lock); + wait_event(genl_sk_destructing_waitq, + atomic_read(&genl_sk_destructing_cnt) == 0); + genl_unlock(); kfree(family->attrbuf); genl_ctrl_event(CTRL_CMD_DELFAMILY, family, NULL, 0); @@ -985,7 +991,7 @@ static struct genl_multicast_group genl_ctrl_groups[] = { static int genl_bind(struct net *net, int group) { - int i, err = 0; + int i, err = -ENOENT; down_read(&cb_lock); for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { @@ -1014,7 +1020,6 @@ static int genl_bind(struct net *net, int 
group) static void genl_unbind(struct net *net, int group) { int i; - bool found = false; down_read(&cb_lock); for (i = 0; i < GENL_FAM_TAB_SIZE; i++) { @@ -1027,14 +1032,11 @@ static void genl_unbind(struct net *net, int group) if (f->mcast_unbind) f->mcast_unbind(net, fam_grp); - found = true; break; } } } up_read(&cb_lock); - - WARN_ON(!found); } static int __net_init genl_pernet_init(struct net *net) diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index 4e9a5f0..b07349e 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -524,7 +524,7 @@ static int ovs_packet_cmd_execute(struct sk_buff *skb, struct genl_info *info) struct vport *input_vport; int len; int err; - bool log = !a[OVS_FLOW_ATTR_PROBE]; + bool log = !a[OVS_PACKET_ATTR_PROBE]; err = -EINVAL; if (!a[OVS_PACKET_ATTR_PACKET] || !a[OVS_PACKET_ATTR_KEY] || @@ -610,6 +610,7 @@ static const struct nla_policy packet_policy[OVS_PACKET_ATTR_MAX + 1] = { [OVS_PACKET_ATTR_PACKET] = { .len = ETH_HLEN }, [OVS_PACKET_ATTR_KEY] = { .type = NLA_NESTED }, [OVS_PACKET_ATTR_ACTIONS] = { .type = NLA_NESTED }, + [OVS_PACKET_ATTR_PROBE] = { .type = NLA_FLAG }, }; static const struct genl_ops dp_packet_genl_ops[] = { diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 70bef2a..da2fae0 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -70,6 +70,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, { struct flow_stats *stats; int node = numa_node_id(); + int len = skb->len + (vlan_tx_tag_present(skb) ? VLAN_HLEN : 0); stats = rcu_dereference(flow->stats[node]); @@ -105,7 +106,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, if (likely(new_stats)) { new_stats->used = jiffies; new_stats->packet_count = 1; - new_stats->byte_count = skb->len; + new_stats->byte_count = len; new_stats->tcp_flags = tcp_flags; spin_lock_init(&new_stats->lock); @@ -120,7 +121,7 @@ void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags, stats->used = jiffies; stats->packet_count++; - stats->byte_count += skb->len; + stats->byte_count += len; stats->tcp_flags |= tcp_flags; unlock: spin_unlock(&stats->lock); diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index 53f3ebb..2034c6d 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -480,7 +480,7 @@ void ovs_vport_receive(struct vport *vport, struct sk_buff *skb, stats = this_cpu_ptr(vport->percpu_stats); u64_stats_update_begin(&stats->syncp); stats->rx_packets++; - stats->rx_bytes += skb->len; + stats->rx_bytes += skb->len + (vlan_tx_tag_present(skb) ? 
VLAN_HLEN : 0); u64_stats_update_end(&stats->syncp); OVS_CB(skb)->input_vport = vport; diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 6880f34a..9cfe2e1 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -2517,7 +2517,7 @@ static int packet_snd(struct socket *sock, struct msghdr *msg, size_t len) err = -EINVAL; if (sock->type == SOCK_DGRAM) { offset = dev_hard_header(skb, dev, ntohs(proto), addr, NULL, len); - if (unlikely(offset) < 0) + if (unlikely(offset < 0)) goto out_free; } else { if (ll_header_truncated(dev, len)) diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 2625ecc..aafe94b 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -1603,7 +1603,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, sctp_assoc_t associd = 0; sctp_cmsgs_t cmsgs = { NULL }; sctp_scope_t scope; - bool fill_sinfo_ttl = false; + bool fill_sinfo_ttl = false, wait_connect = false; struct sctp_datamsg *datamsg; int msg_flags = msg->msg_flags; __u16 sinfo_flags = 0; @@ -1943,6 +1943,7 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, if (err < 0) goto out_free; + wait_connect = true; pr_debug("%s: we associated primitively\n", __func__); } @@ -1980,6 +1981,11 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk, sctp_datamsg_put(datamsg); err = msg_len; + if (unlikely(wait_connect)) { + timeo = sock_sndtimeo(sk, msg_flags & MSG_DONTWAIT); + sctp_wait_for_connect(asoc, &timeo); + } + /* If we are already past ASSOCIATE, the lower * layers are responsible for association cleanup. */ diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c index 1cb6124..4439ac4 100644 --- a/net/sunrpc/xdr.c +++ b/net/sunrpc/xdr.c @@ -606,7 +606,7 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) struct kvec *head = buf->head; struct kvec *tail = buf->tail; int fraglen; - int new, old; + int new; if (len > buf->len) { WARN_ON_ONCE(1); @@ -629,8 +629,8 @@ void xdr_truncate_encode(struct xdr_stream *xdr, size_t len) buf->len -= fraglen; new = buf->page_base + buf->page_len; - old = new + fraglen; - xdr->page_ptr -= (old >> PAGE_SHIFT) - (new >> PAGE_SHIFT); + + xdr->page_ptr = buf->pages + (new >> PAGE_SHIFT); if (buf->page_len) { xdr->p = page_address(*xdr->page_ptr); diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c index 96ceefe..a9e174f 100644 --- a/net/tipc/bcast.c +++ b/net/tipc/bcast.c @@ -220,10 +220,11 @@ static void bclink_retransmit_pkt(u32 after, u32 to) struct sk_buff *skb; skb_queue_walk(&bcl->outqueue, skb) { - if (more(buf_seqno(skb), after)) + if (more(buf_seqno(skb), after)) { + tipc_link_retransmit(bcl, skb, mod(to - after)); break; + } } - tipc_link_retransmit(bcl, skb, mod(to - after)); } /** diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 7b83098..d39d1cb 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -1530,45 +1530,40 @@ static void reg_call_notifier(struct wiphy *wiphy, static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) { - struct ieee80211_channel *ch; struct cfg80211_chan_def chandef; struct cfg80211_registered_device *rdev = wiphy_to_rdev(wiphy); - bool ret = true; + enum nl80211_iftype iftype; wdev_lock(wdev); + iftype = wdev->iftype; + /* make sure the interface is active */ if (!wdev->netdev || !netif_running(wdev->netdev)) - goto out; + goto wdev_inactive_unlock; - switch (wdev->iftype) { + switch (iftype) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_P2P_GO: if (!wdev->beacon_interval) - goto out; - - ret = cfg80211_reg_can_beacon(wiphy, - &wdev->chandef, 
wdev->iftype); + goto wdev_inactive_unlock; + chandef = wdev->chandef; break; case NL80211_IFTYPE_ADHOC: if (!wdev->ssid_len) - goto out; - - ret = cfg80211_reg_can_beacon(wiphy, - &wdev->chandef, wdev->iftype); + goto wdev_inactive_unlock; + chandef = wdev->chandef; break; case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_P2P_CLIENT: if (!wdev->current_bss || !wdev->current_bss->pub.channel) - goto out; + goto wdev_inactive_unlock; - ch = wdev->current_bss->pub.channel; - if (rdev->ops->get_channel && - !rdev_get_channel(rdev, wdev, &chandef)) - ret = cfg80211_chandef_usable(wiphy, &chandef, - IEEE80211_CHAN_DISABLED); - else - ret = !(ch->flags & IEEE80211_CHAN_DISABLED); + if (!rdev->ops->get_channel || + rdev_get_channel(rdev, wdev, &chandef)) + cfg80211_chandef_create(&chandef, + wdev->current_bss->pub.channel, + NL80211_CHAN_NO_HT); break; case NL80211_IFTYPE_MONITOR: case NL80211_IFTYPE_AP_VLAN: @@ -1581,9 +1576,26 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev) break; } -out: wdev_unlock(wdev); - return ret; + + switch (iftype) { + case NL80211_IFTYPE_AP: + case NL80211_IFTYPE_P2P_GO: + case NL80211_IFTYPE_ADHOC: + return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); + case NL80211_IFTYPE_STATION: + case NL80211_IFTYPE_P2P_CLIENT: + return cfg80211_chandef_usable(wiphy, &chandef, + IEEE80211_CHAN_DISABLED); + default: + break; + } + + return true; + +wdev_inactive_unlock: + wdev_unlock(wdev); + return true; } static void reg_leave_invalid_chans(struct wiphy *wiphy) diff --git a/scripts/Makefile.clean b/scripts/Makefile.clean index 1bca180..627f8cb 100644 --- a/scripts/Makefile.clean +++ b/scripts/Makefile.clean @@ -42,19 +42,19 @@ __clean-files := $(extra-y) $(extra-m) $(extra-) \ __clean-files := $(filter-out $(no-clean-files), $(__clean-files)) -# as clean-files is given relative to the current directory, this adds -# a $(obj) prefix, except for absolute paths +# clean-files is given relative to the current directory, unless it +# starts with $(objtree)/ (which means "./", so do not add "./" unless +# you want to delete a file from the toplevel object directory). 
__clean-files := $(wildcard \ - $(addprefix $(obj)/, $(filter-out /%, $(__clean-files))) \ - $(filter /%, $(__clean-files))) + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(__clean-files))) \ + $(filter $(objtree)/%, $(__clean-files))) -# as clean-dirs is given relative to the current directory, this adds -# a $(obj) prefix, except for absolute paths +# same as clean-files __clean-dirs := $(wildcard \ - $(addprefix $(obj)/, $(filter-out /%, $(clean-dirs))) \ - $(filter /%, $(clean-dirs))) + $(addprefix $(obj)/, $(filter-out $(objtree)/%, $(clean-dirs))) \ + $(filter $(objtree)/%, $(clean-dirs))) # ========================================================================== diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl index 56ea99a..537c38c 100755 --- a/scripts/recordmcount.pl +++ b/scripts/recordmcount.pl @@ -255,7 +255,6 @@ if ($arch eq "x86_64") { # force flags for this arch $ld .= " -m shlelf_linux"; $objcopy .= " -O elf32-sh-linux"; - $cc .= " -m32"; } elsif ($arch eq "powerpc") { $local_regex = "^[0-9a-fA-F]+\\s+t\\s+(\\.?\\S+)"; diff --git a/security/keys/gc.c b/security/keys/gc.c index 9609a7f..c795237 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -148,12 +148,12 @@ static noinline void key_gc_unused_keys(struct list_head *keys) if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) atomic_dec(&key->user->nikeys); - key_user_put(key->user); - /* now throw away the key memory */ if (key->type->destroy) key->type->destroy(key); + key_user_put(key->user); + kfree(key->description); #ifdef KEY_DEBUGGING diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c index 3badc70..0d58018 100644 --- a/sound/firewire/amdtp.c +++ b/sound/firewire/amdtp.c @@ -21,7 +21,19 @@ #define CYCLES_PER_SECOND 8000 #define TICKS_PER_SECOND (TICKS_PER_CYCLE * CYCLES_PER_SECOND) -#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ +/* + * Nominally 3125 bytes/second, but the MIDI port's clock might be + * 1% too slow, and the bus clock 100 ppm too fast. + */ +#define MIDI_BYTES_PER_SECOND 3093 + +/* + * Several devices look only at the first eight data blocks. + * In any case, this is more than enough for the MIDI data rate. + */ +#define MAX_MIDI_RX_BLOCKS 8 + +#define TRANSFER_DELAY_TICKS 0x2e00 /* 479.17 µs */ /* isochronous header parameters */ #define ISO_DATA_LENGTH_SHIFT 16 @@ -78,8 +90,6 @@ int amdtp_stream_init(struct amdtp_stream *s, struct fw_unit *unit, s->callbacked = false; s->sync_slave = NULL; - s->rx_blocks_for_midi = UINT_MAX; - return 0; } EXPORT_SYMBOL(amdtp_stream_init); @@ -222,6 +232,14 @@ sfc_found: for (i = 0; i < pcm_channels; i++) s->pcm_positions[i] = i; s->midi_position = s->pcm_channels; + + /* + * We do not know the actual MIDI FIFO size of most devices. Just + * assume two bytes, i.e., one byte can be received over the bus while + * the previous one is transmitted over MIDI. + * (The value here is adjusted for midi_ratelimit_per_packet().) + */ + s->midi_fifo_limit = rate - MIDI_BYTES_PER_SECOND * s->syt_interval + 1; } EXPORT_SYMBOL(amdtp_stream_set_parameters); @@ -463,6 +481,36 @@ static void amdtp_fill_pcm_silence(struct amdtp_stream *s, } } +/* + * To avoid sending MIDI bytes at too high a rate, assume that the receiving + * device has a FIFO, and track how much it is filled. This values increases + * by one whenever we send one byte in a packet, but the FIFO empties at + * a constant rate independent of our packet rate. 
One packet has syt_interval + * samples, so the number of bytes that empty out of the FIFO, per packet(!), + * is MIDI_BYTES_PER_SECOND * syt_interval / sample_rate. To avoid storing + * fractional values, the values in midi_fifo_used[] are measured in bytes + * multiplied by the sample rate. + */ +static bool midi_ratelimit_per_packet(struct amdtp_stream *s, unsigned int port) +{ + int used; + + used = s->midi_fifo_used[port]; + if (used == 0) /* common shortcut */ + return true; + + used -= MIDI_BYTES_PER_SECOND * s->syt_interval; + used = max(used, 0); + s->midi_fifo_used[port] = used; + + return used < s->midi_fifo_limit; +} + +static void midi_rate_use_one_byte(struct amdtp_stream *s, unsigned int port) +{ + s->midi_fifo_used[port] += amdtp_rate_table[s->sfc]; +} + static void amdtp_fill_midi(struct amdtp_stream *s, __be32 *buffer, unsigned int frames) { @@ -470,16 +518,21 @@ static void amdtp_fill_midi(struct amdtp_stream *s, u8 *b; for (f = 0; f < frames; f++) { - buffer[s->midi_position] = 0; b = (u8 *)&buffer[s->midi_position]; port = (s->data_block_counter + f) % 8; - if ((f >= s->rx_blocks_for_midi) || - (s->midi[port] == NULL) || - (snd_rawmidi_transmit(s->midi[port], b + 1, 1) <= 0)) - b[0] = 0x80; - else + if (f < MAX_MIDI_RX_BLOCKS && + midi_ratelimit_per_packet(s, port) && + s->midi[port] != NULL && + snd_rawmidi_transmit(s->midi[port], &b[1], 1) == 1) { + midi_rate_use_one_byte(s, port); b[0] = 0x81; + } else { + b[0] = 0x80; + b[1] = 0; + } + b[2] = 0; + b[3] = 0; buffer += s->data_block_quadlets; } diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h index e6e8926..8a03a91 100644 --- a/sound/firewire/amdtp.h +++ b/sound/firewire/amdtp.h @@ -148,13 +148,12 @@ struct amdtp_stream { bool double_pcm_frames; struct snd_rawmidi_substream *midi[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; + int midi_fifo_limit; + int midi_fifo_used[AMDTP_MAX_CHANNELS_FOR_MIDI * 8]; /* quirk: fixed interval of dbc between previos/current packets. */ unsigned int tx_dbc_interval; - /* quirk: the first count of data blocks in an rx packet for MIDI */ - unsigned int rx_blocks_for_midi; - bool callbacked; wait_queue_head_t callback_wait; struct amdtp_stream *sync_slave; diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c index 1aab0a32..0ebcabf 100644 --- a/sound/firewire/bebob/bebob_stream.c +++ b/sound/firewire/bebob/bebob_stream.c @@ -484,13 +484,6 @@ int snd_bebob_stream_init_duplex(struct snd_bebob *bebob) amdtp_stream_destroy(&bebob->rx_stream); destroy_both_connections(bebob); } - /* - * The firmware for these devices ignore MIDI messages in more than - * first 8 data blocks of an received AMDTP packet. - */ - if (bebob->spec == &maudio_fw410_spec || - bebob->spec == &maudio_special_spec) - bebob->rx_stream.rx_blocks_for_midi = 8; end: return err; } diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c index b985fc5..4f440e1 100644 --- a/sound/firewire/fireworks/fireworks_stream.c +++ b/sound/firewire/fireworks/fireworks_stream.c @@ -179,11 +179,6 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw) destroy_stream(efw, &efw->tx_stream); goto end; } - /* - * Fireworks ignores MIDI messages in more than first 8 data - * blocks of an received AMDTP packet. - */ - efw->rx_stream.rx_blocks_for_midi = 8; /* set IEC61883 compliant mode (actually not fully compliant...) 
*/ err = snd_efw_command_set_tx_mode(efw, SND_EFW_TRANSPORT_MODE_IEC61883); diff --git a/sound/firewire/fireworks/fireworks_transaction.c b/sound/firewire/fireworks/fireworks_transaction.c index 255dabc..2a85e42 100644 --- a/sound/firewire/fireworks/fireworks_transaction.c +++ b/sound/firewire/fireworks/fireworks_transaction.c @@ -124,7 +124,7 @@ copy_resp_to_buf(struct snd_efw *efw, void *data, size_t length, int *rcode) spin_lock_irq(&efw->lock); t = (struct snd_efw_transaction *)data; - length = min_t(size_t, t->length * sizeof(t->length), length); + length = min_t(size_t, be32_to_cpu(t->length) * sizeof(u32), length); if (efw->push_ptr < efw->pull_ptr) capacity = (unsigned int)(efw->pull_ptr - efw->push_ptr); diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 5f13d2d..b422e40 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c @@ -3353,6 +3353,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, +{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, { .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, { .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, @@ -3413,6 +3414,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0060"); MODULE_ALIAS("snd-hda-codec-id:10de0067"); MODULE_ALIAS("snd-hda-codec-id:10de0070"); MODULE_ALIAS("snd-hda-codec-id:10de0071"); +MODULE_ALIAS("snd-hda-codec-id:10de0072"); MODULE_ALIAS("snd-hda-codec-id:10de8001"); MODULE_ALIAS("snd-hda-codec-id:11069f80"); MODULE_ALIAS("snd-hda-codec-id:11069f81"); diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c index 4f6413e..605d140 100644 --- a/sound/pci/hda/patch_sigmatel.c +++ b/sound/pci/hda/patch_sigmatel.c @@ -568,9 +568,9 @@ static void stac_store_hints(struct hda_codec *codec) spec->gpio_mask; } if (get_int_hint(codec, "gpio_dir", &spec->gpio_dir)) - spec->gpio_mask &= spec->gpio_mask; - if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) spec->gpio_dir &= spec->gpio_mask; + if (get_int_hint(codec, "gpio_data", &spec->gpio_data)) + spec->gpio_data &= spec->gpio_mask; if (get_int_hint(codec, "eapd_mask", &spec->eapd_mask)) spec->eapd_mask &= spec->gpio_mask; if (get_int_hint(codec, "gpio_mute", &spec->gpio_mute)) diff --git a/sound/usb/caiaq/audio.c b/sound/usb/caiaq/audio.c index 2728447..327f864 100644 --- a/sound/usb/caiaq/audio.c +++ b/sound/usb/caiaq/audio.c @@ -816,7 +816,7 @@ int snd_usb_caiaq_audio_init(struct snd_usb_caiaqdev *cdev) return -EINVAL; } - if (cdev->n_streams < 2) { + if (cdev->n_streams < 1) { dev_err(dev, "bogus number of streams: %d\n", cdev->n_streams); return -EINVAL; } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 41650d5..3e2ef61 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -913,6 +913,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ case USB_ID(0x046d, 0x0808): case USB_ID(0x046d, 0x0809): + case USB_ID(0x046d, 0x0819): /* Logitech Webcam C210 */ case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */ case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */ diff --git a/tools/include/asm-generic/bitops.h 
b/tools/include/asm-generic/bitops.h index 6eedba1..653d1ba 100644 --- a/tools/include/asm-generic/bitops.h +++ b/tools/include/asm-generic/bitops.h @@ -22,6 +22,8 @@ #error only <linux/bitops.h> can be included directly #endif +#include <asm-generic/bitops/hweight.h> + #include <asm-generic/bitops/atomic.h> #endif /* __TOOLS_ASM_GENERIC_BITOPS_H */ diff --git a/tools/include/asm-generic/bitops/arch_hweight.h b/tools/include/asm-generic/bitops/arch_hweight.h new file mode 100644 index 0000000..318bb2b --- /dev/null +++ b/tools/include/asm-generic/bitops/arch_hweight.h @@ -0,0 +1 @@ +#include "../../../../include/asm-generic/bitops/arch_hweight.h" diff --git a/tools/include/asm-generic/bitops/const_hweight.h b/tools/include/asm-generic/bitops/const_hweight.h new file mode 100644 index 0000000..0afd644 --- /dev/null +++ b/tools/include/asm-generic/bitops/const_hweight.h @@ -0,0 +1 @@ +#include "../../../../include/asm-generic/bitops/const_hweight.h" diff --git a/tools/include/asm-generic/bitops/hweight.h b/tools/include/asm-generic/bitops/hweight.h new file mode 100644 index 0000000..290120c --- /dev/null +++ b/tools/include/asm-generic/bitops/hweight.h @@ -0,0 +1,7 @@ +#ifndef _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ +#define _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ + +#include <asm-generic/bitops/arch_hweight.h> +#include <asm-generic/bitops/const_hweight.h> + +#endif /* _TOOLS_LINUX_ASM_GENERIC_BITOPS_HWEIGHT_H_ */ diff --git a/tools/include/linux/bitops.h b/tools/include/linux/bitops.h index 26005a15..5ad9ee1 100644 --- a/tools/include/linux/bitops.h +++ b/tools/include/linux/bitops.h @@ -1,9 +1,9 @@ #ifndef _TOOLS_LINUX_BITOPS_H_ #define _TOOLS_LINUX_BITOPS_H_ +#include <asm/types.h> #include <linux/kernel.h> #include <linux/compiler.h> -#include <asm/hweight.h> #ifndef __WORDSIZE #define __WORDSIZE (__SIZEOF_LONG__ * 8) @@ -19,6 +19,11 @@ #define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32)) #define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE) +extern unsigned int __sw_hweight8(unsigned int w); +extern unsigned int __sw_hweight16(unsigned int w); +extern unsigned int __sw_hweight32(unsigned int w); +extern unsigned long __sw_hweight64(__u64 w); + /* * Include this here because some architectures need generic_ffs/fls in * scope diff --git a/tools/lib/api/fs/debugfs.c b/tools/lib/api/fs/debugfs.c index a74fba6..86ea2d7 100644 --- a/tools/lib/api/fs/debugfs.c +++ b/tools/lib/api/fs/debugfs.c @@ -67,7 +67,7 @@ int debugfs_valid_mountpoint(const char *debugfs) if (statfs(debugfs, &st_fs) < 0) return -ENOENT; - else if (st_fs.f_type != (long) DEBUGFS_MAGIC) + else if ((long)st_fs.f_type != (long)DEBUGFS_MAGIC) return -ENOENT; return 0; diff --git a/tools/lib/api/fs/fs.c b/tools/lib/api/fs/fs.c index 65d9be3..128ef63 100644 --- a/tools/lib/api/fs/fs.c +++ b/tools/lib/api/fs/fs.c @@ -79,7 +79,7 @@ static int fs__valid_mount(const char *fs, long magic) if (statfs(fs, &st_fs) < 0) return -ENOENT; - else if (st_fs.f_type != magic) + else if ((long)st_fs.f_type != magic) return -ENOENT; return 0; diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c index 6f80360..0b0112c 100644 --- a/tools/lib/lockdep/preload.c +++ b/tools/lib/lockdep/preload.c @@ -317,7 +317,7 @@ int pthread_mutex_destroy(pthread_mutex_t *mutex) * * TODO: Hook into free() and add that check there as well. 
*/ - debug_check_no_locks_freed(mutex, mutex + sizeof(*mutex)); + debug_check_no_locks_freed(mutex, sizeof(*mutex)); __del_lock(__get_lock(mutex)); return ll_pthread_mutex_destroy(mutex); } @@ -341,7 +341,7 @@ int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) { try_init_preload(); - debug_check_no_locks_freed(rwlock, rwlock + sizeof(*rwlock)); + debug_check_no_locks_freed(rwlock, sizeof(*rwlock)); __del_lock(__get_lock(rwlock)); return ll_pthread_rwlock_destroy(rwlock); } diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST index 83e2887..fbbfdc3 100644 --- a/tools/perf/MANIFEST +++ b/tools/perf/MANIFEST @@ -6,12 +6,15 @@ tools/lib/symbol/kallsyms.c tools/lib/symbol/kallsyms.h tools/lib/util/find_next_bit.c tools/include/asm/bug.h +tools/include/asm-generic/bitops/arch_hweight.h tools/include/asm-generic/bitops/atomic.h +tools/include/asm-generic/bitops/const_hweight.h tools/include/asm-generic/bitops/__ffs.h tools/include/asm-generic/bitops/__fls.h tools/include/asm-generic/bitops/find.h tools/include/asm-generic/bitops/fls64.h tools/include/asm-generic/bitops/fls.h +tools/include/asm-generic/bitops/hweight.h tools/include/asm-generic/bitops.h tools/include/linux/bitops.h tools/include/linux/compiler.h @@ -19,6 +22,8 @@ tools/include/linux/export.h tools/include/linux/hash.h tools/include/linux/log2.h tools/include/linux/types.h +include/asm-generic/bitops/arch_hweight.h +include/asm-generic/bitops/const_hweight.h include/asm-generic/bitops/fls64.h include/asm-generic/bitops/__fls.h include/asm-generic/bitops/fls.h @@ -29,6 +34,7 @@ include/linux/list.h include/linux/hash.h include/linux/stringify.h lib/find_next_bit.c +lib/hweight.c lib/rbtree.c include/linux/swab.h arch/*/include/asm/unistd*.h diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf index 67a03a82..aa6a504 100644 --- a/tools/perf/Makefile.perf +++ b/tools/perf/Makefile.perf @@ -232,12 +232,15 @@ LIB_H += ../include/linux/hash.h LIB_H += ../../include/linux/stringify.h LIB_H += util/include/linux/bitmap.h LIB_H += ../include/linux/bitops.h +LIB_H += ../include/asm-generic/bitops/arch_hweight.h LIB_H += ../include/asm-generic/bitops/atomic.h +LIB_H += ../include/asm-generic/bitops/const_hweight.h LIB_H += ../include/asm-generic/bitops/find.h LIB_H += ../include/asm-generic/bitops/fls64.h LIB_H += ../include/asm-generic/bitops/fls.h LIB_H += ../include/asm-generic/bitops/__ffs.h LIB_H += ../include/asm-generic/bitops/__fls.h +LIB_H += ../include/asm-generic/bitops/hweight.h LIB_H += ../include/asm-generic/bitops.h LIB_H += ../include/linux/compiler.h LIB_H += ../include/linux/log2.h @@ -255,7 +258,6 @@ LIB_H += util/include/linux/linkage.h LIB_H += util/include/asm/asm-offsets.h LIB_H += ../include/asm/bug.h LIB_H += util/include/asm/byteorder.h -LIB_H += util/include/asm/hweight.h LIB_H += util/include/asm/swab.h LIB_H += util/include/asm/system.h LIB_H += util/include/asm/uaccess.h @@ -462,10 +464,12 @@ BUILTIN_OBJS += $(OUTPUT)builtin-bench.o # Benchmark modules BUILTIN_OBJS += $(OUTPUT)bench/sched-messaging.o BUILTIN_OBJS += $(OUTPUT)bench/sched-pipe.o -ifeq ($(RAW_ARCH),x86_64) +ifeq ($(ARCH), x86) +ifeq ($(IS_64_BIT), 1) BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy-x86-64-asm.o BUILTIN_OBJS += $(OUTPUT)bench/mem-memset-x86-64-asm.o endif +endif BUILTIN_OBJS += $(OUTPUT)bench/mem-memcpy.o BUILTIN_OBJS += $(OUTPUT)bench/futex-hash.o BUILTIN_OBJS += $(OUTPUT)bench/futex-wake.o @@ -743,6 +747,9 @@ $(OUTPUT)util/kallsyms.o: ../lib/symbol/kallsyms.c $(OUTPUT)PERF-CFLAGS $(OUTPUT)util/rbtree.o: ../../lib/rbtree.c 
$(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< +$(OUTPUT)util/hweight.o: ../../lib/hweight.c $(OUTPUT)PERF-CFLAGS + $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< + $(OUTPUT)util/find_next_bit.o: ../lib/util/find_next_bit.c $(OUTPUT)PERF-CFLAGS $(QUIET_CC)$(CC) -o $@ -c $(CFLAGS) -Wno-unused-parameter -DETC_PERFCONFIG='"$(ETC_PERFCONFIG_SQ)"' $< diff --git a/tools/perf/arch/powerpc/util/skip-callchain-idx.c b/tools/perf/arch/powerpc/util/skip-callchain-idx.c index 3bb50ea..0c370f8 100644 --- a/tools/perf/arch/powerpc/util/skip-callchain-idx.c +++ b/tools/perf/arch/powerpc/util/skip-callchain-idx.c @@ -103,7 +103,7 @@ static Dwarf_Frame *get_eh_frame(Dwfl_Module *mod, Dwarf_Addr pc) return NULL; } - result = dwarf_cfi_addrframe(cfi, pc, &frame); + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); if (result) { pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); return NULL; @@ -128,7 +128,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc) return NULL; } - result = dwarf_cfi_addrframe(cfi, pc, &frame); + result = dwarf_cfi_addrframe(cfi, pc-bias, &frame); if (result) { pr_debug("%s(): %s\n", __func__, dwfl_errmsg(-1)); return NULL; @@ -145,7 +145,7 @@ static Dwarf_Frame *get_dwarf_frame(Dwfl_Module *mod, Dwarf_Addr pc) * yet used) * -1 in case of errors */ -static int check_return_addr(struct dso *dso, Dwarf_Addr pc) +static int check_return_addr(struct dso *dso, u64 map_start, Dwarf_Addr pc) { int rc = -1; Dwfl *dwfl; @@ -155,6 +155,7 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc) Dwarf_Addr start = pc; Dwarf_Addr end = pc; bool signalp; + const char *exec_file = dso->long_name; dwfl = dso->dwfl; @@ -165,8 +166,10 @@ static int check_return_addr(struct dso *dso, Dwarf_Addr pc) return -1; } - if (dwfl_report_offline(dwfl, "", dso->long_name, -1) == NULL) { - pr_debug("dwfl_report_offline() failed %s\n", + mod = dwfl_report_elf(dwfl, exec_file, exec_file, -1, + map_start, false); + if (!mod) { + pr_debug("dwfl_report_elf() failed %s\n", dwarf_errmsg(-1)); /* * We normally cache the DWARF debug info and never @@ -256,10 +259,10 @@ int arch_skip_callchain_idx(struct thread *thread, struct ip_callchain *chain) return skip_slot; } - rc = check_return_addr(dso, ip); + rc = check_return_addr(dso, al.map->start, ip); - pr_debug("DSO %s, nr %" PRIx64 ", ip 0x%" PRIx64 "rc %d\n", - dso->long_name, chain->nr, ip, rc); + pr_debug("[DSO %s, sym %s, ip 0x%" PRIx64 "] rc %d\n", + dso->long_name, al.sym->name, ip, rc); if (rc == 0) { /* diff --git a/tools/perf/bench/sched-pipe.c b/tools/perf/bench/sched-pipe.c index 07a8d76..005cc28 100644 --- a/tools/perf/bench/sched-pipe.c +++ b/tools/perf/bench/sched-pipe.c @@ -19,12 +19,12 @@ #include <stdlib.h> #include <signal.h> #include <sys/wait.h> -#include <linux/unistd.h> #include <string.h> #include <errno.h> #include <assert.h> #include <sys/time.h> #include <sys/types.h> +#include <sys/syscall.h> #include <pthread.h> diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c index e7417fe..747f861 100644 --- a/tools/perf/builtin-annotate.c +++ b/tools/perf/builtin-annotate.c @@ -232,7 +232,7 @@ static int __cmd_annotate(struct perf_annotate *ann) if (nr_samples > 0) { total_nr_samples += nr_samples; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (symbol_conf.event_group && !perf_evsel__is_group_leader(pos)) diff 
--git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c index 1ce425d..1fd96c1 100644 --- a/tools/perf/builtin-diff.c +++ b/tools/perf/builtin-diff.c @@ -545,6 +545,42 @@ hist_entry__cmp_compute(struct hist_entry *left, struct hist_entry *right, return __hist_entry__cmp_compute(p_left, p_right, c); } +static int64_t +hist_entry__cmp_nop(struct hist_entry *left __maybe_unused, + struct hist_entry *right __maybe_unused) +{ + return 0; +} + +static int64_t +hist_entry__cmp_baseline(struct hist_entry *left, struct hist_entry *right) +{ + if (sort_compute) + return 0; + + if (left->stat.period == right->stat.period) + return 0; + return left->stat.period > right->stat.period ? 1 : -1; +} + +static int64_t +hist_entry__cmp_delta(struct hist_entry *left, struct hist_entry *right) +{ + return hist_entry__cmp_compute(right, left, COMPUTE_DELTA); +} + +static int64_t +hist_entry__cmp_ratio(struct hist_entry *left, struct hist_entry *right) +{ + return hist_entry__cmp_compute(right, left, COMPUTE_RATIO); +} + +static int64_t +hist_entry__cmp_wdiff(struct hist_entry *left, struct hist_entry *right) +{ + return hist_entry__cmp_compute(right, left, COMPUTE_WEIGHTED_DIFF); +} + static void insert_hist_entry_by_compute(struct rb_root *root, struct hist_entry *he, int c) @@ -605,7 +641,7 @@ static void hists__process(struct hists *hists) hists__precompute(hists); hists__compute_resort(hists); } else { - hists__output_resort(hists); + hists__output_resort(hists, NULL); } hists__fprintf(hists, true, 0, 0, 0, stdout); @@ -1038,27 +1074,35 @@ static void data__hpp_register(struct data__file *d, int idx) fmt->header = hpp__header; fmt->width = hpp__width; fmt->entry = hpp__entry_global; + fmt->cmp = hist_entry__cmp_nop; + fmt->collapse = hist_entry__cmp_nop; /* TODO more colors */ switch (idx) { case PERF_HPP_DIFF__BASELINE: fmt->color = hpp__color_baseline; + fmt->sort = hist_entry__cmp_baseline; break; case PERF_HPP_DIFF__DELTA: fmt->color = hpp__color_delta; + fmt->sort = hist_entry__cmp_delta; break; case PERF_HPP_DIFF__RATIO: fmt->color = hpp__color_ratio; + fmt->sort = hist_entry__cmp_ratio; break; case PERF_HPP_DIFF__WEIGHTED_DIFF: fmt->color = hpp__color_wdiff; + fmt->sort = hist_entry__cmp_wdiff; break; default: + fmt->sort = hist_entry__cmp_nop; break; } init_header(d, dfmt); perf_hpp__column_register(fmt); + perf_hpp__register_sort_field(fmt); } static void ui_init(void) diff --git a/tools/perf/builtin-list.c b/tools/perf/builtin-list.c index 011195e..198f3c3 100644 --- a/tools/perf/builtin-list.c +++ b/tools/perf/builtin-list.c @@ -19,7 +19,9 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) { int i; - const struct option list_options[] = { + bool raw_dump = false; + struct option list_options[] = { + OPT_BOOLEAN(0, "raw-dump", &raw_dump, "Dump raw events"), OPT_END() }; const char * const list_usage[] = { @@ -27,11 +29,18 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) NULL }; + set_option_flag(list_options, 0, "raw-dump", PARSE_OPT_HIDDEN); + argc = parse_options(argc, argv, list_options, list_usage, PARSE_OPT_STOP_AT_NON_OPTION); setup_pager(); + if (raw_dump) { + print_events(NULL, true); + return 0; + } + if (argc == 0) { print_events(NULL, false); return 0; @@ -53,8 +62,6 @@ int cmd_list(int argc, const char **argv, const char *prefix __maybe_unused) print_hwcache_events(NULL, false); else if (strcmp(argv[i], "pmu") == 0) print_pmu_events(NULL, false); - else if (strcmp(argv[i], "--raw-dump") == 0) - print_events(NULL, true); 
else { char *sep = strchr(argv[i], ':'), *s; int sep_idx; diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c index 3936760..072ae8a 100644 --- a/tools/perf/builtin-report.c +++ b/tools/perf/builtin-report.c @@ -457,6 +457,19 @@ static void report__collapse_hists(struct report *rep) ui_progress__finish(); } +static void report__output_resort(struct report *rep) +{ + struct ui_progress prog; + struct perf_evsel *pos; + + ui_progress__init(&prog, rep->nr_entries, "Sorting events for output..."); + + evlist__for_each(rep->session->evlist, pos) + hists__output_resort(evsel__hists(pos), &prog); + + ui_progress__finish(); +} + static int __cmd_report(struct report *rep) { int ret; @@ -505,13 +518,20 @@ static int __cmd_report(struct report *rep) if (session_done()) return 0; + /* + * recalculate number of entries after collapsing since it + * might be changed during the collapse phase. + */ + rep->nr_entries = 0; + evlist__for_each(session->evlist, pos) + rep->nr_entries += evsel__hists(pos)->nr_entries; + if (rep->nr_entries == 0) { ui__error("The %s file has no samples!\n", file->path); return 0; } - evlist__for_each(session->evlist, pos) - hists__output_resort(evsel__hists(pos)); + report__output_resort(rep); return report__browse_hists(rep); } diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index 0aa7747..616f0fc 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c @@ -66,7 +66,6 @@ #include <sys/utsname.h> #include <sys/mman.h> -#include <linux/unistd.h> #include <linux/types.h> static volatile int done; @@ -285,7 +284,7 @@ static void perf_top__print_sym_table(struct perf_top *top) } hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); hists__output_recalc_col_len(hists, top->print_entries - printed); putchar('\n'); @@ -554,7 +553,7 @@ static void perf_top__sort_new_samples(void *arg) } hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); } static void *display_thread_tui(void *arg) diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile index 5d4b039..648e31f 100644 --- a/tools/perf/config/Makefile +++ b/tools/perf/config/Makefile @@ -20,7 +20,7 @@ NO_PERF_REGS := 1 # Additional ARCH settings for x86 ifeq ($(ARCH),x86) - ifeq (${IS_X86_64}, 1) + ifeq (${IS_64_BIT}, 1) CFLAGS += -DHAVE_ARCH_X86_64_SUPPORT ARCH_INCLUDE = ../../arch/x86/lib/memcpy_64.S ../../arch/x86/lib/memset_64.S LIBUNWIND_LIBS = -lunwind -lunwind-x86_64 diff --git a/tools/perf/config/Makefile.arch b/tools/perf/config/Makefile.arch index 851cd01..ff95a68 100644 --- a/tools/perf/config/Makefile.arch +++ b/tools/perf/config/Makefile.arch @@ -1,7 +1,7 @@ uname_M := $(shell uname -m 2>/dev/null || echo not) -ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ +RAW_ARCH := $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/arm.*/arm/ -e s/sa110/arm/ \ -e s/s390x/s390/ -e s/parisc64/parisc/ \ -e s/ppc.*/powerpc/ -e s/mips.*/mips/ \ @@ -9,23 +9,23 @@ ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ \ -e s/tile.*/tile/ ) # Additional ARCH settings for x86 -ifeq ($(ARCH),i386) - override ARCH := x86 +ifeq ($(RAW_ARCH),i386) + ARCH ?= x86 endif -ifeq ($(ARCH),x86_64) - override ARCH := x86 - IS_X86_64 := 0 - ifeq (, $(findstring m32,$(CFLAGS))) - IS_X86_64 := $(shell echo __x86_64__ | ${CC} -E -x c - | tail -n 1) - RAW_ARCH := x86_64 +ifeq ($(RAW_ARCH),x86_64) + ARCH ?= x86 + + ifneq (, 
$(findstring m32,$(CFLAGS))) + RAW_ARCH := x86_32 endif endif -ifeq (${IS_X86_64}, 1) +ARCH ?= $(RAW_ARCH) + +LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) +ifeq ($(LP64), 1) IS_64_BIT := 1 -else ifeq ($(ARCH),x86) - IS_64_BIT := 0 else - IS_64_BIT := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1) + IS_64_BIT := 0 endif diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h index a3b13d7..6ef6816 100644 --- a/tools/perf/perf-sys.h +++ b/tools/perf/perf-sys.h @@ -6,7 +6,6 @@ #include <sys/syscall.h> #include <linux/types.h> #include <linux/perf_event.h> -#include <asm/unistd.h> #if defined(__i386__) #define mb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") diff --git a/tools/perf/tests/dwarf-unwind.c b/tools/perf/tests/dwarf-unwind.c index ab28cca..0bf06be 100644 --- a/tools/perf/tests/dwarf-unwind.c +++ b/tools/perf/tests/dwarf-unwind.c @@ -11,6 +11,9 @@ #include "thread.h" #include "callchain.h" +/* For bsearch. We try to unwind functions in shared object. */ +#include <stdlib.h> + static int mmap_handler(struct perf_tool *tool __maybe_unused, union perf_event *event, struct perf_sample *sample __maybe_unused, @@ -28,7 +31,7 @@ static int init_live_machine(struct machine *machine) mmap_handler, machine, true); } -#define MAX_STACK 6 +#define MAX_STACK 8 static int unwind_entry(struct unwind_entry *entry, void *arg) { @@ -37,6 +40,8 @@ static int unwind_entry(struct unwind_entry *entry, void *arg) static const char *funcs[MAX_STACK] = { "test__arch_unwind_sample", "unwind_thread", + "compare", + "bsearch", "krava_3", "krava_2", "krava_1", @@ -88,10 +93,37 @@ static int unwind_thread(struct thread *thread) return err; } +static int global_unwind_retval = -INT_MAX; + +__attribute__ ((noinline)) +static int compare(void *p1, void *p2) +{ + /* Any possible value should be 'thread' */ + struct thread *thread = *(struct thread **)p1; + + if (global_unwind_retval == -INT_MAX) + global_unwind_retval = unwind_thread(thread); + + return p1 - p2; +} + __attribute__ ((noinline)) static int krava_3(struct thread *thread) { - return unwind_thread(thread); + struct thread *array[2] = {thread, thread}; + void *fp = &bsearch; + /* + * make _bsearch a volatile function pointer to + * prevent potential optimization, which may expand + * bsearch and call compare directly from this function, + * instead of libc shared object. + */ + void *(*volatile _bsearch)(void *, void *, size_t, + size_t, int (*)(void *, void *)); + + _bsearch = fp; + _bsearch(array, &thread, 2, sizeof(struct thread **), compare); + return global_unwind_retval; } __attribute__ ((noinline)) diff --git a/tools/perf/tests/hists_cumulate.c b/tools/perf/tests/hists_cumulate.c index 614d5c4..8d110de 100644 --- a/tools/perf/tests/hists_cumulate.c +++ b/tools/perf/tests/hists_cumulate.c @@ -187,7 +187,7 @@ static int do_test(struct hists *hists, struct result *expected, size_t nr_expec * function since TEST_ASSERT_VAL() returns in case of failure. */ hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("use callchain: %d, cumulate callchain: %d\n", @@ -454,12 +454,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine) * 30.00% 10.00% perf perf [.] cmd_record * 20.00% 0.00% bash libc [.] malloc * 10.00% 10.00% bash [kernel] [k] page_fault - * 10.00% 10.00% perf [kernel] [k] schedule - * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open + * 10.00% 10.00% bash bash [.] 
xmalloc * 10.00% 10.00% perf [kernel] [k] page_fault - * 10.00% 10.00% perf libc [.] free * 10.00% 10.00% perf libc [.] malloc - * 10.00% 10.00% bash bash [.] xmalloc + * 10.00% 10.00% perf [kernel] [k] schedule + * 10.00% 10.00% perf libc [.] free + * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open */ struct result expected[] = { { 7000, 2000, "perf", "perf", "main" }, @@ -468,12 +468,12 @@ static int test3(struct perf_evsel *evsel, struct machine *machine) { 3000, 1000, "perf", "perf", "cmd_record" }, { 2000, 0, "bash", "libc", "malloc" }, { 1000, 1000, "bash", "[kernel]", "page_fault" }, - { 1000, 1000, "perf", "[kernel]", "schedule" }, - { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, { 1000, 1000, "perf", "libc", "free" }, { 1000, 1000, "perf", "libc", "malloc" }, - { 1000, 1000, "bash", "bash", "xmalloc" }, + { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, }; symbol_conf.use_callchain = false; @@ -537,10 +537,13 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) * malloc * main * - * 10.00% 10.00% perf [kernel] [k] schedule + * 10.00% 10.00% bash bash [.] xmalloc * | - * --- schedule - * run_command + * --- xmalloc + * malloc + * xmalloc <--- NOTE: there's a cycle + * malloc + * xmalloc * main * * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open @@ -556,6 +559,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) * run_command * main * + * 10.00% 10.00% perf [kernel] [k] schedule + * | + * --- schedule + * run_command + * main + * * 10.00% 10.00% perf libc [.] free * | * --- free @@ -570,15 +579,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) * run_command * main * - * 10.00% 10.00% bash bash [.] 
xmalloc - * | - * --- xmalloc - * malloc - * xmalloc <--- NOTE: there's a cycle - * malloc - * xmalloc - * main - * */ struct result expected[] = { { 7000, 2000, "perf", "perf", "main" }, @@ -587,12 +587,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) { 3000, 1000, "perf", "perf", "cmd_record" }, { 2000, 0, "bash", "libc", "malloc" }, { 1000, 1000, "bash", "[kernel]", "page_fault" }, - { 1000, 1000, "perf", "[kernel]", "schedule" }, + { 1000, 1000, "bash", "bash", "xmalloc" }, { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" }, { 1000, 1000, "perf", "[kernel]", "page_fault" }, + { 1000, 1000, "perf", "[kernel]", "schedule" }, { 1000, 1000, "perf", "libc", "free" }, { 1000, 1000, "perf", "libc", "malloc" }, - { 1000, 1000, "bash", "bash", "xmalloc" }, }; struct callchain_result expected_callchain[] = { { @@ -622,9 +622,12 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) { "bash", "main" }, }, }, { - 3, { { "[kernel]", "schedule" }, - { "perf", "run_command" }, - { "perf", "main" }, }, + 6, { { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "libc", "malloc" }, + { "bash", "xmalloc" }, + { "bash", "main" }, }, }, { 3, { { "[kernel]", "sys_perf_event_open" }, @@ -638,6 +641,11 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) { "perf", "main" }, }, }, { + 3, { { "[kernel]", "schedule" }, + { "perf", "run_command" }, + { "perf", "main" }, }, + }, + { 4, { { "libc", "free" }, { "perf", "cmd_record" }, { "perf", "run_command" }, @@ -649,14 +657,6 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) { "perf", "run_command" }, { "perf", "main" }, }, }, - { - 6, { { "bash", "xmalloc" }, - { "libc", "malloc" }, - { "bash", "xmalloc" }, - { "libc", "malloc" }, - { "bash", "xmalloc" }, - { "bash", "main" }, }, - }, }; symbol_conf.use_callchain = true; diff --git a/tools/perf/tests/hists_filter.c b/tools/perf/tests/hists_filter.c index 74f257a..59e53db 100644 --- a/tools/perf/tests/hists_filter.c +++ b/tools/perf/tests/hists_filter.c @@ -138,7 +138,7 @@ int test__hists_filter(void) struct hists *hists = evsel__hists(evsel); hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("Normal histogram\n"); diff --git a/tools/perf/tests/hists_output.c b/tools/perf/tests/hists_output.c index a748f2b..f554761 100644 --- a/tools/perf/tests/hists_output.c +++ b/tools/perf/tests/hists_output.c @@ -152,7 +152,7 @@ static int test1(struct perf_evsel *evsel, struct machine *machine) goto out; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); @@ -252,7 +252,7 @@ static int test2(struct perf_evsel *evsel, struct machine *machine) goto out; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); @@ -306,7 +306,7 @@ static int test3(struct perf_evsel *evsel, struct machine *machine) goto out; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); @@ -384,7 +384,7 @@ static int test4(struct perf_evsel *evsel, struct machine *machine) goto out; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + 
hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); @@ -487,7 +487,7 @@ static int test5(struct perf_evsel *evsel, struct machine *machine) goto out; hists__collapse_resort(hists, NULL); - hists__output_resort(hists); + hists__output_resort(hists, NULL); if (verbose > 2) { pr_info("[fields = %s, sort = %s]\n", field_order, sort_order); diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c index e6bb04b..788506e 100644 --- a/tools/perf/ui/browsers/hists.c +++ b/tools/perf/ui/browsers/hists.c @@ -550,7 +550,7 @@ static int hist_browser__show_callchain(struct hist_browser *browser, bool need_percent; node = rb_first(root); - need_percent = !!rb_next(node); + need_percent = node && rb_next(node); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); diff --git a/tools/perf/ui/hist.c b/tools/perf/ui/hist.c index dc0d095..482adae 100644 --- a/tools/perf/ui/hist.c +++ b/tools/perf/ui/hist.c @@ -204,6 +204,9 @@ static int __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b, if (ret) return ret; + if (a->thread != b->thread || !symbol_conf.use_callchain) + return 0; + ret = b->callchain->max_depth - a->callchain->max_depth; } return ret; diff --git a/tools/perf/ui/tui/setup.c b/tools/perf/ui/tui/setup.c index 2f61256..3c38f25 100644 --- a/tools/perf/ui/tui/setup.c +++ b/tools/perf/ui/tui/setup.c @@ -1,5 +1,8 @@ #include <signal.h> #include <stdbool.h> +#ifdef HAVE_BACKTRACE_SUPPORT +#include <execinfo.h> +#endif #include "../../util/cache.h" #include "../../util/debug.h" @@ -88,6 +91,25 @@ int ui__getch(int delay_secs) return SLkp_getkey(); } +#ifdef HAVE_BACKTRACE_SUPPORT +static void ui__signal_backtrace(int sig) +{ + void *stackdump[32]; + size_t size; + + ui__exit(false); + psignal(sig, "perf"); + + printf("-------- backtrace --------\n"); + size = backtrace(stackdump, ARRAY_SIZE(stackdump)); + backtrace_symbols_fd(stackdump, size, STDOUT_FILENO); + + exit(0); +} +#else +# define ui__signal_backtrace ui__signal +#endif + static void ui__signal(int sig) { ui__exit(false); @@ -122,8 +144,8 @@ int ui__init(void) ui_browser__init(); tui_progress__init(); - signal(SIGSEGV, ui__signal); - signal(SIGFPE, ui__signal); + signal(SIGSEGV, ui__signal_backtrace); + signal(SIGFPE, ui__signal_backtrace); signal(SIGINT, ui__signal); signal(SIGQUIT, ui__signal); signal(SIGTERM, ui__signal); diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h index 0784a94..cadbdc9 100644 --- a/tools/perf/util/annotate.h +++ b/tools/perf/util/annotate.h @@ -116,11 +116,6 @@ struct annotation { struct annotated_source *src; }; -struct sannotation { - struct annotation annotation; - struct symbol symbol; -}; - static inline struct sym_hist *annotation__histogram(struct annotation *notes, int idx) { return (((void *)¬es->src->histograms) + @@ -129,8 +124,7 @@ static inline struct sym_hist *annotation__histogram(struct annotation *notes, i static inline struct annotation *symbol__annotation(struct symbol *sym) { - struct sannotation *a = container_of(sym, struct sannotation, symbol); - return &a->annotation; + return (void *)sym - symbol_conf.priv_size; } int addr_map_symbol__inc_samples(struct addr_map_symbol *ams, int evidx); diff --git a/tools/perf/util/cache.h b/tools/perf/util/cache.h index 5cf9e1b..d04d770 100644 --- a/tools/perf/util/cache.h +++ b/tools/perf/util/cache.h @@ -71,7 +71,9 @@ extern char *perf_path(const char *fmt, ...) 
__attribute__((format (printf, 1, 2 extern char *perf_pathdup(const char *fmt, ...) __attribute__((format (printf, 1, 2))); +#ifndef __UCLIBC__ /* Matches the libc/libbsd function attribute so we declare this unconditionally: */ extern size_t strlcpy(char *dest, const char *src, size_t size); +#endif #endif /* __PERF_CACHE_H */ diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c index 64b377e..14e7a12 100644 --- a/tools/perf/util/callchain.c +++ b/tools/perf/util/callchain.c @@ -841,3 +841,33 @@ char *callchain_list__sym_name(struct callchain_list *cl, return bf; } + +static void free_callchain_node(struct callchain_node *node) +{ + struct callchain_list *list, *tmp; + struct callchain_node *child; + struct rb_node *n; + + list_for_each_entry_safe(list, tmp, &node->val, list) { + list_del(&list->list); + free(list); + } + + n = rb_first(&node->rb_root_in); + while (n) { + child = container_of(n, struct callchain_node, rb_node_in); + n = rb_next(n); + rb_erase(&child->rb_node_in, &node->rb_root_in); + + free_callchain_node(child); + free(child); + } +} + +void free_callchain(struct callchain_root *root) +{ + if (!symbol_conf.use_callchain) + return; + + free_callchain_node(&root->node); +} diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h index dbc08cf..c0ec1ac 100644 --- a/tools/perf/util/callchain.h +++ b/tools/perf/util/callchain.h @@ -198,4 +198,6 @@ static inline int arch_skip_callchain_idx(struct thread *thread __maybe_unused, char *callchain_list__sym_name(struct callchain_list *cl, char *bf, size_t bfsize, bool show_dso); +void free_callchain(struct callchain_root *root); + #endif /* __PERF_CALLCHAIN_H */ diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c index 6e88b9e..1823955 100644 --- a/tools/perf/util/hist.c +++ b/tools/perf/util/hist.c @@ -6,6 +6,7 @@ #include "evlist.h" #include "evsel.h" #include "annotate.h" +#include "ui/progress.h" #include <math.h> static bool hists__filter_entry_by_dso(struct hists *hists, @@ -303,7 +304,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template, size_t callchain_size = 0; struct hist_entry *he; - if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain) + if (symbol_conf.use_callchain) callchain_size = sizeof(struct callchain_root); he = zalloc(sizeof(*he) + callchain_size); @@ -736,7 +737,7 @@ iter_add_single_cumulative_entry(struct hist_entry_iter *iter, iter->he = he; he_cache[iter->curr++] = he; - callchain_append(he->callchain, &callchain_cursor, sample->period); + hist_entry__append_callchain(he, sample); /* * We need to re-initialize the cursor since callchain_append() @@ -809,7 +810,8 @@ iter_add_next_cumulative_entry(struct hist_entry_iter *iter, iter->he = he; he_cache[iter->curr++] = he; - callchain_append(he->callchain, &cursor, sample->period); + if (symbol_conf.use_callchain) + callchain_append(he->callchain, &cursor, sample->period); return 0; } @@ -945,6 +947,7 @@ void hist_entry__free(struct hist_entry *he) zfree(&he->mem_info); zfree(&he->stat_acc); free_srcline(he->srcline); + free_callchain(he->callchain); free(he); } @@ -987,6 +990,7 @@ static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, else p = &(*p)->rb_right; } + hists->nr_entries++; rb_link_node(&he->rb_node_in, parent, p); rb_insert_color(&he->rb_node_in, root); @@ -1024,7 +1028,10 @@ void hists__collapse_resort(struct hists *hists, struct ui_progress *prog) if (!sort__need_collapse) return; + hists->nr_entries = 0; + root = hists__get_rotate_entries_in(hists); + 
next = rb_first(root); while (next) { @@ -1119,7 +1126,7 @@ static void __hists__insert_output_entry(struct rb_root *entries, rb_insert_color(&he->rb_node, entries); } -void hists__output_resort(struct hists *hists) +void hists__output_resort(struct hists *hists, struct ui_progress *prog) { struct rb_root *root; struct rb_node *next; @@ -1148,6 +1155,9 @@ void hists__output_resort(struct hists *hists) if (!n->filtered) hists__calc_col_len(hists, n); + + if (prog) + ui_progress__update(prog, 1); } } diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h index d0ef9a1..46bd503 100644 --- a/tools/perf/util/hist.h +++ b/tools/perf/util/hist.h @@ -121,7 +121,7 @@ int hist_entry__sort_snprintf(struct hist_entry *he, char *bf, size_t size, struct hists *hists); void hist_entry__free(struct hist_entry *); -void hists__output_resort(struct hists *hists); +void hists__output_resort(struct hists *hists, struct ui_progress *prog); void hists__collapse_resort(struct hists *hists, struct ui_progress *prog); void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel); diff --git a/tools/perf/util/hweight.c b/tools/perf/util/hweight.c deleted file mode 100644 index 5c1d0d0..0000000 --- a/tools/perf/util/hweight.c +++ /dev/null @@ -1,31 +0,0 @@ -#include <linux/bitops.h> - -/** - * hweightN - returns the hamming weight of a N-bit word - * @x: the word to weigh - * - * The Hamming Weight of a number is the total number of bits set in it. - */ - -unsigned int hweight32(unsigned int w) -{ - unsigned int res = w - ((w >> 1) & 0x55555555); - res = (res & 0x33333333) + ((res >> 2) & 0x33333333); - res = (res + (res >> 4)) & 0x0F0F0F0F; - res = res + (res >> 8); - return (res + (res >> 16)) & 0x000000FF; -} - -unsigned long hweight64(__u64 w) -{ -#if BITS_PER_LONG == 32 - return hweight32((unsigned int)(w >> 32)) + hweight32((unsigned int)w); -#elif BITS_PER_LONG == 64 - __u64 res = w - ((w >> 1) & 0x5555555555555555ul); - res = (res & 0x3333333333333333ul) + ((res >> 2) & 0x3333333333333333ul); - res = (res + (res >> 4)) & 0x0F0F0F0F0F0F0F0Ful; - res = res + (res >> 8); - res = res + (res >> 16); - return (res + (res >> 32)) & 0x00000000000000FFul; -#endif -} diff --git a/tools/perf/util/include/asm/hweight.h b/tools/perf/util/include/asm/hweight.h deleted file mode 100644 index 36cf26d..0000000 --- a/tools/perf/util/include/asm/hweight.h +++ /dev/null @@ -1,8 +0,0 @@ -#ifndef PERF_HWEIGHT_H -#define PERF_HWEIGHT_H - -#include <linux/types.h> -unsigned int hweight32(unsigned int w); -unsigned long hweight64(__u64 w); - -#endif /* PERF_HWEIGHT_H */ diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c index 94de3e4..1bca3a9 100644 --- a/tools/perf/util/machine.c +++ b/tools/perf/util/machine.c @@ -389,7 +389,6 @@ static struct thread *__machine__findnew_thread(struct machine *machine, if (th != NULL) { rb_link_node(&th->rb_node, parent, p); rb_insert_color(&th->rb_node, &machine->threads); - machine->last_match = th; /* * We have to initialize map_groups separately @@ -400,9 +399,12 @@ static struct thread *__machine__findnew_thread(struct machine *machine, * leader and that would screwed the rb tree. 
*/ if (thread__init_map_groups(th, machine)) { + rb_erase(&th->rb_node, &machine->threads); thread__delete(th); return NULL; } + + machine->last_match = th; } return th; diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c index 28eb141..94a717b 100644 --- a/tools/perf/util/probe-event.c +++ b/tools/perf/util/probe-event.c @@ -495,9 +495,11 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev, } if (ntevs == 0) { /* No error but failed to find probe point. */ - pr_warning("Probe point '%s' not found.\n", + pr_warning("Probe point '%s' not found in debuginfo.\n", synthesize_perf_probe_point(&pev->point)); - return -ENOENT; + if (need_dwarf) + return -ENOENT; + return 0; } /* Error path : ntevs < 0 */ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs); @@ -2050,9 +2052,11 @@ static int write_probe_trace_event(int fd, struct probe_trace_event *tev) pr_debug("Writing event: %s\n", buf); if (!probe_event_dry_run) { ret = write(fd, buf, strlen(buf)); - if (ret <= 0) + if (ret <= 0) { + ret = -errno; pr_warning("Failed to write event: %s\n", strerror_r(errno, sbuf, sizeof(sbuf))); + } } free(buf); return ret; diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c index c7918f8..b5247d7 100644 --- a/tools/perf/util/probe-finder.c +++ b/tools/perf/util/probe-finder.c @@ -989,8 +989,24 @@ static int debuginfo__find_probes(struct debuginfo *dbg, int ret = 0; #if _ELFUTILS_PREREQ(0, 142) + Elf *elf; + GElf_Ehdr ehdr; + GElf_Shdr shdr; + /* Get the call frame information from this dwarf */ - pf->cfi = dwarf_getcfi_elf(dwarf_getelf(dbg->dbg)); + elf = dwarf_getelf(dbg->dbg); + if (elf == NULL) + return -EINVAL; + + if (gelf_getehdr(elf, &ehdr) == NULL) + return -EINVAL; + + if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) && + shdr.sh_type == SHT_PROGBITS) { + pf->cfi = dwarf_getcfi_elf(elf); + } else { + pf->cfi = dwarf_getcfi(dbg->dbg); + } #endif off = 0; diff --git a/tools/perf/util/python-ext-sources b/tools/perf/util/python-ext-sources index 16a475a..6c6a695 100644 --- a/tools/perf/util/python-ext-sources +++ b/tools/perf/util/python-ext-sources @@ -10,7 +10,7 @@ util/ctype.c util/evlist.c util/evsel.c util/cpumap.c -util/hweight.c +../../lib/hweight.c util/thread_map.c util/util.c util/xyarray.c diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c index 371219a..6edf535 100644 --- a/tools/perf/util/unwind-libunwind.c +++ b/tools/perf/util/unwind-libunwind.c @@ -185,6 +185,28 @@ static u64 elf_section_offset(int fd, const char *name) return offset; } +#ifndef NO_LIBUNWIND_DEBUG_FRAME +static int elf_is_exec(int fd, const char *name) +{ + Elf *elf; + GElf_Ehdr ehdr; + int retval = 0; + + elf = elf_begin(fd, PERF_ELF_C_READ_MMAP, NULL); + if (elf == NULL) + return 0; + if (gelf_getehdr(elf, &ehdr) == NULL) + goto out; + + retval = (ehdr.e_type == ET_EXEC); + +out: + elf_end(elf); + pr_debug("unwind: elf_is_exec(%s): %d\n", name, retval); + return retval; +} +#endif + struct table_entry { u32 start_ip_offset; u32 fde_offset; @@ -322,8 +344,12 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi, #ifndef NO_LIBUNWIND_DEBUG_FRAME /* Check the .debug_frame section for unwinding info */ if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) { + int fd = dso__data_fd(map->dso, ui->machine); + int is_exec = elf_is_exec(fd, map->dso->name); + unw_word_t base = is_exec ? 
0 : map->start; + memset(&di, 0, sizeof(di)); - if (dwarf_find_debug_frame(0, &di, ip, 0, map->dso->name, + if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name, map->start, map->end)) return dwarf_search_unwind_table(as, ip, &di, pi, need_unwind_info, arg); diff --git a/tools/testing/selftests/exec/execveat.c b/tools/testing/selftests/exec/execveat.c index d273624..e238c95 100644 --- a/tools/testing/selftests/exec/execveat.c +++ b/tools/testing/selftests/exec/execveat.c @@ -62,7 +62,7 @@ static int _check_execveat_fail(int fd, const char *path, int flags, } static int check_execveat_invoked_rc(int fd, const char *path, int flags, - int expected_rc) + int expected_rc, int expected_rc2) { int status; int rc; @@ -98,9 +98,10 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, child, status); return 1; } - if (WEXITSTATUS(status) != expected_rc) { - printf("[FAIL] (child %d exited with %d not %d)\n", - child, WEXITSTATUS(status), expected_rc); + if ((WEXITSTATUS(status) != expected_rc) && + (WEXITSTATUS(status) != expected_rc2)) { + printf("[FAIL] (child %d exited with %d not %d nor %d)\n", + child, WEXITSTATUS(status), expected_rc, expected_rc2); return 1; } printf("[OK]\n"); @@ -109,7 +110,7 @@ static int check_execveat_invoked_rc(int fd, const char *path, int flags, static int check_execveat(int fd, const char *path, int flags) { - return check_execveat_invoked_rc(fd, path, flags, 99); + return check_execveat_invoked_rc(fd, path, flags, 99, 99); } static char *concat(const char *left, const char *right) @@ -192,9 +193,15 @@ static int check_execveat_pathmax(int dot_dfd, const char *src, int is_script) * Execute as a long pathname relative to ".". If this is a script, * the interpreter will launch but fail to open the script because its * name ("/dev/fd/5/xxx....") is bigger than PATH_MAX. + * + * The failure code is usually 127 (POSIX: "If a command is not found, + * the exit status shall be 127."), but some systems give 126 (POSIX: + * "If the command name is found, but it is not an executable utility, + * the exit status shall be 126."), so allow either. */ if (is_script) - fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, 127); + fail += check_execveat_invoked_rc(dot_dfd, longpath, 0, + 127, 126); else fail += check_execveat(dot_dfd, longpath, 0); diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c index 94dae65..8519e9e 100644 --- a/tools/testing/selftests/mqueue/mq_perf_tests.c +++ b/tools/testing/selftests/mqueue/mq_perf_tests.c @@ -536,10 +536,9 @@ int main(int argc, char *argv[]) { struct mq_attr attr; char *option, *next_option; - int i, cpu; + int i, cpu, rc; struct sigaction sa; poptContext popt_context; - char rc; void *retval; main_thread = pthread_self(); diff --git a/tools/testing/selftests/vm/Makefile b/tools/testing/selftests/vm/Makefile index 4c4b1f6..077828c 100644 --- a/tools/testing/selftests/vm/Makefile +++ b/tools/testing/selftests/vm/Makefile @@ -7,7 +7,7 @@ BINARIES += transhuge-stress all: $(BINARIES) %: %.c - $(CC) $(CFLAGS) -o $@ $^ + $(CC) $(CFLAGS) -o $@ $^ -lrt run_tests: all @/bin/sh ./run_vmtests || (echo "vmtests: [FAIL]"; exit 1) |
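
Note on the hists__output_resort() hunks above: the function gains a struct ui_progress * argument so the TUI can tick a progress bar while entries are re-sorted, and every existing caller simply passes NULL. A generic sketch of that optional-progress-cookie pattern; the types and names below are made up for illustration, not perf's real ones:

#include <stdio.h>

/* Stand-in for perf's struct ui_progress. */
struct progress {
	unsigned long curr, total;
};

static void progress_update(struct progress *prog, unsigned long n)
{
	if (!prog)
		return;			/* callers without a UI pass NULL */
	prog->curr += n;
	fprintf(stderr, "\rsorting... %lu/%lu", prog->curr, prog->total);
}

/* The worker accepts a progress object but never requires one. */
static void output_resort(int nr_entries, struct progress *prog)
{
	for (int i = 0; i < nr_entries; i++) {
		/* ... re-insert entry i into the sorted output tree ... */
		progress_update(prog, 1);
	}
}

int main(void)
{
	struct progress prog = { .curr = 0, .total = 1000 };

	output_resort(1000, &prog);	/* interactive caller with a UI */
	output_resort(1000, NULL);	/* batch caller, progress is skipped */
	fprintf(stderr, "\n");
	return 0;
}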
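The tools/perf/ui/tui/setup.c hunk switches the SIGSEGV/SIGFPE handlers to a variant that dumps a stack trace through glibc's <execinfo.h>. A minimal standalone sketch of that pattern, assuming glibc; build with -rdynamic so backtrace_symbols_fd() can resolve symbol names, and note that backtrace() is not strictly async-signal-safe, which is generally tolerated in a crash-only handler:

/* gcc -g -rdynamic backtrace_demo.c -o backtrace_demo   (glibc assumed) */
#include <execinfo.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void crash_handler(int sig)
{
	void *stackdump[32];
	int depth;

	psignal(sig, "demo");			/* e.g. "demo: Segmentation fault" */
	depth = backtrace(stackdump, 32);	/* capture return addresses */
	backtrace_symbols_fd(stackdump, depth, STDERR_FILENO);
	_exit(1);				/* async-signal-safe exit */
}

int main(void)
{
	signal(SIGSEGV, crash_handler);
	signal(SIGFPE, crash_handler);
	raise(SIGSEGV);				/* provoke the handler for the demo */
	return 0;
}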
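The util/annotate.h change drops the struct sannotation wrapper and instead reads the annotation from the private area that the symbol allocator reserves in front of each struct symbol (symbol_conf.priv_size bytes). A hypothetical, self-contained sketch of that "private data before the object" layout; the struct names and fields here are illustrative, not perf's real definitions:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct annotation { unsigned long nr_samples; };
struct symbol { char name[32]; };

/* Size of the private area reserved in front of every symbol. */
static const size_t priv_size = sizeof(struct annotation);

static struct symbol *symbol_new(const char *name)
{
	char *mem = calloc(1, priv_size + sizeof(struct symbol));
	struct symbol *sym;

	if (!mem)
		return NULL;
	sym = (struct symbol *)(mem + priv_size);
	strncpy(sym->name, name, sizeof(sym->name) - 1);
	return sym;
}

/* The annotation lives priv_size bytes before the symbol itself. */
static struct annotation *symbol_annotation(struct symbol *sym)
{
	return (struct annotation *)((char *)sym - priv_size);
}

static void symbol_delete(struct symbol *sym)
{
	free((char *)sym - priv_size);	/* free the original allocation */
}

int main(void)
{
	struct symbol *sym = symbol_new("main");

	if (!sym)
		return 1;
	symbol_annotation(sym)->nr_samples = 42;
	printf("%s: %lu samples\n", sym->name, symbol_annotation(sym)->nr_samples);
	symbol_delete(sym);
	return 0;
}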
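tools/perf/util/hweight.c is deleted and python-ext-sources now pulls ../../lib/hweight.c instead, i.e. the implementation appears to move under tools/lib/. The removed hweight32() is a classic SWAR population count; a tiny standalone version of the same bit trick, for reference:

#include <stdio.h>

/* SWAR population count, same technique as the removed hweight32(). */
static unsigned int popcount32(unsigned int w)
{
	unsigned int res = w - ((w >> 1) & 0x55555555);		/* 2-bit sums  */
	res = (res & 0x33333333) + ((res >> 2) & 0x33333333);	/* 4-bit sums  */
	res = (res + (res >> 4)) & 0x0F0F0F0F;			/* 8-bit sums  */
	res = res + (res >> 8);					/* 16-bit sums */
	return (res + (res >> 16)) & 0x000000FF;		/* final count */
}

int main(void)
{
	printf("%u\n", popcount32(0xF0F00003));	/* prints 10 */
	return 0;
}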
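The unwind-libunwind.c hunk passes a zero base address to dwarf_find_debug_frame() only when the DSO is a fixed-position ET_EXEC binary, and the mapping start otherwise. A standalone sketch of the underlying libelf check (link with -lelf; the real perf helper additionally uses its own read-mmap mode and the dso file-descriptor cache):

/* gcc elf_type.c -o elf_type -lelf   (elfutils libelf assumed) */
#include <fcntl.h>
#include <gelf.h>
#include <libelf.h>
#include <stdio.h>
#include <unistd.h>

static int elf_is_exec(const char *path)
{
	GElf_Ehdr ehdr;
	Elf *elf;
	int fd, ret = 0;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 0;

	elf = elf_begin(fd, ELF_C_READ, NULL);
	if (elf && gelf_getehdr(elf, &ehdr))
		ret = (ehdr.e_type == ET_EXEC);	/* PIE and .so files are ET_DYN */

	if (elf)
		elf_end(elf);
	close(fd);
	return ret;
}

int main(int argc, char **argv)
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
		return 1;
	}
	elf_version(EV_CURRENT);	/* mandatory before the first elf_begin() */
	printf("%s: %s\n", argv[1], elf_is_exec(argv[1]) ? "ET_EXEC" : "not ET_EXEC");
	return 0;
}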
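The execveat selftest change accepts either 127 or 126 from the child, matching the two POSIX shell failure codes quoted in the added comment. A small sketch of checking a child's status against two acceptable exit codes; the shell command and paths below exist only for the demo:

#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

/* Accept either of two exit codes, mirroring check_execveat_invoked_rc(). */
static int check_child_rc(pid_t child, int expected_rc, int expected_rc2)
{
	int status;

	if (waitpid(child, &status, 0) != child || !WIFEXITED(status))
		return 1;
	if (WEXITSTATUS(status) != expected_rc &&
	    WEXITSTATUS(status) != expected_rc2) {
		printf("[FAIL] (child %d exited with %d not %d nor %d)\n",
		       (int)child, WEXITSTATUS(status), expected_rc, expected_rc2);
		return 1;
	}
	printf("[OK]\n");
	return 0;
}

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		/* Stand-in for the overlong-script case: the shell itself fails. */
		execl("/bin/sh", "sh", "-c", "exit 126", (char *)NULL);
		_exit(127);		/* exec failed: "command not found" */
	}
	return check_child_rc(child, 127, 126);
}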