author		David S. Miller <davem@davemloft.net>	2014-03-14 22:31:55 -0400
committer	David S. Miller <davem@davemloft.net>	2014-03-14 22:31:55 -0400
commit		85dcce7a73f1cc59f7a96fe52713b1630f4ca272 (patch)
tree		6c645923eb2f0152073b90685ce80e46cfb7afed
parent		4c4e4113db249c828fffb286bc95ffb255e081f5 (diff)
parent		a4ecdf82f8ea49f7d3a072121dcbd0bf3a7cb93a (diff)
download	op-kernel-dev-85dcce7a73f1cc59f7a96fe52713b1630f4ca272.zip
		op-kernel-dev-85dcce7a73f1cc59f7a96fe52713b1630f4ca272.tar.gz
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/usb/r8152.c
drivers/net/xen-netback/netback.c
Both the r8152 and netback conflicts were simple overlapping
changes.
Signed-off-by: David S. Miller <davem@davemloft.net>
261 files changed, 2200 insertions, 1635 deletions
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index e6b72d3..68c0f51 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -124,12 +124,11 @@ the default being 204800 sectors (or 100MB).
 Updating on-disk metadata
 -------------------------
 
-On-disk metadata is committed every time a REQ_SYNC or REQ_FUA bio is
-written.  If no such requests are made then commits will occur every
-second.  This means the cache behaves like a physical disk that has a
-write cache (the same is true of the thin-provisioning target).  If
-power is lost you may lose some recent writes.  The metadata should
-always be consistent in spite of any crash.
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second.  This
+means the cache behaves like a physical disk that has a volatile write
+cache.  If power is lost you may lose some recent writes.  The metadata
+should always be consistent in spite of any crash.
 
 The 'dirty' state for a cache block changes far too frequently for us
 to keep updating it on the fly.  So we treat it as a hint.  In normal
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 8a7a3d4..05a27e9 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -116,6 +116,35 @@ Resuming a device with a new table itself triggers an event so the
 userspace daemon can use this to detect a situation where a new table
 already exceeds the threshold.
 
+A low water mark for the metadata device is maintained in the kernel and
+will trigger a dm event if free space on the metadata device drops below
+it.
+
+Updating on-disk metadata
+-------------------------
+
+On-disk metadata is committed every time a FLUSH or FUA bio is written.
+If no such requests are made then commits will occur every second.  This
+means the thin-provisioning target behaves like a physical disk that has
+a volatile write cache.  If power is lost you may lose some recent
+writes.  The metadata should always be consistent in spite of any crash.
+
+If data space is exhausted the pool will either error or queue IO
+according to the configuration (see: error_if_no_space).  If metadata
+space is exhausted or a metadata operation fails: the pool will error IO
+until the pool is taken offline and repair is performed to 1) fix any
+potential inconsistencies and 2) clear the flag that imposes repair.
+Once the pool's metadata device is repaired it may be resized, which
+will allow the pool to return to normal operation.  Note that if a pool
+is flagged as needing repair, the pool's data and metadata devices
+cannot be resized until repair is performed.  It should also be noted
+that when the pool's metadata space is exhausted the current metadata
+transaction is aborted.  Given that the pool will cache IO whose
+completion may have already been acknowledged to upper IO layers
+(e.g. filesystem) it is strongly suggested that consistency checks
+(e.g. fsck) be performed on those layers when repair of the pool is
+required.
+
 Thin provisioning
 -----------------
 
@@ -258,10 +287,9 @@ ii) Status
     should register for the event and then check the target's status.
 
     held metadata root:
-	The location, in sectors, of the metadata root that has been
+	The location, in blocks, of the metadata root that has been
 	'held' for userspace read access.  '-' indicates there is no
-	held root.  This feature is not yet implemented so '-' is
-	always returned.
+	held root.
 
     discard_passdown|no_discard_passdown
 	Whether or not discards are actually being passed down to the
 	underlying device.
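[Editorial note] The volatile-write-cache behaviour described in both documents above has a direct consequence for applications: nothing is durable until a FLUSH or FUA bio reaches the target. A minimal, illustrative C sketch follows; the path is a hypothetical file on a filesystem mounted on a thin volume, and fsync() is what ultimately causes the filesystem to issue the flush that forces a metadata commit.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	/* Hypothetical path: any file on a filesystem backed by dm-thin. */
	const char *path = "/mnt/thin/data.bin";
	char buf[4096];
	int fd;

	memset(buf, 0xa5, sizeof(buf));

	fd = open(path, O_WRONLY | O_CREAT, 0644);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	if (write(fd, buf, sizeof(buf)) != (ssize_t)sizeof(buf)) {
		perror("write");
		return EXIT_FAILURE;
	}

	/*
	 * Without this, the write may sit in the volatile write cache
	 * described above for up to a second before the next periodic
	 * commit; fsync() issues the FLUSH that makes it durable.
	 */
	if (fsync(fd) < 0) {
		perror("fsync");
		return EXIT_FAILURE;
	}

	close(fd);
	return EXIT_SUCCESS;
}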
diff --git a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
index 9e9e9ef..c119deb 100644
--- a/Documentation/devicetree/bindings/pinctrl/brcm,capri-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/brcm,bcm11351-pinctrl.txt
@@ -1,4 +1,4 @@
-Broadcom Capri Pin Controller
+Broadcom BCM281xx Pin Controller
 
 This is a pin controller for the Broadcom BCM281xx SoC family, which includes
 BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
@@ -7,14 +7,14 @@ BCM11130, BCM11140, BCM11351, BCM28145, and BCM28155 SoCs.
 
 Required Properties:
 
-- compatible:	Must be "brcm,capri-pinctrl".
+- compatible:	Must be "brcm,bcm11351-pinctrl"
 - reg:		Base address of the PAD Controller register block and the size
 		of the block.
 
 For example, the following is the bare minimum node:
 
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcm,bcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 	};
 
@@ -119,7 +119,7 @@ Optional Properties (for HDMI pins):
 Example:
 	// pin controller node
 	pinctrl@35004800 {
-		compatible = "brcm,capri-pinctrl";
+		compatible = "brcmbcm11351-pinctrl";
 		reg = <0x35004800 0x430>;
 
 		// pin configuration node
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index 1404674..6fea79e 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -453,7 +453,7 @@ TP_STATUS_COPY        : This flag indicates that the frame (and associated
                         enabled previously with setsockopt() and
                         the PACKET_COPY_THRESH option.
 
-                        The number of frames than can be buffered to
+                        The number of frames that can be buffered to
                         be read with recvfrom is limited like a normal socket.
                         See the SO_RCVBUF option in the socket (7) man page.
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 661d3c3..048c92b 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -21,26 +21,38 @@ has such a feature).
 
 SO_TIMESTAMPING:
 
-Instructs the socket layer which kind of information is wanted. The
-parameter is an integer with some of the following bits set. Setting
-other bits is an error and doesn't change the current state.
-
-SOF_TIMESTAMPING_TX_HARDWARE:  try to obtain send time stamp in hardware
-SOF_TIMESTAMPING_TX_SOFTWARE:  if SOF_TIMESTAMPING_TX_HARDWARE is off or
-                               fails, then do it in software
-SOF_TIMESTAMPING_RX_HARDWARE:  return the original, unmodified time stamp
-                               as generated by the hardware
-SOF_TIMESTAMPING_RX_SOFTWARE:  if SOF_TIMESTAMPING_RX_HARDWARE is off or
-                               fails, then do it in software
-SOF_TIMESTAMPING_RAW_HARDWARE: return original raw hardware time stamp
-SOF_TIMESTAMPING_SYS_HARDWARE: return hardware time stamp transformed to
-                               the system time base
-SOF_TIMESTAMPING_SOFTWARE:     return system time stamp generated in
-                               software
-
-SOF_TIMESTAMPING_TX/RX determine how time stamps are generated.
-SOF_TIMESTAMPING_RAW/SYS determine how they are reported in the
-following control message:
+Instructs the socket layer which kind of information should be collected
+and/or reported.  The parameter is an integer with some of the following
+bits set. Setting other bits is an error and doesn't change the current
+state.
+
+Four of the bits are requests to the stack to try to generate
+timestamps.  Any combination of them is valid.
+
+SOF_TIMESTAMPING_TX_HARDWARE:  try to obtain send time stamps in hardware
+SOF_TIMESTAMPING_TX_SOFTWARE:  try to obtain send time stamps in software
+SOF_TIMESTAMPING_RX_HARDWARE:  try to obtain receive time stamps in hardware
+SOF_TIMESTAMPING_RX_SOFTWARE:  try to obtain receive time stamps in software
+
+The other three bits control which timestamps will be reported in a
+generated control message.  If none of these bits are set or if none of
+the set bits correspond to data that is available, then the control
+message will not be generated:
+
+SOF_TIMESTAMPING_SOFTWARE:     report systime if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
+
+It is worth noting that timestamps may be collected for reasons other
+than being requested by a particular socket with
+SOF_TIMESTAMPING_[TR]X_(HARD|SOFT)WARE.  For example, most drivers that
+can generate hardware receive timestamps ignore
+SOF_TIMESTAMPING_RX_HARDWARE.  It is still a good idea to set that flag
+in case future drivers pay attention.
+
+If timestamps are reported, they will appear in a control message with
+cmsg_level==SOL_SOCKET, cmsg_type==SO_TIMESTAMPING, and a payload like
+this:
 
 struct scm_timestamping {
 	struct timespec systime;
 	struct timespec hwtimetrans;
 	struct timespec hwtimeraw;
 };
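[Editorial note] The request/report split documented above maps onto a small amount of userspace code. The following sketch is illustrative, not taken from the kernel's own sample: it requests receive timestamps on a UDP socket and then extracts the scm_timestamping payload from the control message. The port number is arbitrary, and the struct definition is copied from the documentation above (later kernels expose the same payload as struct timespec ts[3] in <linux/errqueue.h>).

#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <linux/net_tstamp.h>	/* SOF_TIMESTAMPING_* flags */

/* Payload layout as documented above. */
struct scm_timestamping {
	struct timespec systime;
	struct timespec hwtimetrans;
	struct timespec hwtimeraw;
};

int main(void)
{
	int flags = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RX_SOFTWARE |
		    SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
	struct sockaddr_in addr = { 0 };
	char data[2048], ctrl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = { 0 };
	struct cmsghdr *cmsg;
	int fd;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &flags, sizeof(flags)) < 0) {
		perror("socket/setsockopt");
		return 1;
	}

	addr.sin_family = AF_INET;
	addr.sin_port = htons(9000);		/* arbitrary example port */
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
		perror("bind");
		return 1;
	}

	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctrl;
	msg.msg_controllen = sizeof(ctrl);
	if (recvmsg(fd, &msg, 0) < 0) {
		perror("recvmsg");
		return 1;
	}

	/* Walk the ancillary data for the SO_TIMESTAMPING control message. */
	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_SOCKET &&
		    cmsg->cmsg_type == SO_TIMESTAMPING) {
			struct scm_timestamping ts;

			memcpy(&ts, CMSG_DATA(cmsg), sizeof(ts));
			printf("systime:   %lld.%09ld\n",
			       (long long)ts.systime.tv_sec, ts.systime.tv_nsec);
			printf("hwtimeraw: %lld.%09ld\n",
			       (long long)ts.hwtimeraw.tv_sec, ts.hwtimeraw.tv_nsec);
		}
	}

	close(fd);
	return 0;
}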
Miller" <davem@davemloft.net> @@ -6182,6 +6185,12 @@ S: Supported F: drivers/block/nvme* F: include/linux/nvme.h +NXP TDA998X DRM DRIVER +M: Russell King <rmk+kernel@arm.linux.org.uk> +S: Supported +F: drivers/gpu/drm/i2c/tda998x_drv.c +F: include/drm/i2c/tda998x.h + OMAP SUPPORT M: Tony Lindgren <tony@atomide.com> L: linux-omap@vger.kernel.org @@ -1,7 +1,7 @@ VERSION = 3 PATCHLEVEL = 14 SUBLEVEL = 0 -EXTRAVERSION = -rc5 +EXTRAVERSION = -rc6 NAME = Shuffling Zombie Juror # *DOCUMENTATION* diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c index 6b58c1d..400c663 100644 --- a/arch/arc/mm/cache_arc700.c +++ b/arch/arc/mm/cache_arc700.c @@ -282,7 +282,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, #else /* if V-P const for loop, PTAG can be written once outside loop */ if (full_page_op) - write_aux_reg(ARC_REG_DC_PTAG, paddr); + write_aux_reg(aux_tag, paddr); #endif while (num_lines-- > 0) { @@ -296,7 +296,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr, write_aux_reg(aux_cmd, vaddr); vaddr += L1_CACHE_BYTES; #else - write_aux_reg(aux, paddr); + write_aux_reg(aux_cmd, paddr); paddr += L1_CACHE_BYTES; #endif } diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index e254198..1594945 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1578,6 +1578,7 @@ config BL_SWITCHER_DUMMY_IF choice prompt "Memory split" + depends on MMU default VMSPLIT_3G help Select the desired split between kernel and user memory. @@ -1595,6 +1596,7 @@ endchoice config PAGE_OFFSET hex + default PHYS_OFFSET if !MMU default 0x40000000 if VMSPLIT_1G default 0x80000000 if VMSPLIT_2G default 0xC0000000 @@ -1903,6 +1905,7 @@ config XEN depends on ARM && AEABI && OF depends on CPU_V7 && !CPU_V6 depends on !GENERIC_ATOMIC64 + depends on MMU select ARM_PSCI select SWIOTLB_XEN select ARCH_DMA_ADDR_T_64BIT diff --git a/arch/arm/boot/compressed/.gitignore b/arch/arm/boot/compressed/.gitignore index 47279aa..0714e03 100644 --- a/arch/arm/boot/compressed/.gitignore +++ b/arch/arm/boot/compressed/.gitignore @@ -1,4 +1,5 @@ ashldi3.S +bswapsdi2.S font.c lib1funcs.S hyp-stub.S diff --git a/arch/arm/boot/dts/bcm11351.dtsi b/arch/arm/boot/dts/bcm11351.dtsi index e491b82..792fde1 100644 --- a/arch/arm/boot/dts/bcm11351.dtsi +++ b/arch/arm/boot/dts/bcm11351.dtsi @@ -147,7 +147,7 @@ }; pinctrl@35004800 { - compatible = "brcm,capri-pinctrl"; + compatible = "brcm,bcm11351-pinctrl"; reg = <0x35004800 0x430>; }; diff --git a/arch/arm/boot/dts/omap3-gta04.dts b/arch/arm/boot/dts/omap3-gta04.dts index c551e4a..d3b253b 100644 --- a/arch/arm/boot/dts/omap3-gta04.dts +++ b/arch/arm/boot/dts/omap3-gta04.dts @@ -13,7 +13,7 @@ / { model = "OMAP3 GTA04"; - compatible = "ti,omap3-gta04", "ti,omap3"; + compatible = "ti,omap3-gta04", "ti,omap36xx", "ti,omap3"; cpus { cpu@0 { diff --git a/arch/arm/boot/dts/omap3-igep0020.dts b/arch/arm/boot/dts/omap3-igep0020.dts index 25a2b5f..f2779ac 100644 --- a/arch/arm/boot/dts/omap3-igep0020.dts +++ b/arch/arm/boot/dts/omap3-igep0020.dts @@ -14,7 +14,7 @@ / { model = "IGEPv2 (TI OMAP AM/DM37x)"; - compatible = "isee,omap3-igep0020", "ti,omap3"; + compatible = "isee,omap3-igep0020", "ti,omap36xx", "ti,omap3"; leds { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/omap3-igep0030.dts b/arch/arm/boot/dts/omap3-igep0030.dts index 145c58c..2793749 100644 --- a/arch/arm/boot/dts/omap3-igep0030.dts +++ b/arch/arm/boot/dts/omap3-igep0030.dts @@ -13,7 +13,7 @@ / { model = "IGEP COM MODULE (TI OMAP AM/DM37x)"; - compatible = 
"isee,omap3-igep0030", "ti,omap3"; + compatible = "isee,omap3-igep0030", "ti,omap36xx", "ti,omap3"; leds { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/sun4i-a10.dtsi b/arch/arm/boot/dts/sun4i-a10.dtsi index 10666ca..d4d2763 100644 --- a/arch/arm/boot/dts/sun4i-a10.dtsi +++ b/arch/arm/boot/dts/sun4i-a10.dtsi @@ -426,7 +426,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun5i-a10s.dtsi b/arch/arm/boot/dts/sun5i-a10s.dtsi index 6496159..79fd412 100644 --- a/arch/arm/boot/dts/sun5i-a10s.dtsi +++ b/arch/arm/boot/dts/sun5i-a10s.dtsi @@ -383,7 +383,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun5i-a13.dtsi b/arch/arm/boot/dts/sun5i-a13.dtsi index 320335a..c463fd7 100644 --- a/arch/arm/boot/dts/sun5i-a13.dtsi +++ b/arch/arm/boot/dts/sun5i-a13.dtsi @@ -346,7 +346,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <29>; }; diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi index 9ff0948..6f25cf5 100644 --- a/arch/arm/boot/dts/sun7i-a20.dtsi +++ b/arch/arm/boot/dts/sun7i-a20.dtsi @@ -454,7 +454,7 @@ rtc: rtc@01c20d00 { compatible = "allwinner,sun7i-a20-rtc"; reg = <0x01c20d00 0x20>; - interrupts = <0 24 1>; + interrupts = <0 24 4>; }; sid: eeprom@01c23800 { @@ -463,7 +463,7 @@ }; rtp: rtp@01c25000 { - compatible = "allwinner,sun4i-ts"; + compatible = "allwinner,sun4i-a10-ts"; reg = <0x01c25000 0x100>; interrupts = <0 29 4>; }; @@ -596,10 +596,10 @@ hstimer@01c60000 { compatible = "allwinner,sun7i-a20-hstimer"; reg = <0x01c60000 0x1000>; - interrupts = <0 81 1>, - <0 82 1>, - <0 83 1>, - <0 84 1>; + interrupts = <0 81 4>, + <0 82 4>, + <0 83 4>, + <0 84 4>; clocks = <&ahb_gates 28>; }; diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig index 00fe9e9..27d69b5 100644 --- a/arch/arm/configs/tegra_defconfig +++ b/arch/arm/configs/tegra_defconfig @@ -204,7 +204,10 @@ CONFIG_MMC_BLOCK_MINORS=16 CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_PLTFM=y CONFIG_MMC_SDHCI_TEGRA=y +CONFIG_NEW_LEDS=y +CONFIG_LEDS_CLASS=y CONFIG_LEDS_GPIO=y +CONFIG_LEDS_TRIGGERS=y CONFIG_LEDS_TRIGGER_TIMER=y CONFIG_LEDS_TRIGGER_ONESHOT=y CONFIG_LEDS_TRIGGER_HEARTBEAT=y diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h index 8756e4b..4afb376 100644 --- a/arch/arm/include/asm/memory.h +++ b/arch/arm/include/asm/memory.h @@ -30,14 +30,15 @@ */ #define UL(x) _AC(x, UL) +/* PAGE_OFFSET - the virtual address of the start of the kernel image */ +#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) + #ifdef CONFIG_MMU /* - * PAGE_OFFSET - the virtual address of the start of the kernel image * TASK_SIZE - the maximum size of a user space task. * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area */ -#define PAGE_OFFSET UL(CONFIG_PAGE_OFFSET) #define TASK_SIZE (UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M)) #define TASK_UNMAPPED_BASE ALIGN(TASK_SIZE / 3, SZ_16M) @@ -104,10 +105,6 @@ #define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE) #endif -#ifndef PAGE_OFFSET -#define PAGE_OFFSET PLAT_PHYS_OFFSET -#endif - /* * The module can be at any place in ram in nommu mode. 
 */
diff --git a/arch/arm/kernel/head-common.S b/arch/arm/kernel/head-common.S
index 47cd974..c96ecac 100644
--- a/arch/arm/kernel/head-common.S
+++ b/arch/arm/kernel/head-common.S
@@ -177,6 +177,18 @@ __lookup_processor_type_data:
 	.long	__proc_info_end
 	.size	__lookup_processor_type_data, . - __lookup_processor_type_data
 
+__error_lpae:
+#ifdef CONFIG_DEBUG_LL
+	adr	r0, str_lpae
+	bl	printascii
+	b	__error
+str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
+#else
+	b	__error
+#endif
+	.align
+ENDPROC(__error_lpae)
+
 __error_p:
 #ifdef CONFIG_DEBUG_LL
 	adr	r0, str_p1
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 914616e..f5f381d 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -102,7 +102,7 @@ ENTRY(stext)
 	and	r3, r3, #0xf			@ extract VMSA support
 	cmp	r3, #5				@ long-descriptor translation table format?
  THUMB( it	lo )				@ force fixup-able long branch encoding
-	blo	__error_p			@ only classic page table format
+	blo	__error_lpae			@ only classic page table format
 #endif
 
 #ifndef CONFIG_XIP_KERNEL
diff --git a/arch/arm/mach-omap2/cclock3xxx_data.c b/arch/arm/mach-omap2/cclock3xxx_data.c
index 3b05aea..11ed915 100644
--- a/arch/arm/mach-omap2/cclock3xxx_data.c
+++ b/arch/arm/mach-omap2/cclock3xxx_data.c
@@ -433,7 +433,9 @@ static const struct clk_ops dpll4_m5x2_ck_ops = {
 	.enable		= &omap2_dflt_clk_enable,
 	.disable	= &omap2_dflt_clk_disable,
 	.is_enabled	= &omap2_dflt_clk_is_enabled,
+	.set_rate	= &omap3_clkoutx2_set_rate,
 	.recalc_rate	= &omap3_clkoutx2_recalc,
+	.round_rate	= &omap3_clkoutx2_round_rate,
 };
 
 static const struct clk_ops dpll4_m5x2_ck_3630_ops = {
diff --git a/arch/arm/mach-omap2/cpuidle44xx.c b/arch/arm/mach-omap2/cpuidle44xx.c
index 4c158c8..01fc710 100644
--- a/arch/arm/mach-omap2/cpuidle44xx.c
+++ b/arch/arm/mach-omap2/cpuidle44xx.c
@@ -23,6 +23,8 @@
 #include "prm.h"
 #include "clockdomain.h"
 
+#define MAX_CPUS	2
+
 /* Machine specific information */
 struct idle_statedata {
 	u32 cpu_state;
@@ -48,11 +50,11 @@ static struct idle_statedata omap4_idle_data[] = {
 	},
 };
 
-static struct powerdomain *mpu_pd, *cpu_pd[NR_CPUS];
-static struct clockdomain *cpu_clkdm[NR_CPUS];
+static struct powerdomain *mpu_pd, *cpu_pd[MAX_CPUS];
+static struct clockdomain *cpu_clkdm[MAX_CPUS];
 
 static atomic_t abort_barrier;
-static bool cpu_done[NR_CPUS];
+static bool cpu_done[MAX_CPUS];
 static struct idle_statedata *state_ptr = &omap4_idle_data[0];
 
 /* Private functions */
diff --git a/arch/arm/mach-omap2/dpll3xxx.c b/arch/arm/mach-omap2/dpll3xxx.c
index 3185ced..3c418ea 100644
--- a/arch/arm/mach-omap2/dpll3xxx.c
+++ b/arch/arm/mach-omap2/dpll3xxx.c
@@ -623,6 +623,32 @@ void omap3_dpll_deny_idle(struct clk_hw_omap *clk)
 
 /* Clock control for DPLL outputs */
 
+/* Find the parent DPLL for the given clkoutx2 clock */
+static struct clk_hw_omap *omap3_find_clkoutx2_dpll(struct clk_hw *hw)
+{
+	struct clk_hw_omap *pclk = NULL;
+	struct clk *parent;
+
+	/* Walk up the parents of clk, looking for a DPLL */
+	do {
+		do {
+			parent = __clk_get_parent(hw->clk);
+			hw = __clk_get_hw(parent);
+		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
+		if (!hw)
+			break;
+		pclk = to_clk_hw_omap(hw);
+	} while (pclk && !pclk->dpll_data);
+
+	/* clk does not have a DPLL as a parent?  error in the clock data */
+	if (!pclk) {
+		WARN_ON(1);
+		return NULL;
+	}
+
+	return pclk;
+}
+
 /**
  * omap3_clkoutx2_recalc - recalculate DPLL X2 output virtual clock rate
  * @clk: DPLL output struct clk
@@ -637,27 +663,14 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 	unsigned long rate;
 	u32 v;
 	struct clk_hw_omap *pclk = NULL;
-	struct clk *parent;
 
 	if (!parent_rate)
 		return 0;
 
-	/* Walk up the parents of clk, looking for a DPLL */
-	do {
-		do {
-			parent = __clk_get_parent(hw->clk);
-			hw = __clk_get_hw(parent);
-		} while (hw && (__clk_get_flags(hw->clk) & CLK_IS_BASIC));
-		if (!hw)
-			break;
-		pclk = to_clk_hw_omap(hw);
-	} while (pclk && !pclk->dpll_data);
+	pclk = omap3_find_clkoutx2_dpll(hw);
 
-	/* clk does not have a DPLL as a parent?  error in the clock data */
-	if (!pclk) {
-		WARN_ON(1);
+	if (!pclk)
 		return 0;
-	}
 
 	dd = pclk->dpll_data;
 
@@ -672,6 +685,55 @@ unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw,
 	return rate;
 }
 
+int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate,
+					unsigned long parent_rate)
+{
+	return 0;
+}
+
+long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate,
+		unsigned long *prate)
+{
+	const struct dpll_data *dd;
+	u32 v;
+	struct clk_hw_omap *pclk = NULL;
+
+	if (!*prate)
+		return 0;
+
+	pclk = omap3_find_clkoutx2_dpll(hw);
+
+	if (!pclk)
+		return 0;
+
+	dd = pclk->dpll_data;
+
+	/* TYPE J does not have a clkoutx2 */
+	if (dd->flags & DPLL_J_TYPE) {
+		*prate = __clk_round_rate(__clk_get_parent(pclk->hw.clk), rate);
+		return *prate;
+	}
+
+	WARN_ON(!dd->enable_mask);
+
+	v = omap2_clk_readl(pclk, dd->control_reg) & dd->enable_mask;
+	v >>= __ffs(dd->enable_mask);
+
+	/* If in bypass, the rate is fixed to the bypass rate*/
+	if (v != OMAP3XXX_EN_DPLL_LOCKED)
+		return *prate;
+
+	if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
+		unsigned long best_parent;
+
+		best_parent = (rate / 2);
+		*prate = __clk_round_rate(__clk_get_parent(hw->clk),
+				best_parent);
+	}
+
+	return *prate * 2;
+}
+
 /* OMAP3/4 non-CORE DPLL clkops */
 const struct clk_hw_omap_ops clkhwops_omap3_dpll = {
 	.allow_idle	= omap3_dpll_allow_idle,
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index 42d8188..1f33f5d 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -1947,29 +1947,31 @@ static int _ocp_softreset(struct omap_hwmod *oh)
 		goto dis_opt_clks;
 
 	_write_sysconfig(v, oh);
-	ret = _clear_softreset(oh, &v);
-	if (ret)
-		goto dis_opt_clks;
-
-	_write_sysconfig(v, oh);
 
 	if (oh->class->sysc->srst_udelay)
 		udelay(oh->class->sysc->srst_udelay);
 
 	c = _wait_softreset_complete(oh);
-	if (c == MAX_MODULE_SOFTRESET_WAIT)
+	if (c == MAX_MODULE_SOFTRESET_WAIT) {
 		pr_warning("omap_hwmod: %s: softreset failed (waited %d usec)\n",
 			   oh->name, MAX_MODULE_SOFTRESET_WAIT);
-	else
+		ret = -ETIMEDOUT;
+		goto dis_opt_clks;
+	} else {
 		pr_debug("omap_hwmod: %s: softreset in %d usec\n", oh->name, c);
+	}
+
+	ret = _clear_softreset(oh, &v);
+	if (ret)
+		goto dis_opt_clks;
+
+	_write_sysconfig(v, oh);
 
 	/*
 	 * XXX add _HWMOD_STATE_WEDGED for modules that don't come back from
 	 * _wait_target_ready() or _reset()
 	 */
-	ret = (c == MAX_MODULE_SOFTRESET_WAIT) ? -ETIMEDOUT : 0;
-
 dis_opt_clks:
 	if (oh->flags & HWMOD_CONTROL_OPT_CLKS_IN_RESET)
 		_disable_optional_clocks(oh);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 18f333c..810c205 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1365,11 +1365,10 @@ static struct omap_hwmod_class_sysconfig dra7xx_spinlock_sysc = {
 	.rev_offs	= 0x0000,
 	.sysc_offs	= 0x0010,
 	.syss_offs	= 0x0014,
-	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_CLOCKACTIVITY |
-			   SYSC_HAS_ENAWAKEUP | SYSC_HAS_SIDLEMODE |
-			   SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
-	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
-			   SIDLE_SMART_WKUP),
+	.sysc_flags	= (SYSC_HAS_AUTOIDLE | SYSC_HAS_ENAWAKEUP |
+			   SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+			   SYSS_HAS_RESET_STATUS),
+	.idlemodes	= (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
 	.sysc_fields	= &omap_hwmod_sysc_type1,
 };
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 3d5b24d..c33e07e 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -22,6 +22,8 @@
 #include "common-board-devices.h"
 #include "dss-common.h"
 #include "control.h"
+#include "omap-secure.h"
+#include "soc.h"
 
 struct pdata_init {
 	const char *compatible;
@@ -169,6 +171,22 @@ static void __init am3517_evm_legacy_init(void)
 	omap_ctrl_writel(v, AM35XX_CONTROL_IP_SW_RESET);
 	omap_ctrl_readl(AM35XX_CONTROL_IP_SW_RESET); /* OCP barrier */
 }
+
+static void __init nokia_n900_legacy_init(void)
+{
+	hsmmc2_internal_input_clk();
+
+	if (omap_type() == OMAP2_DEVICE_TYPE_SEC) {
+		if (IS_ENABLED(CONFIG_ARM_ERRATA_430973)) {
+			pr_info("RX-51: Enabling ARM errata 430973 workaround\n");
+			/* set IBE to 1 */
+			rx51_secure_update_aux_cr(BIT(6), 0);
+		} else {
+			pr_warning("RX-51: Not enabling ARM errata 430973 workaround\n");
+			pr_warning("Thumb binaries may crash randomly without this workaround\n");
+		}
+	}
+}
 #endif /* CONFIG_ARCH_OMAP3 */
 
 #ifdef CONFIG_ARCH_OMAP4
@@ -239,6 +257,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 #endif
 #ifdef CONFIG_ARCH_OMAP3
 	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002030, "48002030.pinmux", &pcs_pdata),
+	OF_DEV_AUXDATA("ti,omap3-padconf", 0x480025a0, "480025a0.pinmux", &pcs_pdata),
 	OF_DEV_AUXDATA("ti,omap3-padconf", 0x48002a00, "48002a00.pinmux", &pcs_pdata),
 	/* Only on am3517 */
 	OF_DEV_AUXDATA("ti,davinci_mdio", 0x5c030000, "davinci_mdio.0", NULL),
@@ -259,7 +278,7 @@ struct of_dev_auxdata omap_auxdata_lookup[] __initdata = {
 static struct pdata_init pdata_quirks[] __initdata = {
 #ifdef CONFIG_ARCH_OMAP3
 	{ "compulab,omap3-sbc-t3730", omap3_sbc_t3730_legacy_init, },
-	{ "nokia,omap3-n900", hsmmc2_internal_input_clk, },
+	{ "nokia,omap3-n900", nokia_n900_legacy_init, },
 	{ "nokia,omap3-n9", hsmmc2_internal_input_clk, },
 	{ "nokia,omap3-n950", hsmmc2_internal_input_clk, },
 	{ "isee,omap3-igep0020", omap3_igep0020_legacy_init, },
diff --git a/arch/arm/mach-omap2/prminst44xx.c b/arch/arm/mach-omap2/prminst44xx.c
index 6334b96..280f3c5 100644
--- a/arch/arm/mach-omap2/prminst44xx.c
+++ b/arch/arm/mach-omap2/prminst44xx.c
@@ -183,11 +183,11 @@ void omap4_prminst_global_warm_sw_reset(void)
 				 OMAP4_PRM_RSTCTRL_OFFSET);
 	v |= OMAP4430_RST_GLOBAL_WARM_SW_MASK;
 	omap4_prminst_write_inst_reg(v, OMAP4430_PRM_PARTITION,
-				 OMAP4430_PRM_DEVICE_INST,
+				 dev_inst,
 				 OMAP4_PRM_RSTCTRL_OFFSET);
 
 	/* OCP barrier */
 	v = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION,
-				    OMAP4430_PRM_DEVICE_INST,
+				    dev_inst,
 				    OMAP4_PRM_RSTCTRL_OFFSET);
 }
diff --git a/arch/arm/mach-sa1100/include/mach/collie.h b/arch/arm/mach-sa1100/include/mach/collie.h
index f33679d..50e1d85 100644
--- a/arch/arm/mach-sa1100/include/mach/collie.h
+++ b/arch/arm/mach-sa1100/include/mach/collie.h
@@ -13,6 +13,8 @@
 #ifndef __ASM_ARCH_COLLIE_H
 #define __ASM_ARCH_COLLIE_H
 
+#include "hardware.h" /* Gives GPIO_MAX */
+
 extern void locomolcd_power(int on);
 
 #define COLLIE_SCOOP_GPIO_BASE	(GPIO_MAX + 1)
diff --git a/arch/arm/mm/dump.c b/arch/arm/mm/dump.c
index 2b3a564..ef69152 100644
--- a/arch/arm/mm/dump.c
+++ b/arch/arm/mm/dump.c
@@ -264,6 +264,9 @@ static void walk_pmd(struct pg_state *st, pud_t *pud, unsigned long start)
 			note_page(st, addr, 3, pmd_val(*pmd));
 		else
 			walk_pte(st, pmd, addr);
+
+		if (SECTION_SIZE < PMD_SIZE && pmd_large(pmd[1]))
+			note_page(st, addr + SECTION_SIZE, 3, pmd_val(pmd[1]));
 	}
 }
diff --git a/arch/c6x/include/asm/cache.h b/arch/c6x/include/asm/cache.h
index 09c5a0f..86648c0 100644
--- a/arch/c6x/include/asm/cache.h
+++ b/arch/c6x/include/asm/cache.h
@@ -12,6 +12,7 @@
 #define _ASM_C6X_CACHE_H
 
 #include <linux/irqflags.h>
+#include <linux/init.h>
 
 /*
  * Cache line size
diff --git a/arch/cris/include/asm/bitops.h b/arch/cris/include/asm/bitops.h
index 184066c..053c17b 100644
--- a/arch/cris/include/asm/bitops.h
+++ b/arch/cris/include/asm/bitops.h
@@ -144,7 +144,7 @@ static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
  * definition, which doesn't have the same semantics.  We don't want to
  * use -fno-builtin, so just hide the name ffs.
  */
-#define ffs kernel_ffs
+#define ffs(x) kernel_ffs(x)
 
 #include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a96bcf8..20e8a9b 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -98,7 +98,7 @@ static int uncached_add_chunk(struct uncached_pool *uc_pool, int nid)
 	/* attempt to allocate a granule's worth of cached memory pages */
 
 	page = alloc_pages_exact_node(nid,
-				GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+				GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE,
 				IA64_GRANULE_SHIFT-PAGE_SHIFT);
 	if (!page) {
 		mutex_unlock(&uc_pool->add_chunk_mutex);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8d4c247f1..af064d2 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1048,6 +1048,15 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 	flush_altivec_to_thread(src);
 	flush_vsx_to_thread(src);
 	flush_spe_to_thread(src);
+	/*
+	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
+	 * flush but it removes the checkpointed state from the current CPU and
+	 * transitions the CPU out of TM mode.  Hence we need to call
+	 * tm_recheckpoint_new_task() (on the same task) to restore the
+	 * checkpointed state back and the TM mode.
+	 */
+	__switch_to_tm(src);
+	tm_recheckpoint_new_task(src);
 
 	*dst = *src;
diff --git a/arch/powerpc/kernel/reloc_64.S b/arch/powerpc/kernel/reloc_64.S
index 1482327..d88736f 100644
--- a/arch/powerpc/kernel/reloc_64.S
+++ b/arch/powerpc/kernel/reloc_64.S
@@ -81,6 +81,7 @@ _GLOBAL(relocate)
 
 6:	blr
 
+.balign 8
 p_dyn:	.llong	__dynamic_start - 0b
 p_rela:	.llong	__rela_dyn_start - 0b
 p_st:	.llong	_stext - 0b
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c
index 5ec1e47..e865d74 100644
--- a/arch/powerpc/platforms/cell/ras.c
+++ b/arch/powerpc/platforms/cell/ras.c
@@ -123,7 +123,8 @@ static int __init cbe_ptcal_enable_on_node(int nid, int order)
 
 	area->nid = nid;
 	area->order = order;
-	area->pages = alloc_pages_exact_node(area->nid, GFP_KERNEL|GFP_THISNODE,
+	area->pages = alloc_pages_exact_node(area->nid,
+					     GFP_KERNEL|__GFP_THISNODE,
 					     area->order);
 
 	if (!area->pages) {
diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
index c026cca..f3aaf23 100644
--- a/arch/x86/Kconfig.cpu
+++ b/arch/x86/Kconfig.cpu
@@ -341,10 +341,6 @@ config X86_USE_3DNOW
 	def_bool y
 	depends on (MCYRIXIII || MK7 || MGEODE_LX) && !UML
 
-config X86_OOSTORE
-	def_bool y
-	depends on (MWINCHIP3D || MWINCHIPC6) && MTRR
-
 #
 # P6_NOPs are a relatively minor optimization that require a family >=
 # 6 processor, except that it is broken on certain VIA chips.
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 04a4890..69bbb48 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -85,11 +85,7 @@
 #else
 # define smp_rmb()	barrier()
 #endif
-#ifdef CONFIG_X86_OOSTORE
-# define smp_wmb()	wmb()
-#else
-# define smp_wmb()	barrier()
-#endif
+#define smp_wmb()	barrier()
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
@@ -100,7 +96,7 @@
 #define set_mb(var, value) do { var = value; barrier(); } while (0)
 #endif /* SMP */
 
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 
 /*
  * For either of these options x86 doesn't have a strong TSO memory
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 3d6b9f8..acd86c8 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -134,6 +134,7 @@ extern void efi_setup_page_tables(void);
 extern void __init old_map_region(efi_memory_desc_t *md);
 extern void __init runtime_code_page_mkexec(void);
 extern void __init efi_runtime_mkexec(void);
+extern void __init efi_apply_memmap_quirks(void);
 
 struct efi_setup_data {
 	u64 fw_vendor;
diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
index 34f69cb..91d9c69 100644
--- a/arch/x86/include/asm/io.h
+++ b/arch/x86/include/asm/io.h
@@ -237,7 +237,7 @@ memcpy_toio(volatile void __iomem *dst, const void *src, size_t count)
 
 static inline void flush_write_buffers(void)
 {
-#if defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE)
+#if defined(CONFIG_X86_PPRO_FENCE)
 	asm volatile("lock; addl $0,0(%%esp)": : :"memory");
 #endif
 }
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index bf156de..0f62f54 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -26,10 +26,9 @@
 # define LOCK_PTR_REG "D"
 #endif
 
-#if defined(CONFIG_X86_32) && \
-	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
+#if defined(CONFIG_X86_32) && (defined(CONFIG_X86_PPRO_FENCE))
 /*
- * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
+ * On PPro SMP, we use a locked operation to unlock
  * (PPro errata 66, 92)
  */
 # define UNLOCK_LOCK_PREFIX LOCK_PREFIX
diff --git a/arch/x86/kernel/cpu/centaur.c b/arch/x86/kernel/cpu/centaur.c
index 8779eda..d8fba5c 100644
--- a/arch/x86/kernel/cpu/centaur.c
+++ b/arch/x86/kernel/cpu/centaur.c
@@ -8,236 +8,6 @@
 
 #include "cpu.h"
 
-#ifdef CONFIG_X86_OOSTORE
-
-static u32 power2(u32 x)
-{
-	u32 s = 1;
-
-	while (s <= x)
-		s <<= 1;
-
-	return s >>= 1;
-}
-
-
-/*
- * Set up an actual MCR
- */
-static void centaur_mcr_insert(int reg, u32 base, u32 size, int key)
-{
-	u32 lo, hi;
-
-	hi = base & ~0xFFF;
-	lo = ~(size-1);		/* Size is a power of 2 so this makes a mask */
-	lo &= ~0xFFF;		/* Remove the ctrl value bits */
-	lo |= key;		/* Attribute we wish to set */
-	wrmsr(reg+MSR_IDT_MCR0, lo, hi);
-	mtrr_centaur_report_mcr(reg, lo, hi);	/* Tell the mtrr driver */
-}
-
-/*
- * Figure what we can cover with MCR's
- *
- * Shortcut: We know you can't put 4Gig of RAM on a winchip
- */
-static u32 ramtop(void)
-{
-	u32 clip = 0xFFFFFFFFUL;
-	u32 top = 0;
-	int i;
-
-	for (i = 0; i < e820.nr_map; i++) {
-		unsigned long start, end;
-
-		if (e820.map[i].addr > 0xFFFFFFFFUL)
-			continue;
-		/*
-		 * Don't MCR over reserved space. Ignore the ISA hole
-		 * we frob around that catastrophe already
-		 */
-		if (e820.map[i].type == E820_RESERVED) {
-			if (e820.map[i].addr >= 0x100000UL &&
-			    e820.map[i].addr < clip)
-				clip = e820.map[i].addr;
-			continue;
-		}
-		start = e820.map[i].addr;
-		end = e820.map[i].addr + e820.map[i].size;
-		if (start >= end)
-			continue;
-		if (end > top)
-			top = end;
-	}
-	/*
-	 * Everything below 'top' should be RAM except for the ISA hole.
-	 * Because of the limited MCR's we want to map NV/ACPI into our
-	 * MCR range for gunk in RAM
-	 *
-	 * Clip might cause us to MCR insufficient RAM but that is an
-	 * acceptable failure mode and should only bite obscure boxes with
-	 * a VESA hole at 15Mb
-	 *
-	 * The second case Clip sometimes kicks in is when the EBDA is marked
-	 * as reserved. Again we fail safe with reasonable results
-	 */
-	if (top > clip)
-		top = clip;
-
-	return top;
-}
-
-/*
- * Compute a set of MCR's to give maximum coverage
- */
-static int centaur_mcr_compute(int nr, int key)
-{
-	u32 mem = ramtop();
-	u32 root = power2(mem);
-	u32 base = root;
-	u32 top = root;
-	u32 floor = 0;
-	int ct = 0;
-
-	while (ct < nr) {
-		u32 fspace = 0;
-		u32 high;
-		u32 low;
-
-		/*
-		 * Find the largest block we will fill going upwards
-		 */
-		high = power2(mem-top);
-
-		/*
-		 * Find the largest block we will fill going downwards
-		 */
-		low = base/2;
-
-		/*
-		 * Don't fill below 1Mb going downwards as there
-		 * is an ISA hole in the way.
-		 */
-		if (base <= 1024*1024)
-			low = 0;
-
-		/*
-		 * See how much space we could cover by filling below
-		 * the ISA hole
-		 */
-
-		if (floor == 0)
-			fspace = 512*1024;
-		else if (floor == 512*1024)
-			fspace = 128*1024;
-
-		/* And forget ROM space */
-
-		/*
-		 * Now install the largest coverage we get
-		 */
-		if (fspace > high && fspace > low) {
-			centaur_mcr_insert(ct, floor, fspace, key);
-			floor += fspace;
-		} else if (high > low) {
-			centaur_mcr_insert(ct, top, high, key);
-			top += high;
-		} else if (low > 0) {
-			base -= low;
-			centaur_mcr_insert(ct, base, low, key);
-		} else
-			break;
-		ct++;
-	}
-	/*
-	 * We loaded ct values. We now need to set the mask. The caller
-	 * must do this bit.
-	 */
-	return ct;
-}
-
-static void centaur_create_optimal_mcr(void)
-{
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining and weak write ordered.
-	 *
-	 * To experiment with: Linux never uses stack operations for
-	 * mmio spaces so we could globally enable stack operation wc
-	 *
-	 * Load the registers with type 31 - full write combining, all
-	 * writes weakly ordered.
-	 */
-	used = centaur_mcr_compute(6, 31);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-static void winchip2_create_optimal_mcr(void)
-{
-	u32 lo, hi;
-	int used;
-	int i;
-
-	/*
-	 * Allocate up to 6 mcrs to mark as much of ram as possible
-	 * as write combining, weak store ordered.
-	 *
-	 * Load the registers with type 25
-	 *	8	- weak write ordering
-	 *	16	- weak read ordering
-	 *	1	- write combining
-	 */
-	used = centaur_mcr_compute(6, 25);
-
-	/*
-	 * Mark the registers we are using.
-	 */
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	for (i = 0; i < used; i++)
-		lo |= 1<<(9+i);
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-
-	/*
-	 * Wipe unused MCRs
-	 */
-
-	for (i = used; i < 8; i++)
-		wrmsr(MSR_IDT_MCR0+i, 0, 0);
-}
-
-/*
- * Handle the MCR key on the Winchip 2.
- */
-static void winchip2_unprotect_mcr(void)
-{
-	u32 lo, hi;
-	u32 key;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	key = (lo>>17) & 7;
-	lo |= key<<6;	/* replace with unlock key */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-
-static void winchip2_protect_mcr(void)
-{
-	u32 lo, hi;
-
-	rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-	lo &= ~0x1C0;	/* blank bits 8-6 */
-	wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-}
-#endif /* CONFIG_X86_OOSTORE */
-
 #define ACE_PRESENT	(1 << 6)
 #define ACE_ENABLED	(1 << 7)
 #define ACE_FCR		(1 << 28)	/* MSR_VIA_FCR */
@@ -362,20 +132,6 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			fcr_clr = DPDC;
 			printk(KERN_NOTICE "Disabling bugged TSC.\n");
 			clear_cpu_cap(c, X86_FEATURE_TSC);
-#ifdef CONFIG_X86_OOSTORE
-			centaur_create_optimal_mcr();
-			/*
-			 * Enable:
-			 *	write combining on non-stack, non-string
-			 *	write combining on string, all types
-			 *	weak write ordering
-			 *
-			 * The C6 original lacks weak read order
-			 *
-			 * Note 0x120 is write only on Winchip 1
-			 */
-			wrmsr(MSR_IDT_MCR_CTRL, 0x01F0001F, 0);
-#endif
 			break;
 		case 8:
 			switch (c->x86_mask) {
@@ -392,40 +148,12 @@ static void init_centaur(struct cpuinfo_x86 *c)
 			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 				  E2MMX|EAMD3D;
 			fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-			winchip2_unprotect_mcr();
-			winchip2_create_optimal_mcr();
-			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/*
-			 * Enable:
-			 *	write combining on non-stack, non-string
-			 *	write combining on string, all types
-			 *	weak write ordering
-			 */
-			lo |= 31;
-			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			winchip2_protect_mcr();
-#endif
 			break;
 		case 9:
 			name = "3";
 			fcr_set = ECX8|DSMC|DTLOCK|EMMX|EBRPRED|ERETSTK|
 				  E2MMX|EAMD3D;
 			fcr_clr = DPDC;
-#ifdef CONFIG_X86_OOSTORE
-			winchip2_unprotect_mcr();
-			winchip2_create_optimal_mcr();
-			rdmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			/*
-			 * Enable:
-			 *	write combining on non-stack, non-string
-			 *	write combining on string, all types
-			 *	weak write ordering
-			 */
-			lo |= 31;
-			wrmsr(MSR_IDT_MCR_CTRL, lo, hi);
-			winchip2_protect_mcr();
-#endif
 			break;
 		default:
 			name = "??";
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 81ba276..f36bd42 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -544,6 +544,10 @@ ENDPROC(early_idt_handlers)
 	/* This is global to keep gas from relaxing the jumps */
 ENTRY(early_idt_handler)
 	cld
+
+	cmpl $2,(%esp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,%ss:early_recursion_flag
 	je hlt_loop
 	incl %ss:early_recursion_flag
@@ -594,8 +598,9 @@ ex_entry:
 	pop %edx
 	pop %ecx
 	pop %eax
-	addl $8,%esp		/* drop vector number and error code */
 	decl %ss:early_recursion_flag
+is_nmi:
+	addl $8,%esp		/* drop vector number and error code */
 	iret
 ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index e1aabdb..a468c0a 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -343,6 +343,9 @@ early_idt_handlers:
 ENTRY(early_idt_handler)
 	cld
 
+	cmpl $2,(%rsp)		# X86_TRAP_NMI
+	je is_nmi		# Ignore NMI
+
 	cmpl $2,early_recursion_flag(%rip)
 	jz 1f
 	incl early_recursion_flag(%rip)
@@ -405,8 +408,9 @@ ENTRY(early_idt_handler)
 	popq %rdx
 	popq %rcx
 	popq %rax
-	addq $16,%rsp		# drop vector number and error code
 	decl early_recursion_flag(%rip)
+is_nmi:
+	addq $16,%rsp		# drop vector number and error code
 	INTERRUPT_RETURN
 ENDPROC(early_idt_handler)
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
index e8368c6..d5dd808 100644
--- a/arch/x86/kernel/i387.c
+++ b/arch/x86/kernel/i387.c
@@ -86,10 +86,19 @@ EXPORT_SYMBOL(__kernel_fpu_begin);
 
 void __kernel_fpu_end(void)
 {
-	if (use_eager_fpu())
-		math_state_restore();
-	else
+	if (use_eager_fpu()) {
+		/*
+		 * For eager fpu, most the time, tsk_used_math() is true.
+		 * Restore the user math as we are done with the kernel usage.
+		 * At few instances during thread exit, signal handling etc,
+		 * tsk_used_math() is false. Those few places will take proper
+		 * actions, so we don't need to restore the math here.
+		 */
+		if (likely(tsk_used_math(current)))
+			math_state_restore();
+	} else {
 		stts();
+	}
 }
 EXPORT_SYMBOL(__kernel_fpu_end);
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 7c6acd4..ff898bb 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -529,7 +529,7 @@ static void quirk_amd_nb_node(struct pci_dev *dev)
 		return;
 
 	pci_read_config_dword(nb_ht, 0x60, &val);
-	node = val & 7;
+	node = pcibus_to_node(dev->bus) | (val & 7);
 	/*
 	 * Some hardware may return an invalid node ID,
 	 * so check it first:
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 06853e6..ce72964 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1239,14 +1239,8 @@ void __init setup_arch(char **cmdline_p)
 	register_refined_jiffies(CLOCK_TICK_RATE);
 
 #ifdef CONFIG_EFI
-	/* Once setup is done above, unmap the EFI memory map on
-	 * mismatched firmware/kernel archtectures since there is no
-	 * support for runtime services.
-	 */
-	if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
-		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
-		efi_unmap_memmap();
-	}
+	if (efi_enabled(EFI_BOOT))
+		efi_apply_memmap_quirks();
 #endif
 }
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e81df8f..2de1bc0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3002,10 +3002,8 @@ static int cr8_write_interception(struct vcpu_svm *svm)
 	u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
 	/* instruction emulation calls kvm_set_cr8() */
 	r = cr_interception(svm);
-	if (irqchip_in_kernel(svm->vcpu.kvm)) {
-		clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+	if (irqchip_in_kernel(svm->vcpu.kvm))
 		return r;
-	}
 	if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
 		return r;
 	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
@@ -3567,6 +3565,8 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	if (is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK))
 		return;
 
+	clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
+
 	if (irr == -1)
 		return;
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 6dea040..a10c8c7 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -1020,13 +1020,17 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
  * This routine handles page faults.  It determines the address,
  * and the problem, and then passes it off to one of the appropriate
  * routines.
+ *
+ * This function must have noinline because both callers
+ * {,trace_}do_page_fault() have notrace on. Having this an actual function
+ * guarantees there's a function trace entry.
  */
-static void __kprobes
-__do_page_fault(struct pt_regs *regs, unsigned long error_code)
+static void __kprobes noinline
+__do_page_fault(struct pt_regs *regs, unsigned long error_code,
+		unsigned long address)
 {
 	struct vm_area_struct *vma;
 	struct task_struct *tsk;
-	unsigned long address;
 	struct mm_struct *mm;
 	int fault;
 	unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
@@ -1034,9 +1038,6 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	tsk = current;
 	mm = tsk->mm;
 
-	/* Get the faulting address: */
-	address = read_cr2();
-
 	/*
 	 * Detect and handle instructions that would cause a page fault for
 	 * both a tracked kernel page and a userspace page.
@@ -1248,32 +1249,50 @@ good_area:
 		up_read(&mm->mmap_sem);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	unsigned long address = read_cr2(); /* Get the faulting address */
 	enum ctx_state prev_state;
 
+	/*
+	 * We must have this function tagged with __kprobes, notrace and call
+	 * read_cr2() before calling anything else. To avoid calling any kind
+	 * of tracing machinery before we've observed the CR2 value.
+	 *
+	 * exception_{enter,exit}() contain all sorts of tracepoints.
+	 */
+
 	prev_state = exception_enter();
-	__do_page_fault(regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
 
-static void trace_page_fault_entries(struct pt_regs *regs,
+#ifdef CONFIG_TRACING
+static void trace_page_fault_entries(unsigned long address, struct pt_regs *regs,
 				     unsigned long error_code)
 {
 	if (user_mode(regs))
-		trace_page_fault_user(read_cr2(), regs, error_code);
+		trace_page_fault_user(address, regs, error_code);
 	else
-		trace_page_fault_kernel(read_cr2(), regs, error_code);
+		trace_page_fault_kernel(address, regs, error_code);
 }
 
-dotraplinkage void __kprobes
+dotraplinkage void __kprobes notrace
 trace_do_page_fault(struct pt_regs *regs, unsigned long error_code)
 {
+	/*
+	 * The exception_enter and tracepoint processing could
+	 * trigger another page faults (user space callchain
+	 * reading) and destroy the original cr2 value, so read
+	 * the faulting address now.
+	 */
+	unsigned long address = read_cr2();
 	enum ctx_state prev_state;
 
 	prev_state = exception_enter();
-	trace_page_fault_entries(regs, error_code);
-	__do_page_fault(regs, error_code);
+	trace_page_fault_entries(address, regs, error_code);
+	__do_page_fault(regs, error_code, address);
 	exception_exit(prev_state);
 }
+#endif /* CONFIG_TRACING */
diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
index 877b9a1..0149575 100644
--- a/arch/x86/net/bpf_jit.S
+++ b/arch/x86/net/bpf_jit.S
@@ -140,7 +140,7 @@ bpf_slow_path_byte_msh:
 	push	%r9;						\
 	push	SKBDATA;					\
 /* rsi already has offset */					\
-	mov	$SIZE,%ecx;	/* size */			\
+	mov	$SIZE,%edx;	/* size */			\
 	call	bpf_internal_load_pointer_neg_helper;		\
 	test	%rax,%rax;					\
 	pop	SKBDATA;					\
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 1a201ac..b97acec 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -52,6 +52,7 @@
 #include <asm/tlbflush.h>
 #include <asm/x86_init.h>
 #include <asm/rtc.h>
+#include <asm/uv/uv.h>
 
 #define EFI_DEBUG
 
@@ -1210,3 +1211,22 @@ static int __init parse_efi_cmdline(char *str)
 	return 0;
 }
 early_param("efi", parse_efi_cmdline);
+
+void __init efi_apply_memmap_quirks(void)
+{
+	/*
+	 * Once setup is done earlier, unmap the EFI memory map on mismatched
+	 * firmware/kernel architectures since there is no support for runtime
+	 * services.
+	 */
+	if (!efi_is_native()) {
+		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
+		efi_unmap_memmap();
+	}
+
+	/*
+	 * UV doesn't support the new EFI pagetable mapping yet.
+	 */
+	if (is_uv_system())
+		set_bit(EFI_OLD_MEMMAP, &x86_efi_facility);
+}
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index 7d01b8c..cc04e67 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -40,11 +40,7 @@
 #define smp_rmb()	barrier()
 #endif /* CONFIG_X86_PPRO_FENCE */
 
-#ifdef CONFIG_X86_OOSTORE
-#define smp_wmb()	wmb()
-#else /* CONFIG_X86_OOSTORE */
 #define smp_wmb()	barrier()
-#endif /* CONFIG_X86_OOSTORE */
 
 #define smp_read_barrier_depends()	read_barrier_depends()
 #define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
diff --git a/block/blk-exec.c b/block/blk-exec.c
index c68613b..dbf4502 100644
--- a/block/blk-exec.c
+++ b/block/blk-exec.c
@@ -65,7 +65,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be resued after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(q, rq, at_head, true);
+		blk_mq_insert_request(rq, at_head, true, false);
 		return;
 	}
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 66e2b69..f598f79 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -137,7 +137,7 @@ static void mq_flush_run(struct work_struct *work)
 	rq = container_of(work, struct request, mq_flush_work);
 
 	memset(&rq->csd, 0, sizeof(rq->csd));
-	blk_mq_run_request(rq, true, false);
+	blk_mq_insert_request(rq, false, true, false);
 }
 
 static bool blk_flush_queue_rq(struct request *rq)
@@ -411,7 +411,7 @@ void blk_insert_flush(struct request *rq)
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
 		if (q->mq_ops) {
-			blk_mq_run_request(rq, false, true);
+			blk_mq_insert_request(rq, false, false, true);
 		} else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
diff --git a/block/blk-mq-cpu.c b/block/blk-mq-cpu.c
index 3146bef..136ef86 100644
--- a/block/blk-mq-cpu.c
+++ b/block/blk-mq-cpu.c
@@ -11,7 +11,7 @@
 #include "blk-mq.h"
 
 static LIST_HEAD(blk_mq_cpu_notify_list);
-static DEFINE_SPINLOCK(blk_mq_cpu_notify_lock);
+static DEFINE_RAW_SPINLOCK(blk_mq_cpu_notify_lock);
 
 static int blk_mq_main_cpu_notify(struct notifier_block *self,
 				  unsigned long action, void *hcpu)
@@ -19,12 +19,12 @@ static int blk_mq_main_cpu_notify(struct notifier_block *self,
 	unsigned int cpu = (unsigned long) hcpu;
 	struct blk_mq_cpu_notifier *notify;
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 
 	list_for_each_entry(notify, &blk_mq_cpu_notify_list, list)
 		notify->notify(notify->data, action, cpu);
 
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 	return NOTIFY_OK;
 }
 
@@ -32,16 +32,16 @@ void blk_mq_register_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
 	BUG_ON(!notifier->notify);
 
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_add_tail(&notifier->list, &blk_mq_cpu_notify_list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_unregister_cpu_notifier(struct blk_mq_cpu_notifier *notifier)
 {
-	spin_lock(&blk_mq_cpu_notify_lock);
+	raw_spin_lock(&blk_mq_cpu_notify_lock);
 	list_del(&notifier->list);
-	spin_unlock(&blk_mq_cpu_notify_lock);
+	raw_spin_unlock(&blk_mq_cpu_notify_lock);
 }
 
 void blk_mq_init_cpu_notifier(struct blk_mq_cpu_notifier *notifier,
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 1fa9dd1..883f720 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -73,8 +73,8 @@ static void blk_mq_hctx_mark_pending(struct blk_mq_hw_ctx *hctx,
 	set_bit(ctx->index_hw, hctx->ctx_map);
 }
 
-static struct request *blk_mq_alloc_rq(struct blk_mq_hw_ctx *hctx, gfp_t gfp,
-				       bool reserved)
+static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
+					      gfp_t gfp, bool reserved)
 {
 	struct request *rq;
 	unsigned int tag;
@@ -193,12 +193,6 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
 	ctx->rq_dispatched[rw_is_sync(rw_flags)]++;
 }
 
-static struct request *__blk_mq_alloc_request(struct blk_mq_hw_ctx *hctx,
-					      gfp_t gfp, bool reserved)
-{
-	return blk_mq_alloc_rq(hctx, gfp, reserved);
-}
-
 static struct request *blk_mq_alloc_request_pinned(struct request_queue *q,
 						   int rw, gfp_t gfp,
 						   bool reserved)
@@ -289,38 +283,10 @@ void blk_mq_free_request(struct request *rq)
 	__blk_mq_free_request(hctx, ctx, rq);
 }
 
-static void blk_mq_bio_endio(struct request *rq, struct bio *bio, int error)
-{
-	if (error)
-		clear_bit(BIO_UPTODATE, &bio->bi_flags);
-	else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
-		error = -EIO;
-
-	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
-
-	/* don't actually finish bio if it's part of flush sequence */
-	if (!(rq->cmd_flags & REQ_FLUSH_SEQ))
-		bio_endio(bio, error);
-}
-
-void blk_mq_end_io(struct request *rq, int error)
+bool blk_mq_end_io_partial(struct request *rq, int error, unsigned int nr_bytes)
 {
-	struct bio *bio = rq->bio;
-	unsigned int bytes = 0;
-
-	trace_block_rq_complete(rq->q, rq);
-
-	while (bio) {
-		struct bio *next = bio->bi_next;
-
-		bio->bi_next = NULL;
-		bytes += bio->bi_iter.bi_size;
-		blk_mq_bio_endio(rq, bio, error);
-		bio = next;
-	}
-
-	blk_account_io_completion(rq, bytes);
+	if (blk_update_request(rq, error, blk_rq_bytes(rq)))
+		return true;
 
 	blk_account_io_done(rq);
 
@@ -328,8 +294,9 @@ void blk_mq_end_io(struct request *rq, int error)
 		rq->end_io(rq, error);
 	else
 		blk_mq_free_request(rq);
+	return false;
 }
-EXPORT_SYMBOL(blk_mq_end_io);
+EXPORT_SYMBOL(blk_mq_end_io_partial);
 
 static void __blk_mq_complete_request_remote(void *data)
 {
@@ -730,60 +697,27 @@ static void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx,
 	blk_mq_add_timer(rq);
 }
 
-void blk_mq_insert_request(struct request_queue *q, struct request *rq,
-			   bool at_head, bool run_queue)
+void blk_mq_insert_request(struct request *rq, bool at_head, bool run_queue,
+		bool async)
 {
+	struct request_queue *q = rq->q;
 	struct blk_mq_hw_ctx *hctx;
-	struct blk_mq_ctx *ctx, *current_ctx;
+	struct blk_mq_ctx *ctx = rq->mq_ctx, *current_ctx;
+
+	current_ctx = blk_mq_get_ctx(q);
+	if (!cpu_online(ctx->cpu))
+		rq->mq_ctx = ctx = current_ctx;
 
-	ctx = rq->mq_ctx;
 	hctx = q->mq_ops->map_queue(q, ctx->cpu);
 
-	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA)) {
+	if (rq->cmd_flags & (REQ_FLUSH | REQ_FUA) &&
+	    !(rq->cmd_flags & (REQ_FLUSH_SEQ))) {
 		blk_insert_flush(rq);
 	} else {
-		current_ctx = blk_mq_get_ctx(q);
-
-		if (!cpu_online(ctx->cpu)) {
-			ctx = current_ctx;
-			hctx = q->mq_ops->map_queue(q, ctx->cpu);
-			rq->mq_ctx = ctx;
-		}
 		spin_lock(&ctx->lock);
 		__blk_mq_insert_request(hctx, rq, at_head);
 		spin_unlock(&ctx->lock);
-
-		blk_mq_put_ctx(current_ctx);
-	}
-
-	if (run_queue)
-		__blk_mq_run_hw_queue(hctx);
-}
-EXPORT_SYMBOL(blk_mq_insert_request);
-
-/*
- * This is a special version of blk_mq_insert_request to bypass FLUSH request
- * check. Should only be used internally.
- */ -void blk_mq_run_request(struct request *rq, bool run_queue, bool async) -{ - struct request_queue *q = rq->q; - struct blk_mq_hw_ctx *hctx; - struct blk_mq_ctx *ctx, *current_ctx; - - current_ctx = blk_mq_get_ctx(q); - - ctx = rq->mq_ctx; - if (!cpu_online(ctx->cpu)) { - ctx = current_ctx; - rq->mq_ctx = ctx; } - hctx = q->mq_ops->map_queue(q, ctx->cpu); - - /* ctx->cpu might be offline */ - spin_lock(&ctx->lock); - __blk_mq_insert_request(hctx, rq, false); - spin_unlock(&ctx->lock); blk_mq_put_ctx(current_ctx); @@ -926,6 +860,8 @@ static void blk_mq_make_request(struct request_queue *q, struct bio *bio) ctx = blk_mq_get_ctx(q); hctx = q->mq_ops->map_queue(q, ctx->cpu); + if (is_sync) + rw |= REQ_SYNC; trace_block_getrq(q, bio, rw); rq = __blk_mq_alloc_request(hctx, GFP_ATOMIC, false); if (likely(rq)) diff --git a/block/blk-mq.h b/block/blk-mq.h index ed0035c..72beba1 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -23,7 +23,6 @@ struct blk_mq_ctx { }; void __blk_mq_complete_request(struct request *rq); -void blk_mq_run_request(struct request *rq, bool run_queue, bool async); void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async); void blk_mq_init_flush(struct request_queue *q); void blk_mq_drain_queue(struct request_queue *q); diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index 959d41a..d7d32c2 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -67,6 +67,8 @@ enum ec_command { #define ACPI_EC_DELAY 500 /* Wait 500ms max. during EC ops */ #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ +#define ACPI_EC_CLEAR_MAX 100 /* Maximum number of events to query + * when trying to clear the EC */ enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ @@ -116,6 +118,7 @@ EXPORT_SYMBOL(first_ec); static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ +static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ /* -------------------------------------------------------------------------- Transaction Management @@ -440,6 +443,29 @@ acpi_handle ec_get_handle(void) EXPORT_SYMBOL(ec_get_handle); +static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data); + +/* + * Clears stale _Q events that might have accumulated in the EC. + * Run with locked ec mutex. 
+ */ +static void acpi_ec_clear(struct acpi_ec *ec) +{ + int i, status; + u8 value = 0; + + for (i = 0; i < ACPI_EC_CLEAR_MAX; i++) { + status = acpi_ec_query_unlocked(ec, &value); + if (status || !value) + break; + } + + if (unlikely(i == ACPI_EC_CLEAR_MAX)) + pr_warn("Warning: Maximum of %d stale EC events cleared\n", i); + else + pr_info("%d stale EC events cleared\n", i); +} + void acpi_ec_block_transactions(void) { struct acpi_ec *ec = first_ec; @@ -463,6 +489,10 @@ void acpi_ec_unblock_transactions(void) mutex_lock(&ec->mutex); /* Allow transactions to be carried out again */ clear_bit(EC_FLAGS_BLOCKED, &ec->flags); + + if (EC_FLAGS_CLEAR_ON_RESUME) + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); } @@ -821,6 +851,13 @@ static int acpi_ec_add(struct acpi_device *device) /* EC is fully operational, allow queries */ clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); + + /* Clear stale _Q events if hardware might require that */ + if (EC_FLAGS_CLEAR_ON_RESUME) { + mutex_lock(&ec->mutex); + acpi_ec_clear(ec); + mutex_unlock(&ec->mutex); + } return ret; } @@ -922,6 +959,30 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) return 0; } +/* + * On some hardware it is necessary to clear events accumulated by the EC during + * sleep. These ECs stop reporting GPEs until they are manually polled, if too + * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) + * + * https://bugzilla.kernel.org/show_bug.cgi?id=44161 + * + * Ideally, the EC should also be instructed NOT to accumulate events during + * sleep (which Windows seems to do somehow), but the interface to control this + * behaviour is not known at this time. + * + * Models known to be affected are Samsung 530Uxx/535Uxx/540Uxx/550Pxx/900Xxx, + * however it is very likely that other Samsung models are affected. + * + * On systems which don't accumulate _Q events during sleep, this extra check + * should be harmless. 
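The acpi_ec_clear() hunk above bounds its drain loop so firmware that never stops reporting events cannot stall boot or resume. A minimal standalone sketch of that capped-drain pattern follows; all names ending in _example are hypothetical:

#include <linux/types.h>

/* Poll a query callback until it reports no pending event, an error
 * occurs, or a fixed cap is hit; returns how many stale events were
 * consumed. Mirrors the shape of acpi_ec_clear() above.
 */
#define EC_DRAIN_CAP_EXAMPLE 100

static int ec_drain_events_example(int (*query)(u8 *data))
{
	u8 value = 0;
	int i;

	for (i = 0; i < EC_DRAIN_CAP_EXAMPLE; i++) {
		if (query(&value) || !value)
			break;	/* query failed or no event pending */
	}
	return i;
}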
+ */ +static int ec_clear_on_resume(const struct dmi_system_id *id) +{ + pr_debug("Detected system needing EC poll on resume.\n"); + EC_FLAGS_CLEAR_ON_RESUME = 1; + return 0; +} + static struct dmi_system_id ec_dmi_table[] __initdata = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -965,6 +1026,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, + { + ec_clear_on_resume, "Samsung hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, {}, }; diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c index b7201fc..0bdacc5 100644 --- a/drivers/acpi/resource.c +++ b/drivers/acpi/resource.c @@ -77,18 +77,24 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_MEMORY24: memory24 = &ares->data.memory24; + if (!memory24->address_length) + return false; acpi_dev_get_memresource(res, memory24->minimum, memory24->address_length, memory24->write_protect); break; case ACPI_RESOURCE_TYPE_MEMORY32: memory32 = &ares->data.memory32; + if (!memory32->address_length) + return false; acpi_dev_get_memresource(res, memory32->minimum, memory32->address_length, memory32->write_protect); break; case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: fixed_memory32 = &ares->data.fixed_memory32; + if (!fixed_memory32->address_length) + return false; acpi_dev_get_memresource(res, fixed_memory32->address, fixed_memory32->address_length, fixed_memory32->write_protect); @@ -144,12 +150,16 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res) switch (ares->type) { case ACPI_RESOURCE_TYPE_IO: io = &ares->data.io; + if (!io->address_length) + return false; acpi_dev_get_ioresource(res, io->minimum, io->address_length, io->io_decode); break; case ACPI_RESOURCE_TYPE_FIXED_IO: fixed_io = &ares->data.fixed_io; + if (!fixed_io->address_length) + return false; acpi_dev_get_ioresource(res, fixed_io->address, fixed_io->address_length, ACPI_DECODE_10); diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c index b718806..c40fb2e 100644 --- a/drivers/acpi/sleep.c +++ b/drivers/acpi/sleep.c @@ -71,6 +71,17 @@ static int acpi_sleep_prepare(u32 acpi_state) return 0; } +static bool acpi_sleep_state_supported(u8 sleep_state) +{ + acpi_status status; + u8 type_a, type_b; + + status = acpi_get_sleep_type_data(sleep_state, &type_a, &type_b); + return ACPI_SUCCESS(status) && (!acpi_gbl_reduced_hardware + || (acpi_gbl_FADT.sleep_control.address + && acpi_gbl_FADT.sleep_status.address)); +} + #ifdef CONFIG_ACPI_SLEEP static u32 acpi_target_sleep_state = ACPI_STATE_S0; @@ -604,15 +615,9 @@ static void acpi_sleep_suspend_setup(void) { int i; - for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) { - acpi_status status; - u8 type_a, type_b; - - status = acpi_get_sleep_type_data(i, &type_a, &type_b); - if (ACPI_SUCCESS(status)) { + for (i = ACPI_STATE_S1; i < ACPI_STATE_S4; i++) + if (acpi_sleep_state_supported(i)) sleep_states[i] = 1; - } - } suspend_set_ops(old_suspend_ordering ? 
&acpi_suspend_ops_old : &acpi_suspend_ops); @@ -740,11 +745,7 @@ static const struct platform_hibernation_ops acpi_hibernation_ops_old = { static void acpi_sleep_hibernate_setup(void) { - acpi_status status; - u8 type_a, type_b; - - status = acpi_get_sleep_type_data(ACPI_STATE_S4, &type_a, &type_b); - if (ACPI_FAILURE(status)) + if (!acpi_sleep_state_supported(ACPI_STATE_S4)) return; hibernation_set_ops(old_suspend_ordering ? @@ -793,8 +794,6 @@ static void acpi_power_off(void) int __init acpi_sleep_init(void) { - acpi_status status; - u8 type_a, type_b; char supported[ACPI_S_STATE_COUNT * 3 + 1]; char *pos = supported; int i; @@ -806,8 +805,7 @@ int __init acpi_sleep_init(void) acpi_sleep_suspend_setup(); acpi_sleep_hibernate_setup(); - status = acpi_get_sleep_type_data(ACPI_STATE_S5, &type_a, &type_b); - if (ACPI_SUCCESS(status)) { + if (acpi_sleep_state_supported(ACPI_STATE_S5)) { sleep_states[ACPI_STATE_S5] = 1; pm_power_off_prepare = acpi_power_off_prepare; pm_power_off = acpi_power_off; diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 1a3dbd1..8cb2522 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -4175,6 +4175,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, + { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, /* Blacklist entries taken from Silicon Image 3124/3132 Windows driver .inf file - also several Linux problem reports */ @@ -4224,7 +4225,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* devices that don't properly handle queued TRIM commands */ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, - { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, + { "Crucial_CT???M500SSD*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, }, /* * Some WD SATA-I drives spin up and down erratically when the link diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h index b52e9a6..54174cb 100644 --- a/drivers/block/mtip32xx/mtip32xx.h +++ b/drivers/block/mtip32xx/mtip32xx.h @@ -53,7 +53,7 @@ #define MTIP_FTL_REBUILD_TIMEOUT_MS 2400000 /* unaligned IO handling */ -#define MTIP_MAX_UNALIGNED_SLOTS 8 +#define MTIP_MAX_UNALIGNED_SLOTS 2 /* Macro to extract the tag bit number from a tag value. 
*/ #define MTIP_TAG_BIT(tag) (tag & 0x1F) diff --git a/drivers/clk/shmobile/clk-rcar-gen2.c b/drivers/clk/shmobile/clk-rcar-gen2.c index dd272a0..99c27b1 100644 --- a/drivers/clk/shmobile/clk-rcar-gen2.c +++ b/drivers/clk/shmobile/clk-rcar-gen2.c @@ -26,6 +26,8 @@ struct rcar_gen2_cpg { void __iomem *reg; }; +#define CPG_FRQCRB 0x00000004 +#define CPG_FRQCRB_KICK BIT(31) #define CPG_SDCKCR 0x00000074 #define CPG_PLL0CR 0x000000d8 #define CPG_FRQCRC 0x000000e0 @@ -45,6 +47,7 @@ struct rcar_gen2_cpg { struct cpg_z_clk { struct clk_hw hw; void __iomem *reg; + void __iomem *kick_reg; }; #define to_z_clk(_hw) container_of(_hw, struct cpg_z_clk, hw) @@ -83,17 +86,45 @@ static int cpg_z_clk_set_rate(struct clk_hw *hw, unsigned long rate, { struct cpg_z_clk *zclk = to_z_clk(hw); unsigned int mult; - u32 val; + u32 val, kick; + unsigned int i; mult = div_u64((u64)rate * 32, parent_rate); mult = clamp(mult, 1U, 32U); + if (clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK) + return -EBUSY; + val = clk_readl(zclk->reg); val &= ~CPG_FRQCRC_ZFC_MASK; val |= (32 - mult) << CPG_FRQCRC_ZFC_SHIFT; clk_writel(val, zclk->reg); - return 0; + /* + * Set KICK bit in FRQCRB to update hardware setting and wait for + * clock change completion. + */ + kick = clk_readl(zclk->kick_reg); + kick |= CPG_FRQCRB_KICK; + clk_writel(kick, zclk->kick_reg); + + /* + * Note: There is no HW information about the worst case latency. + * + * Using experimental measurements, it seems that no more than + * ~10 iterations are needed, independently of the CPU rate. + * Since this value might be dependant of external xtal rate, pll1 + * rate or even the other emulation clocks rate, use 1000 as a + * "super" safe value. + */ + for (i = 1000; i; i--) { + if (!(clk_readl(zclk->kick_reg) & CPG_FRQCRB_KICK)) + return 0; + + cpu_relax(); + } + + return -ETIMEDOUT; } static const struct clk_ops cpg_z_clk_ops = { @@ -120,6 +151,7 @@ static struct clk * __init cpg_z_clk_register(struct rcar_gen2_cpg *cpg) init.num_parents = 1; zclk->reg = cpg->reg + CPG_FRQCRC; + zclk->kick_reg = cpg->reg + CPG_FRQCRB; zclk->hw.init = &init; clk = clk_register(NULL, &zclk->hw); diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c index cb003a6..199b52b 100644 --- a/drivers/cpufreq/cpufreq.c +++ b/drivers/cpufreq/cpufreq.c @@ -1109,12 +1109,27 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, goto err_set_policy_cpu; } + /* related cpus should atleast have policy->cpus */ + cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); + + /* + * affected cpus must always be the one, which are online. We aren't + * managing offline cpus here. + */ + cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); + + if (!frozen) { + policy->user_policy.min = policy->min; + policy->user_policy.max = policy->max; + } + + down_write(&policy->rwsem); write_lock_irqsave(&cpufreq_driver_lock, flags); for_each_cpu(j, policy->cpus) per_cpu(cpufreq_cpu_data, j) = policy; write_unlock_irqrestore(&cpufreq_driver_lock, flags); - if (cpufreq_driver->get) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { policy->cur = cpufreq_driver->get(policy->cpu); if (!policy->cur) { pr_err("%s: ->get() failed\n", __func__); @@ -1162,20 +1177,6 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, } } - /* related cpus should atleast have policy->cpus */ - cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); - - /* - * affected cpus must always be the one, which are online. 
We aren't - * managing offline cpus here. - */ - cpumask_and(policy->cpus, policy->cpus, cpu_online_mask); - - if (!frozen) { - policy->user_policy.min = policy->min; - policy->user_policy.max = policy->max; - } - blocking_notifier_call_chain(&cpufreq_policy_notifier_list, CPUFREQ_START, policy); @@ -1206,6 +1207,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif, policy->user_policy.policy = policy->policy; policy->user_policy.governor = policy->governor; } + up_write(&policy->rwsem); kobject_uevent(&policy->kobj, KOBJ_ADD); up_read(&cpufreq_rwsem); @@ -1546,23 +1548,16 @@ static unsigned int __cpufreq_get(unsigned int cpu) */ unsigned int cpufreq_get(unsigned int cpu) { - struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu); + struct cpufreq_policy *policy = cpufreq_cpu_get(cpu); unsigned int ret_freq = 0; - if (cpufreq_disabled() || !cpufreq_driver) - return -ENOENT; - - BUG_ON(!policy); - - if (!down_read_trylock(&cpufreq_rwsem)) - return 0; - - down_read(&policy->rwsem); - - ret_freq = __cpufreq_get(cpu); + if (policy) { + down_read(&policy->rwsem); + ret_freq = __cpufreq_get(cpu); + up_read(&policy->rwsem); - up_read(&policy->rwsem); - up_read(&cpufreq_rwsem); + cpufreq_cpu_put(policy); + } return ret_freq; } @@ -2148,7 +2143,7 @@ int cpufreq_update_policy(unsigned int cpu) * BIOS might change freq behind our back * -> ask driver for current freq and notify governors about a change */ - if (cpufreq_driver->get) { + if (cpufreq_driver->get && !cpufreq_driver->setpolicy) { new_policy.cur = cpufreq_driver->get(cpu); if (!policy->cur) { pr_debug("Driver did not initialize current freq"); diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index de4aa40..2c6d5e1 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -916,7 +916,7 @@ static int lookup_existing_device(struct device *dev, void *data) old->config_rom_retries = 0; fw_notice(card, "rediscovered device %s\n", dev_name(dev)); - PREPARE_DELAYED_WORK(&old->work, fw_device_update); + old->workfn = fw_device_update; fw_schedule_device_work(old, 0); if (current_node == card->root_node) @@ -1075,7 +1075,7 @@ static void fw_device_init(struct work_struct *work) if (atomic_cmpxchg(&device->state, FW_DEVICE_INITIALIZING, FW_DEVICE_RUNNING) == FW_DEVICE_GONE) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); } else { fw_notice(card, "created device %s: GUID %08x%08x, S%d00\n", @@ -1196,13 +1196,20 @@ static void fw_device_refresh(struct work_struct *work) dev_name(&device->device), fw_rcode_string(ret)); gone: atomic_set(&device->state, FW_DEVICE_GONE); - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, SHUTDOWN_DELAY); out: if (node_id == card->root_node->node_id) fw_schedule_bm_work(card, 0); } +static void fw_device_workfn(struct work_struct *work) +{ + struct fw_device *device = container_of(to_delayed_work(work), + struct fw_device, work); + device->workfn(work); +} + void fw_node_event(struct fw_card *card, struct fw_node *node, int event) { struct fw_device *device; @@ -1252,7 +1259,8 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * power-up after getting plugged in. We schedule the * first config rom scan half a second after bus reset. 
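The firewire hunks above replace PREPARE_DELAYED_WORK() with a stored function pointer dispatched through a single trampoline, since changing a work item's handler while it may already be queued is racy. A minimal sketch of the pattern, assuming only the standard workqueue API; names ending in _example are hypothetical:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct fw_obj_example {
	work_func_t workfn;		/* current handler, set before scheduling */
	struct delayed_work work;	/* always initialized to the trampoline */
};

static void fw_obj_trampoline_example(struct work_struct *work)
{
	struct fw_obj_example *obj = container_of(to_delayed_work(work),
						  struct fw_obj_example, work);
	obj->workfn(work);		/* dispatch to whatever handler is current */
}

/* Setup once: INIT_DELAYED_WORK(&obj->work, fw_obj_trampoline_example);
 * later code simply assigns obj->workfn and schedules the work.
 */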
*/ - INIT_DELAYED_WORK(&device->work, fw_device_init); + device->workfn = fw_device_init; + INIT_DELAYED_WORK(&device->work, fw_device_workfn); fw_schedule_device_work(device, INITIAL_DELAY); break; @@ -1268,7 +1276,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) if (atomic_cmpxchg(&device->state, FW_DEVICE_RUNNING, FW_DEVICE_INITIALIZING) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_refresh); + device->workfn = fw_device_refresh; fw_schedule_device_work(device, device->is_local ? 0 : INITIAL_DELAY); } @@ -1283,7 +1291,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) smp_wmb(); /* update node_id before generation */ device->generation = card->generation; if (atomic_read(&device->state) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_update); + device->workfn = fw_device_update; fw_schedule_device_work(device, 0); } break; @@ -1308,7 +1316,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) device = node->data; if (atomic_xchg(&device->state, FW_DEVICE_GONE) == FW_DEVICE_RUNNING) { - PREPARE_DELAYED_WORK(&device->work, fw_device_shutdown); + device->workfn = fw_device_shutdown; fw_schedule_device_work(device, list_empty(&card->link) ? 0 : SHUTDOWN_DELAY); } diff --git a/drivers/firewire/net.c b/drivers/firewire/net.c index 6b89598..4af0a7b 100644 --- a/drivers/firewire/net.c +++ b/drivers/firewire/net.c @@ -929,8 +929,6 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, if (rcode == RCODE_COMPLETE) { fwnet_transmit_packet_done(ptask); } else { - fwnet_transmit_packet_failed(ptask); - if (printk_timed_ratelimit(&j, 1000) || rcode != last_rcode) { dev_err(&ptask->dev->netdev->dev, "fwnet_write_complete failed: %x (skipped %d)\n", @@ -938,8 +936,10 @@ static void fwnet_write_complete(struct fw_card *card, int rcode, errors_skipped = 0; last_rcode = rcode; - } else + } else { errors_skipped++; + } + fwnet_transmit_packet_failed(ptask); } } diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c index 6f74d8d..8db6632 100644 --- a/drivers/firewire/ohci.c +++ b/drivers/firewire/ohci.c @@ -290,7 +290,6 @@ static char ohci_driver_name[] = KBUILD_MODNAME; #define QUIRK_NO_MSI 0x10 #define QUIRK_TI_SLLZ059 0x20 #define QUIRK_IR_WAKE 0x40 -#define QUIRK_PHY_LCTRL_TIMEOUT 0x80 /* In case of multiple matches in ohci_quirks[], only the first one is used. */ static const struct { @@ -303,10 +302,7 @@ static const struct { QUIRK_BE_HEADERS}, {PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6, - QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI}, - - {PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID, - QUIRK_PHY_LCTRL_TIMEOUT}, + QUIRK_NO_MSI}, {PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID, QUIRK_RESET_PACKET}, @@ -353,7 +349,6 @@ MODULE_PARM_DESC(quirks, "Chip quirks (default = 0" ", disable MSI = " __stringify(QUIRK_NO_MSI) ", TI SLLZ059 erratum = " __stringify(QUIRK_TI_SLLZ059) ", IR wake unreliable = " __stringify(QUIRK_IR_WAKE) - ", phy LCtrl timeout = " __stringify(QUIRK_PHY_LCTRL_TIMEOUT) ")"); #define OHCI_PARAM_DEBUG_AT_AR 1 @@ -2299,9 +2294,6 @@ static int ohci_enable(struct fw_card *card, * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but * cannot actually use the phy at that time. These need tens of * millisecods pause between LPS write and first phy access too. - * - * But do not wait for 50msec on Agere/LSI cards. Their phy - * arbitration state machine may time out during such a long wait. 
*/ reg_write(ohci, OHCI1394_HCControlSet, @@ -2309,11 +2301,8 @@ static int ohci_enable(struct fw_card *card, OHCI1394_HCControl_postedWriteEnable); flush_writes(ohci); - if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT)) + for (lps = 0, i = 0; !lps && i < 3; i++) { msleep(50); - - for (lps = 0, i = 0; !lps && i < 150; i++) { - msleep(1); lps = reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS; } diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c index 281029d..7aef911 100644 --- a/drivers/firewire/sbp2.c +++ b/drivers/firewire/sbp2.c @@ -146,6 +146,7 @@ struct sbp2_logical_unit { */ int generation; int retries; + work_func_t workfn; struct delayed_work work; bool has_sdev; bool blocked; @@ -864,7 +865,7 @@ static void sbp2_login(struct work_struct *work) /* set appropriate retry limit(s) in BUSY_TIMEOUT register */ sbp2_set_busy_timeout(lu); - PREPARE_DELAYED_WORK(&lu->work, sbp2_reconnect); + lu->workfn = sbp2_reconnect; sbp2_agent_reset(lu); /* This was a re-login. */ @@ -918,7 +919,7 @@ static void sbp2_login(struct work_struct *work) * If a bus reset happened, sbp2_update will have requeued * lu->work already. Reset the work from reconnect to login. */ - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } static void sbp2_reconnect(struct work_struct *work) @@ -952,7 +953,7 @@ static void sbp2_reconnect(struct work_struct *work) lu->retries++ >= 5) { dev_err(tgt_dev(tgt), "failed to reconnect\n"); lu->retries = 0; - PREPARE_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; } sbp2_queue_work(lu, DIV_ROUND_UP(HZ, 5)); @@ -972,6 +973,13 @@ static void sbp2_reconnect(struct work_struct *work) sbp2_conditionally_unblock(lu); } +static void sbp2_lu_workfn(struct work_struct *work) +{ + struct sbp2_logical_unit *lu = container_of(to_delayed_work(work), + struct sbp2_logical_unit, work); + lu->workfn(work); +} + static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) { struct sbp2_logical_unit *lu; @@ -998,7 +1006,8 @@ static int sbp2_add_logical_unit(struct sbp2_target *tgt, int lun_entry) lu->blocked = false; ++tgt->dont_block; INIT_LIST_HEAD(&lu->orb_list); - INIT_DELAYED_WORK(&lu->work, sbp2_login); + lu->workfn = sbp2_login; + INIT_DELAYED_WORK(&lu->work, sbp2_lu_workfn); list_add_tail(&lu->link, &tgt->lu_list); return 0; diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index acf3a36..32982da 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -68,15 +68,7 @@ void __armada_drm_queue_unref_work(struct drm_device *dev, { struct armada_private *priv = dev->dev_private; - /* - * Yes, we really must jump through these hoops just to store a - * _pointer_ to something into the kfifo. This is utterly insane - * and idiotic, because it kfifo requires the _data_ pointed to by - * the pointer const, not the pointer itself. Not only that, but - * you have to pass a pointer _to_ the pointer you want stored. 
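The armada hunk above becomes possible because kfifo_put() was reworked to take the element value directly instead of a pointer to it. A minimal sketch of a pointer fifo under the new API; the _example names are hypothetical:

#include <linux/bug.h>
#include <linux/kfifo.h>

struct drm_framebuffer;	/* only pointers are stored, the type can stay opaque */

static DEFINE_KFIFO(fb_unref_example, struct drm_framebuffer *, 8);

static void queue_unref_example(struct drm_framebuffer *fb)
{
	/* kfifo_put() now returns the number of elements stored: 0 means full */
	WARN_ON(!kfifo_put(&fb_unref_example, fb));
}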
- */ - const struct drm_framebuffer *silly_api_alert = fb; - WARN_ON(!kfifo_put(&priv->fb_unref, &silly_api_alert)); + WARN_ON(!kfifo_put(&priv->fb_unref, fb)); schedule_work(&priv->fb_unref_work); } diff --git a/drivers/gpu/drm/bochs/Kconfig b/drivers/gpu/drm/bochs/Kconfig index c8fcf12..5f8b0c2 100644 --- a/drivers/gpu/drm/bochs/Kconfig +++ b/drivers/gpu/drm/bochs/Kconfig @@ -2,6 +2,7 @@ config DRM_BOCHS tristate "DRM Support for bochs dispi vga interface (qemu stdvga)" depends on DRM && PCI select DRM_KMS_HELPER + select DRM_KMS_FB_HELPER select FB_SYS_FILLRECT select FB_SYS_COPYAREA select FB_SYS_IMAGEBLIT diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c index 04f1f02..ec7bb0f 100644 --- a/drivers/gpu/drm/i915/i915_drv.c +++ b/drivers/gpu/drm/i915/i915_drv.c @@ -403,7 +403,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist); void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; - struct pci_dev *pch; + struct pci_dev *pch = NULL; /* In all current cases, num_pipes is equivalent to the PCH_NOP setting * (which really amounts to a PCH but no South Display). @@ -424,12 +424,9 @@ void intel_detect_pch(struct drm_device *dev) * all the ISA bridge devices and check for the first match, instead * of only checking the first one. */ - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); - while (pch) { - struct pci_dev *curr = pch; + while ((pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, pch))) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { - unsigned short id; - id = pch->device & INTEL_PCH_DEVICE_ID_MASK; + unsigned short id = pch->device & INTEL_PCH_DEVICE_ID_MASK; dev_priv->pch_id = id; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { @@ -461,18 +458,16 @@ void intel_detect_pch(struct drm_device *dev) DRM_DEBUG_KMS("Found LynxPoint LP PCH\n"); WARN_ON(!IS_HASWELL(dev)); WARN_ON(!IS_ULT(dev)); - } else { - goto check_next; - } - pci_dev_put(pch); + } else + continue; + break; } -check_next: - pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, curr); - pci_dev_put(curr); } if (!pch) - DRM_DEBUG_KMS("No PCH found?\n"); + DRM_DEBUG_KMS("No PCH found.\n"); + + pci_dev_put(pch); } bool i915_semaphore_is_enabled(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c index 1a24e84..d58b4e2 100644 --- a/drivers/gpu/drm/i915/i915_gem_stolen.c +++ b/drivers/gpu/drm/i915/i915_gem_stolen.c @@ -82,9 +82,22 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev) r = devm_request_mem_region(dev->dev, base, dev_priv->gtt.stolen_size, "Graphics Stolen Memory"); if (r == NULL) { - DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", - base, base + (uint32_t)dev_priv->gtt.stolen_size); - base = 0; + /* + * One more attempt but this time requesting region from + * base + 1, as we have seen that this resolves the region + * conflict with the PCI Bus. + * This is a BIOS w/a: Some BIOS wrap stolen in the root + * PCI bus, but have an off-by-one error. Hence retry the + * reservation starting from 1 instead of 0. 
+ */ + r = devm_request_mem_region(dev->dev, base + 1, + dev_priv->gtt.stolen_size - 1, + "Graphics Stolen Memory"); + if (r == NULL) { + DRM_ERROR("conflict detected with stolen region: [0x%08x - 0x%08x]\n", + base, base + (uint32_t)dev_priv->gtt.stolen_size); + base = 0; + } } return base; diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4c16728..9b8a7c7 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -1092,12 +1092,12 @@ static void assert_cursor(struct drm_i915_private *dev_priv, struct drm_device *dev = dev_priv->dev; bool cur_state; - if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev)) - cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; - else if (IS_845G(dev) || IS_I865G(dev)) + if (IS_845G(dev) || IS_I865G(dev)) cur_state = I915_READ(_CURACNTR) & CURSOR_ENABLE; - else + else if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) cur_state = I915_READ(CURCNTR(pipe)) & CURSOR_MODE; + else + cur_state = I915_READ(CURCNTR_IVB(pipe)) & CURSOR_MODE; WARN(cur_state != state, "cursor on pipe %c assertion failure (expected %s, current %s)\n", diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c index 6db0d9d..ee3181e 100644 --- a/drivers/gpu/drm/i915/intel_hdmi.c +++ b/drivers/gpu/drm/i915/intel_hdmi.c @@ -845,7 +845,7 @@ static int hdmi_portclock_limit(struct intel_hdmi *hdmi) { struct drm_device *dev = intel_hdmi_to_dev(hdmi); - if (IS_G4X(dev)) + if (!hdmi->has_hdmi_sink || IS_G4X(dev)) return 165000; else if (IS_HASWELL(dev) || INTEL_INFO(dev)->gen >= 8) return 300000; @@ -899,8 +899,8 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder, * outputs. We also need to check that the higher clock still fits * within limits. 
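The intel_hdmi hunks above gate both the higher TMDS clock and the 12 bpc deep-color path on the sink actually identifying itself as HDMI. A hedged sketch of the limit selection; the 165/300 MHz figures come from the hunk, while the 225 MHz fallback and the function name are assumptions:

static int port_clock_limit_khz_example(bool has_hdmi_sink, bool is_g4x,
					bool is_hsw_or_gen8)
{
	if (!has_hdmi_sink || is_g4x)
		return 165000;	/* DVI sinks and G4x: single-link limit */
	if (is_hsw_or_gen8)
		return 300000;	/* per the hunk above */
	return 225000;		/* assumed default for other platforms */
}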
*/ - if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit - && HAS_PCH_SPLIT(dev)) { + if (pipe_config->pipe_bpp > 8*3 && intel_hdmi->has_hdmi_sink && + clock_12bpc <= portclock_limit && HAS_PCH_SPLIT(dev)) { DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n"); desired_bpp = 12*3; diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c index 350de35..079ea38 100644 --- a/drivers/gpu/drm/i915/intel_panel.c +++ b/drivers/gpu/drm/i915/intel_panel.c @@ -698,7 +698,7 @@ static void i9xx_enable_backlight(struct intel_connector *connector) freq /= 0xff; ctl = freq << 17; - if (IS_GEN2(dev) && panel->backlight.combination_mode) + if (panel->backlight.combination_mode) ctl |= BLM_LEGACY_MODE; if (IS_PINEVIEW(dev) && panel->backlight.active_low_pwm) ctl |= BLM_POLARITY_PNV; @@ -979,7 +979,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector) ctl = I915_READ(BLC_PWM_CTL); - if (IS_GEN2(dev)) + if (IS_GEN2(dev) || IS_I915GM(dev) || IS_I945GM(dev)) panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE; if (IS_PINEVIEW(dev)) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d77cc81..e1fc35a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -3493,6 +3493,8 @@ static void valleyview_setup_pctx(struct drm_device *dev) u32 pcbr; int pctx_size = 24*1024; + WARN_ON(!mutex_is_locked(&dev->struct_mutex)); + pcbr = I915_READ(VLV_PCBR); if (pcbr) { /* BIOS set it up already, grab the pre-alloc'd space */ @@ -3542,8 +3544,6 @@ static void valleyview_enable_rps(struct drm_device *dev) I915_WRITE(GTFIFODBG, gtfifodbg); } - valleyview_setup_pctx(dev); - /* If VLV, Forcewake all wells, else re-direct to regular path */ gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL); @@ -4395,6 +4395,8 @@ void intel_enable_gt_powersave(struct drm_device *dev) ironlake_enable_rc6(dev); intel_init_emon(dev); } else if (IS_GEN6(dev) || IS_GEN7(dev)) { + if (IS_VALLEYVIEW(dev)) + valleyview_setup_pctx(dev); /* * PCU communication is slow and this doesn't need to be * done at any specific time, so do this out of our fast path diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c index 2cec2ab..607dc14 100644 --- a/drivers/gpu/drm/radeon/atombios_encoders.c +++ b/drivers/gpu/drm/radeon/atombios_encoders.c @@ -1314,7 +1314,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t } if (is_dp) args.v5.ucLaneNum = dp_lane_count; - else if (radeon_encoder->pixel_clock > 165000) + else if (radeon_dig_monitor_is_duallink(encoder, radeon_encoder->pixel_clock)) args.v5.ucLaneNum = 8; else args.v5.ucLaneNum = 4; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index e6419ca..bbb1784 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -3046,7 +3046,7 @@ static u32 cik_create_bitmask(u32 bit_width) } /** - * cik_select_se_sh - select which SE, SH to address + * cik_get_rb_disabled - computes the mask of disabled RBs * * @rdev: radeon_device pointer * @max_rb_num: max RBs (render backends) for the asic @@ -4134,8 +4134,11 @@ static void cik_cp_compute_enable(struct radeon_device *rdev, bool enable) { if (enable) WREG32(CP_MEC_CNTL, 0); - else + else { WREG32(CP_MEC_CNTL, (MEC_ME1_HALT | MEC_ME2_HALT)); + rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false; + } udelay(50); } @@ -7902,7 +7905,8 @@ int cik_resume(struct radeon_device *rdev) 
/* init golden registers */ cik_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cik_startup(rdev); diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c index 1ecb3f1..94626ea 100644 --- a/drivers/gpu/drm/radeon/cik_sdma.c +++ b/drivers/gpu/drm/radeon/cik_sdma.c @@ -264,6 +264,8 @@ static void cik_sdma_gfx_stop(struct radeon_device *rdev) WREG32(SDMA0_GFX_RB_CNTL + reg_offset, rb_cntl); WREG32(SDMA0_GFX_IB_CNTL + reg_offset, 0); } + rdev->ring[R600_RING_TYPE_DMA_INDEX].ready = false; + rdev->ring[CAYMAN_RING_TYPE_DMA1_INDEX].ready = false; } /** @@ -291,6 +293,11 @@ void cik_sdma_enable(struct radeon_device *rdev, bool enable) u32 me_cntl, reg_offset; int i; + if (enable == false) { + cik_sdma_gfx_stop(rdev); + cik_sdma_rlc_stop(rdev); + } + for (i = 0; i < 2; i++) { if (i == 0) reg_offset = SDMA0_REGISTER_OFFSET; @@ -420,10 +427,6 @@ static int cik_sdma_load_microcode(struct radeon_device *rdev) if (!rdev->sdma_fw) return -EINVAL; - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); - /* halt the MEs */ cik_sdma_enable(rdev, false); @@ -492,9 +495,6 @@ int cik_sdma_resume(struct radeon_device *rdev) */ void cik_sdma_fini(struct radeon_device *rdev) { - /* stop the gfx rings and rlc compute queues */ - cik_sdma_gfx_stop(rdev); - cik_sdma_rlc_stop(rdev); /* halt the MEs */ cik_sdma_enable(rdev, false); radeon_ring_fini(rdev, &rdev->ring[R600_RING_TYPE_DMA_INDEX]); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 8a2c010..27b0ff1 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5299,7 +5299,8 @@ int evergreen_resume(struct radeon_device *rdev) /* init golden registers */ evergreen_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = evergreen_startup(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_smc.h b/drivers/gpu/drm/radeon/evergreen_smc.h index 76ada8c..3a03ba3 100644 --- a/drivers/gpu/drm/radeon/evergreen_smc.h +++ b/drivers/gpu/drm/radeon/evergreen_smc.h @@ -57,7 +57,7 @@ typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; #define EVERGREEN_SMC_FIRMWARE_HEADER_LOCATION 0x100 -#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x0 +#define EVERGREEN_SMC_FIRMWARE_HEADER_softRegisters 0x8 #define EVERGREEN_SMC_FIRMWARE_HEADER_stateTable 0xC #define EVERGREEN_SMC_FIRMWARE_HEADER_mcRegisterTable 0x20 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index ea932ac..bf6300c 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2105,7 +2105,8 @@ int cayman_resume(struct radeon_device *rdev) /* init golden registers */ ni_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = cayman_startup(rdev); diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c index ef024ce..3cc78bb 100644 --- a/drivers/gpu/drm/radeon/r100.c +++ b/drivers/gpu/drm/radeon/r100.c @@ -3942,8 +3942,6 @@ int r100_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r100_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c 
index 7c63ef8..0b658b3 100644 --- a/drivers/gpu/drm/radeon/r300.c +++ b/drivers/gpu/drm/radeon/r300.c @@ -1430,8 +1430,6 @@ int r300_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r300_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c index 3768aab..802b192 100644 --- a/drivers/gpu/drm/radeon/r420.c +++ b/drivers/gpu/drm/radeon/r420.c @@ -325,8 +325,6 @@ int r420_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r420_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c index e209eb7..98d6053 100644 --- a/drivers/gpu/drm/radeon/r520.c +++ b/drivers/gpu/drm/radeon/r520.c @@ -240,8 +240,6 @@ int r520_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = r520_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index cdbc417..647ef40 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2968,7 +2968,8 @@ int r600_resume(struct radeon_device *rdev) /* post card */ atom_asic_init(rdev->mode_info.atom_context); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = r600_startup(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index b012cbb..044bc98 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1521,13 +1521,16 @@ int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon) if (r) DRM_ERROR("ib ring test failed (%d).\n", r); - if (rdev->pm.dpm_enabled) { + if ((rdev->pm.pm_method == PM_METHOD_DPM) && rdev->pm.dpm_enabled) { /* do dpm late init */ r = radeon_pm_late_init(rdev); if (r) { rdev->pm.dpm_enabled = false; DRM_ERROR("radeon_pm_late_init failed, disabling dpm\n"); } + } else { + /* resume old pm late */ + radeon_pm_resume(rdev); } radeon_restore_bios_scratch_regs(rdev); diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index 2aecd6d..66ed3ea 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -33,6 +33,13 @@ #include <linux/vga_switcheroo.h> #include <linux/slab.h> #include <linux/pm_runtime.h> + +#if defined(CONFIG_VGA_SWITCHEROO) +bool radeon_is_px(void); +#else +static inline bool radeon_is_px(void) { return false; } +#endif + /** * radeon_driver_unload_kms - Main unload function for KMS. 
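The radeon_kms.c hunk above uses the common compile-out stub idiom: when CONFIG_VGA_SWITCHEROO is disabled, radeon_is_px() collapses to an inline returning false, so callers need no #ifdefs of their own. A generic sketch with hypothetical names:

#include <linux/types.h>

#if defined(CONFIG_SOME_FEATURE_EXAMPLE)
bool feature_active_example(void);	/* real implementation elsewhere */
#else
static inline bool feature_active_example(void) { return false; }
#endif

/* Callers can then write ordinary branches:
 *	if (feature_active_example())
 *		enable_runtime_pm();
 */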
* @@ -130,7 +137,8 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags) "Error during ACPI methods call\n"); } - if (radeon_runtime_pm != 0) { + if ((radeon_runtime_pm == 1) || + ((radeon_runtime_pm == -1) && radeon_is_px())) { pm_runtime_use_autosuspend(dev->dev); pm_runtime_set_autosuspend_delay(dev->dev, 5000); pm_runtime_set_active(dev->dev); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 77f5b0c..040a2a1 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -714,6 +714,9 @@ int radeon_ttm_init(struct radeon_device *rdev) DRM_ERROR("Failed initializing VRAM heap.\n"); return r; } + /* Change the size here instead of the init above so only lpfn is affected */ + radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size); + r = radeon_bo_create(rdev, 256 * 1024, PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->stollen_vga_memory); @@ -935,7 +938,7 @@ static ssize_t radeon_ttm_gtt_read(struct file *f, char __user *buf, while (size) { loff_t p = *pos / PAGE_SIZE; unsigned off = *pos & ~PAGE_MASK; - ssize_t cur_size = min(size, PAGE_SIZE - off); + size_t cur_size = min_t(size_t, size, PAGE_SIZE - off); struct page *page; void *ptr; diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index b5c2369..130d5cc 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -474,8 +474,6 @@ int rs400_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs400_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c index fdcde76..72d3616 100644 --- a/drivers/gpu/drm/radeon/rs600.c +++ b/drivers/gpu/drm/radeon/rs600.c @@ -1048,8 +1048,6 @@ int rs600_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs600_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c index 3595073..3462b64 100644 --- a/drivers/gpu/drm/radeon/rs690.c +++ b/drivers/gpu/drm/radeon/rs690.c @@ -756,8 +756,6 @@ int rs690_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rs690_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c index 98e8138..237dd29 100644 --- a/drivers/gpu/drm/radeon/rv515.c +++ b/drivers/gpu/drm/radeon/rv515.c @@ -586,8 +586,6 @@ int rv515_resume(struct radeon_device *rdev) /* Initialize surface registers */ radeon_surface_init(rdev); - radeon_pm_resume(rdev); - rdev->accel_working = true; r = rv515_startup(rdev); if (r) { diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index 4e37a42..fef3107 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1811,7 +1811,8 @@ int rv770_resume(struct radeon_device *rdev) /* init golden registers */ rv770_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = rv770_startup(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 8357832..9a124d0 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6618,7 +6618,8 @@ int si_resume(struct radeon_device *rdev) 
/* init golden registers */ si_init_golden_registers(rdev); - radeon_pm_resume(rdev); + if (rdev->pm.pm_method == PM_METHOD_DPM) + radeon_pm_resume(rdev); rdev->accel_working = true; r = si_startup(rdev); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a066513..214b799 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -351,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, moved: if (bo->evicted) { - ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); - if (ret) - pr_err("Can not flush read caches\n"); + if (bdev->driver->invalidate_caches) { + ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement); + if (ret) + pr_err("Can not flush read caches\n"); + } bo->evicted = false; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 801231c..0ce48e5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -339,11 +339,13 @@ int ttm_bo_mmap(struct file *filp, struct vm_area_struct *vma, vma->vm_private_data = bo; /* - * PFNMAP is faster than MIXEDMAP due to reduced page - * administration. So use MIXEDMAP only if private VMA, where - * we need to support COW. + * We'd like to use VM_PFNMAP on shared mappings, where + * (vma->vm_flags & VM_SHARED) != 0, for performance reasons, + * but for some reason VM_PFNMAP + x86 PAT + write-combine is very + * bad for performance. Until that has been sorted out, use + * VM_MIXEDMAP on all mappings. See freedesktop.org bug #75719 */ - vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP; vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP; return 0; out_unref: @@ -359,7 +361,7 @@ int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo) vma->vm_ops = &ttm_bo_vm_ops; vma->vm_private_data = ttm_bo_reference(bo); - vma->vm_flags |= (vma->vm_flags & VM_SHARED) ? VM_PFNMAP : VM_MIXEDMAP; + vma->vm_flags |= VM_MIXEDMAP; vma->vm_flags |= VM_IO | VM_DONTEXPAND; return 0; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 82468d9..e7af580 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -830,6 +830,24 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data, if (unlikely(ret != 0)) goto out_unlock; + /* + * A gb-aware client referencing a shared surface will + * expect a backup buffer to be present. + */ + if (dev_priv->has_mob && req->shareable) { + uint32_t backup_handle; + + ret = vmw_user_dmabuf_alloc(dev_priv, tfile, + res->backup_size, + true, + &backup_handle, + &res->backup); + if (unlikely(ret != 0)) { + vmw_resource_unreference(&res); + goto out_unlock; + } + } + tmp = vmw_resource_reference(&srf->res); ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, req->shareable, VMW_RES_SURFACE, diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig index f5ed031..de17c55 100644 --- a/drivers/i2c/busses/Kconfig +++ b/drivers/i2c/busses/Kconfig @@ -387,7 +387,7 @@ config I2C_CBUS_GPIO config I2C_CPM tristate "Freescale CPM1 or CPM2 (MPC8xx/826x)" - depends on (CPM1 || CPM2) && OF_I2C + depends on CPM1 || CPM2 help This supports the use of the I2C interface on Freescale processors with CPM1 or CPM2. 
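The ttm_bo.c hunk above treats invalidate_caches as an optional driver hook. A minimal sketch of the guard, assuming the TTM driver structure of this era; the wrapper name is hypothetical:

#include <linux/printk.h>
#include <drm/ttm/ttm_bo_driver.h>

static void flush_caches_example(struct ttm_bo_device *bdev, uint32_t placement)
{
	/* Drivers may leave the hook NULL; check before calling through it */
	if (bdev->driver->invalidate_caches &&
	    bdev->driver->invalidate_caches(bdev, placement))
		pr_err("Can not flush read caches\n");
}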
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c index d18d08a..8ee228e 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.c +++ b/drivers/infiniband/ulp/isert/ib_isert.c @@ -492,12 +492,11 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event) isert_conn->state = ISER_CONN_INIT; INIT_LIST_HEAD(&isert_conn->conn_accept_node); init_completion(&isert_conn->conn_login_comp); - init_waitqueue_head(&isert_conn->conn_wait); - init_waitqueue_head(&isert_conn->conn_wait_comp_err); + init_completion(&isert_conn->conn_wait); + init_completion(&isert_conn->conn_wait_comp_err); kref_init(&isert_conn->conn_kref); kref_get(&isert_conn->conn_kref); mutex_init(&isert_conn->conn_mutex); - mutex_init(&isert_conn->conn_comp_mutex); spin_lock_init(&isert_conn->conn_lock); cma_id->context = isert_conn; @@ -688,11 +687,11 @@ isert_disconnect_work(struct work_struct *work) pr_debug("isert_disconnect_work(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); mutex_lock(&isert_conn->conn_mutex); - isert_conn->state = ISER_CONN_DOWN; + if (isert_conn->state == ISER_CONN_UP) + isert_conn->state = ISER_CONN_TERMINATING; if (isert_conn->post_recv_buf_count == 0 && atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("Calling wake_up(&isert_conn->conn_wait);\n"); mutex_unlock(&isert_conn->conn_mutex); goto wake_up; } @@ -712,7 +711,7 @@ isert_disconnect_work(struct work_struct *work) mutex_unlock(&isert_conn->conn_mutex); wake_up: - wake_up(&isert_conn->conn_wait); + complete(&isert_conn->conn_wait); isert_put_conn(isert_conn); } @@ -888,16 +887,17 @@ isert_init_send_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd, * Coalesce send completion interrupts by only setting IB_SEND_SIGNALED * bit for every ISERT_COMP_BATCH_COUNT number of ib_post_send() calls. 
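A simplified sketch of the coalescing rule described in the comment above: only every Nth send is posted signaled, the rest are parked on a lock-protected llist and completed in a batch when the signaled one completes. Locking and the batch hand-off are elided; _example names are hypothetical:

#include <linux/llist.h>

#define COMP_BATCH_EXAMPLE 8

/* Returns true when the caller should post with IB_SEND_SIGNALED */
static bool coalesce_send_example(int *batch, struct llist_node *node,
				  struct llist_head *parked)
{
	if (++(*batch) < COMP_BATCH_EXAMPLE) {
		llist_add(node, parked);	/* defer this completion */
		return false;
	}
	*batch = 0;	/* the real code also drains 'parked' at this point */
	return true;
}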
*/ - mutex_lock(&isert_conn->conn_comp_mutex); - if (coalesce && + mutex_lock(&isert_conn->conn_mutex); + if (coalesce && isert_conn->state == ISER_CONN_UP && ++isert_conn->conn_comp_batch < ISERT_COMP_BATCH_COUNT) { + tx_desc->llnode_active = true; llist_add(&tx_desc->comp_llnode, &isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); return; } isert_conn->conn_comp_batch = 0; tx_desc->comp_llnode_batch = llist_del_all(&isert_conn->conn_comp_llist); - mutex_unlock(&isert_conn->conn_comp_mutex); + mutex_unlock(&isert_conn->conn_mutex); send_wr->send_flags = IB_SEND_SIGNALED; } @@ -1464,7 +1464,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_SCSI_CMD: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); if (cmd->data_direction == DMA_TO_DEVICE) @@ -1476,7 +1476,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_SCSI_TMFUNC: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); transport_generic_free_cmd(&cmd->se_cmd, 0); @@ -1486,7 +1486,7 @@ isert_put_cmd(struct isert_cmd *isert_cmd) case ISCSI_OP_TEXT: spin_lock_bh(&conn->cmd_lock); if (!list_empty(&cmd->i_conn_node)) - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); /* @@ -1549,6 +1549,7 @@ isert_completion_rdma_read(struct iser_tx_desc *tx_desc, iscsit_stop_dataout_timer(cmd); device->unreg_rdma_mem(isert_cmd, isert_conn); cmd->write_data_done = wr->cur_rdma_length; + wr->send_wr_num = 0; pr_debug("Cmd: %p RDMA_READ comp calling execute_cmd\n", isert_cmd); spin_lock_bh(&cmd->istate_lock); @@ -1589,7 +1590,7 @@ isert_do_control_comp(struct work_struct *work) pr_debug("Calling iscsit_logout_post_handler >>>>>>>>>>>>>>\n"); /* * Call atomic_dec(&isert_conn->post_send_buf_count) - * from isert_free_conn() + * from isert_wait_conn() */ isert_conn->logout_posted = true; iscsit_logout_post_handler(cmd, cmd->conn); @@ -1613,6 +1614,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, struct ib_device *ib_dev) { struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd; + struct isert_rdma_wr *wr = &isert_cmd->rdma_wr; if (cmd->i_state == ISTATE_SEND_TASKMGTRSP || cmd->i_state == ISTATE_SEND_LOGOUTRSP || @@ -1624,7 +1626,7 @@ isert_response_completion(struct iser_tx_desc *tx_desc, queue_work(isert_comp_wq, &isert_cmd->comp_work); return; } - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); cmd->i_state = ISTATE_SENT_STATUS; isert_completion_put(tx_desc, isert_cmd, ib_dev); @@ -1662,7 +1664,7 @@ __isert_send_completion(struct iser_tx_desc *tx_desc, case ISER_IB_RDMA_READ: pr_debug("isert_send_completion: Got ISER_IB_RDMA_READ:\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); isert_completion_rdma_read(tx_desc, isert_cmd); break; default: @@ -1691,31 +1693,76 @@ isert_send_completion(struct iser_tx_desc *tx_desc, } static void -isert_cq_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) +isert_cq_drain_comp_llist(struct isert_conn *isert_conn, struct ib_device *ib_dev) +{ + struct llist_node *llnode; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; + + mutex_lock(&isert_conn->conn_mutex); + llnode = 
llist_del_all(&isert_conn->conn_comp_llist); + isert_conn->conn_comp_batch = 0; + mutex_unlock(&isert_conn->conn_mutex); + + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; + + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + isert_completion_put(t, t->isert_cmd, ib_dev); + } +} + +static void +isert_cq_tx_comp_err(struct iser_tx_desc *tx_desc, struct isert_conn *isert_conn) { struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + struct llist_node *llnode = tx_desc->comp_llnode_batch; + struct isert_rdma_wr *wr; + struct iser_tx_desc *t; - if (tx_desc) { - struct isert_cmd *isert_cmd = tx_desc->isert_cmd; + while (llnode) { + t = llist_entry(llnode, struct iser_tx_desc, comp_llnode); + llnode = llist_next(llnode); + wr = &t->isert_cmd->rdma_wr; - if (!isert_cmd) - isert_unmap_tx_desc(tx_desc, ib_dev); - else - isert_completion_put(tx_desc, isert_cmd, ib_dev); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); + isert_completion_put(t, t->isert_cmd, ib_dev); } + tx_desc->comp_llnode_batch = NULL; - if (isert_conn->post_recv_buf_count == 0 && - atomic_read(&isert_conn->post_send_buf_count) == 0) { - pr_debug("isert_cq_comp_err >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n"); - pr_debug("Calling wake_up from isert_cq_comp_err\n"); + if (!isert_cmd) + isert_unmap_tx_desc(tx_desc, ib_dev); + else + isert_completion_put(tx_desc, isert_cmd, ib_dev); +} - mutex_lock(&isert_conn->conn_mutex); - if (isert_conn->state != ISER_CONN_DOWN) - isert_conn->state = ISER_CONN_TERMINATING; - mutex_unlock(&isert_conn->conn_mutex); +static void +isert_cq_rx_comp_err(struct isert_conn *isert_conn) +{ + struct ib_device *ib_dev = isert_conn->conn_cm_id->device; + struct iscsi_conn *conn = isert_conn->conn; - wake_up(&isert_conn->conn_wait_comp_err); + if (isert_conn->post_recv_buf_count) + return; + + isert_cq_drain_comp_llist(isert_conn, ib_dev); + + if (conn->sess) { + target_sess_cmd_list_set_waiting(conn->sess->se_sess); + target_wait_for_sess_cmds(conn->sess->se_sess); } + + while (atomic_read(&isert_conn->post_send_buf_count)) + msleep(3000); + + mutex_lock(&isert_conn->conn_mutex); + isert_conn->state = ISER_CONN_DOWN; + mutex_unlock(&isert_conn->conn_mutex); + + complete(&isert_conn->conn_wait_comp_err); } static void @@ -1740,8 +1787,14 @@ isert_cq_tx_work(struct work_struct *work) pr_debug("TX wc.status != IB_WC_SUCCESS >>>>>>>>>>>>>>\n"); pr_debug("TX wc.status: 0x%08x\n", wc.status); pr_debug("TX wc.vendor_err: 0x%08x\n", wc.vendor_err); - atomic_dec(&isert_conn->post_send_buf_count); - isert_cq_comp_err(tx_desc, isert_conn); + + if (wc.wr_id != ISER_FASTREG_LI_WRID) { + if (tx_desc->llnode_active) + continue; + + atomic_dec(&isert_conn->post_send_buf_count); + isert_cq_tx_comp_err(tx_desc, isert_conn); + } } } @@ -1784,7 +1837,7 @@ isert_cq_rx_work(struct work_struct *work) wc.vendor_err); } isert_conn->post_recv_buf_count--; - isert_cq_comp_err(NULL, isert_conn); + isert_cq_rx_comp_err(isert_conn); } } @@ -2202,6 +2255,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, if (!fr_desc->valid) { memset(&inv_wr, 0, sizeof(inv_wr)); + inv_wr.wr_id = ISER_FASTREG_LI_WRID; inv_wr.opcode = IB_WR_LOCAL_INV; inv_wr.ex.invalidate_rkey = fr_desc->data_mr->rkey; wr = &inv_wr; @@ -2212,6 +2266,7 @@ isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc, /* Prepare FASTREG WR */ memset(&fr_wr, 0, sizeof(fr_wr)); + fr_wr.wr_id = 
ISER_FASTREG_LI_WRID; fr_wr.opcode = IB_WR_FAST_REG_MR; fr_wr.wr.fast_reg.iova_start = fr_desc->data_frpl->page_list[0] + page_off; @@ -2377,12 +2432,12 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd) isert_init_send_wr(isert_conn, isert_cmd, &isert_cmd->tx_desc.send_wr, true); - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_WRITE\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num + 1, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_WRITE + Response for iSER Data READ\n", isert_cmd); @@ -2410,12 +2465,12 @@ isert_get_dataout(struct iscsi_conn *conn, struct iscsi_cmd *cmd, bool recovery) return rc; } - atomic_inc(&isert_conn->post_send_buf_count); + atomic_add(wr->send_wr_num, &isert_conn->post_send_buf_count); rc = ib_post_send(isert_conn->conn_qp, wr->send_wr, &wr_failed); if (rc) { pr_warn("ib_post_send() failed for IB_WR_RDMA_READ\n"); - atomic_dec(&isert_conn->post_send_buf_count); + atomic_sub(wr->send_wr_num, &isert_conn->post_send_buf_count); } pr_debug("Cmd: %p posted RDMA_READ memory for ISER Data WRITE\n", isert_cmd); @@ -2702,22 +2757,11 @@ isert_free_np(struct iscsi_np *np) kfree(isert_np); } -static int isert_check_state(struct isert_conn *isert_conn, int state) -{ - int ret; - - mutex_lock(&isert_conn->conn_mutex); - ret = (isert_conn->state == state); - mutex_unlock(&isert_conn->conn_mutex); - - return ret; -} - -static void isert_free_conn(struct iscsi_conn *conn) +static void isert_wait_conn(struct iscsi_conn *conn) { struct isert_conn *isert_conn = conn->context; - pr_debug("isert_free_conn: Starting \n"); + pr_debug("isert_wait_conn: Starting \n"); /* * Decrement post_send_buf_count for special case when called * from isert_do_control_comp() -> iscsit_logout_post_handler() @@ -2727,38 +2771,29 @@ static void isert_free_conn(struct iscsi_conn *conn) atomic_dec(&isert_conn->post_send_buf_count); if (isert_conn->conn_cm_id && isert_conn->state != ISER_CONN_DOWN) { - pr_debug("Calling rdma_disconnect from isert_free_conn\n"); + pr_debug("Calling rdma_disconnect from isert_wait_conn\n"); rdma_disconnect(isert_conn->conn_cm_id); } /* * Only wait for conn_wait_comp_err if the isert_conn made it * into full feature phase.. 
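The isert hunks above convert teardown from wait_event() polling on connection state to struct completion, which the error and disconnect paths signal exactly once. A minimal sketch of the shape, with hypothetical names:

#include <linux/completion.h>

struct conn_example {
	struct completion disconnect_done;
	struct completion comp_err_done;
};

static void conn_init_example(struct conn_example *c)
{
	init_completion(&c->disconnect_done);
	init_completion(&c->comp_err_done);
}

/* Teardown blocks until both paths have signalled */
static void conn_wait_example(struct conn_example *c)
{
	wait_for_completion(&c->comp_err_done);	  /* complete()d by error path */
	wait_for_completion(&c->disconnect_done); /* complete()d on disconnect */
}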
*/ - if (isert_conn->state == ISER_CONN_UP) { - pr_debug("isert_free_conn: Before wait_event comp_err %d\n", - isert_conn->state); - mutex_unlock(&isert_conn->conn_mutex); - - wait_event(isert_conn->conn_wait_comp_err, - (isert_check_state(isert_conn, ISER_CONN_TERMINATING))); - - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); - - isert_put_conn(isert_conn); - return; - } if (isert_conn->state == ISER_CONN_INIT) { mutex_unlock(&isert_conn->conn_mutex); - isert_put_conn(isert_conn); return; } - pr_debug("isert_free_conn: wait_event conn_wait %d\n", - isert_conn->state); + if (isert_conn->state == ISER_CONN_UP) + isert_conn->state = ISER_CONN_TERMINATING; mutex_unlock(&isert_conn->conn_mutex); - wait_event(isert_conn->conn_wait, - (isert_check_state(isert_conn, ISER_CONN_DOWN))); + wait_for_completion(&isert_conn->conn_wait_comp_err); + + wait_for_completion(&isert_conn->conn_wait); +} + +static void isert_free_conn(struct iscsi_conn *conn) +{ + struct isert_conn *isert_conn = conn->context; isert_put_conn(isert_conn); } @@ -2771,6 +2806,7 @@ static struct iscsit_transport iser_target_transport = { .iscsit_setup_np = isert_setup_np, .iscsit_accept_np = isert_accept_np, .iscsit_free_np = isert_free_np, + .iscsit_wait_conn = isert_wait_conn, .iscsit_free_conn = isert_free_conn, .iscsit_get_login_rx = isert_get_login_rx, .iscsit_put_login_tx = isert_put_login_tx, diff --git a/drivers/infiniband/ulp/isert/ib_isert.h b/drivers/infiniband/ulp/isert/ib_isert.h index 708a069..f6ae7f5 100644 --- a/drivers/infiniband/ulp/isert/ib_isert.h +++ b/drivers/infiniband/ulp/isert/ib_isert.h @@ -6,6 +6,7 @@ #define ISERT_RDMA_LISTEN_BACKLOG 10 #define ISCSI_ISER_SG_TABLESIZE 256 +#define ISER_FASTREG_LI_WRID 0xffffffffffffffffULL enum isert_desc_type { ISCSI_TX_CONTROL, @@ -45,6 +46,7 @@ struct iser_tx_desc { struct isert_cmd *isert_cmd; struct llist_node *comp_llnode_batch; struct llist_node comp_llnode; + bool llnode_active; struct ib_send_wr send_wr; } __packed; @@ -116,8 +118,8 @@ struct isert_conn { struct isert_device *conn_device; struct work_struct conn_logout_work; struct mutex conn_mutex; - wait_queue_head_t conn_wait; - wait_queue_head_t conn_wait_comp_err; + struct completion conn_wait; + struct completion conn_wait_comp_err; struct kref conn_kref; struct list_head conn_fr_pool; int conn_fr_pool_size; @@ -126,7 +128,6 @@ struct isert_conn { #define ISERT_COMP_BATCH_COUNT 8 int conn_comp_batch; struct llist_head conn_comp_llist; - struct mutex conn_comp_mutex; }; #define ISERT_MAX_CQ 64 diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig index 9a06fe8..95ad936 100644 --- a/drivers/md/Kconfig +++ b/drivers/md/Kconfig @@ -254,16 +254,6 @@ config DM_THIN_PROVISIONING ---help--- Provides thin provisioning and snapshots that share a data store. -config DM_DEBUG_BLOCK_STACK_TRACING - boolean "Keep stack trace of persistent data block lock holders" - depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA - select STACKTRACE - ---help--- - Enable this for messages that may help debug problems with the - block manager locking used by thin provisioning and caching. - - If unsure, say N. 
- config DM_CACHE tristate "Cache target (EXPERIMENTAL)" depends on BLK_DEV_DM diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c index 1e018e9..0e385e4 100644 --- a/drivers/md/dm-cache-policy-mq.c +++ b/drivers/md/dm-cache-policy-mq.c @@ -872,7 +872,7 @@ static void mq_destroy(struct dm_cache_policy *p) { struct mq_policy *mq = to_mq_policy(p); - kfree(mq->table); + vfree(mq->table); epool_exit(&mq->cache_pool); epool_exit(&mq->pre_cache_pool); kfree(mq); @@ -1245,7 +1245,7 @@ static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); mq->hash_bits = ffs(mq->nr_buckets) - 1; - mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); + mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets); if (!mq->table) goto bad_alloc_table; diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c index 1af7014..074b9c8 100644 --- a/drivers/md/dm-cache-target.c +++ b/drivers/md/dm-cache-target.c @@ -979,12 +979,13 @@ static void issue_copy_real(struct dm_cache_migration *mg) int r; struct dm_io_region o_region, c_region; struct cache *cache = mg->cache; + sector_t cblock = from_cblock(mg->cblock); o_region.bdev = cache->origin_dev->bdev; o_region.count = cache->sectors_per_block; c_region.bdev = cache->cache_dev->bdev; - c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block; + c_region.sector = cblock * cache->sectors_per_block; c_region.count = cache->sectors_per_block; if (mg->writeback || mg->demote) { @@ -2464,20 +2465,18 @@ static int cache_map(struct dm_target *ti, struct bio *bio) bool discarded_block; struct dm_bio_prison_cell *cell; struct policy_result lookup_result; - struct per_bio_data *pb; + struct per_bio_data *pb = init_per_bio_data(bio, pb_data_size); - if (from_oblock(block) > from_oblock(cache->origin_blocks)) { + if (unlikely(from_oblock(block) >= from_oblock(cache->origin_blocks))) { /* * This can only occur if the io goes to a partial block at * the end of the origin device. We don't cache these. * Just remap to the origin and carry on. 
*/ - remap_to_origin_clear_discard(cache, bio, block); + remap_to_origin(cache, bio); return DM_MAPIO_REMAPPED; } - pb = init_per_bio_data(bio, pb_data_size); - if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) { defer_bio(cache, bio); return DM_MAPIO_SUBMITTED; diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index afc3d01..d6e8817 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -546,6 +546,9 @@ static int read_exceptions(struct pstore *ps, r = insert_exceptions(ps, area, callback, callback_context, &full); + if (!full) + memcpy(ps->area, area, ps->store->chunk_size << SECTOR_SHIFT); + dm_bufio_release(bp); dm_bufio_forget(client, chunk); diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c index baa87ff..fb9efc8 100644 --- a/drivers/md/dm-thin-metadata.c +++ b/drivers/md/dm-thin-metadata.c @@ -76,7 +76,7 @@ #define THIN_SUPERBLOCK_MAGIC 27022010 #define THIN_SUPERBLOCK_LOCATION 0 -#define THIN_VERSION 1 +#define THIN_VERSION 2 #define THIN_METADATA_CACHE_SIZE 64 #define SECTOR_TO_BLOCK_SHIFT 3 @@ -1755,3 +1755,38 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, return r; } + +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd) +{ + int r; + struct dm_block *sblock; + struct thin_disk_superblock *disk_super; + + down_write(&pmd->root_lock); + pmd->flags |= THIN_METADATA_NEEDS_CHECK_FLAG; + + r = superblock_lock(pmd, &sblock); + if (r) { + DMERR("couldn't read superblock"); + goto out; + } + + disk_super = dm_block_data(sblock); + disk_super->flags = cpu_to_le32(pmd->flags); + + dm_bm_unlock(sblock); +out: + up_write(&pmd->root_lock); + return r; +} + +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd) +{ + bool needs_check; + + down_read(&pmd->root_lock); + needs_check = pmd->flags & THIN_METADATA_NEEDS_CHECK_FLAG; + up_read(&pmd->root_lock); + + return needs_check; +} diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h index 82ea384..e3c857d 100644 --- a/drivers/md/dm-thin-metadata.h +++ b/drivers/md/dm-thin-metadata.h @@ -25,6 +25,11 @@ /*----------------------------------------------------------------*/ +/* + * Thin metadata superblock flags. + */ +#define THIN_METADATA_NEEDS_CHECK_FLAG (1 << 0) + struct dm_pool_metadata; struct dm_thin_device; @@ -202,6 +207,12 @@ int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd, dm_sm_threshold_fn fn, void *context); +/* + * Updates the superblock immediately. + */ +int dm_pool_metadata_set_needs_check(struct dm_pool_metadata *pmd); +bool dm_pool_metadata_needs_check(struct dm_pool_metadata *pmd); + /*----------------------------------------------------------------*/ #endif diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 7e84bac..be70d38 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c @@ -130,10 +130,11 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b, struct dm_thin_new_mapping; /* - * The pool runs in 3 modes. Ordered in degraded order for comparisons. + * The pool runs in 4 modes, ordered from least to most degraded so the modes can be compared.
*/ enum pool_mode { PM_WRITE, /* metadata may be changed */ + PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */ PM_READ_ONLY, /* metadata may not be changed */ PM_FAIL, /* all I/O fails */ }; @@ -198,7 +199,6 @@ struct pool { }; static enum pool_mode get_pool_mode(struct pool *pool); -static void out_of_data_space(struct pool *pool); static void metadata_operation_failed(struct pool *pool, const char *op, int r); /* @@ -226,6 +226,7 @@ struct thin_c { struct pool *pool; struct dm_thin_device *td; + bool requeue_mode:1; }; /*----------------------------------------------------------------*/ @@ -369,14 +370,18 @@ struct dm_thin_endio_hook { struct dm_thin_new_mapping *overwrite_mapping; }; -static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) +static void requeue_bio_list(struct thin_c *tc, struct bio_list *master) { struct bio *bio; struct bio_list bios; + unsigned long flags; bio_list_init(&bios); + + spin_lock_irqsave(&tc->pool->lock, flags); bio_list_merge(&bios, master); bio_list_init(master); + spin_unlock_irqrestore(&tc->pool->lock, flags); while ((bio = bio_list_pop(&bios))) { struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); @@ -391,12 +396,26 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) static void requeue_io(struct thin_c *tc) { struct pool *pool = tc->pool; + + requeue_bio_list(tc, &pool->deferred_bios); + requeue_bio_list(tc, &pool->retry_on_resume_list); +} + +static void error_retry_list(struct pool *pool) +{ + struct bio *bio; unsigned long flags; + struct bio_list bios; + + bio_list_init(&bios); spin_lock_irqsave(&pool->lock, flags); - __requeue_bio_list(tc, &pool->deferred_bios); - __requeue_bio_list(tc, &pool->retry_on_resume_list); + bio_list_merge(&bios, &pool->retry_on_resume_list); + bio_list_init(&pool->retry_on_resume_list); spin_unlock_irqrestore(&pool->lock, flags); + + while ((bio = bio_list_pop(&bios))) + bio_io_error(bio); } /* @@ -925,13 +944,15 @@ static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) } } +static void set_pool_mode(struct pool *pool, enum pool_mode new_mode); + static int alloc_data_block(struct thin_c *tc, dm_block_t *result) { int r; dm_block_t free_blocks; struct pool *pool = tc->pool; - if (get_pool_mode(pool) != PM_WRITE) + if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) return -EINVAL; r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); @@ -958,7 +979,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result) } if (!free_blocks) { - out_of_data_space(pool); + set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); return -ENOSPC; } } @@ -988,15 +1009,32 @@ static void retry_on_resume(struct bio *bio) spin_unlock_irqrestore(&pool->lock, flags); } -static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) +static bool should_error_unserviceable_bio(struct pool *pool) { - /* - * When pool is read-only, no cell locking is needed because - * nothing is changing. 
- */ - WARN_ON_ONCE(get_pool_mode(pool) != PM_READ_ONLY); + enum pool_mode m = get_pool_mode(pool); + + switch (m) { + case PM_WRITE: + /* Shouldn't get here */ + DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); + return true; + + case PM_OUT_OF_DATA_SPACE: + return pool->pf.error_if_no_space; - if (pool->pf.error_if_no_space) + case PM_READ_ONLY: + case PM_FAIL: + return true; + default: + /* Shouldn't get here */ + DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); + return true; + } +} + +static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) +{ + if (should_error_unserviceable_bio(pool)) bio_io_error(bio); else retry_on_resume(bio); @@ -1007,11 +1045,20 @@ static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *c struct bio *bio; struct bio_list bios; + if (should_error_unserviceable_bio(pool)) { + cell_error(pool, cell); + return; + } + bio_list_init(&bios); cell_release(pool, cell, &bios); - while ((bio = bio_list_pop(&bios))) - handle_unserviceable_bio(pool, bio); + if (should_error_unserviceable_bio(pool)) + while ((bio = bio_list_pop(&bios))) + bio_io_error(bio); + else + while ((bio = bio_list_pop(&bios))) + retry_on_resume(bio); } static void process_discard(struct thin_c *tc, struct bio *bio) @@ -1296,6 +1343,11 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) } } +static void process_bio_success(struct thin_c *tc, struct bio *bio) +{ + bio_endio(bio, 0); +} + static void process_bio_fail(struct thin_c *tc, struct bio *bio) { bio_io_error(bio); @@ -1328,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool) struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); struct thin_c *tc = h->tc; + if (tc->requeue_mode) { + bio_endio(bio, DM_ENDIO_REQUEUE); + continue; + } + /* * If we've got no free new_mapping structs, and processing * this bio might require one, we pause until there are some @@ -1394,51 +1451,134 @@ static void do_waker(struct work_struct *ws) /*----------------------------------------------------------------*/ +struct noflush_work { + struct work_struct worker; + struct thin_c *tc; + + atomic_t complete; + wait_queue_head_t wait; +}; + +static void complete_noflush_work(struct noflush_work *w) +{ + atomic_set(&w->complete, 1); + wake_up(&w->wait); +} + +static void do_noflush_start(struct work_struct *ws) +{ + struct noflush_work *w = container_of(ws, struct noflush_work, worker); + w->tc->requeue_mode = true; + requeue_io(w->tc); + complete_noflush_work(w); +} + +static void do_noflush_stop(struct work_struct *ws) +{ + struct noflush_work *w = container_of(ws, struct noflush_work, worker); + w->tc->requeue_mode = false; + complete_noflush_work(w); +} + +static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *)) +{ + struct noflush_work w; + + INIT_WORK(&w.worker, fn); + w.tc = tc; + atomic_set(&w.complete, 0); + init_waitqueue_head(&w.wait); + + queue_work(tc->pool->wq, &w.worker); + + wait_event(w.wait, atomic_read(&w.complete)); +} + +/*----------------------------------------------------------------*/ + static enum pool_mode get_pool_mode(struct pool *pool) { return pool->pf.mode; } +static void notify_of_pool_mode_change(struct pool *pool, const char *new_mode) +{ + dm_table_event(pool->ti->table); + DMINFO("%s: switching pool to %s mode", + dm_device_name(pool->pool_md), new_mode); +} + static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) { - int r; - enum pool_mode old_mode = 
pool->pf.mode; + struct pool_c *pt = pool->ti->private; + bool needs_check = dm_pool_metadata_needs_check(pool->pmd); + enum pool_mode old_mode = get_pool_mode(pool); + + /* + * Never allow the pool to transition to PM_WRITE mode if user + * intervention is required to verify metadata and data consistency. + */ + if (new_mode == PM_WRITE && needs_check) { + DMERR("%s: unable to switch pool to write mode until repaired.", + dm_device_name(pool->pool_md)); + if (old_mode != new_mode) + new_mode = old_mode; + else + new_mode = PM_READ_ONLY; + } + /* + * If we were in PM_FAIL mode, rollback of metadata failed. We're + * not going to recover without a thin_repair. So we never let the + * pool move out of the old mode. + */ + if (old_mode == PM_FAIL) + new_mode = old_mode; switch (new_mode) { case PM_FAIL: if (old_mode != new_mode) - DMERR("%s: switching pool to failure mode", - dm_device_name(pool->pool_md)); + notify_of_pool_mode_change(pool, "failure"); dm_pool_metadata_read_only(pool->pmd); pool->process_bio = process_bio_fail; pool->process_discard = process_bio_fail; pool->process_prepared_mapping = process_prepared_mapping_fail; pool->process_prepared_discard = process_prepared_discard_fail; + + error_retry_list(pool); break; case PM_READ_ONLY: if (old_mode != new_mode) - DMERR("%s: switching pool to read-only mode", - dm_device_name(pool->pool_md)); - r = dm_pool_abort_metadata(pool->pmd); - if (r) { - DMERR("%s: aborting transaction failed", - dm_device_name(pool->pool_md)); - new_mode = PM_FAIL; - set_pool_mode(pool, new_mode); - } else { - dm_pool_metadata_read_only(pool->pmd); - pool->process_bio = process_bio_read_only; - pool->process_discard = process_discard; - pool->process_prepared_mapping = process_prepared_mapping_fail; - pool->process_prepared_discard = process_prepared_discard_passdown; - } + notify_of_pool_mode_change(pool, "read-only"); + dm_pool_metadata_read_only(pool->pmd); + pool->process_bio = process_bio_read_only; + pool->process_discard = process_bio_success; + pool->process_prepared_mapping = process_prepared_mapping_fail; + pool->process_prepared_discard = process_prepared_discard_passdown; + + error_retry_list(pool); + break; + + case PM_OUT_OF_DATA_SPACE: + /* + * Ideally we'd never hit this state; the low water mark + * would trigger userland to extend the pool before we + * completely run out of data space. However, many small + * IOs to unprovisioned space can consume data space at an + * alarming rate. Adjust your low water mark if you're + * frequently seeing this mode. + */ + if (old_mode != new_mode) + notify_of_pool_mode_change(pool, "out-of-data-space"); + pool->process_bio = process_bio_read_only; + pool->process_discard = process_discard; + pool->process_prepared_mapping = process_prepared_mapping; + pool->process_prepared_discard = process_prepared_discard_passdown; break; case PM_WRITE: if (old_mode != new_mode) - DMINFO("%s: switching pool to write mode", - dm_device_name(pool->pool_md)); + notify_of_pool_mode_change(pool, "write"); dm_pool_metadata_read_write(pool->pmd); pool->process_bio = process_bio; pool->process_discard = process_discard; @@ -1448,32 +1588,35 @@ static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) } pool->pf.mode = new_mode; + /* + * The pool mode may have changed, sync it so bind_control_target() + * doesn't cause an unexpected mode transition on resume. 
+ */ + pt->adjusted_pf.mode = new_mode; } -/* - * Rather than calling set_pool_mode directly, use these which describe the - * reason for mode degradation. - */ -static void out_of_data_space(struct pool *pool) +static void abort_transaction(struct pool *pool) { - DMERR_LIMIT("%s: no free data space available.", - dm_device_name(pool->pool_md)); - set_pool_mode(pool, PM_READ_ONLY); + const char *dev_name = dm_device_name(pool->pool_md); + + DMERR_LIMIT("%s: aborting current metadata transaction", dev_name); + if (dm_pool_abort_metadata(pool->pmd)) { + DMERR("%s: failed to abort metadata transaction", dev_name); + set_pool_mode(pool, PM_FAIL); + } + + if (dm_pool_metadata_set_needs_check(pool->pmd)) { + DMERR("%s: failed to set 'needs_check' flag in metadata", dev_name); + set_pool_mode(pool, PM_FAIL); + } } static void metadata_operation_failed(struct pool *pool, const char *op, int r) { - dm_block_t free_blocks; - DMERR_LIMIT("%s: metadata operation '%s' failed: error = %d", dm_device_name(pool->pool_md), op, r); - if (r == -ENOSPC && - !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) && - !free_blocks) - DMERR_LIMIT("%s: no free metadata space available.", - dm_device_name(pool->pool_md)); - + abort_transaction(pool); set_pool_mode(pool, PM_READ_ONLY); } @@ -1524,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio) thin_hook_bio(tc, bio); + if (tc->requeue_mode) { + bio_endio(bio, DM_ENDIO_REQUEUE); + return DM_MAPIO_SUBMITTED; + } + if (get_pool_mode(tc->pool) == PM_FAIL) { bio_io_error(bio); return DM_MAPIO_SUBMITTED; @@ -1687,7 +1835,7 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) /* * We want to make sure that a pool in PM_FAIL mode is never upgraded. */ - enum pool_mode old_mode = pool->pf.mode; + enum pool_mode old_mode = get_pool_mode(pool); enum pool_mode new_mode = pt->adjusted_pf.mode; /* @@ -1701,16 +1849,6 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti) pool->pf = pt->adjusted_pf; pool->low_water_blocks = pt->low_water_blocks; - /* - * If we were in PM_FAIL mode, rollback of metadata failed. We're - * not going to recover without a thin_repair. So we never let the - * pool move out of the old mode. On the other hand a PM_READ_ONLY - * may have been due to a lack of metadata or data space, and may - * now work (ie. if the underlying devices have been resized). 
- */ - if (old_mode == PM_FAIL) - new_mode = old_mode; - set_pool_mode(pool, new_mode); return 0; @@ -2253,6 +2391,12 @@ static int maybe_resize_data_dev(struct dm_target *ti, bool *need_commit) return -EINVAL; } else if (data_size > sb_data_size) { + if (dm_pool_metadata_needs_check(pool->pmd)) { + DMERR("%s: unable to grow the data device until repaired.", + dm_device_name(pool->pool_md)); + return 0; + } + if (sb_data_size) DMINFO("%s: growing the data device from %llu to %llu blocks", dm_device_name(pool->pool_md), @@ -2294,6 +2438,12 @@ static int maybe_resize_metadata_dev(struct dm_target *ti, bool *need_commit) return -EINVAL; } else if (metadata_dev_size > sb_metadata_dev_size) { + if (dm_pool_metadata_needs_check(pool->pmd)) { + DMERR("%s: unable to grow the metadata device until repaired.", + dm_device_name(pool->pool_md)); + return 0; + } + warn_if_metadata_device_too_big(pool->md_dev); DMINFO("%s: growing the metadata device from %llu to %llu blocks", dm_device_name(pool->pool_md), @@ -2681,7 +2831,9 @@ static void pool_status(struct dm_target *ti, status_type_t type, else DMEMIT("- "); - if (pool->pf.mode == PM_READ_ONLY) + if (pool->pf.mode == PM_OUT_OF_DATA_SPACE) + DMEMIT("out_of_data_space "); + else if (pool->pf.mode == PM_READ_ONLY) DMEMIT("ro "); else DMEMIT("rw "); @@ -2795,7 +2947,7 @@ static struct target_type pool_target = { .name = "thin-pool", .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | DM_TARGET_IMMUTABLE, - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = pool_ctr, .dtr = pool_dtr, @@ -2997,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err) return 0; } -static void thin_postsuspend(struct dm_target *ti) +static void thin_presuspend(struct dm_target *ti) { + struct thin_c *tc = ti->private; + if (dm_noflush_suspending(ti)) - requeue_io((struct thin_c *)ti->private); + noflush_work(tc, do_noflush_start); +} + +static void thin_postsuspend(struct dm_target *ti) +{ + struct thin_c *tc = ti->private; + + /* + * The dm_noflush_suspending flag has been cleared by now, so + * unfortunately we must always run this. + */ + noflush_work(tc, do_noflush_stop); } /* @@ -3085,12 +3250,13 @@ static int thin_iterate_devices(struct dm_target *ti, static struct target_type thin_target = { .name = "thin", - .version = {1, 10, 0}, + .version = {1, 11, 0}, .module = THIS_MODULE, .ctr = thin_ctr, .dtr = thin_dtr, .map = thin_map, .end_io = thin_endio, + .presuspend = thin_presuspend, .postsuspend = thin_postsuspend, .status = thin_status, .iterate_devices = thin_iterate_devices, diff --git a/drivers/md/persistent-data/Kconfig b/drivers/md/persistent-data/Kconfig index 19b2687..0c2dec7 100644 --- a/drivers/md/persistent-data/Kconfig +++ b/drivers/md/persistent-data/Kconfig @@ -6,3 +6,13 @@ config DM_PERSISTENT_DATA ---help--- Library providing immutable on-disk data structure support for device-mapper targets such as the thin provisioning target. + +config DM_DEBUG_BLOCK_STACK_TRACING + boolean "Keep stack trace of persistent data block lock holders" + depends on STACKTRACE_SUPPORT && DM_PERSISTENT_DATA + select STACKTRACE + ---help--- + Enable this for messages that may help debug problems with the + block manager locking used by thin provisioning and caching. + + If unsure, say N. 
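The dm-space-map-metadata.c diff just below replaces the flat uncommitted[] array and nr_uncommitted counter with a begin/end ring buffer. Here is a minimal user-space sketch of the same discipline under hypothetical rb_* names (not the patch's code): one slot is deliberately never filled, so begin == end can only mean empty, and next(end) == begin means full, with no separate count field.

        #include <stdio.h>

        #define RB_SLOTS 8      /* usable capacity is RB_SLOTS - 1 */

        struct ring {
                unsigned int begin, end;
                int slots[RB_SLOTS];
        };

        static unsigned int rb_next(unsigned int i)
        {
                return (i + 1 == RB_SLOTS) ? 0 : i + 1;
        }

        static int rb_push(struct ring *rb, int v)
        {
                unsigned int next = rb_next(rb->end);

                if (next == rb->begin)
                        return -1;      /* full: the last slot stays free */
                rb->slots[rb->end] = v;
                rb->end = next;
                return 0;
        }

        static int rb_pop(struct ring *rb, int *v)
        {
                if (rb->begin == rb->end)
                        return -1;      /* empty */
                *v = rb->slots[rb->begin];
                rb->begin = rb_next(rb->begin);
                return 0;
        }

        int main(void)
        {
                struct ring rb = { 0, 0, { 0 } };
                int i, v;

                for (i = 0; rb_push(&rb, i) == 0; i++)
                        ;       /* accepts RB_SLOTS - 1 pushes, then reports full */
                while (rb_pop(&rb, &v) == 0)
                        printf("%d\n", v);
                return 0;
        }

Sacrificing one slot buys a count-free full/empty test, and it is what lets the patch walk the pending block ops in place, iterating with brb_next() from begin to end, instead of indexing 0..nr_uncommitted.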
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c index e9bdd46..786b689 100644 --- a/drivers/md/persistent-data/dm-space-map-metadata.c +++ b/drivers/md/persistent-data/dm-space-map-metadata.c @@ -91,6 +91,69 @@ struct block_op { dm_block_t block; }; +struct bop_ring_buffer { + unsigned begin; + unsigned end; + struct block_op bops[MAX_RECURSIVE_ALLOCATIONS + 1]; +}; + +static void brb_init(struct bop_ring_buffer *brb) +{ + brb->begin = 0; + brb->end = 0; +} + +static bool brb_empty(struct bop_ring_buffer *brb) +{ + return brb->begin == brb->end; +} + +static unsigned brb_next(struct bop_ring_buffer *brb, unsigned old) +{ + unsigned r = old + 1; + return (r >= (sizeof(brb->bops) / sizeof(*brb->bops))) ? 0 : r; +} + +static int brb_push(struct bop_ring_buffer *brb, + enum block_op_type type, dm_block_t b) +{ + struct block_op *bop; + unsigned next = brb_next(brb, brb->end); + + /* + * We don't allow the last bop to be filled, this way we can + * differentiate between full and empty. + */ + if (next == brb->begin) + return -ENOMEM; + + bop = brb->bops + brb->end; + bop->type = type; + bop->block = b; + + brb->end = next; + + return 0; +} + +static int brb_pop(struct bop_ring_buffer *brb, struct block_op *result) +{ + struct block_op *bop; + + if (brb_empty(brb)) + return -ENODATA; + + bop = brb->bops + brb->begin; + result->type = bop->type; + result->block = bop->block; + + brb->begin = brb_next(brb, brb->begin); + + return 0; +} + +/*----------------------------------------------------------------*/ + struct sm_metadata { struct dm_space_map sm; @@ -101,25 +164,20 @@ struct sm_metadata { unsigned recursion_count; unsigned allocated_this_transaction; - unsigned nr_uncommitted; - struct block_op uncommitted[MAX_RECURSIVE_ALLOCATIONS]; + struct bop_ring_buffer uncommitted; struct threshold threshold; }; static int add_bop(struct sm_metadata *smm, enum block_op_type type, dm_block_t b) { - struct block_op *op; + int r = brb_push(&smm->uncommitted, type, b); - if (smm->nr_uncommitted == MAX_RECURSIVE_ALLOCATIONS) { + if (r) { DMERR("too many recursive allocations"); return -ENOMEM; } - op = smm->uncommitted + smm->nr_uncommitted++; - op->type = type; - op->block = b; - return 0; } @@ -158,11 +216,17 @@ static int out(struct sm_metadata *smm) return -ENOMEM; } - if (smm->recursion_count == 1 && smm->nr_uncommitted) { - while (smm->nr_uncommitted && !r) { - smm->nr_uncommitted--; - r = commit_bop(smm, smm->uncommitted + - smm->nr_uncommitted); + if (smm->recursion_count == 1) { + while (!brb_empty(&smm->uncommitted)) { + struct block_op bop; + + r = brb_pop(&smm->uncommitted, &bop); + if (r) { + DMERR("bug in bop ring buffer"); + break; + } + + r = commit_bop(smm, &bop); if (r) break; } @@ -217,7 +281,8 @@ static int sm_metadata_get_nr_free(struct dm_space_map *sm, dm_block_t *count) static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, uint32_t *result) { - int r, i; + int r; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); unsigned adjustment = 0; @@ -225,8 +290,10 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, * We may have some uncommitted adjustments to add. This list * should always be really short. 
*/ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -254,7 +321,8 @@ static int sm_metadata_get_count(struct dm_space_map *sm, dm_block_t b, static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, dm_block_t b, int *result) { - int r, i, adjustment = 0; + int r, adjustment = 0; + unsigned i; struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm); uint32_t rc; @@ -262,8 +330,11 @@ static int sm_metadata_count_is_more_than_one(struct dm_space_map *sm, * We may have some uncommitted adjustments to add. This list * should always be really short. */ - for (i = 0; i < smm->nr_uncommitted; i++) { - struct block_op *op = smm->uncommitted + i; + for (i = smm->uncommitted.begin; + i != smm->uncommitted.end; + i = brb_next(&smm->uncommitted, i)) { + + struct block_op *op = smm->uncommitted.bops + i; if (op->block != b) continue; @@ -671,7 +742,7 @@ int dm_sm_metadata_create(struct dm_space_map *sm, smm->begin = superblock + 1; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->sm, &bootstrap_ops, sizeof(smm->sm)); @@ -715,7 +786,7 @@ int dm_sm_metadata_open(struct dm_space_map *sm, smm->begin = 0; smm->recursion_count = 0; smm->allocated_this_transaction = 0; - smm->nr_uncommitted = 0; + brb_init(&smm->uncommitted); threshold_init(&smm->threshold); memcpy(&smm->old_ll, &smm->ll, sizeof(smm->old_ll)); diff --git a/drivers/misc/sgi-xp/xpc_uv.c b/drivers/misc/sgi-xp/xpc_uv.c index b9e2000..95c8944 100644 --- a/drivers/misc/sgi-xp/xpc_uv.c +++ b/drivers/misc/sgi-xp/xpc_uv.c @@ -240,7 +240,7 @@ xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name, nid = cpu_to_node(cpu); page = alloc_pages_exact_node(nid, - GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, pg_order); if (page == NULL) { dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d " diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c index 060b611..9f69e81 100644 --- a/drivers/net/bonding/bond_alb.c +++ b/drivers/net/bonding/bond_alb.c @@ -722,7 +722,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon client_info->ntt = 0; } - if (!vlan_get_tag(skb, &client_info->vlan_id)) + if (vlan_get_tag(skb, &client_info->vlan_id)) client_info->vlan_id = 0; if (!client_info->assigned) { diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c index 22800bd..6e6b093 100644 --- a/drivers/net/bonding/bond_options.c +++ b/drivers/net/bonding/bond_options.c @@ -176,6 +176,7 @@ static const struct bond_opt_value bond_resend_igmp_tbl[] = { static const struct bond_opt_value bond_lp_interval_tbl[] = { { "minval", 1, BOND_VALFLAG_MIN | BOND_VALFLAG_DEFAULT}, { "maxval", INT_MAX, BOND_VALFLAG_MAX}, + { NULL, -1, 0}, }; static const struct bond_option bond_opts[] = { diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c index 2e42de2..a8efb18 100644 --- a/drivers/net/ethernet/broadcom/bnx2.c +++ b/drivers/net/ethernet/broadcom/bnx2.c @@ -2507,6 +2507,7 @@ bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent) bp->fw_wr_seq++; msg_data |= bp->fw_wr_seq; + bp->fw_last_msg = msg_data; bnx2_shmem_wr(bp, BNX2_DRV_MB, 
msg_data); @@ -4003,8 +4004,23 @@ bnx2_setup_wol(struct bnx2 *bp) wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL; } - if (!(bp->flags & BNX2_FLAG_NO_WOL)) - bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 1, 0); + if (!(bp->flags & BNX2_FLAG_NO_WOL)) { + u32 val; + + wol_msg |= BNX2_DRV_MSG_DATA_WAIT3; + if (bp->fw_last_msg || BNX2_CHIP(bp) != BNX2_CHIP_5709) { + bnx2_fw_sync(bp, wol_msg, 1, 0); + return; + } + /* Tell firmware not to power down the PHY yet, otherwise + * the chip will take a long time to respond to MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, + val | BNX2_PORT_FEATURE_ASF_ENABLED); + bnx2_fw_sync(bp, wol_msg, 1, 0); + bnx2_shmem_wr(bp, BNX2_PORT_FEATURE, val); + } } @@ -4036,9 +4052,22 @@ bnx2_set_power_state(struct bnx2 *bp, pci_power_t state) if (bp->wol) pci_set_power_state(bp->pdev, PCI_D3hot); - } else { - pci_set_power_state(bp->pdev, PCI_D3hot); + break; + + } + if (!bp->fw_last_msg && BNX2_CHIP(bp) == BNX2_CHIP_5709) { + u32 val; + + /* Tell firmware not to power down the PHY yet, + * otherwise the other port may not respond to + * MMIO reads. + */ + val = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION); + val &= ~BNX2_CONDITION_PM_STATE_MASK; + val |= BNX2_CONDITION_PM_STATE_UNPREP; + bnx2_shmem_wr(bp, BNX2_BC_STATE_CONDITION, val); } + pci_set_power_state(bp->pdev, PCI_D3hot); /* No more memory access after this point until * device is brought back to D0. diff --git a/drivers/net/ethernet/broadcom/bnx2.h b/drivers/net/ethernet/broadcom/bnx2.h index f1cf2c4..e341bc3 100644 --- a/drivers/net/ethernet/broadcom/bnx2.h +++ b/drivers/net/ethernet/broadcom/bnx2.h @@ -6900,6 +6900,7 @@ struct bnx2 { u16 fw_wr_seq; u16 fw_drv_pulse_wr_seq; + u32 fw_last_msg; int rx_max_ring; int rx_ring_size; @@ -7406,6 +7407,10 @@ struct bnx2_rv2p_fw_file { #define BNX2_CONDITION_MFW_RUN_NCSI 0x00006000 #define BNX2_CONDITION_MFW_RUN_NONE 0x0000e000 #define BNX2_CONDITION_MFW_RUN_MASK 0x0000e000 +#define BNX2_CONDITION_PM_STATE_MASK 0x00030000 +#define BNX2_CONDITION_PM_STATE_FULL 0x00030000 +#define BNX2_CONDITION_PM_STATE_PREP 0x00020000 +#define BNX2_CONDITION_PM_STATE_UNPREP 0x00010000 #define BNX2_BC_STATE_DEBUG_CMD 0x1dc #define BNX2_BC_STATE_BC_DBG_CMD_SIGNATURE 0x42440000 diff --git a/drivers/net/ethernet/brocade/bna/bfa_ioc.c b/drivers/net/ethernet/brocade/bna/bfa_ioc.c index 1803c39..354ae97 100644 --- a/drivers/net/ethernet/brocade/bna/bfa_ioc.c +++ b/drivers/net/ethernet/brocade/bna/bfa_ioc.c @@ -1704,7 +1704,7 @@ bfa_flash_sem_get(void __iomem *bar) while (!bfa_raw_sem_get(bar)) { if (--n <= 0) return BFA_STATUS_BADFLASH; - udelay(10000); + mdelay(10); } return BFA_STATUS_OK; } diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c index 3190d38..d0c38e0 100644 --- a/drivers/net/ethernet/cadence/macb.c +++ b/drivers/net/ethernet/cadence/macb.c @@ -632,11 +632,16 @@ static void gem_rx_refill(struct macb *bp) "Unable to allocate sk_buff\n"); break; } - bp->rx_skbuff[entry] = skb; /* now fill corresponding descriptor entry */ paddr = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buffer_size, DMA_FROM_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, paddr)) { + dev_kfree_skb(skb); + break; + } + + bp->rx_skbuff[entry] = skb; if (entry == RX_RING_SIZE - 1) paddr |= MACB_BIT(RX_WRAP); @@ -725,7 +730,7 @@ static int gem_rx(struct macb *bp, int budget) skb_put(skb, len); addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr)); dma_unmap_single(&bp->pdev->dev, addr, - len, DMA_FROM_DEVICE); + 
bp->rx_buffer_size, DMA_FROM_DEVICE); skb->protocol = eth_type_trans(skb, bp->dev); skb_checksum_none_assert(skb); @@ -1036,11 +1041,15 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) } entry = macb_tx_ring_wrap(bp->tx_head); - bp->tx_head++; netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry); mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE); + if (dma_mapping_error(&bp->pdev->dev, mapping)) { + kfree_skb(skb); + goto unlock; + } + bp->tx_head++; tx_skb = &bp->tx_skb[entry]; tx_skb->skb = skb; tx_skb->mapping = mapping; @@ -1066,6 +1075,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) netif_stop_queue(dev); +unlock: spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_OK; diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c index 479a7cb..03a3513 100644 --- a/drivers/net/ethernet/freescale/fec_main.c +++ b/drivers/net/ethernet/freescale/fec_main.c @@ -528,13 +528,6 @@ fec_restart(struct net_device *ndev, int duplex) /* Clear any outstanding interrupt. */ writel(0xffc00000, fep->hwp + FEC_IEVENT); - /* Setup multicast filter. */ - set_multicast_list(ndev); -#ifndef CONFIG_M5272 - writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); - writel(0, fep->hwp + FEC_HASH_TABLE_LOW); -#endif - /* Set maximum receive buffer size. */ writel(PKT_MAXBLR_SIZE, fep->hwp + FEC_R_BUFF_SIZE); @@ -655,6 +648,13 @@ fec_restart(struct net_device *ndev, int duplex) writel(rcntl, fep->hwp + FEC_R_CNTRL); + /* Setup multicast filter. */ + set_multicast_list(ndev); +#ifndef CONFIG_M5272 + writel(0, fep->hwp + FEC_HASH_TABLE_HIGH); + writel(0, fep->hwp + FEC_HASH_TABLE_LOW); +#endif + if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) { /* enable ENET endian swap */ ecntl |= (1 << 8); diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c index 4be9715..1fc8334 100644 --- a/drivers/net/ethernet/ibm/ibmveth.c +++ b/drivers/net/ethernet/ibm/ibmveth.c @@ -522,10 +522,21 @@ retry: return rc; } +static u64 ibmveth_encode_mac_addr(u8 *mac) +{ + int i; + u64 encoded = 0; + + for (i = 0; i < ETH_ALEN; i++) + encoded = (encoded << 8) | mac[i]; + + return encoded; +} + static int ibmveth_open(struct net_device *netdev) { struct ibmveth_adapter *adapter = netdev_priv(netdev); - u64 mac_address = 0; + u64 mac_address; int rxq_entries = 1; unsigned long lpar_rc; int rc; @@ -579,8 +590,7 @@ static int ibmveth_open(struct net_device *netdev) adapter->rx_queue.num_slots = rxq_entries; adapter->rx_queue.toggle = 1; - memcpy(&mac_address, netdev->dev_addr, netdev->addr_len); - mac_address = mac_address >> 16; + mac_address = ibmveth_encode_mac_addr(netdev->dev_addr); rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len; @@ -1183,8 +1193,8 @@ static void ibmveth_set_multicast_list(struct net_device *netdev) /* add the addresses to the filter table */ netdev_for_each_mc_addr(ha, netdev) { /* add the multicast address to the filter table */ - unsigned long mcast_addr = 0; - memcpy(((char *)&mcast_addr)+2, ha->addr, ETH_ALEN); + u64 mcast_addr; + mcast_addr = ibmveth_encode_mac_addr(ha->addr); lpar_rc = h_multicast_ctrl(adapter->vdev->unit_address, IbmVethMcastAddFilter, mcast_addr); @@ -1372,9 +1382,6 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) netif_napi_add(netdev, &adapter->napi, ibmveth_poll, 16); - adapter->mac_addr = 0; - memcpy(&adapter->mac_addr, mac_addr_p, 
ETH_ALEN); - netdev->irq = dev->irq; netdev->netdev_ops = &ibmveth_netdev_ops; netdev->ethtool_ops = &netdev_ethtool_ops; @@ -1383,7 +1390,7 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; netdev->features |= netdev->hw_features; - memcpy(netdev->dev_addr, &adapter->mac_addr, netdev->addr_len); + memcpy(netdev->dev_addr, mac_addr_p, ETH_ALEN); for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) { struct kobject *kobj = &adapter->rx_buff_pool[i].kobj; diff --git a/drivers/net/ethernet/ibm/ibmveth.h b/drivers/net/ethernet/ibm/ibmveth.h index 451ba79..1f37499 100644 --- a/drivers/net/ethernet/ibm/ibmveth.h +++ b/drivers/net/ethernet/ibm/ibmveth.h @@ -138,7 +138,6 @@ struct ibmveth_adapter { struct napi_struct napi; struct net_device_stats stats; unsigned int mcastFilterSize; - unsigned long mac_addr; void * buffer_list_addr; void * filter_list_addr; dma_addr_t buffer_list_dma; diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c index 3db5946..fa5ee71 100644 --- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c +++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c @@ -742,6 +742,14 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn, err = mlx4_en_uc_steer_add(priv, new_mac, &qpn, &entry->reg_id); + if (err) + return err; + if (priv->tunnel_reg_id) { + mlx4_flow_detach(priv->mdev->dev, priv->tunnel_reg_id); + priv->tunnel_reg_id = 0; + } + err = mlx4_en_tunnel_steer_add(priv, new_mac, qpn, + &priv->tunnel_reg_id); return err; } } @@ -1780,6 +1788,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach) mc_list[5] = priv->port; mlx4_multicast_detach(mdev->dev, &priv->rss_map.indir_qp, mc_list, MLX4_PROT_ETH, mclist->reg_id); + if (mclist->tunnel_reg_id) + mlx4_flow_detach(mdev->dev, mclist->tunnel_reg_id); } mlx4_en_clear_list(dev); list_for_each_entry_safe(mclist, tmp, &priv->curr_list, list) { diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c index 6e1ee21..d0d8dd8 100644 --- a/drivers/net/ethernet/mellanox/mlx4/fw.c +++ b/drivers/net/ethernet/mellanox/mlx4/fw.c @@ -129,13 +129,14 @@ static void dump_dev_cap_flags2(struct mlx4_dev *dev, u64 flags) [0] = "RSS support", [1] = "RSS Toeplitz Hash Function support", [2] = "RSS XOR Hash Function support", - [3] = "Device manage flow steering support", + [3] = "Device managed flow steering support", [4] = "Automatic MAC reassignment support", [5] = "Time stamping support", [6] = "VST (control vlan insertion/stripping) support", [7] = "FSM (MAC anti-spoofing) support", [8] = "Dynamic QP updates support", - [9] = "TCP/IP offloads/flow-steering for VXLAN support" + [9] = "Device managed flow steering IPoIB support", + [10] = "TCP/IP offloads/flow-steering for VXLAN support" }; int i; @@ -859,7 +860,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_CQ_TS_SUPPORT_OFFSET); /* For guests, disable vxlan tunneling */ - MLX4_GET(field, outbox, QUERY_DEV_CAP_VXLAN); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_VXLAN); field &= 0xf7; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_VXLAN); @@ -869,7 +870,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET); /* For guests, disable mw type 2 */ - MLX4_GET(bmme_flags, outbox, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); + MLX4_GET(bmme_flags, outbox->buf, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); bmme_flags &= 
~MLX4_BMME_FLAG_TYPE_2_WIN; MLX4_PUT(outbox->buf, bmme_flags, QUERY_DEV_CAP_BMME_FLAGS_OFFSET); @@ -883,7 +884,7 @@ int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave, } /* turn off ipoib managed steering for guests */ - MLX4_GET(field, outbox, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); + MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); field &= ~0x80; MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_FLOW_STEERING_IPOIB_OFFSET); diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c index 4c441aa..abd0b1d 100644 --- a/drivers/net/ethernet/mellanox/mlx4/main.c +++ b/drivers/net/ethernet/mellanox/mlx4/main.c @@ -149,6 +149,8 @@ struct mlx4_port_config { struct pci_dev *pdev; }; +static atomic_t pf_loading = ATOMIC_INIT(0); + int mlx4_check_port_params(struct mlx4_dev *dev, enum mlx4_port_type *port_type) { @@ -748,7 +750,7 @@ static void mlx4_request_modules(struct mlx4_dev *dev) has_eth_port = true; } - if (has_ib_port) + if (has_ib_port || (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE)) request_module_nowait(IB_DRV_NAME); if (has_eth_port) request_module_nowait(EN_DRV_NAME); @@ -1406,6 +1408,11 @@ static int mlx4_init_slave(struct mlx4_dev *dev) u32 slave_read; u32 cmd_channel_ver; + if (atomic_read(&pf_loading)) { + mlx4_warn(dev, "PF is not ready. Deferring probe\n"); + return -EPROBE_DEFER; + } + mutex_lock(&priv->cmd.slave_cmd_mutex); priv->cmd.max_cmds = 1; mlx4_warn(dev, "Sending reset\n"); @@ -2311,7 +2318,11 @@ static int __mlx4_init_one(struct pci_dev *pdev, int pci_dev_data) if (num_vfs) { mlx4_warn(dev, "Enabling SR-IOV with %d VFs\n", num_vfs); + + atomic_inc(&pf_loading); err = pci_enable_sriov(pdev, num_vfs); + atomic_dec(&pf_loading); + if (err) { mlx4_err(dev, "Failed to enable SR-IOV, continuing without SR-IOV (err = %d).\n", err); @@ -2676,6 +2687,7 @@ static struct pci_driver mlx4_driver = { .name = DRV_NAME, .id_table = mlx4_pci_table, .probe = mlx4_init_one, + .shutdown = mlx4_remove_one, .remove = mlx4_remove_one, .err_handler = &mlx4_err_handler, }; diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index cf94733..90c14d1 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c @@ -209,7 +209,7 @@ static const struct { [RTL_GIGA_MAC_VER_16] = _R("RTL8101e", RTL_TD_0, NULL, JUMBO_1K, true), [RTL_GIGA_MAC_VER_17] = - _R("RTL8168b/8111b", RTL_TD_1, NULL, JUMBO_4K, false), + _R("RTL8168b/8111b", RTL_TD_0, NULL, JUMBO_4K, false), [RTL_GIGA_MAC_VER_18] = _R("RTL8168cp/8111cp", RTL_TD_1, NULL, JUMBO_6K, false), [RTL_GIGA_MAC_VER_19] = diff --git a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c index 72d282b..c553f6b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/chain_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/chain_mode.c @@ -151,7 +151,7 @@ static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) sizeof(struct dma_desc))); } -const struct stmmac_chain_mode_ops chain_mode_ops = { +const struct stmmac_mode_ops chain_mode_ops = { .init = stmmac_init_dma_chain, .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, diff --git a/drivers/net/ethernet/stmicro/stmmac/common.h b/drivers/net/ethernet/stmicro/stmmac/common.h index 7834a39..74610f3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/common.h +++ b/drivers/net/ethernet/stmicro/stmmac/common.h @@ -419,20 +419,13 @@ struct mii_regs { unsigned int data; /* MII Data */ }; -struct stmmac_ring_mode_ops { 
- unsigned int (*is_jumbo_frm) (int len, int ehn_desc); - unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); - void (*refill_desc3) (void *priv, struct dma_desc *p); - void (*init_desc3) (struct dma_desc *p); - void (*clean_desc3) (void *priv, struct dma_desc *p); - int (*set_16kib_bfsize) (int mtu); -}; - -struct stmmac_chain_mode_ops { +struct stmmac_mode_ops { void (*init) (void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc); unsigned int (*is_jumbo_frm) (int len, int ehn_desc); unsigned int (*jumbo_frm) (void *priv, struct sk_buff *skb, int csum); + int (*set_16kib_bfsize)(int mtu); + void (*init_desc3)(struct dma_desc *p); void (*refill_desc3) (void *priv, struct dma_desc *p); void (*clean_desc3) (void *priv, struct dma_desc *p); }; @@ -441,8 +434,7 @@ struct mac_device_info { const struct stmmac_ops *mac; const struct stmmac_desc_ops *desc; const struct stmmac_dma_ops *dma; - const struct stmmac_ring_mode_ops *ring; - const struct stmmac_chain_mode_ops *chain; + const struct stmmac_mode_ops *mode; const struct stmmac_hwtimestamp *ptp; struct mii_regs mii; /* MII register Addresses */ struct mac_link link; @@ -460,7 +452,7 @@ void stmmac_get_mac_addr(void __iomem *ioaddr, unsigned char *addr, void stmmac_set_mac(void __iomem *ioaddr, bool enable); void dwmac_dma_flush_tx_fifo(void __iomem *ioaddr); -extern const struct stmmac_ring_mode_ops ring_mode_ops; -extern const struct stmmac_chain_mode_ops chain_mode_ops; +extern const struct stmmac_mode_ops ring_mode_ops; +extern const struct stmmac_mode_ops chain_mode_ops; #endif /* __COMMON_H__ */ diff --git a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c index a96c7c2..650a4be 100644 --- a/drivers/net/ethernet/stmicro/stmmac/ring_mode.c +++ b/drivers/net/ethernet/stmicro/stmmac/ring_mode.c @@ -100,10 +100,9 @@ static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; - if (unlikely(priv->plat->has_gmac)) - /* Fill DES3 in case of RING mode */ - if (priv->dma_buf_sz >= BUF_SIZE_8KiB) - p->des3 = p->des2 + BUF_SIZE_8KiB; + /* Fill DES3 in case of RING mode */ + if (priv->dma_buf_sz >= BUF_SIZE_8KiB) + p->des3 = p->des2 + BUF_SIZE_8KiB; } /* In ring mode we need to fill the desc3 because it is used as buffer */ @@ -126,7 +125,7 @@ static int stmmac_set_16kib_bfsize(int mtu) return ret; } -const struct stmmac_ring_mode_ops ring_mode_ops = { +const struct stmmac_mode_ops ring_mode_ops = { .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, .refill_desc3 = stmmac_refill_desc3, diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 078ad0e..8543e1c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -92,8 +92,8 @@ static int tc = TC_DEFAULT; module_param(tc, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(tc, "DMA threshold control value"); -#define DMA_BUFFER_SIZE BUF_SIZE_4KiB -static int buf_sz = DMA_BUFFER_SIZE; +#define DEFAULT_BUFSIZE 1536 +static int buf_sz = DEFAULT_BUFSIZE; module_param(buf_sz, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(buf_sz, "DMA buffer size"); @@ -136,8 +136,8 @@ static void stmmac_verify_args(void) dma_rxsize = DMA_RX_SIZE; if (unlikely(dma_txsize < 0)) dma_txsize = DMA_TX_SIZE; - if (unlikely((buf_sz < DMA_BUFFER_SIZE) || (buf_sz > BUF_SIZE_16KiB))) - buf_sz = DMA_BUFFER_SIZE; + if (unlikely((buf_sz < 
DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB))) - buf_sz = DMA_BUFFER_SIZE; + buf_sz = DEFAULT_BUFSIZE; if (unlikely(flow_ctrl > 1)) flow_ctrl = FLOW_AUTO; else if (likely(flow_ctrl < 0)) @@ -286,10 +286,25 @@ bool stmmac_eee_init(struct stmmac_priv *priv) /* MAC core supports the EEE feature. */ if (priv->dma_cap.eee) { + int tx_lpi_timer = priv->tx_lpi_timer; + /* Check if the PHY supports EEE */ - if (phy_init_eee(priv->phydev, 1)) + if (phy_init_eee(priv->phydev, 1)) { + /* EEE may stop being supported at run time (for example + * because the link partner's capabilities have changed). + * In that case the driver disables its own timers. + */ + if (priv->eee_active) { + pr_debug("stmmac: disable EEE\n"); + del_timer_sync(&priv->eee_ctrl_timer); + priv->hw->mac->set_eee_timer(priv->ioaddr, 0, + tx_lpi_timer); + } + priv->eee_active = 0; goto out; - + } + /* Activate the EEE and start timers */ if (!priv->eee_active) { priv->eee_active = 1; init_timer(&priv->eee_ctrl_timer); @@ -300,13 +315,13 @@ bool stmmac_eee_init(struct stmmac_priv *priv) priv->hw->mac->set_eee_timer(priv->ioaddr, STMMAC_DEFAULT_LIT_LS, - priv->tx_lpi_timer); + tx_lpi_timer); } else /* Set HW EEE according to the speed */ priv->hw->mac->set_eee_pls(priv->ioaddr, priv->phydev->link); - pr_info("stmmac: Energy-Efficient Ethernet initialized\n"); + pr_debug("stmmac: Energy-Efficient Ethernet initialized\n"); ret = true; } @@ -886,10 +901,10 @@ static int stmmac_set_bfsize(int mtu, int bufsize) ret = BUF_SIZE_8KiB; else if (mtu >= BUF_SIZE_2KiB) ret = BUF_SIZE_4KiB; - else if (mtu >= DMA_BUFFER_SIZE) + else if (mtu > DEFAULT_BUFSIZE) ret = BUF_SIZE_2KiB; else - ret = DMA_BUFFER_SIZE; + ret = DEFAULT_BUFSIZE; return ret; } @@ -951,9 +966,9 @@ static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, p->des2 = priv->rx_skbuff_dma[i]; - if ((priv->mode == STMMAC_RING_MODE) && + if ((priv->hw->mode->init_desc3) && (priv->dma_buf_sz == BUF_SIZE_16KiB)) - priv->hw->ring->init_desc3(p); + priv->hw->mode->init_desc3(p); return 0; } @@ -984,11 +999,8 @@ static int init_dma_desc_rings(struct net_device *dev) unsigned int bfsize = 0; int ret = -ENOMEM; - /* Set the max buffer size according to the DESC mode * and the MTU. Note that RING mode allows 16KiB bsize.
- */ - if (priv->mode == STMMAC_RING_MODE) - bfsize = priv->hw->ring->set_16kib_bfsize(dev->mtu); + if (priv->hw->mode->set_16kib_bfsize) + bfsize = priv->hw->mode->set_16kib_bfsize(dev->mtu); if (bfsize < BUF_SIZE_16KiB) bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); @@ -1029,15 +1041,15 @@ static int init_dma_desc_rings(struct net_device *dev) /* Setup the chained descriptor addresses */ if (priv->mode == STMMAC_CHAIN_MODE) { if (priv->extend_desc) { - priv->hw->chain->init(priv->dma_erx, priv->dma_rx_phy, - rxsize, 1); - priv->hw->chain->init(priv->dma_etx, priv->dma_tx_phy, - txsize, 1); + priv->hw->mode->init(priv->dma_erx, priv->dma_rx_phy, + rxsize, 1); + priv->hw->mode->init(priv->dma_etx, priv->dma_tx_phy, + txsize, 1); } else { - priv->hw->chain->init(priv->dma_rx, priv->dma_rx_phy, - rxsize, 0); - priv->hw->chain->init(priv->dma_tx, priv->dma_tx_phy, - txsize, 0); + priv->hw->mode->init(priv->dma_rx, priv->dma_rx_phy, + rxsize, 0); + priv->hw->mode->init(priv->dma_tx, priv->dma_tx_phy, + txsize, 0); } } @@ -1288,7 +1300,7 @@ static void stmmac_tx_clean(struct stmmac_priv *priv) DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = 0; } - priv->hw->ring->clean_desc3(priv, p); + priv->hw->mode->clean_desc3(priv, p); if (likely(skb != NULL)) { dev_kfree_skb(skb); @@ -1844,6 +1856,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) int nfrags = skb_shinfo(skb)->nr_frags; struct dma_desc *desc, *first; unsigned int nopaged_len = skb_headlen(skb); + unsigned int enh_desc = priv->plat->enh_desc; if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { if (!netif_queue_stopped(dev)) { @@ -1871,27 +1884,19 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev) first = desc; /* To program the descriptors according to the size of the frame */ - if (priv->mode == STMMAC_RING_MODE) { - is_jumbo = priv->hw->ring->is_jumbo_frm(skb->len, - priv->plat->enh_desc); - if (unlikely(is_jumbo)) - entry = priv->hw->ring->jumbo_frm(priv, skb, - csum_insertion); - } else { - is_jumbo = priv->hw->chain->is_jumbo_frm(skb->len, - priv->plat->enh_desc); - if (unlikely(is_jumbo)) - entry = priv->hw->chain->jumbo_frm(priv, skb, - csum_insertion); - } + if (enh_desc) + is_jumbo = priv->hw->mode->is_jumbo_frm(skb->len, enh_desc); + if (likely(!is_jumbo)) { desc->des2 = dma_map_single(priv->device, skb->data, nopaged_len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, nopaged_len, csum_insertion, priv->mode); - } else + } else { desc = first; + entry = priv->hw->mode->jumbo_frm(priv, skb, csum_insertion); + } for (i = 0; i < nfrags; i++) { const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; @@ -2029,7 +2034,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv) p->des2 = priv->rx_skbuff_dma[entry]; - priv->hw->ring->refill_desc3(priv, p); + priv->hw->mode->refill_desc3(priv, p); if (netif_msg_rx_status(priv)) pr_debug("\trefill entry #%d\n", entry); @@ -2633,11 +2638,11 @@ static int stmmac_hw_init(struct stmmac_priv *priv) /* To use the chained or ring mode */ if (chain_mode) { - priv->hw->chain = &chain_mode_ops; + priv->hw->mode = &chain_mode_ops; pr_info(" Chain mode enabled\n"); priv->mode = STMMAC_CHAIN_MODE; } else { - priv->hw->ring = &ring_mode_ops; + priv->hw->mode = &ring_mode_ops; pr_info(" Ring mode enabled\n"); priv->mode = STMMAC_RING_MODE; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c index c61bc72b..8fb32a8 
100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c @@ -36,7 +36,7 @@ static const struct of_device_id stmmac_dt_ids[] = { #ifdef CONFIG_DWMAC_STI { .compatible = "st,stih415-dwmac", .data = &sti_gmac_data}, { .compatible = "st,stih416-dwmac", .data = &sti_gmac_data}, - { .compatible = "st,stih127-dwmac", .data = &sti_gmac_data}, + { .compatible = "st,stid127-dwmac", .data = &sti_gmac_data}, #endif /* SoC specific glue layers should come before generic bindings */ { .compatible = "st,spear600-gmac"}, diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 3d06990..4e4cf9e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -674,6 +674,8 @@ static int netvsc_probe(struct hv_device *dev, if (!net) return -ENOMEM; + netif_carrier_off(net); + net_device_ctx = netdev_priv(net); net_device_ctx->device_ctx = dev; hv_set_drvdata(dev, net); @@ -706,6 +708,8 @@ static int netvsc_probe(struct hv_device *dev, pr_err("Unable to register netdev.\n"); rndis_filter_device_remove(dev); free_netdev(net); + } else { + schedule_delayed_work(&net_device_ctx->dwork, 0); } return ret; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index 54553df..4a37e3d 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -240,6 +240,22 @@ static int rndis_filter_send_request(struct rndis_device *dev, return ret; } +static void rndis_set_link_state(struct rndis_device *rdev, + struct rndis_request *request) +{ + u32 link_status; + struct rndis_query_complete *query_complete; + + query_complete = &request->response_msg.msg.query_complete; + + if (query_complete->status == RNDIS_STATUS_SUCCESS && + query_complete->info_buflen == sizeof(u32)) { + memcpy(&link_status, (void *)((unsigned long)query_complete + + query_complete->info_buf_offset), sizeof(u32)); + rdev->link_state = link_status != 0; + } +} + static void rndis_filter_receive_response(struct rndis_device *dev, struct rndis_message *resp) { @@ -269,6 +285,10 @@ static void rndis_filter_receive_response(struct rndis_device *dev, sizeof(struct rndis_message) + RNDIS_EXT_LEN) { memcpy(&request->response_msg, resp, resp->msg_len); + if (request->request_msg.ndis_msg_type == + RNDIS_MSG_QUERY && request->request_msg.msg. + query_req.oid == RNDIS_OID_GEN_MEDIA_CONNECT_STATUS) + rndis_set_link_state(dev, request); } else { netdev_err(ndev, "rndis response buffer overflow " @@ -674,7 +694,6 @@ static int rndis_filter_query_device_link_status(struct rndis_device *dev) ret = rndis_filter_query_device(dev, RNDIS_OID_GEN_MEDIA_CONNECT_STATUS, &link_status, &size); - dev->link_state = (link_status != 0) ? 
true : false; return ret; } diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 934a12c..ae38a98 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -655,12 +655,12 @@ at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb) int rc; unsigned long flags; - spin_lock(&lp->lock); + spin_lock_irqsave(&lp->lock, flags); if (lp->irq_busy) { - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); return -EBUSY; } - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); might_sleep(); @@ -944,10 +944,11 @@ static void at86rf230_irqwork_level(struct work_struct *work) static irqreturn_t at86rf230_isr(int irq, void *data) { struct at86rf230_local *lp = data; + unsigned long flags; - spin_lock(&lp->lock); + spin_lock_irqsave(&lp->lock, flags); lp->irq_busy = 1; - spin_unlock(&lp->lock); + spin_unlock_irqrestore(&lp->lock, flags); schedule_work(&lp->irqwork); diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 643b5d6..1d788f1 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -186,9 +186,9 @@ static const struct phy_setting settings[] = { * of that setting. Returns the index of the last setting if * none of the others match. */ -static inline int phy_find_setting(int speed, int duplex) +static inline unsigned int phy_find_setting(int speed, int duplex) { - int idx = 0; + unsigned int idx = 0; while (idx < ARRAY_SIZE(settings) && (settings[idx].speed != speed || settings[idx].duplex != duplex)) @@ -207,7 +207,7 @@ static inline int phy_find_setting(int speed, int duplex) * the mask in features. Returns the index of the last setting * if nothing else matches. */ -static inline int phy_find_valid(int idx, u32 features) +static inline unsigned int phy_find_valid(unsigned int idx, u32 features) { while (idx < MAX_NUM_SETTINGS && !(settings[idx].setting & features)) idx++; @@ -226,7 +226,7 @@ static inline int phy_find_valid(int idx, u32 features) static void phy_sanitize_settings(struct phy_device *phydev) { u32 features = phydev->supported; - int idx; + unsigned int idx; /* Sanitize settings based on PHY capabilities */ if ((features & SUPPORTED_Autoneg) == 0) @@ -979,7 +979,8 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable) (phydev->interface == PHY_INTERFACE_MODE_RGMII))) { int eee_lp, eee_cap, eee_adv; u32 lp, cap, adv; - int idx, status; + int status; + unsigned int idx; /* Read phy status to properly get the right settings */ status = phy_read_status(phydev); diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile index 433f0a0..e2797f1 100644 --- a/drivers/net/usb/Makefile +++ b/drivers/net/usb/Makefile @@ -11,7 +11,7 @@ obj-$(CONFIG_USB_HSO) += hso.o obj-$(CONFIG_USB_NET_AX8817X) += asix.o asix-y := asix_devices.o asix_common.o ax88172a.o obj-$(CONFIG_USB_NET_AX88179_178A) += ax88179_178a.o -obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o r815x.o +obj-$(CONFIG_USB_NET_CDCETHER) += cdc_ether.o obj-$(CONFIG_USB_NET_CDC_EEM) += cdc_eem.o obj-$(CONFIG_USB_NET_DM9601) += dm9601.o obj-$(CONFIG_USB_NET_SR9700) += sr9700.o diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c index d2e6fdb..054e59c 100644 --- a/drivers/net/usb/ax88179_178a.c +++ b/drivers/net/usb/ax88179_178a.c @@ -1029,20 +1029,12 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf) dev->mii.phy_id = 0x03; dev->mii.supports_gmii = 1; - if (usb_device_no_sg_constraint(dev->udev)) - dev->can_dma_sg = 1; - dev->net->features |= 
NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; dev->net->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM; - if (dev->can_dma_sg) { - dev->net->features |= NETIF_F_SG | NETIF_F_TSO; - dev->net->hw_features |= NETIF_F_SG | NETIF_F_TSO; - } - /* Enable checksum offload */ *tmp = AX_RXCOE_IP | AX_RXCOE_TCP | AX_RXCOE_UDP | AX_RXCOE_TCPV6 | AX_RXCOE_UDPV6; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 42e1769..bd363b2 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -652,6 +652,13 @@ static const struct usb_device_id products[] = { .driver_info = 0, }, +/* Samsung USB Ethernet Adapters */ +{ + USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, 0xa101, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = 0, +}, + /* WHITELIST!!! * * CDC Ether uses two interfaces, not necessarily consecutive. diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c index aa1d5b2..18e12a3 100644 --- a/drivers/net/usb/r8152.c +++ b/drivers/net/usb/r8152.c @@ -3367,6 +3367,12 @@ static int rtl8152_probe(struct usb_interface *intf, struct net_device *netdev; int ret; + if (udev->actconfig->desc.bConfigurationValue != 1) { + usb_driver_set_configuration(udev, 1); + return -ENODEV; + } + + usb_reset_device(udev); netdev = alloc_etherdev(sizeof(struct r8152)); if (!netdev) { dev_err(&intf->dev, "Out of memory\n"); @@ -3456,9 +3462,9 @@ static void rtl8152_disconnect(struct usb_interface *intf) /* table of devices that work with this driver */ static struct usb_device_id rtl8152_table[] = { - {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, - {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, - {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, + {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8152)}, + {USB_DEVICE(VENDOR_ID_REALTEK, PRODUCT_ID_RTL8153)}, + {USB_DEVICE(VENDOR_ID_SAMSUNG, PRODUCT_ID_SAMSUNG)}, {} }; diff --git a/drivers/net/usb/r815x.c b/drivers/net/usb/r815x.c deleted file mode 100644 index f0a8791..0000000 --- a/drivers/net/usb/r815x.c +++ /dev/null @@ -1,248 +0,0 @@ -#include <linux/module.h> -#include <linux/netdevice.h> -#include <linux/mii.h> -#include <linux/usb.h> -#include <linux/usb/cdc.h> -#include <linux/usb/usbnet.h> - -#define RTL815x_REQT_READ 0xc0 -#define RTL815x_REQT_WRITE 0x40 -#define RTL815x_REQ_GET_REGS 0x05 -#define RTL815x_REQ_SET_REGS 0x05 - -#define MCU_TYPE_PLA 0x0100 -#define OCP_BASE 0xe86c -#define BASE_MII 0xa400 - -#define BYTE_EN_DWORD 0xff -#define BYTE_EN_WORD 0x33 -#define BYTE_EN_BYTE 0x11 - -#define R815x_PHY_ID 32 -#define REALTEK_VENDOR_ID 0x0bda - - -static int pla_read_word(struct usb_device *udev, u16 index) -{ - int ret; - u8 shift = index & 2; - __le32 *tmp; - - tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - index &= ~3; - - ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, - index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); - if (ret < 0) - goto out2; - - ret = __le32_to_cpu(*tmp); - ret >>= (shift * 8); - ret &= 0xffff; - -out2: - kfree(tmp); - return ret; -} - -static int pla_write_word(struct usb_device *udev, u16 index, u32 data) -{ - __le32 *tmp; - u32 mask = 0xffff; - u16 byen = BYTE_EN_WORD; - u8 shift = index & 2; - int ret; - - tmp = kmalloc(sizeof(*tmp), GFP_KERNEL); - if (!tmp) - return -ENOMEM; - - data &= mask; - - if (shift) { - byen <<= shift; - mask <<= (shift * 8); - data <<= (shift * 8); - index &= ~3; - } - - ret = 
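/*
 * The r815x.c removal that begins above also retires a helper worth
 * noting as an idiom: pla_read_word() kmalloc()s a 4-byte buffer for
 * usb_control_msg() instead of using stack memory, because the USB
 * core may DMA into that buffer. A condensed sketch of the idiom,
 * reusing the deleted RTL815x_ and MCU_TYPE_PLA constants (the
 * example_ wrapper itself is hypothetical):
 *
 *	static int example_read_reg(struct usb_device *udev, u16 index,
 *				    u32 *val)
 *	{
 *		__le32 *buf;
 *		int ret;
 *
 *		buf = kmalloc(sizeof(*buf), GFP_KERNEL);  // DMA-able
 *		if (!buf)
 *			return -ENOMEM;
 *
 *		ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
 *				      RTL815x_REQ_GET_REGS,
 *				      RTL815x_REQT_READ, index,
 *				      MCU_TYPE_PLA, buf, sizeof(*buf), 500);
 *		if (ret >= 0)
 *			*val = le32_to_cpu(*buf);
 *		kfree(buf);
 *		return ret < 0 ? ret : 0;
 *	}
 */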
usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), - RTL815x_REQ_GET_REGS, RTL815x_REQT_READ, - index, MCU_TYPE_PLA, tmp, sizeof(*tmp), 500); - if (ret < 0) - goto out3; - - data |= __le32_to_cpu(*tmp) & ~mask; - *tmp = __cpu_to_le32(data); - - ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), - RTL815x_REQ_SET_REGS, RTL815x_REQT_WRITE, - index, MCU_TYPE_PLA | byen, tmp, sizeof(*tmp), - 500); - -out3: - kfree(tmp); - return ret; -} - -static int ocp_reg_read(struct usbnet *dev, u16 addr) -{ - u16 ocp_base, ocp_index; - int ret; - - ocp_base = addr & 0xf000; - ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); - if (ret < 0) - goto out; - - ocp_index = (addr & 0x0fff) | 0xb000; - ret = pla_read_word(dev->udev, ocp_index); - -out: - return ret; -} - -static int ocp_reg_write(struct usbnet *dev, u16 addr, u16 data) -{ - u16 ocp_base, ocp_index; - int ret; - - ocp_base = addr & 0xf000; - ret = pla_write_word(dev->udev, OCP_BASE, ocp_base); - if (ret < 0) - goto out1; - - ocp_index = (addr & 0x0fff) | 0xb000; - ret = pla_write_word(dev->udev, ocp_index, data); - -out1: - return ret; -} - -static int r815x_mdio_read(struct net_device *netdev, int phy_id, int reg) -{ - struct usbnet *dev = netdev_priv(netdev); - int ret; - - if (phy_id != R815x_PHY_ID) - return -EINVAL; - - if (usb_autopm_get_interface(dev->intf) < 0) - return -ENODEV; - - ret = ocp_reg_read(dev, BASE_MII + reg * 2); - - usb_autopm_put_interface(dev->intf); - return ret; -} - -static -void r815x_mdio_write(struct net_device *netdev, int phy_id, int reg, int val) -{ - struct usbnet *dev = netdev_priv(netdev); - - if (phy_id != R815x_PHY_ID) - return; - - if (usb_autopm_get_interface(dev->intf) < 0) - return; - - ocp_reg_write(dev, BASE_MII + reg * 2, val); - - usb_autopm_put_interface(dev->intf); -} - -static int r8153_bind(struct usbnet *dev, struct usb_interface *intf) -{ - int status; - - status = usbnet_cdc_bind(dev, intf); - if (status < 0) - return status; - - dev->mii.dev = dev->net; - dev->mii.mdio_read = r815x_mdio_read; - dev->mii.mdio_write = r815x_mdio_write; - dev->mii.phy_id_mask = 0x3f; - dev->mii.reg_num_mask = 0x1f; - dev->mii.phy_id = R815x_PHY_ID; - dev->mii.supports_gmii = 1; - - return status; -} - -static int r8152_bind(struct usbnet *dev, struct usb_interface *intf) -{ - int status; - - status = usbnet_cdc_bind(dev, intf); - if (status < 0) - return status; - - dev->mii.dev = dev->net; - dev->mii.mdio_read = r815x_mdio_read; - dev->mii.mdio_write = r815x_mdio_write; - dev->mii.phy_id_mask = 0x3f; - dev->mii.reg_num_mask = 0x1f; - dev->mii.phy_id = R815x_PHY_ID; - dev->mii.supports_gmii = 0; - - return status; -} - -static const struct driver_info r8152_info = { - .description = "RTL8152 ECM Device", - .flags = FLAG_ETHER | FLAG_POINTTOPOINT, - .bind = r8152_bind, - .unbind = usbnet_cdc_unbind, - .status = usbnet_cdc_status, - .manage_power = usbnet_manage_power, -}; - -static const struct driver_info r8153_info = { - .description = "RTL8153 ECM Device", - .flags = FLAG_ETHER | FLAG_POINTTOPOINT, - .bind = r8153_bind, - .unbind = usbnet_cdc_unbind, - .status = usbnet_cdc_status, - .manage_power = usbnet_manage_power, -}; - -static const struct usb_device_id products[] = { -{ - USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8152, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long) &r8152_info, -}, - -{ - USB_DEVICE_AND_INTERFACE_INFO(REALTEK_VENDOR_ID, 0x8153, USB_CLASS_COMM, - USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), - .driver_info = (unsigned long) 
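/*
 * The deleted ocp_reg_read()/ocp_reg_write() above use a banked
 * register scheme: the high bits of the OCP address select a window,
 * written to OCP_BASE, and the low bits index into that window. The
 * access pattern reduced to its core, reusing names from the removed
 * file (the example_ wrapper is hypothetical):
 *
 *	static int example_ocp_read(struct usbnet *dev, u16 addr)
 *	{
 *		int ret;
 *
 *		// select the 4K window that contains addr
 *		ret = pla_write_word(dev->udev, OCP_BASE, addr & 0xf000);
 *		if (ret < 0)
 *			return ret;
 *		// then access the offset within that window
 *		return pla_read_word(dev->udev, (addr & 0x0fff) | 0xb000);
 *	}
 */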
&r8153_info, -}, - - { }, /* END */ -}; -MODULE_DEVICE_TABLE(usb, products); - -static struct usb_driver r815x_driver = { - .name = "r815x", - .id_table = products, - .probe = usbnet_probe, - .disconnect = usbnet_disconnect, - .suspend = usbnet_suspend, - .resume = usbnet_resume, - .reset_resume = usbnet_resume, - .supports_autosuspend = 1, - .disable_hub_initiated_lpm = 1, -}; - -module_usb_driver(r815x_driver); - -MODULE_AUTHOR("Hayes Wang"); -MODULE_DESCRIPTION("Realtek USB ECM device"); -MODULE_LICENSE("GPL"); diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c index 9275c8c..28965ad 100644 --- a/drivers/net/vmxnet3/vmxnet3_drv.c +++ b/drivers/net/vmxnet3/vmxnet3_drv.c @@ -1762,11 +1762,20 @@ vmxnet3_netpoll(struct net_device *netdev) { struct vmxnet3_adapter *adapter = netdev_priv(netdev); - if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) - vmxnet3_disable_all_intrs(adapter); - - vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size); - vmxnet3_enable_all_intrs(adapter); + switch (adapter->intr.type) { +#ifdef CONFIG_PCI_MSI + case VMXNET3_IT_MSIX: { + int i; + for (i = 0; i < adapter->num_rx_queues; i++) + vmxnet3_msix_rx(0, &adapter->rx_queue[i]); + break; + } +#endif + case VMXNET3_IT_MSI: + default: + vmxnet3_intr(0, adapter->netdev); + break; + } } #endif /* CONFIG_NET_POLL_CONTROLLER */ diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c index 38a54a3..2aa3ee9 100644 --- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c +++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c @@ -906,8 +906,11 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif, lockdep_assert_held(&mvm->mutex); - /* Rssi update while not associated ?! */ - if (WARN_ON_ONCE(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT)) + /* + * Rssi update while not associated - can happen since the statistics + * are handled asynchronously + */ + if (mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT) return; /* No BT - reports should be disabled */ diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c index 0f52e96..1f97631 100644 --- a/drivers/net/wireless/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/iwlwifi/pcie/drv.c @@ -360,13 +360,12 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = { /* 7265 Series */ {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x5112, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5100, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095A, 0x510A, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)}, - {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_n_cfg)}, {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, + {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, diff --git a/drivers/net/wireless/mwifiex/11ac.c b/drivers/net/wireless/mwifiex/11ac.c index bb43251..c92f27a 100644 --- a/drivers/net/wireless/mwifiex/11ac.c +++ b/drivers/net/wireless/mwifiex/11ac.c @@ -192,8 +192,7 @@ int mwifiex_cmd_append_11ac_tlv(struct mwifiex_private *priv, vht_cap->header.len = cpu_to_le16(sizeof(struct ieee80211_vht_cap)); memcpy((u8 *)vht_cap + sizeof(struct mwifiex_ie_types_header), - (u8 *)bss_desc->bcn_vht_cap + - sizeof(struct ieee_types_header), + 
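/*
 * The vmxnet3_netpoll() rewrite above replaces a single-ring poll
 * with a dispatch on the interrupt mode: under MSI-X every RX queue
 * has its own vector and handler, so netpoll must run each of them,
 * while MSI/INTx funnels the whole device through one handler. The
 * shape of the fix, with hypothetical example_* names:
 *
 *	static void example_netpoll(struct net_device *netdev)
 *	{
 *		struct example_adapter *ad = netdev_priv(netdev);
 *		int i;
 *
 *		if (ad->using_msix) {
 *			for (i = 0; i < ad->num_rx_queues; i++)
 *				example_msix_rx(0, &ad->rx_queue[i]);
 *		} else {
 *			example_intr(0, netdev);  // MSI/INTx: one handler
 *		}
 *	}
 */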
(u8 *)bss_desc->bcn_vht_cap, le16_to_cpu(vht_cap->header.len)); mwifiex_fill_vht_cap_tlv(priv, &vht_cap->vht_cap, diff --git a/drivers/net/wireless/mwifiex/11n.c b/drivers/net/wireless/mwifiex/11n.c index 79ead92..d9c65b6 100644 --- a/drivers/net/wireless/mwifiex/11n.c +++ b/drivers/net/wireless/mwifiex/11n.c @@ -317,8 +317,7 @@ mwifiex_cmd_append_11n_tlv(struct mwifiex_private *priv, ht_cap->header.len = cpu_to_le16(sizeof(struct ieee80211_ht_cap)); memcpy((u8 *) ht_cap + sizeof(struct mwifiex_ie_types_header), - (u8 *) bss_desc->bcn_ht_cap + - sizeof(struct ieee_types_header), + (u8 *)bss_desc->bcn_ht_cap, le16_to_cpu(ht_cap->header.len)); mwifiex_fill_cap_info(priv, radio_type, &ht_cap->ht_cap); diff --git a/drivers/net/wireless/mwifiex/scan.c b/drivers/net/wireless/mwifiex/scan.c index 0e8ca7b..f139244 100644 --- a/drivers/net/wireless/mwifiex/scan.c +++ b/drivers/net/wireless/mwifiex/scan.c @@ -2311,12 +2311,12 @@ mwifiex_save_curr_bcn(struct mwifiex_private *priv) curr_bss->ht_info_offset); if (curr_bss->bcn_vht_cap) - curr_bss->bcn_ht_cap = (void *)(curr_bss->beacon_buf + - curr_bss->vht_cap_offset); + curr_bss->bcn_vht_cap = (void *)(curr_bss->beacon_buf + + curr_bss->vht_cap_offset); if (curr_bss->bcn_vht_oper) - curr_bss->bcn_ht_oper = (void *)(curr_bss->beacon_buf + - curr_bss->vht_info_offset); + curr_bss->bcn_vht_oper = (void *)(curr_bss->beacon_buf + + curr_bss->vht_info_offset); if (curr_bss->bcn_bss_co_2040) curr_bss->bcn_bss_co_2040 = diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c index 83a71ac..a6a8c15 100644 --- a/drivers/net/xen-netback/interface.c +++ b/drivers/net/xen-netback/interface.c @@ -148,8 +148,7 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) /* If the skb is GSO then we'll also need an extra slot for the * metadata. */ - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || - skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + if (skb_is_gso(skb)) min_slots_needed++; /* If the skb can't possibly fit in the remaining slots diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c index bc94320..5a8c4a4 100644 --- a/drivers/net/xen-netback/netback.c +++ b/drivers/net/xen-netback/netback.c @@ -243,7 +243,7 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, struct gnttab_copy *copy_gop; struct xenvif_rx_meta *meta; unsigned long bytes; - int gso_type; + int gso_type = XEN_NETIF_GSO_TYPE_NONE; /* Data must not cross a page boundary. */ BUG_ON(size + offset > PAGE_SIZE<<compound_order(page)); @@ -309,12 +309,12 @@ static void xenvif_gop_frag_copy(struct xenvif *vif, struct sk_buff *skb, } /* Leave a gap for the GSO descriptor. 
*/ - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - else - gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; + } if (*head && ((1 << gso_type) & vif->gso_mask)) vif->rx.req_cons++; @@ -348,22 +348,18 @@ static int xenvif_gop_skb(struct sk_buff *skb, int head = 1; int old_meta_prod; int gso_type; - int gso_size; struct ubuf_info *ubuf = skb_shinfo(skb)->destructor_arg; grant_ref_t foreign_grefs[MAX_SKB_FRAGS]; struct xenvif *foreign_vif = NULL; old_meta_prod = npo->meta_prod; - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV4; - gso_size = skb_shinfo(skb)->gso_size; - } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { - gso_type = XEN_NETIF_GSO_TYPE_TCPV6; - gso_size = skb_shinfo(skb)->gso_size; - } else { - gso_type = XEN_NETIF_GSO_TYPE_NONE; - gso_size = 0; + gso_type = XEN_NETIF_GSO_TYPE_NONE; + if (skb_is_gso(skb)) { + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) + gso_type = XEN_NETIF_GSO_TYPE_TCPV4; + else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + gso_type = XEN_NETIF_GSO_TYPE_TCPV6; } /* Set up a GSO prefix descriptor, if necessary */ @@ -371,7 +367,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++); meta = npo->meta + npo->meta_prod++; meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; meta->size = 0; meta->id = req->id; } @@ -381,7 +377,7 @@ static int xenvif_gop_skb(struct sk_buff *skb, if ((1 << gso_type) & vif->gso_mask) { meta->gso_type = gso_type; - meta->gso_size = gso_size; + meta->gso_size = skb_shinfo(skb)->gso_size; } else { meta->gso_type = XEN_NETIF_GSO_TYPE_NONE; meta->gso_size = 0; @@ -531,8 +527,9 @@ static void xenvif_rx_action(struct xenvif *vif) size = skb_frag_size(&skb_shinfo(skb)->frags[i]); max_slots_needed += DIV_ROUND_UP(size, PAGE_SIZE); } - if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || - skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) + if (skb_is_gso(skb) && + (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4 || + skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) max_slots_needed++; /* If the skb may not fit then bail out now */ diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c index 00660cc..3890166 100644 --- a/drivers/pci/bus.c +++ b/drivers/pci/bus.c @@ -162,8 +162,6 @@ static int pci_bus_alloc_from_region(struct pci_bus *bus, struct resource *res, avail = *r; pci_clip_resource_to_region(bus, &avail, region); - if (!resource_size(&avail)) - continue; /* * "min" is typically PCIBIOS_MIN_IO or PCIBIOS_MIN_MEM to diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c index 6b05f61..fdbc294 100644 --- a/drivers/pci/pci.c +++ b/drivers/pci/pci.c @@ -1192,6 +1192,9 @@ static int do_pci_enable_device(struct pci_dev *dev, int bars) return err; pci_fixup_device(pci_fixup_enable, dev); + if (dev->msi_enabled || dev->msix_enabled) + return 0; + pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin); if (pin) { pci_read_config_word(dev, PCI_COMMAND, &cmd); diff --git a/drivers/pinctrl/Kconfig b/drivers/pinctrl/Kconfig index be361b7..1e4e693 100644 --- a/drivers/pinctrl/Kconfig +++ b/drivers/pinctrl/Kconfig @@ -217,7 +217,7 @@ config PINCTRL_IMX28 select PINCTRL_MXS config PINCTRL_MSM - tristate + bool select 
PINMUX select PINCONF select GENERIC_PINCONF diff --git a/drivers/pinctrl/pinctrl-capri.c b/drivers/pinctrl/pinctrl-capri.c index 4669c53..eb25002 100644 --- a/drivers/pinctrl/pinctrl-capri.c +++ b/drivers/pinctrl/pinctrl-capri.c @@ -1435,7 +1435,7 @@ int __init capri_pinctrl_probe(struct platform_device *pdev) } static struct of_device_id capri_pinctrl_of_match[] = { - { .compatible = "brcm,capri-pinctrl", }, + { .compatible = "brcm,bcm11351-pinctrl", }, { }, }; diff --git a/drivers/pinctrl/pinctrl-sunxi.c b/drivers/pinctrl/pinctrl-sunxi.c index 9ccf681..f9fabe9 100644 --- a/drivers/pinctrl/pinctrl-sunxi.c +++ b/drivers/pinctrl/pinctrl-sunxi.c @@ -14,6 +14,7 @@ #include <linux/clk.h> #include <linux/gpio.h> #include <linux/irqdomain.h> +#include <linux/irqchip/chained_irq.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> @@ -584,7 +585,7 @@ static int sunxi_pinctrl_irq_set_type(struct irq_data *d, spin_lock_irqsave(&pctl->lock, flags); regval = readl(pctl->membase + reg); - regval &= ~IRQ_CFG_IRQ_MASK; + regval &= ~(IRQ_CFG_IRQ_MASK << index); writel(regval | (mode << index), pctl->membase + reg); spin_unlock_irqrestore(&pctl->lock, flags); @@ -665,6 +666,7 @@ static struct irq_chip sunxi_pinctrl_irq_chip = { static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) { + struct irq_chip *chip = irq_get_chip(irq); struct sunxi_pinctrl *pctl = irq_get_handler_data(irq); const unsigned long reg = readl(pctl->membase + IRQ_STATUS_REG); @@ -674,10 +676,12 @@ static void sunxi_pinctrl_irq_handler(unsigned irq, struct irq_desc *desc) if (reg) { int irqoffset; + chained_irq_enter(chip, desc); for_each_set_bit(irqoffset, &reg, SUNXI_IRQ_NUMBER) { int pin_irq = irq_find_mapping(pctl->domain, irqoffset); generic_handle_irq(pin_irq); } + chained_irq_exit(chip, desc); } } diff --git a/drivers/pinctrl/pinctrl-sunxi.h b/drivers/pinctrl/pinctrl-sunxi.h index 01c494f..552b0e9 100644 --- a/drivers/pinctrl/pinctrl-sunxi.h +++ b/drivers/pinctrl/pinctrl-sunxi.h @@ -511,7 +511,7 @@ static inline u32 sunxi_pull_offset(u16 pin) static inline u32 sunxi_irq_cfg_reg(u16 irq) { - u8 reg = irq / IRQ_CFG_IRQ_PER_REG; + u8 reg = irq / IRQ_CFG_IRQ_PER_REG * 0x04; return reg + IRQ_CFG_REG; } @@ -523,7 +523,7 @@ static inline u32 sunxi_irq_cfg_offset(u16 irq) static inline u32 sunxi_irq_ctrl_reg(u16 irq) { - u8 reg = irq / IRQ_CTRL_IRQ_PER_REG; + u8 reg = irq / IRQ_CTRL_IRQ_PER_REG * 0x04; return reg + IRQ_CTRL_REG; } @@ -535,7 +535,7 @@ static inline u32 sunxi_irq_ctrl_offset(u16 irq) static inline u32 sunxi_irq_status_reg(u16 irq) { - u8 reg = irq / IRQ_STATUS_IRQ_PER_REG; + u8 reg = irq / IRQ_STATUS_IRQ_PER_REG * 0x04; return reg + IRQ_STATUS_REG; } diff --git a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c index 77d103f..567d691 100644 --- a/drivers/pinctrl/sh-pfc/pfc-r8a7791.c +++ b/drivers/pinctrl/sh-pfc/pfc-r8a7791.c @@ -89,7 +89,8 @@ enum { /* GPSR6 */ FN_IP13_10, FN_IP13_11, FN_IP13_12, FN_IP13_13, FN_IP13_14, - FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, FN_IP13_22, FN_IP13_24_23, + FN_IP13_15, FN_IP13_18_16, FN_IP13_21_19, + FN_IP13_22, FN_IP13_24_23, FN_SD1_CLK, FN_IP13_25, FN_IP13_26, FN_IP13_27, FN_IP13_30_28, FN_IP14_1_0, FN_IP14_2, FN_IP14_3, FN_IP14_4, FN_IP14_5, FN_IP14_6, FN_IP14_7, FN_IP14_10_8, FN_IP14_13_11, FN_IP14_16_14, FN_IP14_19_17, @@ -788,6 +789,7 @@ static const u16 pinmux_data[] = { PINMUX_DATA(USB1_PWEN_MARK, FN_USB1_PWEN), PINMUX_DATA(USB1_OVC_MARK, FN_USB1_OVC), PINMUX_DATA(DU0_DOTCLKIN_MARK, FN_DU0_DOTCLKIN), +
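/*
 * The sunxi_pinctrl_irq_handler() hunk above brackets the demux loop
 * with chained_irq_enter()/chained_irq_exit(), which perform the
 * ack/eoi and mask/unmask handshake that the parent interrupt chip
 * requires; the bare loop previously skipped it. The canonical shape
 * of a chained handler (example_* names are hypothetical):
 *
 *	static void example_handler(unsigned irq, struct irq_desc *desc)
 *	{
 *		struct irq_chip *chip = irq_get_chip(irq);
 *		struct example_ctrl *ctrl = irq_get_handler_data(irq);
 *		unsigned long status = readl(ctrl->base + EXAMPLE_STATUS);
 *		int bit;
 *
 *		chained_irq_enter(chip, desc);
 *		for_each_set_bit(bit, &status, EXAMPLE_NR_IRQS)
 *			generic_handle_irq(irq_find_mapping(ctrl->domain,
 *							    bit));
 *		chained_irq_exit(chip, desc);
 *	}
 */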
PINMUX_DATA(SD1_CLK_MARK, FN_SD1_CLK), /* IPSR0 */ PINMUX_IPSR_DATA(IP0_0, D0), @@ -3825,7 +3827,7 @@ static const struct pinmux_cfg_reg pinmux_config_regs[] = { GP_6_11_FN, FN_IP13_25, GP_6_10_FN, FN_IP13_24_23, GP_6_9_FN, FN_IP13_22, - 0, 0, + GP_6_8_FN, FN_SD1_CLK, GP_6_7_FN, FN_IP13_21_19, GP_6_6_FN, FN_IP13_18_16, GP_6_5_FN, FN_IP13_15, diff --git a/drivers/pinctrl/sirf/pinctrl-sirf.c b/drivers/pinctrl/sirf/pinctrl-sirf.c index a0d6152..617a491 100644 --- a/drivers/pinctrl/sirf/pinctrl-sirf.c +++ b/drivers/pinctrl/sirf/pinctrl-sirf.c @@ -598,7 +598,7 @@ static unsigned int sirfsoc_gpio_irq_startup(struct irq_data *d) { struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); - if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq)) + if (gpio_lock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE)) dev_err(bank->chip.gc.dev, "unable to lock HW IRQ %lu for IRQ\n", d->hwirq); @@ -611,7 +611,7 @@ static void sirfsoc_gpio_irq_shutdown(struct irq_data *d) struct sirfsoc_gpio_bank *bank = irq_data_get_irq_chip_data(d); sirfsoc_gpio_irq_mask(d); - gpio_unlock_as_irq(&bank->chip.gc, d->hwirq); + gpio_unlock_as_irq(&bank->chip.gc, d->hwirq % SIRFSOC_GPIO_BANK_SIZE); } static struct irq_chip sirfsoc_irq_chip = { diff --git a/drivers/pnp/pnpacpi/rsparser.c b/drivers/pnp/pnpacpi/rsparser.c index 167f3d0..66977eb 100644 --- a/drivers/pnp/pnpacpi/rsparser.c +++ b/drivers/pnp/pnpacpi/rsparser.c @@ -183,9 +183,7 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, struct resource r = {0}; int i, flags; - if (acpi_dev_resource_memory(res, &r) - || acpi_dev_resource_io(res, &r) - || acpi_dev_resource_address_space(res, &r) + if (acpi_dev_resource_address_space(res, &r) || acpi_dev_resource_ext_address_space(res, &r)) { pnp_add_resource(dev, &r); return AE_OK; @@ -217,6 +215,17 @@ static acpi_status pnpacpi_allocated_resource(struct acpi_resource *res, } switch (res->type) { + case ACPI_RESOURCE_TYPE_MEMORY24: + case ACPI_RESOURCE_TYPE_MEMORY32: + case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: + if (acpi_dev_resource_memory(res, &r)) + pnp_add_resource(dev, &r); + break; + case ACPI_RESOURCE_TYPE_IO: + case ACPI_RESOURCE_TYPE_FIXED_IO: + if (acpi_dev_resource_io(res, &r)) + pnp_add_resource(dev, &r); + break; case ACPI_RESOURCE_TYPE_DMA: dma = &res->data.dma; if (dma->channel_count > 0 && dma->channels[0] != (u8) -1) diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c index 31534b5..c3b2fb9 100644 --- a/drivers/spi/spi-ath79.c +++ b/drivers/spi/spi-ath79.c @@ -132,9 +132,9 @@ static int ath79_spi_setup_cs(struct spi_device *spi) flags = GPIOF_DIR_OUT; if (spi->mode & SPI_CS_HIGH) - flags |= GPIOF_INIT_HIGH; - else flags |= GPIOF_INIT_LOW; + else + flags |= GPIOF_INIT_HIGH; status = gpio_request_one(cdata->gpio, flags, dev_name(&spi->dev)); diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c index b0842f7..5d7b07f 100644 --- a/drivers/spi/spi-atmel.c +++ b/drivers/spi/spi-atmel.c @@ -1455,6 +1455,14 @@ static int atmel_spi_suspend(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct atmel_spi *as = spi_master_get_devdata(master); + int ret; + + /* Stop the queue running */ + ret = spi_master_suspend(master); + if (ret) { + dev_warn(dev, "cannot suspend master\n"); + return ret; + } clk_disable_unprepare(as->clk); return 0; @@ -1464,9 +1472,16 @@ static int atmel_spi_resume(struct device *dev) { struct spi_master *master = dev_get_drvdata(dev); struct atmel_spi *as = spi_master_get_devdata(master); + int ret; 
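/*
 * The atmel_spi_suspend() change above quiesces the core's message
 * queue with spi_master_suspend() before gating the clock; cutting
 * the clock under an in-flight transfer would wedge the queue. The
 * resulting pattern for any queued SPI controller driver (example_*
 * names are hypothetical):
 *
 *	static int example_spi_suspend(struct device *dev)
 *	{
 *		struct spi_master *master = dev_get_drvdata(dev);
 *		struct example_spi *es = spi_master_get_devdata(master);
 *		int ret;
 *
 *		ret = spi_master_suspend(master);  // drain the queue
 *		if (ret)
 *			return ret;
 *		clk_disable_unprepare(es->clk);    // then gate the clock
 *		return 0;
 *	}
 */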
clk_prepare_enable(as->clk); - return 0; + + /* Start the queue running */ + ret = spi_master_resume(master); + if (ret) + dev_err(dev, "problem starting queue (%d)\n", ret); + + return ret; } static SIMPLE_DEV_PM_OPS(atmel_spi_pm_ops, atmel_spi_suspend, atmel_spi_resume); diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c index cabed8f..28ae470 100644 --- a/drivers/spi/spi-coldfire-qspi.c +++ b/drivers/spi/spi-coldfire-qspi.c @@ -514,7 +514,8 @@ static int mcfqspi_resume(struct device *dev) #ifdef CONFIG_PM_RUNTIME static int mcfqspi_runtime_suspend(struct device *dev) { - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); + struct spi_master *master = dev_get_drvdata(dev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); clk_disable(mcfqspi->clk); @@ -523,7 +524,8 @@ static int mcfqspi_runtime_suspend(struct device *dev) static int mcfqspi_runtime_resume(struct device *dev) { - struct mcfqspi *mcfqspi = dev_get_drvdata(dev); + struct spi_master *master = dev_get_drvdata(dev); + struct mcfqspi *mcfqspi = spi_master_get_devdata(master); clk_enable(mcfqspi->clk); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index ec79f72..a253920 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -420,7 +420,6 @@ static int dspi_suspend(struct device *dev) static int dspi_resume(struct device *dev) { - struct spi_master *master = dev_get_drvdata(dev); struct fsl_dspi *dspi = spi_master_get_devdata(master); @@ -504,7 +503,7 @@ static int dspi_probe(struct platform_device *pdev) clk_prepare_enable(dspi->clk); init_waitqueue_head(&dspi->waitq); - platform_set_drvdata(pdev, dspi); + platform_set_drvdata(pdev, master); ret = spi_bitbang_start(&dspi->bitbang); if (ret != 0) { @@ -525,7 +524,8 @@ out_master_put: static int dspi_remove(struct platform_device *pdev) { - struct fsl_dspi *dspi = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct fsl_dspi *dspi = spi_master_get_devdata(master); /* Disconnect from the SPI framework */ spi_bitbang_stop(&dspi->bitbang); diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c index a5474ef..47f15d9 100644 --- a/drivers/spi/spi-imx.c +++ b/drivers/spi/spi-imx.c @@ -948,8 +948,8 @@ static int spi_imx_remove(struct platform_device *pdev) spi_bitbang_stop(&spi_imx->bitbang); writel(0, spi_imx->base + MXC_CSPICTRL); - clk_disable_unprepare(spi_imx->clk_ipg); - clk_disable_unprepare(spi_imx->clk_per); + clk_unprepare(spi_imx->clk_ipg); + clk_unprepare(spi_imx->clk_per); spi_master_put(master); return 0; diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c index 2e7f38c..88eb57e 100644 --- a/drivers/spi/spi-topcliff-pch.c +++ b/drivers/spi/spi-topcliff-pch.c @@ -915,7 +915,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Tx DMA */ param = &dma->param_tx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->master->bus_num * 2; /* Tx = 0, 2 */ + param->chan_id = data->ch * 2; /* Tx = 0, 2 */; param->tx_reg = data->io_base_addr + PCH_SPDWR; param->width = width; chan = dma_request_channel(mask, pch_spi_filter, param); @@ -930,7 +930,7 @@ static void pch_spi_request_dma(struct pch_spi_data *data, int bpw) /* Set Rx DMA */ param = &dma->param_rx; param->dma_dev = &dma_dev->dev; - param->chan_id = data->master->bus_num * 2 + 1; /* Rx = Tx + 1 */ + param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */; param->rx_reg = data->io_base_addr + PCH_SPDRR; param->width = width; chan = 
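/*
 * The spi-coldfire-qspi and spi-fsl-dspi fixes above are the same
 * bug twice: probe stored the private struct in drvdata while the PM
 * callbacks read drvdata back as a struct spi_master, dereferencing
 * the wrong object. The convention the fixes restore (example_*
 * names are hypothetical):
 *
 *	static int example_probe(struct platform_device *pdev)
 *	{
 *		struct spi_master *master;
 *		struct example_spi *es;
 *
 *		master = spi_alloc_master(&pdev->dev, sizeof(*es));
 *		if (!master)
 *			return -ENOMEM;
 *		es = spi_master_get_devdata(master);
 *
 *		// drvdata always holds the master; private state is
 *		// always reached via spi_master_get_devdata()
 *		platform_set_drvdata(pdev, master);
 *		return 0;
 *	}
 */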
dma_request_channel(mask, pch_spi_filter, param); @@ -1452,6 +1452,11 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) pch_spi_set_master_mode(master); + if (use_dma) { + dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); + pch_alloc_dma_buf(board_dat, data); + } + ret = spi_register_master(master); if (ret != 0) { dev_err(&plat_dev->dev, @@ -1459,14 +1464,10 @@ static int pch_spi_pd_probe(struct platform_device *plat_dev) goto err_spi_register_master; } - if (use_dma) { - dev_info(&plat_dev->dev, "Use DMA for data transfers\n"); - pch_alloc_dma_buf(board_dat, data); - } - return 0; err_spi_register_master: + pch_free_dma_buf(board_dat, data); free_irq(board_dat->pdev->irq, data); err_request_irq: pch_spi_free_resources(board_dat, data); diff --git a/drivers/staging/cxt1e1/linux.c b/drivers/staging/cxt1e1/linux.c index 4a08e16..79206cb 100644 --- a/drivers/staging/cxt1e1/linux.c +++ b/drivers/staging/cxt1e1/linux.c @@ -866,6 +866,8 @@ c4_ioctl (struct net_device *ndev, struct ifreq *ifr, int cmd) _IOC_SIZE (iocmd)); #endif iolen = _IOC_SIZE (iocmd); + if (iolen > sizeof(arg)) + return -EFAULT; data = ifr->ifr_data + sizeof (iocmd); if (copy_from_user (&arg, data, iolen)) return -EFAULT; diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index 7f1a7ce..b83ec37 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -785,7 +785,7 @@ static void iscsit_ack_from_expstatsn(struct iscsi_conn *conn, u32 exp_statsn) spin_unlock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_p, &ack_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); iscsit_free_cmd(cmd, false); } } @@ -3708,7 +3708,7 @@ iscsit_immediate_queue(struct iscsi_conn *conn, struct iscsi_cmd *cmd, int state break; case ISTATE_REMOVE: spin_lock_bh(&conn->cmd_lock); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, false); @@ -4151,7 +4151,7 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn) spin_lock_bh(&conn->cmd_lock); list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_increment_maxcmdsn(cmd, sess); @@ -4196,6 +4196,10 @@ int iscsit_close_connection( iscsit_stop_timers_for_cmds(conn); iscsit_stop_nopin_response_timer(conn); iscsit_stop_nopin_timer(conn); + + if (conn->conn_transport->iscsit_wait_conn) + conn->conn_transport->iscsit_wait_conn(conn); + iscsit_free_queue_reqs_for_conn(conn); /* diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 33be1fb..4ca8fd2 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -138,7 +138,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -160,7 +160,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess) list_for_each_entry_safe(cmd, cmd_tmp, &cr->conn_recovery_cmd_list, i_conn_node) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); cmd->conn = NULL; spin_unlock(&cr->conn_recovery_cmd_lock); iscsit_free_cmd(cmd, true); @@ -216,7 +216,7 @@ 
int iscsit_remove_cmd_from_connection_recovery( } cr = cmd->cr; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); return --cr->cmd_count; } @@ -297,7 +297,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn) if (!(cmd->cmd_flags & ICF_OOO_CMDSN)) continue; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); @@ -335,7 +335,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) /* * Only perform connection recovery on ISCSI_OP_SCSI_CMD or * ISCSI_OP_NOOP_OUT opcodes. For all other opcodes call - * list_del(&cmd->i_conn_node); to release the command to the + * list_del_init(&cmd->i_conn_node); to release the command to the * session pool and remove it from the connection's list. * * Also stop the DataOUT timer, which will be restarted after @@ -351,7 +351,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) " CID: %hu\n", cmd->iscsi_opcode, cmd->init_task_tag, cmd->cmd_sn, conn->cid); - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -371,7 +371,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) */ if (!(cmd->cmd_flags & ICF_OOO_CMDSN) && !cmd->immediate_cmd && iscsi_sna_gte(cmd->cmd_sn, conn->sess->exp_cmd_sn)) { - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_cmd(cmd, true); spin_lock_bh(&conn->cmd_lock); @@ -393,7 +393,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn) cmd->sess = conn->sess; - list_del(&cmd->i_conn_node); + list_del_init(&cmd->i_conn_node); spin_unlock_bh(&conn->cmd_lock); iscsit_free_all_datain_reqs(cmd); diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c index 3976183..44a5471 100644 --- a/drivers/target/iscsi/iscsi_target_tpg.c +++ b/drivers/target/iscsi/iscsi_target_tpg.c @@ -137,7 +137,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np( list_for_each_entry(tpg, &tiqn->tiqn_tpg_list, tpg_list) { spin_lock(&tpg->tpg_state_lock); - if (tpg->tpg_state == TPG_STATE_FREE) { + if (tpg->tpg_state != TPG_STATE_ACTIVE) { spin_unlock(&tpg->tpg_state_lock); continue; } diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c index 42f18fc..77e6531 100644 --- a/drivers/target/target_core_sbc.c +++ b/drivers/target/target_core_sbc.c @@ -1079,25 +1079,31 @@ sbc_dif_copy_prot(struct se_cmd *cmd, unsigned int sectors, bool read, left = sectors * dev->prot_length; for_each_sg(cmd->t_prot_sg, psg, cmd->t_prot_nents, i) { - - len = min(psg->length, left); - if (offset >= sg->length) { - sg = sg_next(sg); - offset = 0; - } + unsigned int psg_len, copied = 0; paddr = kmap_atomic(sg_page(psg)) + psg->offset; - addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; - - if (read) - memcpy(paddr, addr, len); - else - memcpy(addr, paddr, len); - - left -= len; - offset += len; + psg_len = min(left, psg->length); + while (psg_len) { + len = min(psg_len, sg->length - offset); + addr = kmap_atomic(sg_page(sg)) + sg->offset + offset; + + if (read) + memcpy(paddr + copied, addr, len); + else + memcpy(addr, paddr + copied, len); + + left -= len; + offset += len; + copied += len; + psg_len -= len; + + if (offset >= sg->length) { + sg = sg_next(sg); + offset = 0; + } + kunmap_atomic(addr); + } kunmap_atomic(paddr); - kunmap_atomic(addr); } } diff --git 
a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 35c0664..5f88d76 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig @@ -136,6 +136,7 @@ config SPEAR_THERMAL config RCAR_THERMAL tristate "Renesas R-Car thermal driver" depends on ARCH_SHMOBILE || COMPILE_TEST + depends on HAS_IOMEM help Enable this to plug the R-Car thermal sensor driver into the Linux thermal framework. @@ -210,8 +211,16 @@ config ACPI_INT3403_THERMAL tristate "ACPI INT3403 thermal driver" depends on X86 && ACPI help - This driver uses ACPI INT3403 device objects. If present, it will - register each INT3403 thermal sensor as a thermal zone. + Newer laptops and tablets that use ACPI may have thermal sensors + outside the core CPU/SOC for thermal safety reasons. These + temperature sensors are also exposed for the OS to use via the so + called INT3403 ACPI object. This driver will, on devices that have + such sensors, expose the temperature information from these sensors + to userspace via the normal thermal framework. This means that a wide + range of applications and GUI widgets can show this information to + the user or use this information for making decisions. For example, + the Intel Thermal Daemon can use this information to allow the user + to select his laptop to run without turning on the fans. menu "Texas Instruments thermal drivers" source "drivers/thermal/ti-soc-thermal/Kconfig" diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c index 338a88b..71b0ec0 100644 --- a/drivers/thermal/thermal_core.c +++ b/drivers/thermal/thermal_core.c @@ -56,10 +56,15 @@ static LIST_HEAD(thermal_governor_list); static DEFINE_MUTEX(thermal_list_lock); static DEFINE_MUTEX(thermal_governor_lock); +static struct thermal_governor *def_governor; + static struct thermal_governor *__find_governor(const char *name) { struct thermal_governor *pos; + if (!name || !name[0]) + return def_governor; + list_for_each_entry(pos, &thermal_governor_list, governor_list) if (!strnicmp(name, pos->name, THERMAL_NAME_LENGTH)) return pos; @@ -82,17 +87,23 @@ int thermal_register_governor(struct thermal_governor *governor) if (__find_governor(governor->name) == NULL) { err = 0; list_add(&governor->governor_list, &thermal_governor_list); + if (!def_governor && !strncmp(governor->name, + DEFAULT_THERMAL_GOVERNOR, THERMAL_NAME_LENGTH)) + def_governor = governor; } mutex_lock(&thermal_list_lock); list_for_each_entry(pos, &thermal_tz_list, node) { + /* + * only thermal zones with specified tz->tzp->governor_name + * may run with tz->govenor unset + */ if (pos->governor) continue; - if (pos->tzp) - name = pos->tzp->governor_name; - else - name = DEFAULT_THERMAL_GOVERNOR; + + name = pos->tzp->governor_name; + if (!strnicmp(name, governor->name, THERMAL_NAME_LENGTH)) pos->governor = governor; } @@ -342,8 +353,8 @@ static void monitor_thermal_zone(struct thermal_zone_device *tz) static void handle_non_critical_trips(struct thermal_zone_device *tz, int trip, enum thermal_trip_type trip_type) { - if (tz->governor) - tz->governor->throttle(tz, trip); + tz->governor ? 
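/*
 * The thermal_core hunks in this region cache the first governor
 * whose name matches DEFAULT_THERMAL_GOVERNOR in def_governor, and
 * make __find_governor() fall back to it for a NULL or empty name;
 * the handle_non_critical_trips() rewrite continuing just below
 * likewise falls back when a zone has no governor bound yet, so such
 * zones still get throttled. The lookup, restated (hypothetical
 * example_ name):
 *
 *	static struct thermal_governor *example_find(const char *name)
 *	{
 *		struct thermal_governor *pos;
 *
 *		if (!name || !name[0])
 *			return def_governor;	// unnamed -> default
 *
 *		list_for_each_entry(pos, &thermal_governor_list,
 *				    governor_list)
 *			if (!strnicmp(name, pos->name,
 *				      THERMAL_NAME_LENGTH))
 *				return pos;
 *		return NULL;
 *	}
 */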
tz->governor->throttle(tz, trip) : + def_governor->throttle(tz, trip); } static void handle_critical_trips(struct thermal_zone_device *tz, @@ -1107,7 +1118,7 @@ __thermal_cooling_device_register(struct device_node *np, INIT_LIST_HEAD(&cdev->thermal_instances); cdev->np = np; cdev->ops = ops; - cdev->updated = true; + cdev->updated = false; cdev->device.class = &thermal_class; cdev->devdata = devdata; dev_set_name(&cdev->device, "cooling_device%d", cdev->id); @@ -1533,7 +1544,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type, if (tz->tzp) tz->governor = __find_governor(tz->tzp->governor_name); else - tz->governor = __find_governor(DEFAULT_THERMAL_GOVERNOR); + tz->governor = def_governor; mutex_unlock(&thermal_governor_lock); diff --git a/drivers/thermal/x86_pkg_temp_thermal.c b/drivers/thermal/x86_pkg_temp_thermal.c index 972e1c7..081fd7e 100644 --- a/drivers/thermal/x86_pkg_temp_thermal.c +++ b/drivers/thermal/x86_pkg_temp_thermal.c @@ -68,6 +68,10 @@ struct phy_dev_entry { struct thermal_zone_device *tzone; }; +static const struct thermal_zone_params pkg_temp_tz_params = { + .no_hwmon = true, +}; + /* List maintaining number of package instances */ static LIST_HEAD(phy_dev_list); static DEFINE_MUTEX(phy_dev_list_mutex); @@ -394,7 +398,6 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) int err; u32 tj_max; struct phy_dev_entry *phy_dev_entry; - char buffer[30]; int thres_count; u32 eax, ebx, ecx, edx; u8 *temp; @@ -440,13 +443,11 @@ static int pkg_temp_thermal_device_add(unsigned int cpu) phy_dev_entry->first_cpu = cpu; phy_dev_entry->tj_max = tj_max; phy_dev_entry->ref_cnt = 1; - snprintf(buffer, sizeof(buffer), "pkg-temp-%d\n", - phy_dev_entry->phys_proc_id); - phy_dev_entry->tzone = thermal_zone_device_register(buffer, + phy_dev_entry->tzone = thermal_zone_device_register("x86_pkg_temp", thres_count, (thres_count == MAX_NUMBER_OF_TRIPS) ? 
0x03 : 0x01, - phy_dev_entry, &tzone_ops, NULL, 0, 0); + phy_dev_entry, &tzone_ops, &pkg_temp_tz_params, 0, 0); if (IS_ERR(phy_dev_entry->tzone)) { err = PTR_ERR(phy_dev_entry->tzone); goto err_ret_free; diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 8d72f0c..062967c 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -717,6 +717,10 @@ int usb_get_configuration(struct usb_device *dev) result = -ENOMEM; goto err; } + + if (dev->quirks & USB_QUIRK_DELAY_INIT) + msleep(100); + result = usb_get_descriptor(dev, USB_DT_CONFIG, cfgno, bigbuffer, length); if (result < 0) { diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index 8f37063..739ee8e 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -47,6 +47,10 @@ static const struct usb_device_id usb_quirk_list[] = { /* Microsoft LifeCam-VX700 v2.0 */ { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Logitech HD Pro Webcams C920 and C930e */ + { USB_DEVICE(0x046d, 0x082d), .driver_info = USB_QUIRK_DELAY_INIT }, + { USB_DEVICE(0x046d, 0x0843), .driver_info = USB_QUIRK_DELAY_INIT }, + /* Logitech Quickcam Fusion */ { USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 6fe577d..924a6cc 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -4733,6 +4733,9 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* Accept arbitrarily long scatter-gather lists */ hcd->self.sg_tablesize = ~0; + /* support to build packet from discontinuous buffers */ + hcd->self.no_sg_constraint = 1; + /* XHCI controllers don't stop the ep queue on short packets :| */ hcd->self.no_stop_on_short = 1; @@ -4757,14 +4760,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) /* xHCI private pointer was set in xhci_pci_probe for the second * registered roothub. */ - xhci = hcd_to_xhci(hcd); - /* - * Support arbitrarily aligned sg-list entries on hosts without - * TD fragment rules (which are currently unsupported). - */ - if (xhci->hci_version < 0x100) - hcd->self.no_sg_constraint = 1; - return 0; } @@ -4793,9 +4788,6 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) if (xhci->hci_version > 0x96) xhci->quirks |= XHCI_SPURIOUS_SUCCESS; - if (xhci->hci_version < 0x100) - hcd->self.no_sg_constraint = 1; - /* Make sure the HC is halted. 
*/ retval = xhci_halt(xhci); if (retval) diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c index 0129b78..4f70f38 100644 --- a/fs/bio-integrity.c +++ b/fs/bio-integrity.c @@ -458,11 +458,10 @@ static int bio_integrity_verify(struct bio *bio) struct blk_integrity_exchg bix; struct bio_vec *bv; sector_t sector = bio->bi_integrity->bip_iter.bi_sector; - unsigned int sectors, total, ret; + unsigned int sectors, ret = 0; void *prot_buf = bio->bi_integrity->bip_buf; int i; - ret = total = 0; bix.disk_name = bio->bi_bdev->bd_disk->disk_name; bix.sector_size = bi->sector_size; @@ -484,8 +483,6 @@ static int bio_integrity_verify(struct bio *bio) sectors = bv->bv_len / bi->sector_size; sector += sectors; prot_buf += sectors * bi->tuple_size; - total += sectors * bi->tuple_size; - BUG_ON(total > bio->bi_integrity->bip_iter.bi_size); kunmap_atomic(kaddr); } diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index cf32f03..c0f3718 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h @@ -513,7 +513,7 @@ struct cifs_mnt_data { static inline unsigned int get_rfc1002_length(void *buf) { - return be32_to_cpu(*((__be32 *)buf)); + return be32_to_cpu(*((__be32 *)buf)) & 0xffffff; } static inline void diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 53c1507..834fce7 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -2579,31 +2579,19 @@ cifs_writev(struct kiocb *iocb, const struct iovec *iov, struct cifsInodeInfo *cinode = CIFS_I(inode); struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server; ssize_t rc = -EACCES; + loff_t lock_pos = pos; - BUG_ON(iocb->ki_pos != pos); - + if (file->f_flags & O_APPEND) + lock_pos = i_size_read(inode); /* * We need to hold the sem to be sure nobody modifies lock list * with a brlock that prevents writing. */ down_read(&cinode->lock_sem); - if (!cifs_find_lock_conflict(cfile, pos, iov_length(iov, nr_segs), + if (!cifs_find_lock_conflict(cfile, lock_pos, iov_length(iov, nr_segs), server->vals->exclusive_lock_type, NULL, - CIFS_WRITE_OP)) { - mutex_lock(&inode->i_mutex); - rc = __generic_file_aio_write(iocb, iov, nr_segs, - &iocb->ki_pos); - mutex_unlock(&inode->i_mutex); - } - - if (rc > 0) { - ssize_t err; - - err = generic_write_sync(file, iocb->ki_pos - rc, rc); - if (err < 0) - rc = err; - } - + CIFS_WRITE_OP)) + rc = generic_file_aio_write(iocb, iov, nr_segs, pos); up_read(&cinode->lock_sem); return rc; } diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index b375709..18cd565 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c @@ -270,6 +270,26 @@ cifs_rqst_page_to_kvec(struct smb_rqst *rqst, unsigned int idx, iov->iov_len = rqst->rq_pagesz; } +static unsigned long +rqst_len(struct smb_rqst *rqst) +{ + unsigned int i; + struct kvec *iov = rqst->rq_iov; + unsigned long buflen = 0; + + /* total up iov array first */ + for (i = 0; i < rqst->rq_nvec; i++) + buflen += iov[i].iov_len; + + /* add in the page array if there is one */ + if (rqst->rq_npages) { + buflen += rqst->rq_pagesz * (rqst->rq_npages - 1); + buflen += rqst->rq_tailsz; + } + + return buflen; +} + static int smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) { @@ -277,6 +297,7 @@ smb_send_rqst(struct TCP_Server_Info *server, struct smb_rqst *rqst) struct kvec *iov = rqst->rq_iov; int n_vec = rqst->rq_nvec; unsigned int smb_buf_length = get_rfc1002_length(iov[0].iov_base); + unsigned long send_length; unsigned int i; size_t total_len = 0, sent; struct socket *ssocket = server->ssocket; @@ -285,6 +306,14 @@ smb_send_rqst(struct TCP_Server_Info *server, struct 
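/*
 * The get_rfc1002_length() fix above masks the value to 24 bits: the
 * four-byte RFC 1002 session header is one message-type byte followed
 * by a 24-bit big-endian length, so using all 32 bits let a non-zero
 * type byte inflate the result. smb_send_rqst() just below then
 * compares that header length against the real iovec plus page total
 * from rqst_len() before sending anything. The mask, restated
 * (hypothetical example_ name):
 *
 *	static unsigned int example_rfc1002_len(const void *buf)
 *	{
 *		// byte 0 is the type; bytes 1-3 are the length
 *		return be32_to_cpu(*(const __be32 *)buf) & 0x00ffffff;
 *	}
 */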
smb_rqst *rqst) if (ssocket == NULL) return -ENOTSOCK; + /* sanity check send length */ + send_length = rqst_len(rqst); + if (send_length != smb_buf_length + 4) { + WARN(1, "Send length mismatch(send_length=%lu smb_buf_length=%u)\n", + send_length, smb_buf_length); + return -EIO; + } + cifs_dbg(FYI, "Sending smb: smb_len=%u\n", smb_buf_length); dump_smb(iov[0].iov_base, iov[0].iov_len); diff --git a/fs/file.c b/fs/file.c --- a/fs/file.c +++ b/fs/file.c @@ -683,35 +683,65 @@ EXPORT_SYMBOL(fget_raw); * The fput_needed flag returned by fget_light should be passed to the * corresponding fput_light. */ -struct file *__fget_light(unsigned int fd, fmode_t mask, int *fput_needed) +static unsigned long __fget_light(unsigned int fd, fmode_t mask) { struct files_struct *files = current->files; struct file *file; - *fput_needed = 0; if (atomic_read(&files->count) == 1) { file = __fcheck_files(files, fd); - if (file && (file->f_mode & mask)) - file = NULL; + if (!file || unlikely(file->f_mode & mask)) + return 0; + return (unsigned long)file; } else { file = __fget(fd, mask); - if (file) - *fput_needed = 1; + if (!file) + return 0; + return FDPUT_FPUT | (unsigned long)file; } - - return file; } -struct file *fget_light(unsigned int fd, int *fput_needed) +unsigned long __fdget(unsigned int fd) { - return __fget_light(fd, FMODE_PATH, fput_needed); + return __fget_light(fd, FMODE_PATH); } -EXPORT_SYMBOL(fget_light); +EXPORT_SYMBOL(__fdget); -struct file *fget_raw_light(unsigned int fd, int *fput_needed) +unsigned long __fdget_raw(unsigned int fd) { - return __fget_light(fd, 0, fput_needed); + return __fget_light(fd, 0); +} + +unsigned long __fdget_pos(unsigned int fd) +{ + struct files_struct *files = current->files; + struct file *file; + unsigned long v; + + if (atomic_read(&files->count) == 1) { + file = __fcheck_files(files, fd); + v = 0; + } else { + file = __fget(fd, 0); + v = FDPUT_FPUT; + } + if (!file) + return 0; + + if (file->f_mode & FMODE_ATOMIC_POS) { + if (file_count(file) > 1) { + v |= FDPUT_POS_UNLOCK; + mutex_lock(&file->f_pos_lock); + } + } + return v | (unsigned long)file; } +/* + * We only lock f_pos if we have threads or if the file might be + * shared with another process. In both cases we'll have an elevated + * file count (done either by fdget() or by fork()). + */ + void set_close_on_exec(unsigned int fd, int flag) { struct files_struct *files = current->files; diff --git a/fs/file_table.c b/fs/file_table.c index 5fff903..5b24008 100644 --- a/fs/file_table.c +++ b/fs/file_table.c @@ -135,6 +135,7 @@ struct file *get_empty_filp(void) atomic_long_set(&f->f_count, 1); rwlock_init(&f->f_owner.lock); spin_lock_init(&f->f_lock); + mutex_init(&f->f_pos_lock); eventpoll_init_file(f); /* f->f_version: 0 */ return f; } diff --git a/fs/hfsplus/catalog.c b/fs/hfsplus/catalog.c index 968ce41..32602c6 100644 --- a/fs/hfsplus/catalog.c +++ b/fs/hfsplus/catalog.c @@ -103,6 +103,8 @@ static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, folder = &entry->folder; memset(folder, 0, sizeof(*folder)); folder->type = cpu_to_be16(HFSPLUS_FOLDER); + if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) + folder->flags |= cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT); folder->id = cpu_to_be32(inode->i_ino); HFSPLUS_I(inode)->create_date = folder->create_date = @@ -203,6 +205,36 @@ int hfsplus_find_cat(struct super_block *sb, u32 cnid, return hfs_brec_find(fd, hfs_find_rec_by_key); } +static void hfsplus_subfolders_inc(struct inode *dir) +{ + struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); + + if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { + /* + * Increment subfolder count.
Note, the value is only meaningful + * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. + */ + HFSPLUS_I(dir)->subfolders++; + } +} + +static void hfsplus_subfolders_dec(struct inode *dir) +{ + struct hfsplus_sb_info *sbi = HFSPLUS_SB(dir->i_sb); + + if (test_bit(HFSPLUS_SB_HFSX, &sbi->flags)) { + /* + * Decrement subfolder count. Note, the value is only meaningful + * for folders with HFSPLUS_HAS_FOLDER_COUNT flag set. + * + * Check for zero. Some subfolders may have been created + * by an implementation ignorant of this counter. + */ + if (HFSPLUS_I(dir)->subfolders) + HFSPLUS_I(dir)->subfolders--; + } +} + int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode) { @@ -247,6 +279,8 @@ int hfsplus_create_cat(u32 cnid, struct inode *dir, goto err1; dir->i_size++; + if (S_ISDIR(inode->i_mode)) + hfsplus_subfolders_inc(dir); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); @@ -336,6 +370,8 @@ int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) goto out; dir->i_size--; + if (type == HFSPLUS_FOLDER) + hfsplus_subfolders_dec(dir); dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; hfsplus_mark_inode_dirty(dir, HFSPLUS_I_CAT_DIRTY); @@ -380,6 +416,7 @@ int hfsplus_rename_cat(u32 cnid, hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); + type = be16_to_cpu(entry.type); /* create new dir entry with the data from the old entry */ hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); @@ -394,6 +431,8 @@ int hfsplus_rename_cat(u32 cnid, if (err) goto out; dst_dir->i_size++; + if (type == HFSPLUS_FOLDER) + hfsplus_subfolders_inc(dst_dir); dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; /* finally remove the old entry */ @@ -405,6 +444,8 @@ int hfsplus_rename_cat(u32 cnid, if (err) goto out; src_dir->i_size--; + if (type == HFSPLUS_FOLDER) + hfsplus_subfolders_dec(src_dir); src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; /* remove old thread entry */ diff --git a/fs/hfsplus/hfsplus_fs.h b/fs/hfsplus/hfsplus_fs.h index 08846425b..62d571e 100644 --- a/fs/hfsplus/hfsplus_fs.h +++ b/fs/hfsplus/hfsplus_fs.h @@ -242,6 +242,7 @@ struct hfsplus_inode_info { */ sector_t fs_blocks; u8 userflags; /* BSD user file flags */ + u32 subfolders; /* Subfolder count (HFSX only) */ struct list_head open_dir_list; loff_t phys_size; diff --git a/fs/hfsplus/hfsplus_raw.h b/fs/hfsplus/hfsplus_raw.h index 8ffb3a8..5a12682 100644 --- a/fs/hfsplus/hfsplus_raw.h +++ b/fs/hfsplus/hfsplus_raw.h @@ -261,7 +261,7 @@ struct hfsplus_cat_folder { struct DInfo user_info; struct DXInfo finder_info; __be32 text_encoding; - u32 reserved; + __be32 subfolders; /* Subfolder count in HFSX. Reserved in HFS+. 
*/ } __packed; /* HFS file info (stolen from hfs.h) */ @@ -301,11 +301,13 @@ struct hfsplus_cat_file { struct hfsplus_fork_raw rsrc_fork; } __packed; -/* File attribute bits */ +/* File and folder flag bits */ #define HFSPLUS_FILE_LOCKED 0x0001 #define HFSPLUS_FILE_THREAD_EXISTS 0x0002 #define HFSPLUS_XATTR_EXISTS 0x0004 #define HFSPLUS_ACL_EXISTS 0x0008 +#define HFSPLUS_HAS_FOLDER_COUNT 0x0010 /* Folder has subfolder count + * (HFSX only) */ /* HFS+ catalog thread (part of a cat_entry) */ struct hfsplus_cat_thread { diff --git a/fs/hfsplus/inode.c b/fs/hfsplus/inode.c index fa929f3..a4f45bd 100644 --- a/fs/hfsplus/inode.c +++ b/fs/hfsplus/inode.c @@ -375,6 +375,7 @@ struct inode *hfsplus_new_inode(struct super_block *sb, umode_t mode) hip->extent_state = 0; hip->flags = 0; hip->userflags = 0; + hip->subfolders = 0; memset(hip->first_extents, 0, sizeof(hfsplus_extent_rec)); memset(hip->cached_extents, 0, sizeof(hfsplus_extent_rec)); hip->alloc_blocks = 0; @@ -494,6 +495,10 @@ int hfsplus_cat_read_inode(struct inode *inode, struct hfs_find_data *fd) inode->i_ctime = hfsp_mt2ut(folder->attribute_mod_date); HFSPLUS_I(inode)->create_date = folder->create_date; HFSPLUS_I(inode)->fs_blocks = 0; + if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { + HFSPLUS_I(inode)->subfolders = + be32_to_cpu(folder->subfolders); + } inode->i_op = &hfsplus_dir_inode_operations; inode->i_fop = &hfsplus_dir_operations; } else if (type == HFSPLUS_FILE) { @@ -566,6 +571,10 @@ int hfsplus_cat_write_inode(struct inode *inode) folder->content_mod_date = hfsp_ut2mt(inode->i_mtime); folder->attribute_mod_date = hfsp_ut2mt(inode->i_ctime); folder->valence = cpu_to_be32(inode->i_size - 2); + if (folder->flags & cpu_to_be16(HFSPLUS_HAS_FOLDER_COUNT)) { + folder->subfolders = + cpu_to_be32(HFSPLUS_I(inode)->subfolders); + } hfs_bnode_write(fd.bnode, &entry, fd.entryoffset, sizeof(struct hfsplus_cat_folder)); } else if (HFSPLUS_IS_RSRC(inode)) { diff --git a/fs/namei.c b/fs/namei.c --- a/fs/namei.c +++ b/fs/namei.c @@ -1884,7 +1884,7 @@ static int path_init(int dfd, const char *name, unsigned int flags, nd->path = f.file->f_path; if (flags & LOOKUP_RCU) { - if (f.need_put) + if (f.flags & FDPUT_FPUT) *fp = f.file; nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); rcu_read_lock(); diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c index ef792f2..5d8ccec 100644 --- a/fs/nfs/delegation.c +++ b/fs/nfs/delegation.c @@ -659,16 +659,19 @@ int nfs_async_inode_return_delegation(struct inode *inode, rcu_read_lock(); delegation = rcu_dereference(NFS_I(inode)->delegation); + if (delegation == NULL) + goto out_enoent; - if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) { - rcu_read_unlock(); - return -ENOENT; - } + if (!clp->cl_mvops->match_stateid(&delegation->stateid, stateid)) + goto out_enoent; nfs_mark_return_delegation(server, delegation); rcu_read_unlock(); nfs_delegation_run_state_manager(clp); return 0; +out_enoent: + rcu_read_unlock(); + return -ENOENT; } static struct inode * diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c index 12c8132..b9a35c0 100644 --- a/fs/nfs/nfs4filelayout.c +++ b/fs/nfs/nfs4filelayout.c @@ -324,8 +324,9 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data) &rdata->res.seq_res, task)) return; - nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, - rdata->args.lock_context, FMODE_READ); + if (nfs4_set_rw_stateid(&rdata->args.stateid, rdata->args.context, + rdata->args.lock_context, FMODE_READ) == -EIO) + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ } static void
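/*
 * The filelayout_read_prepare() hunk above, and its write twin just
 * below, check nfs4_set_rw_stateid() for -EIO, which now signals a
 * lost open or lock stateid, and fail the RPC up front with
 * rpc_exit() rather than issuing I/O under a dead lock. The shape of
 * the check (example_* names are hypothetical):
 *
 *	static void example_rw_prepare(struct rpc_task *task,
 *				       struct example_rw_data *d)
 *	{
 *		if (nfs4_set_rw_stateid(&d->args.stateid,
 *					d->args.context,
 *					d->args.lock_context,
 *					FMODE_READ) == -EIO)
 *			rpc_exit(task, -EIO);	// lost lock, fail early
 *	}
 */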
filelayout_read_call_done(struct rpc_task *task, void *data) @@ -435,8 +436,9 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data) &wdata->res.seq_res, task)) return; - nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, - wdata->args.lock_context, FMODE_WRITE); + if (nfs4_set_rw_stateid(&wdata->args.stateid, wdata->args.context, + wdata->args.lock_context, FMODE_WRITE) == -EIO) + rpc_exit(task, -EIO); /* lost lock, terminate I/O */ } static void filelayout_write_call_done(struct rpc_task *task, void *data) diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 2da6a69..450bfed 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -2398,13 +2398,16 @@ static int _nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, if (nfs4_copy_delegation_stateid(&arg.stateid, inode, fmode)) { /* Use that stateid */ - } else if (truncate && state != NULL && nfs4_valid_open_stateid(state)) { + } else if (truncate && state != NULL) { struct nfs_lockowner lockowner = { .l_owner = current->files, .l_pid = current->tgid, }; - nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, - &lockowner); + if (!nfs4_valid_open_stateid(state)) + return -EBADF; + if (nfs4_select_rw_stateid(&arg.stateid, state, FMODE_WRITE, + &lockowner) == -EIO) + return -EBADF; } else nfs4_stateid_copy(&arg.stateid, &zero_stateid); @@ -4011,8 +4014,9 @@ static bool nfs4_stateid_is_current(nfs4_stateid *stateid, { nfs4_stateid current_stateid; - if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode)) - return false; + /* If the current stateid represents a lost lock, then exit */ + if (nfs4_set_rw_stateid(&current_stateid, ctx, l_ctx, fmode) == -EIO) + return true; return nfs4_stateid_match(stateid, &current_stateid); } @@ -5828,8 +5832,7 @@ struct nfs_release_lockowner_data { struct nfs4_lock_state *lsp; struct nfs_server *server; struct nfs_release_lockowner_args args; - struct nfs4_sequence_args seq_args; - struct nfs4_sequence_res seq_res; + struct nfs_release_lockowner_res res; unsigned long timestamp; }; @@ -5837,7 +5840,7 @@ static void nfs4_release_lockowner_prepare(struct rpc_task *task, void *calldata { struct nfs_release_lockowner_data *data = calldata; nfs40_setup_sequence(data->server, - &data->seq_args, &data->seq_res, task); + &data->args.seq_args, &data->res.seq_res, task); data->timestamp = jiffies; } @@ -5846,7 +5849,7 @@ static void nfs4_release_lockowner_done(struct rpc_task *task, void *calldata) { struct nfs_release_lockowner_data *data = calldata; struct nfs_server *server = data->server; - nfs40_sequence_done(task, &data->seq_res); + nfs40_sequence_done(task, &data->res.seq_res); switch (task->tk_status) { case 0: @@ -5887,7 +5890,6 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st data = kmalloc(sizeof(*data), GFP_NOFS); if (!data) return -ENOMEM; - nfs4_init_sequence(&data->seq_args, &data->seq_res, 0); data->lsp = lsp; data->server = server; data->args.lock_owner.clientid = server->nfs_client->cl_clientid; @@ -5895,6 +5897,8 @@ static int nfs4_release_lockowner(struct nfs_server *server, struct nfs4_lock_st data->args.lock_owner.s_dev = server->s_dev; msg.rpc_argp = &data->args; + msg.rpc_resp = &data->res; + nfs4_init_sequence(&data->args.seq_args, &data->res.seq_res, 0); rpc_call_async(server->client, &msg, 0, &nfs4_release_lockowner_ops, data); return 0; } diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c index e1a4721..0deb321 100644 --- a/fs/nfs/nfs4state.c +++ b/fs/nfs/nfs4state.c @@ -974,9 +974,6 @@ static int
nfs4_copy_lock_stateid(nfs4_stateid *dst, else if (lsp != NULL && test_bit(NFS_LOCK_INITIALIZED, &lsp->ls_flags) != 0) { nfs4_stateid_copy(dst, &lsp->ls_stateid); ret = 0; - smp_rmb(); - if (!list_empty(&lsp->ls_seqid.list)) - ret = -EWOULDBLOCK; } spin_unlock(&state->state_lock); nfs4_put_lock_state(lsp); @@ -984,10 +981,9 @@ out: return ret; } -static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) +static void nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) { const nfs4_stateid *src; - int ret; int seq; do { @@ -996,12 +992,7 @@ static int nfs4_copy_open_stateid(nfs4_stateid *dst, struct nfs4_state *state) if (test_bit(NFS_OPEN_STATE, &state->flags)) src = &state->open_stateid; nfs4_stateid_copy(dst, src); - ret = 0; - smp_rmb(); - if (!list_empty(&state->owner->so_seqid.list)) - ret = -EWOULDBLOCK; } while (read_seqretry(&state->seqlock, seq)); - return ret; } /* @@ -1026,7 +1017,8 @@ int nfs4_select_rw_stateid(nfs4_stateid *dst, struct nfs4_state *state, * choose to use. */ goto out; - ret = nfs4_copy_open_stateid(dst, state); + nfs4_copy_open_stateid(dst, state); + ret = 0; out: if (nfs_server_capable(state->inode, NFS_CAP_STATEID_NFSV41)) dst->seqid = 0; diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 8450262bc..51632c4 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c @@ -2393,8 +2393,8 @@ out_dio: if (((file->f_flags & O_DSYNC) && !direct_io) || IS_SYNC(inode) || ((file->f_flags & O_DIRECT) && !direct_io)) { - ret = filemap_fdatawrite_range(file->f_mapping, pos, - pos + count - 1); + ret = filemap_fdatawrite_range(file->f_mapping, *ppos, + *ppos + count - 1); if (ret < 0) written = ret; @@ -2407,8 +2407,8 @@ out_dio: } if (!ret) - ret = filemap_fdatawait_range(file->f_mapping, pos, - pos + count - 1); + ret = filemap_fdatawait_range(file->f_mapping, *ppos, + *ppos + count - 1); } /* @@ -705,6 +705,10 @@ static int do_dentry_open(struct file *f, return 0; } + /* POSIX.1-2008/SUSv4 Section XSI 2.9.7 */ + if (S_ISREG(inode->i_mode)) + f->f_mode |= FMODE_ATOMIC_POS; + f->f_op = fops_get(inode->i_fop); if (unlikely(WARN_ON(!f->f_op))) { error = -ENODEV; diff --git a/fs/proc/base.c b/fs/proc/base.c index 5150706..b976062 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c @@ -1824,6 +1824,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) if (rc) goto out_mmput; + rc = -ENOENT; down_read(&mm->mmap_sem); vma = find_exact_vma(mm, vm_start, vm_end); if (vma && vma->vm_file) { diff --git a/fs/read_write.c b/fs/read_write.c index edc5746..54e19b9 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -264,10 +264,22 @@ loff_t vfs_llseek(struct file *file, loff_t offset, int whence) } EXPORT_SYMBOL(vfs_llseek); +static inline struct fd fdget_pos(int fd) +{ + return __to_fd(__fdget_pos(fd)); +} + +static inline void fdput_pos(struct fd f) +{ + if (f.flags & FDPUT_POS_UNLOCK) + mutex_unlock(&f.file->f_pos_lock); + fdput(f); +} + SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) { off_t retval; - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); if (!f.file) return -EBADF; @@ -278,7 +290,7 @@ SYSCALL_DEFINE3(lseek, unsigned int, fd, off_t, offset, unsigned int, whence) if (res != (loff_t)retval) retval = -EOVERFLOW; /* LFS: should only happen on 32 bit platforms */ } - fdput(f); + fdput_pos(f); return retval; } @@ -498,7 +510,7 @@ static inline void file_pos_write(struct file *file, loff_t pos) SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) { - struct fd f = 
fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret = -EBADF; if (f.file) { @@ -506,7 +518,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) ret = vfs_read(f.file, buf, count, &pos); if (ret >= 0) file_pos_write(f.file, pos); - fdput(f); + fdput_pos(f); } return ret; } @@ -514,7 +526,7 @@ SYSCALL_DEFINE3(read, unsigned int, fd, char __user *, buf, size_t, count) SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, size_t, count) { - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret = -EBADF; if (f.file) { @@ -522,7 +534,7 @@ SYSCALL_DEFINE3(write, unsigned int, fd, const char __user *, buf, ret = vfs_write(f.file, buf, count, &pos); if (ret >= 0) file_pos_write(f.file, pos); - fdput(f); + fdput_pos(f); } return ret; @@ -797,7 +809,7 @@ EXPORT_SYMBOL(vfs_writev); SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen) { - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret = -EBADF; if (f.file) { @@ -805,7 +817,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, ret = vfs_readv(f.file, vec, vlen, &pos); if (ret >= 0) file_pos_write(f.file, pos); - fdput(f); + fdput_pos(f); } if (ret > 0) @@ -817,7 +829,7 @@ SYSCALL_DEFINE3(readv, unsigned long, fd, const struct iovec __user *, vec, SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, unsigned long, vlen) { - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret = -EBADF; if (f.file) { @@ -825,7 +837,7 @@ SYSCALL_DEFINE3(writev, unsigned long, fd, const struct iovec __user *, vec, ret = vfs_writev(f.file, vec, vlen, &pos); if (ret >= 0) file_pos_write(f.file, pos); - fdput(f); + fdput_pos(f); } if (ret > 0) @@ -968,7 +980,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, const struct compat_iovec __user *,vec, compat_ulong_t, vlen) { - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret; loff_t pos; @@ -978,7 +990,7 @@ COMPAT_SYSCALL_DEFINE3(readv, compat_ulong_t, fd, ret = compat_readv(f.file, vec, vlen, &pos); if (ret >= 0) f.file->f_pos = pos; - fdput(f); + fdput_pos(f); return ret; } @@ -1035,7 +1047,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, const struct compat_iovec __user *, vec, compat_ulong_t, vlen) { - struct fd f = fdget(fd); + struct fd f = fdget_pos(fd); ssize_t ret; loff_t pos; @@ -1045,7 +1057,7 @@ COMPAT_SYSCALL_DEFINE3(writev, compat_ulong_t, fd, ret = compat_writev(f.file, vec, vlen, &pos); if (ret >= 0) f.file->f_pos = pos; - fdput(f); + fdput_pos(f); return ret; } diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h index be85127..f27000f 100644 --- a/include/kvm/arm_vgic.h +++ b/include/kvm/arm_vgic.h @@ -171,6 +171,11 @@ static inline int kvm_vgic_set_addr(struct kvm *kvm, unsigned long type, u64 add return 0; } +static inline int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write) +{ + return -ENXIO; +} + static inline int kvm_vgic_init(struct kvm *kvm) { return 0; diff --git a/include/linux/audit.h b/include/linux/audit.h index aa865a9..ec1464d 100644 --- a/include/linux/audit.h +++ b/include/linux/audit.h @@ -43,6 +43,7 @@ struct mq_attr; struct mqstat; struct audit_watch; struct audit_tree; +struct sk_buff; struct audit_krule { int vers_ops; @@ -463,7 +464,7 @@ extern int audit_filter_user(int type); extern int audit_filter_type(int type); extern int audit_rule_change(int type, __u32 portid, int seq, void *data, size_t datasz); -extern int audit_list_rules_send(__u32 
portid, int seq); +extern int audit_list_rules_send(struct sk_buff *request_skb, int seq); extern u32 audit_enabled; #else /* CONFIG_AUDIT */ diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h index 18ba8a6..2ff2e8d 100644 --- a/include/linux/blk-mq.h +++ b/include/linux/blk-mq.h @@ -121,8 +121,7 @@ void blk_mq_init_commands(struct request_queue *, void (*init)(void *data, struc void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule); -void blk_mq_insert_request(struct request_queue *, struct request *, - bool, bool); +void blk_mq_insert_request(struct request *, bool, bool, bool); void blk_mq_run_queues(struct request_queue *q, bool async); void blk_mq_free_request(struct request *rq); bool blk_mq_can_queue(struct blk_mq_hw_ctx *); @@ -134,7 +133,13 @@ struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_ind struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int); void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int); -void blk_mq_end_io(struct request *rq, int error); +bool blk_mq_end_io_partial(struct request *rq, int error, + unsigned int nr_bytes); +static inline void blk_mq_end_io(struct request *rq, int error) +{ + bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq)); + BUG_ON(!done); +} void blk_mq_complete_request(struct request *rq); diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h index 092b641..4a21a87 100644 --- a/include/linux/clk/ti.h +++ b/include/linux/clk/ti.h @@ -245,6 +245,10 @@ long omap2_dpll_round_rate(struct clk_hw *hw, unsigned long target_rate, void omap2_init_clk_clkdm(struct clk_hw *clk); unsigned long omap3_clkoutx2_recalc(struct clk_hw *hw, unsigned long parent_rate); +int omap3_clkoutx2_set_rate(struct clk_hw *hw, unsigned long rate, + unsigned long parent_rate); +long omap3_clkoutx2_round_rate(struct clk_hw *hw, unsigned long rate, + unsigned long *prate); int omap2_clkops_enable_clkdm(struct clk_hw *hw); void omap2_clkops_disable_clkdm(struct clk_hw *hw); int omap2_clk_disable_autoidle_all(void); diff --git a/include/linux/file.h b/include/linux/file.h index cbacf4f..4d69123 100644 --- a/include/linux/file.h +++ b/include/linux/file.h @@ -28,33 +28,36 @@ static inline void fput_light(struct file *file, int fput_needed) struct fd { struct file *file; - int need_put; + unsigned int flags; }; +#define FDPUT_FPUT 1 +#define FDPUT_POS_UNLOCK 2 static inline void fdput(struct fd fd) { - if (fd.need_put) + if (fd.flags & FDPUT_FPUT) fput(fd.file); } extern struct file *fget(unsigned int fd); -extern struct file *fget_light(unsigned int fd, int *fput_needed); +extern struct file *fget_raw(unsigned int fd); +extern unsigned long __fdget(unsigned int fd); +extern unsigned long __fdget_raw(unsigned int fd); +extern unsigned long __fdget_pos(unsigned int fd); -static inline struct fd fdget(unsigned int fd) +static inline struct fd __to_fd(unsigned long v) { - int b; - struct file *f = fget_light(fd, &b); - return (struct fd){f,b}; + return (struct fd){(struct file *)(v & ~3),v & 3}; } -extern struct file *fget_raw(unsigned int fd); -extern struct file *fget_raw_light(unsigned int fd, int *fput_needed); +static inline struct fd fdget(unsigned int fd) +{ + return __to_fd(__fdget(fd)); +} static inline struct fd fdget_raw(unsigned int fd) { - int b; - struct file *f = fget_raw_light(fd, &b); - return (struct fd){f,b}; + return __to_fd(__fdget_raw(fd)); } extern int f_dupfd(unsigned int from, struct file *file, unsigned flags); diff --git 
a/include/linux/firewire.h b/include/linux/firewire.h index 5d7782e..c3683bd 100644 --- a/include/linux/firewire.h +++ b/include/linux/firewire.h @@ -200,6 +200,7 @@ struct fw_device { unsigned irmc:1; unsigned bc_implemented:2; + work_func_t workfn; struct delayed_work work; struct fw_attribute_group attribute_group; }; diff --git a/include/linux/fs.h b/include/linux/fs.h index 6082956..23b2a35 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -123,6 +123,9 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset, /* File is opened with O_PATH; almost nothing can be done with it */ #define FMODE_PATH ((__force fmode_t)0x4000) +/* File needs atomic accesses to f_pos */ +#define FMODE_ATOMIC_POS ((__force fmode_t)0x8000) + /* File was opened by fanotify and shouldn't generate fanotify events */ #define FMODE_NONOTIFY ((__force fmode_t)0x1000000) @@ -780,13 +783,14 @@ struct file { const struct file_operations *f_op; /* - * Protects f_ep_links, f_flags, f_pos vs i_size in lseek SEEK_CUR. + * Protects f_ep_links, f_flags. * Must not be taken from IRQ context. */ spinlock_t f_lock; atomic_long_t f_count; unsigned int f_flags; fmode_t f_mode; + struct mutex f_pos_lock; loff_t f_pos; struct fown_struct f_owner; const struct cred *f_cred; @@ -808,7 +812,7 @@ struct file { #ifdef CONFIG_DEBUG_WRITECOUNT unsigned long f_mnt_write_state; #endif -}; +} __attribute__((aligned(4))); /* lest something weird decides that 2 is OK */ struct file_handle { __u32 handle_bytes; diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 0437439..39b81dc 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -123,6 +123,10 @@ struct vm_area_struct; __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN | \ __GFP_NO_KSWAPD) +/* + * GFP_THISNODE does not perform any reclaim, you most likely want to + * use __GFP_THISNODE to allocate from a given node without fallback! + */ #ifdef CONFIG_NUMA #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) #else diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 5f2052c..9b61b9b 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -590,10 +590,10 @@ static inline bool zone_is_empty(struct zone *zone) /* * The NUMA zonelists are doubled because we need zonelists that restrict the - * allocations to a single node for GFP_THISNODE. + * allocations to a single node for __GFP_THISNODE. * * [0] : Zonelist with fallback - * [1] : No fallback (GFP_THISNODE) + * [1] : No fallback (__GFP_THISNODE) */ #define MAX_ZONELISTS 2 diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index b2fb167..5624e4e 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -467,9 +467,14 @@ struct nfs_lockt_res { }; struct nfs_release_lockowner_args { + struct nfs4_sequence_args seq_args; struct nfs_lowner lock_owner; }; +struct nfs_release_lockowner_res { + struct nfs4_sequence_res seq_res; +}; + struct nfs4_delegreturnargs { struct nfs4_sequence_args seq_args; const struct nfs_fh *fhandle; diff --git a/include/linux/slab.h b/include/linux/slab.h index 9260abd..b5b2df6 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -410,7 +410,7 @@ static __always_inline void *kmalloc_large(size_t size, gfp_t flags) * * %GFP_NOWAIT - Allocation will not sleep. * - * %GFP_THISNODE - Allocate node-local memory only. + * %__GFP_THISNODE - Allocate node-local memory only. * * %GFP_DMA - Allocation suitable for DMA. * Should only be used for kmalloc() caches. 
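
Several hunks in this merge (kernel/profile.c, mm/migrate.c, plus the gfp.h, mmzone.h and slab.h comments nearby) replace GFP_THISNODE with __GFP_THISNODE. A minimal userspace sketch of why the two differ; the flag bit values below are illustrative stand-ins, not the kernel's actual ___GFP_* layout:

#include <stdio.h>

#define __GFP_THISNODE	0x01u	/* no fallback to other nodes */
#define __GFP_NOWARN	0x02u
#define __GFP_NORETRY	0x04u	/* give up early, no reclaim effort */
#define __GFP_WAIT	0x08u	/* may sleep / reclaim */

#define GFP_KERNEL	(__GFP_WAIT)
#define GFP_THISNODE	(__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY)

int main(void)
{
	/* OR-ing in the composite GFP_THISNODE silently drags NORETRY and
	 * NOWARN along, so the allocation skips reclaim even though the
	 * caller asked for GFP_KERNEL -- hence the switch to the bare bit. */
	unsigned int wrong = GFP_KERNEL | GFP_THISNODE;
	unsigned int right = GFP_KERNEL | __GFP_THISNODE;

	printf("composite: noretry=%u\n", !!(wrong & __GFP_NORETRY));	/* 1 */
	printf("bare bit:  noretry=%u\n", !!(right & __GFP_NORETRY));	/* 0 */
	return 0;
}
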
Otherwise, use a diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h index accc497..7159a0a 100644 --- a/include/linux/tracepoint.h +++ b/include/linux/tracepoint.h @@ -60,6 +60,12 @@ struct tp_module { unsigned int num_tracepoints; struct tracepoint * const *tracepoints_ptrs; }; +bool trace_module_has_bad_taint(struct module *mod); +#else +static inline bool trace_module_has_bad_taint(struct module *mod) +{ + return false; +} #endif /* CONFIG_MODULES */ struct tracepoint_iter { diff --git a/include/net/sock.h b/include/net/sock.h index 9678569..625e65b 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -1488,6 +1488,11 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb) */ #define sock_owned_by_user(sk) ((sk)->sk_lock.owned) +static inline void sock_release_ownership(struct sock *sk) +{ + sk->sk_lock.owned = 0; +} + /* * Macro so as to not evaluate some arguments when * lockdep is not enabled. @@ -2186,7 +2191,6 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk, { #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL) | \ (1UL << SOCK_RCVTSTAMP) | \ - (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE) | \ (1UL << SOCK_TIMESTAMPING_SOFTWARE) | \ (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE) | \ (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE)) diff --git a/include/target/iscsi/iscsi_transport.h b/include/target/iscsi/iscsi_transport.h index ae5a171..4483fad 100644 --- a/include/target/iscsi/iscsi_transport.h +++ b/include/target/iscsi/iscsi_transport.h @@ -12,6 +12,7 @@ struct iscsit_transport { int (*iscsit_setup_np)(struct iscsi_np *, struct __kernel_sockaddr_storage *); int (*iscsit_accept_np)(struct iscsi_np *, struct iscsi_conn *); void (*iscsit_free_np)(struct iscsi_np *); + void (*iscsit_wait_conn)(struct iscsi_conn *); void (*iscsit_free_conn)(struct iscsi_conn *); int (*iscsit_get_login_rx)(struct iscsi_conn *, struct iscsi_login *); int (*iscsit_put_login_tx)(struct iscsi_conn *, struct iscsi_login *, u32); diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h index ddc179b..1fef3e6 100644 --- a/include/trace/events/sunrpc.h +++ b/include/trace/events/sunrpc.h @@ -83,7 +83,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, ), TP_fast_assign( - __entry->client_id = clnt->cl_clid; + __entry->client_id = clnt ? 
clnt->cl_clid : -1; __entry->task_id = task->tk_pid; __entry->action = action; __entry->runstate = task->tk_runstate; @@ -91,7 +91,7 @@ DECLARE_EVENT_CLASS(rpc_task_running, __entry->flags = task->tk_flags; ), - TP_printk("task:%u@%u flags=%4.4x state=%4.4lx status=%d action=%pf", + TP_printk("task:%u@%d flags=%4.4x state=%4.4lx status=%d action=%pf", __entry->task_id, __entry->client_id, __entry->flags, __entry->runstate, diff --git a/init/main.c b/init/main.c index eb03090..9c7fd4c 100644 --- a/init/main.c +++ b/init/main.c @@ -561,7 +561,6 @@ asmlinkage void __init start_kernel(void) init_timers(); hrtimers_init(); softirq_init(); - acpi_early_init(); timekeeping_init(); time_init(); sched_clock_postinit(); @@ -613,6 +612,7 @@ asmlinkage void __init start_kernel(void) calibrate_delay(); pidmap_init(); anon_vma_init(); + acpi_early_init(); #ifdef CONFIG_X86 if (efi_enabled(EFI_RUNTIME_SERVICES)) efi_enter_virtual_mode(); diff --git a/kernel/audit.c b/kernel/audit.c index 34c5a23..3392d3e 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -182,7 +182,7 @@ struct audit_buffer { struct audit_reply { __u32 portid; - pid_t pid; + struct net *net; struct sk_buff *skb; }; @@ -500,7 +500,7 @@ int audit_send_list(void *_dest) { struct audit_netlink_list *dest = _dest; struct sk_buff *skb; - struct net *net = get_net_ns_by_pid(dest->pid); + struct net *net = dest->net; struct audit_net *aunet = net_generic(net, audit_net_id); /* wait for parent to finish and send an ACK */ @@ -510,6 +510,7 @@ int audit_send_list(void *_dest) while ((skb = __skb_dequeue(&dest->q)) != NULL) netlink_unicast(aunet->nlsk, skb, dest->portid, 0); + put_net(net); kfree(dest); return 0; @@ -543,7 +544,7 @@ out_kfree_skb: static int audit_send_reply_thread(void *arg) { struct audit_reply *reply = (struct audit_reply *)arg; - struct net *net = get_net_ns_by_pid(reply->pid); + struct net *net = reply->net; struct audit_net *aunet = net_generic(net, audit_net_id); mutex_lock(&audit_cmd_mutex); @@ -552,12 +553,13 @@ static int audit_send_reply_thread(void *arg) /* Ignore failure. It'll only happen if the sender goes away, because our timeout is set to infinite. */ netlink_unicast(aunet->nlsk , reply->skb, reply->portid, 0); + put_net(net); kfree(reply); return 0; } /** * audit_send_reply - send an audit reply message via netlink - * @portid: netlink port to which to send reply + * @request_skb: skb of request we are replying to (used to target the reply) * @seq: sequence number * @type: audit message type * @done: done (last) flag @@ -568,9 +570,11 @@ static int audit_send_reply_thread(void *arg) * Allocates an skb, builds the netlink message, and sends it to the port id. * No failure notifications. 
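
The audit changes in this hunk stop resolving the reply's network namespace late via get_net_ns_by_pid() (the pid may be gone by the time the kthread runs) and instead capture it from the request skb up front, pinned by get_net()/put_net(). A userspace pthread model of that pattern; the names mirror the patch but the types are stand-ins:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct net { atomic_int refs; int id; };

static struct net *get_net(struct net *n) { atomic_fetch_add(&n->refs, 1); return n; }

static void put_net(struct net *n)
{
	if (atomic_fetch_sub(&n->refs, 1) == 1)
		printf("net %d can now be freed\n", n->id);
}

struct audit_reply { struct net *net; unsigned int portid; };

static void *audit_send_reply_thread(void *arg)
{
	struct audit_reply *reply = arg;

	printf("unicast reply to portid %u in net %d\n",
	       reply->portid, reply->net->id);
	put_net(reply->net);	/* drop the reference taken at enqueue time */
	free(reply);
	return NULL;
}

int main(void)
{
	static struct net request_net = { 0, 7 };	/* "sock_net(request skb)" */
	struct audit_reply *reply = malloc(sizeof(*reply));
	pthread_t t;

	reply->net = get_net(&request_net);	/* captured while still valid */
	reply->portid = 42;			/* "NETLINK_CB(request_skb).portid" */
	pthread_create(&t, NULL, audit_send_reply_thread, reply);
	pthread_join(t, NULL);
	return 0;
}
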
*/ -static void audit_send_reply(__u32 portid, int seq, int type, int done, +static void audit_send_reply(struct sk_buff *request_skb, int seq, int type, int done, int multi, const void *payload, int size) { + u32 portid = NETLINK_CB(request_skb).portid; + struct net *net = sock_net(NETLINK_CB(request_skb).sk); struct sk_buff *skb; struct task_struct *tsk; struct audit_reply *reply = kmalloc(sizeof(struct audit_reply), @@ -583,8 +587,8 @@ static void audit_send_reply(__u32 portid, int seq, int type, int done, if (!skb) goto out; + reply->net = get_net(net); reply->portid = portid; - reply->pid = task_pid_vnr(current); reply->skb = skb; tsk = kthread_run(audit_send_reply_thread, reply, "audit_send_reply"); @@ -673,8 +677,7 @@ static int audit_get_feature(struct sk_buff *skb) seq = nlmsg_hdr(skb)->nlmsg_seq; - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, - &af, sizeof(af)); + audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &af, sizeof(af)); return 0; } @@ -794,8 +797,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) s.backlog = skb_queue_len(&audit_skb_queue); s.version = AUDIT_VERSION_LATEST; s.backlog_wait_time = audit_backlog_wait_time; - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0, - &s, sizeof(s)); + audit_send_reply(skb, seq, AUDIT_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_SET: { @@ -905,7 +907,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) seq, data, nlmsg_len(nlh)); break; case AUDIT_LIST_RULES: - err = audit_list_rules_send(NETLINK_CB(skb).portid, seq); + err = audit_list_rules_send(skb, seq); break; case AUDIT_TRIM: audit_trim_trees(); @@ -970,8 +972,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) memcpy(sig_data->ctx, ctx, len); security_release_secctx(ctx, len); } - audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_SIGNAL_INFO, - 0, 0, sig_data, sizeof(*sig_data) + len); + audit_send_reply(skb, seq, AUDIT_SIGNAL_INFO, 0, 0, + sig_data, sizeof(*sig_data) + len); kfree(sig_data); break; case AUDIT_TTY_GET: { @@ -983,8 +985,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh) s.log_passwd = tsk->signal->audit_tty_log_passwd; spin_unlock(&tsk->sighand->siglock); - audit_send_reply(NETLINK_CB(skb).portid, seq, - AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); + audit_send_reply(skb, seq, AUDIT_TTY_GET, 0, 0, &s, sizeof(s)); break; } case AUDIT_TTY_SET: { diff --git a/kernel/audit.h b/kernel/audit.h index 57cc64d..8df13221 100644 --- a/kernel/audit.h +++ b/kernel/audit.h @@ -247,7 +247,7 @@ extern void audit_panic(const char *message); struct audit_netlink_list { __u32 portid; - pid_t pid; + struct net *net; struct sk_buff_head q; }; diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c index 14a78cc..92062fd 100644 --- a/kernel/auditfilter.c +++ b/kernel/auditfilter.c @@ -29,6 +29,8 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/security.h> +#include <net/net_namespace.h> +#include <net/sock.h> #include "audit.h" /* @@ -1065,11 +1067,13 @@ int audit_rule_change(int type, __u32 portid, int seq, void *data, /** * audit_list_rules_send - list the audit rules - * @portid: target portid for netlink audit messages + * @request_skb: skb of request we are replying to (used to target the reply) * @seq: netlink audit message sequence (serial) number */ -int audit_list_rules_send(__u32 portid, int seq) +int audit_list_rules_send(struct sk_buff *request_skb, int seq) { + u32 portid = NETLINK_CB(request_skb).portid; + struct net *net = 
sock_net(NETLINK_CB(request_skb).sk); struct task_struct *tsk; struct audit_netlink_list *dest; int err = 0; @@ -1083,8 +1087,8 @@ int audit_list_rules_send(__u32 portid, int seq) dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL); if (!dest) return -ENOMEM; + dest->net = get_net(net); dest->portid = portid; - dest->pid = task_pid_vnr(current); skb_queue_head_init(&dest->q); mutex_lock(&audit_filter_mutex); diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 4410ac6..e6b1b66 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -974,12 +974,6 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. * - * Call holding cpuset_mutex, so current's cpuset won't change - * during this call, as manage_mutex holds off any cpuset_attach() - * calls. Therefore we don't need to take task_lock around the - * call to guarantee_online_mems(), as we know no one is changing - * our task's cpuset. - * * While the mm_struct we are migrating is typically from some * other task, the task_struct mems_allowed that we are hacking * is for our current task, which must allocate new pages for that @@ -996,8 +990,10 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL); + rcu_read_lock(); mems_cs = effective_nodemask_cpuset(task_cs(tsk)); guarantee_online_mems(mems_cs, &tsk->mems_allowed); + rcu_read_unlock(); } /* @@ -2486,9 +2482,9 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) task_lock(current); cs = nearest_hardwall_ancestor(task_cs(current)); + allowed = node_isset(node, cs->mems_allowed); task_unlock(current); - allowed = node_isset(node, cs->mems_allowed); mutex_unlock(&callback_mutex); return allowed; } diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c index cf68bb3..f140337 100644 --- a/kernel/irq/irqdomain.c +++ b/kernel/irq/irqdomain.c @@ -10,6 +10,7 @@ #include <linux/mutex.h> #include <linux/of.h> #include <linux/of_address.h> +#include <linux/of_irq.h> #include <linux/topology.h> #include <linux/seq_file.h> #include <linux/slab.h> diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 481a13c..d3bf660 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -802,8 +802,7 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc, static void wake_threads_waitq(struct irq_desc *desc) { - if (atomic_dec_and_test(&desc->threads_active) && - waitqueue_active(&desc->wait_for_threads)) + if (atomic_dec_and_test(&desc->threads_active)) wake_up(&desc->wait_for_threads); } diff --git a/kernel/profile.c b/kernel/profile.c index 6631e1e..ebdd9c1 100644 --- a/kernel/profile.c +++ b/kernel/profile.c @@ -549,14 +549,14 @@ static int create_hash_tables(void) struct page *page; page = alloc_pages_exact_node(node, - GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0); if (!page) goto out_cleanup; per_cpu(cpu_profile_hits, cpu)[1] = (struct profile_hit *)page_address(page); page = alloc_pages_exact_node(node, - GFP_KERNEL | __GFP_ZERO | GFP_THISNODE, + GFP_KERNEL | __GFP_ZERO | __GFP_THISNODE, 0); if (!page) goto out_cleanup; diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index e71ffd4..f3989ce 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c @@ -1777,6 +1777,16 @@ static void trace_module_add_events(struct module *mod) { struct ftrace_event_call **call, **start, **end; 
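
The guard added at this point in trace_module_add_events() relies on trace_module_has_bad_taint(), introduced further down in kernel/tracepoint.c. A self-contained sketch of the mask logic; the TAINT_* bit numbers are quoted from the kernel flag list of this era and should be treated as assumptions of the sketch:

#include <stdbool.h>
#include <stdio.h>

#define TAINT_FORCED_MODULE	1	/* forced load: headers may not match */
#define TAINT_CRAP		10	/* staging driver */
#define TAINT_OOT_MODULE	12	/* out-of-tree module */

/* Only the out-of-tree and staging taints are tolerable for tracepoints;
 * anything else could mean mismatched module headers and a crash. */
static bool has_bad_taint(unsigned long taints)
{
	return taints & ~((1UL << TAINT_OOT_MODULE) | (1UL << TAINT_CRAP));
}

int main(void)
{
	printf("oot:    %d\n", has_bad_taint(1UL << TAINT_OOT_MODULE));		/* 0 */
	printf("forced: %d\n", has_bad_taint(1UL << TAINT_FORCED_MODULE));	/* 1 */
	return 0;
}
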
+ if (!mod->num_trace_events) + return; + + /* Don't add infrastructure for mods without tracepoints */ + if (trace_module_has_bad_taint(mod)) { + pr_err("%s: module has bad taint, not creating trace events\n", + mod->name); + return; + } + start = mod->trace_events; end = mod->trace_events + mod->num_trace_events; diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c index 29f2654..031cc56 100644 --- a/kernel/tracepoint.c +++ b/kernel/tracepoint.c @@ -631,6 +631,11 @@ void tracepoint_iter_reset(struct tracepoint_iter *iter) EXPORT_SYMBOL_GPL(tracepoint_iter_reset); #ifdef CONFIG_MODULES +bool trace_module_has_bad_taint(struct module *mod) +{ + return mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP)); +} + static int tracepoint_module_coming(struct module *mod) { struct tp_module *tp_mod, *iter; @@ -641,7 +646,7 @@ static int tracepoint_module_coming(struct module *mod) * module headers (for forced load), to make sure we don't cause a crash. * Staging and out-of-tree GPL modules are fine. */ - if (mod->taints & ~((1 << TAINT_OOT_MODULE) | (1 << TAINT_CRAP))) + if (trace_module_has_bad_taint(mod)) return 0; mutex_lock(&tracepoints_mutex); tp_mod = kmalloc(sizeof(struct tp_module), GFP_KERNEL); @@ -575,5 +575,5 @@ config PGTABLE_MAPPING then you should select this. This causes zsmalloc to use page table mapping rather than copying for object mapping. - You can check speed with zsmalloc benchmark[1]. - [1] https://github.com/spartacus06/zsmalloc + You can check speed with zsmalloc benchmark: + https://github.com/spartacus06/zsmapbench diff --git a/mm/compaction.c b/mm/compaction.c index b48c525..9185775 100644 --- a/mm/compaction.c +++ b/mm/compaction.c @@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, { int nr_scanned = 0, total_isolated = 0; struct page *cursor, *valid_page = NULL; - unsigned long nr_strict_required = end_pfn - blockpfn; unsigned long flags; bool locked = false; @@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, nr_scanned++; if (!pfn_valid_within(blockpfn)) - continue; + goto isolate_fail; + if (!valid_page) valid_page = page; if (!PageBuddy(page)) - continue; + goto isolate_fail; /* * The zone lock must be held to isolate freepages. @@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, /* Recheck this is a buddy page under lock */ if (!PageBuddy(page)) - continue; + goto isolate_fail; /* Found a free page, break it into order-0 pages */ isolated = split_free_page(page); - if (!isolated && strict) - break; total_isolated += isolated; for (i = 0; i < isolated; i++) { list_add(&page->lru, freelist); @@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, if (isolated) { blockpfn += isolated - 1; cursor += isolated - 1; + continue; } + +isolate_fail: + if (strict) + break; + else + continue; + } trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated); @@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, * pages requested were isolated. If there were any failures, 0 is * returned and CMA will fail. 
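
The compaction hunk below restructures isolate_freepages_block() around an isolate_fail label: in strict mode any failure breaks out of the scan, so "blockpfn < end_pfn" afterwards is exactly "something was skipped" and replaces the old nr_strict_required counter. A toy model of that control flow, with a simple array predicate standing in for pfn_valid_within()/PageBuddy():

#include <stdio.h>

static int isolate_range(unsigned long blockpfn, unsigned long end_pfn,
			 int strict, const int *free_map)
{
	int total_isolated = 0;

	for (; blockpfn < end_pfn; blockpfn++) {
		if (!free_map[blockpfn])
			goto isolate_fail;
		total_isolated++;
		continue;
isolate_fail:
		if (strict)
			break;		/* scan stops short of end_pfn */
	}

	if (strict && blockpfn < end_pfn)
		return 0;	/* partial isolation is useless to CMA */
	return total_isolated;
}

int main(void)
{
	int map[8] = { 1, 1, 0, 1, 1, 1, 1, 1 };

	printf("strict:      %d\n", isolate_range(0, 8, 1, map));	/* 0 */
	printf("best-effort: %d\n", isolate_range(0, 8, 0, map));	/* 7 */
	return 0;
}
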
*/ - if (strict && nr_strict_required > total_isolated) + if (strict && blockpfn < end_pfn) total_isolated = 0; if (locked) diff --git a/mm/migrate.c b/mm/migrate.c index 482a33d..b494fdb 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1158,7 +1158,7 @@ static struct page *new_page_node(struct page *p, unsigned long private, pm->node); else return alloc_pages_exact_node(pm->node, - GFP_HIGHUSER_MOVABLE | GFP_THISNODE, 0); + GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0); } /* @@ -1544,9 +1544,9 @@ static struct page *alloc_misplaced_dst_page(struct page *page, struct page *newpage; newpage = alloc_pages_exact_node(nid, - (GFP_HIGHUSER_MOVABLE | GFP_THISNODE | - __GFP_NOMEMALLOC | __GFP_NORETRY | - __GFP_NOWARN) & + (GFP_HIGHUSER_MOVABLE | + __GFP_THISNODE | __GFP_NOMEMALLOC | + __GFP_NORETRY | __GFP_NOWARN) & ~GFP_IOFS, 0); return newpage; @@ -1747,7 +1747,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm, goto out_dropref; new_page = alloc_pages_node(node, - (GFP_TRANSHUGE | GFP_THISNODE) & ~__GFP_WAIT, HPAGE_PMD_ORDER); + (GFP_TRANSHUGE | __GFP_THISNODE) & ~__GFP_WAIT, + HPAGE_PMD_ORDER); if (!new_page) goto out_fail; diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index 566adbf..b38e715 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -538,6 +538,9 @@ static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev struct vlan_dev_priv *vlan = vlan_dev_priv(dev); struct net_device *real_dev = vlan->real_dev; + if (saddr == NULL) + saddr = dev->dev_addr; + return dev_hard_header(skb, real_dev, type, daddr, saddr, len); } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index c97c3c8..7b757b5 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -1127,9 +1127,10 @@ static void br_multicast_query_received(struct net_bridge *br, struct net_bridge_port *port, struct bridge_mcast_querier *querier, int saddr, + bool is_general_query, unsigned long max_delay) { - if (saddr) + if (saddr && is_general_query) br_multicast_update_querier_timer(br, querier, max_delay); else if (timer_pending(&querier->timer)) return; @@ -1181,8 +1182,16 @@ static int br_ip4_multicast_query(struct net_bridge *br, IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; } + /* RFC2236+RFC3376 (IGMPv2+IGMPv3) require the multicast link layer + * all-systems destination addresses (224.0.0.1) for general queries + */ + if (!group && iph->daddr != htonl(INADDR_ALLHOSTS_GROUP)) { + err = -EINVAL; + goto out; + } + br_multicast_query_received(br, port, &br->ip4_querier, !!iph->saddr, - max_delay); + !group, max_delay); if (!group) goto out; @@ -1228,6 +1237,7 @@ static int br_ip6_multicast_query(struct net_bridge *br, unsigned long max_delay; unsigned long now = jiffies; const struct in6_addr *group = NULL; + bool is_general_query; int err = 0; spin_lock(&br->multicast_lock); @@ -1235,6 +1245,12 @@ static int br_ip6_multicast_query(struct net_bridge *br, (port && port->state == BR_STATE_DISABLED)) goto out; + /* RFC2710+RFC3810 (MLDv1+MLDv2) require link-local source addresses */ + if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_LINKLOCAL)) { + err = -EINVAL; + goto out; + } + if (skb->len == sizeof(*mld)) { if (!pskb_may_pull(skb, sizeof(*mld))) { err = -EINVAL; @@ -1256,8 +1272,19 @@ static int br_ip6_multicast_query(struct net_bridge *br, max_delay = max(msecs_to_jiffies(mldv2_mrc(mld2q)), 1UL); } + is_general_query = group && ipv6_addr_any(group); + + /* RFC2710+RFC3810 (MLDv1+MLDv2) require the multicast link layer + * all-nodes 
destination address (ff02::1) for general queries + */ + if (is_general_query && !ipv6_addr_is_ll_all_nodes(&ip6h->daddr)) { + err = -EINVAL; + goto out; + } + br_multicast_query_received(br, port, &br->ip6_querier, - !ipv6_addr_any(&ip6h->saddr), max_delay); + !ipv6_addr_any(&ip6h->saddr), + is_general_query, max_delay); if (!group) goto out; diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 8b52dfc..3f14c63 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -2838,81 +2838,84 @@ EXPORT_SYMBOL_GPL(skb_pull_rcsum); /** * skb_segment - Perform protocol segmentation on skb. - * @skb: buffer to segment + * @head_skb: buffer to segment * @features: features for the output path (see dev->features) * * This function performs segmentation on the given skb. It returns * a pointer to the first in a list of new skbs for the segments. * In case of error it returns ERR_PTR(err). */ -struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) +struct sk_buff *skb_segment(struct sk_buff *head_skb, + netdev_features_t features) { struct sk_buff *segs = NULL; struct sk_buff *tail = NULL; - struct sk_buff *fskb = skb_shinfo(skb)->frag_list; - skb_frag_t *skb_frag = skb_shinfo(skb)->frags; - unsigned int mss = skb_shinfo(skb)->gso_size; - unsigned int doffset = skb->data - skb_mac_header(skb); + struct sk_buff *list_skb = skb_shinfo(head_skb)->frag_list; + skb_frag_t *frag = skb_shinfo(head_skb)->frags; + unsigned int mss = skb_shinfo(head_skb)->gso_size; + unsigned int doffset = head_skb->data - skb_mac_header(head_skb); + struct sk_buff *frag_skb = head_skb; unsigned int offset = doffset; - unsigned int tnl_hlen = skb_tnl_header_len(skb); + unsigned int tnl_hlen = skb_tnl_header_len(head_skb); unsigned int headroom; unsigned int len; __be16 proto; bool csum; int sg = !!(features & NETIF_F_SG); - int nfrags = skb_shinfo(skb)->nr_frags; + int nfrags = skb_shinfo(head_skb)->nr_frags; int err = -ENOMEM; int i = 0; int pos; - proto = skb_network_protocol(skb); + proto = skb_network_protocol(head_skb); if (unlikely(!proto)) return ERR_PTR(-EINVAL); csum = !!can_checksum_protocol(features, proto); - __skb_push(skb, doffset); - headroom = skb_headroom(skb); - pos = skb_headlen(skb); + __skb_push(head_skb, doffset); + headroom = skb_headroom(head_skb); + pos = skb_headlen(head_skb); do { struct sk_buff *nskb; - skb_frag_t *frag; + skb_frag_t *nskb_frag; int hsize; int size; - len = skb->len - offset; + len = head_skb->len - offset; if (len > mss) len = mss; - hsize = skb_headlen(skb) - offset; + hsize = skb_headlen(head_skb) - offset; if (hsize < 0) hsize = 0; if (hsize > len || !sg) hsize = len; - if (!hsize && i >= nfrags && skb_headlen(fskb) && - (skb_headlen(fskb) == len || sg)) { - BUG_ON(skb_headlen(fskb) > len); + if (!hsize && i >= nfrags && skb_headlen(list_skb) && + (skb_headlen(list_skb) == len || sg)) { + BUG_ON(skb_headlen(list_skb) > len); i = 0; - nfrags = skb_shinfo(fskb)->nr_frags; - skb_frag = skb_shinfo(fskb)->frags; - pos += skb_headlen(fskb); + nfrags = skb_shinfo(list_skb)->nr_frags; + frag = skb_shinfo(list_skb)->frags; + frag_skb = list_skb; + pos += skb_headlen(list_skb); while (pos < offset + len) { BUG_ON(i >= nfrags); - size = skb_frag_size(skb_frag); + size = skb_frag_size(frag); if (pos + size > offset + len) break; i++; pos += size; - skb_frag++; + frag++; } - nskb = skb_clone(fskb, GFP_ATOMIC); - fskb = fskb->next; + nskb = skb_clone(list_skb, GFP_ATOMIC); + list_skb = list_skb->next; if (unlikely(!nskb)) goto err; @@ -2933,7 +2936,7 @@ struct sk_buff 
*skb_segment(struct sk_buff *skb, netdev_features_t features) __skb_push(nskb, doffset); } else { nskb = __alloc_skb(hsize + doffset + headroom, - GFP_ATOMIC, skb_alloc_rx_flag(skb), + GFP_ATOMIC, skb_alloc_rx_flag(head_skb), NUMA_NO_NODE); if (unlikely(!nskb)) @@ -2949,12 +2952,12 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) segs = nskb; tail = nskb; - __copy_skb_header(nskb, skb); - nskb->mac_len = skb->mac_len; + __copy_skb_header(nskb, head_skb); + nskb->mac_len = head_skb->mac_len; skb_headers_offset_update(nskb, skb_headroom(nskb) - headroom); - skb_copy_from_linear_data_offset(skb, -tnl_hlen, + skb_copy_from_linear_data_offset(head_skb, -tnl_hlen, nskb->data - tnl_hlen, doffset + tnl_hlen); @@ -2963,30 +2966,32 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) if (!sg) { nskb->ip_summed = CHECKSUM_NONE; - nskb->csum = skb_copy_and_csum_bits(skb, offset, + nskb->csum = skb_copy_and_csum_bits(head_skb, offset, skb_put(nskb, len), len, 0); continue; } - frag = skb_shinfo(nskb)->frags; + nskb_frag = skb_shinfo(nskb)->frags; - skb_copy_from_linear_data_offset(skb, offset, + skb_copy_from_linear_data_offset(head_skb, offset, skb_put(nskb, hsize), hsize); - skb_shinfo(nskb)->tx_flags = skb_shinfo(skb)->tx_flags & SKBTX_SHARED_FRAG; + skb_shinfo(nskb)->tx_flags = skb_shinfo(head_skb)->tx_flags & + SKBTX_SHARED_FRAG; while (pos < offset + len) { if (i >= nfrags) { - BUG_ON(skb_headlen(fskb)); + BUG_ON(skb_headlen(list_skb)); i = 0; - nfrags = skb_shinfo(fskb)->nr_frags; - skb_frag = skb_shinfo(fskb)->frags; + nfrags = skb_shinfo(list_skb)->nr_frags; + frag = skb_shinfo(list_skb)->frags; + frag_skb = list_skb; BUG_ON(!nfrags); - fskb = fskb->next; + list_skb = list_skb->next; } if (unlikely(skb_shinfo(nskb)->nr_frags >= @@ -2997,27 +3002,30 @@ struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features) goto err; } - *frag = *skb_frag; - __skb_frag_ref(frag); - size = skb_frag_size(frag); + if (unlikely(skb_orphan_frags(frag_skb, GFP_ATOMIC))) + goto err; + + *nskb_frag = *frag; + __skb_frag_ref(nskb_frag); + size = skb_frag_size(nskb_frag); if (pos < offset) { - frag->page_offset += offset - pos; - skb_frag_size_sub(frag, offset - pos); + nskb_frag->page_offset += offset - pos; + skb_frag_size_sub(nskb_frag, offset - pos); } skb_shinfo(nskb)->nr_frags++; if (pos + size <= offset + len) { i++; - skb_frag++; + frag++; pos += size; } else { - skb_frag_size_sub(frag, pos + size - (offset + len)); + skb_frag_size_sub(nskb_frag, pos + size - (offset + len)); goto skip_fraglist; } - frag++; + nskb_frag++; } skip_fraglist: @@ -3031,7 +3039,7 @@ perform_csum_check: nskb->len - doffset, 0); nskb->ip_summed = CHECKSUM_NONE; } - } while ((offset += len) < skb->len); + } while ((offset += len) < head_skb->len); return segs; diff --git a/net/core/sock.c b/net/core/sock.c index 5b6a943..c0fc6bd 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -2357,10 +2357,13 @@ void release_sock(struct sock *sk) if (sk->sk_backlog.tail) __release_sock(sk); + /* Warning : release_cb() might need to release sk ownership, + * ie call sock_release_ownership(sk) before us. 
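
The release_sock()/tcp_release_cb() pairing being patched here hinges on one detail: the protocol callback may need to give up socket ownership itself, so the generic path must funnel the clearing through one helper that stays harmless if the callback already ran it. A compact userspace model of that ordering (types are stand-ins):

#include <stdio.h>

struct sock { int owned; void (*release_cb)(struct sock *sk); };

static void sock_release_ownership(struct sock *sk) { sk->owned = 0; }

static void tcp_release_cb(struct sock *sk)
{
	/* deferred work must run as if from BH context: give up ownership
	 * first, exactly as the tcp_output.c hunk does */
	sock_release_ownership(sk);
	printf("deferred work runs with owned=%d\n", sk->owned);
}

static void release_sock(struct sock *sk)
{
	if (sk->release_cb)
		sk->release_cb(sk);
	sock_release_ownership(sk);	/* no-op if the callback already did */
	printf("after release_sock: owned=%d\n", sk->owned);
}

int main(void)
{
	struct sock sk = { 1, tcp_release_cb };

	release_sock(&sk);
	return 0;
}
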
+ */ if (sk->sk_prot->release_cb) sk->sk_prot->release_cb(sk); - sk->sk_lock.owned = 0; + sock_release_ownership(sk); if (waitqueue_active(&sk->sk_lock.wq)) wake_up(&sk->sk_lock.wq); spin_unlock_bh(&sk->sk_lock.slock); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index bb075fc..3b01959 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -208,7 +208,7 @@ int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force) } work = frag_mem_limit(nf) - nf->low_thresh; - while (work > 0) { + while (work > 0 || force) { spin_lock(&nf->lru_lock); if (list_empty(&nf->lru_list)) { @@ -278,9 +278,10 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf, atomic_inc(&qp->refcnt); hlist_add_head(&qp->list, &hb->chain); + inet_frag_lru_add(nf, qp); spin_unlock(&hb->chain_lock); read_unlock(&f->lock); - inet_frag_lru_add(nf, qp); + return qp; } diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 5a163de..e0a7b33 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -780,6 +780,17 @@ void tcp_release_cb(struct sock *sk) if (flags & (1UL << TCP_TSQ_DEFERRED)) tcp_tsq_handler(sk); + /* Here begins the tricky part : + * We are called from release_sock() with : + * 1) BH disabled + * 2) sk_lock.slock spinlock held + * 3) socket owned by us (sk->sk_lock.owned == 1) + * + * But following code is meant to be called from BH handlers, + * so we should keep BH disabled, but early release socket ownership + */ + sock_release_ownership(sk); + if (flags & (1UL << TCP_WRITE_TIMER_DEFERRED)) { tcp_write_timer_handler(sk); __sock_put(sk); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index fdbfeca..344e972 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -1103,8 +1103,11 @@ retry: * Lifetime is greater than REGEN_ADVANCE time units. In particular, * an implementation must not create a temporary address with a zero * Preferred Lifetime. + * Use age calculation as in addrconf_verify to avoid unnecessary + * temporary addresses being generated. */ - if (tmp_prefered_lft <= regen_advance) { + age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + if (tmp_prefered_lft <= regen_advance + age) { in6_ifa_put(ifp); in6_dev_put(idev); ret = -1; diff --git a/net/ipv6/exthdrs_offload.c b/net/ipv6/exthdrs_offload.c index cf77f3a..447a7fb 100644 --- a/net/ipv6/exthdrs_offload.c +++ b/net/ipv6/exthdrs_offload.c @@ -25,11 +25,11 @@ int __init ipv6_exthdrs_offload_init(void) int ret; ret = inet6_add_offload(&rthdr_offload, IPPROTO_ROUTING); - if (!ret) + if (ret) goto out; ret = inet6_add_offload(&dstopt_offload, IPPROTO_DSTOPTS); - if (!ret) + if (ret) goto out_rt; out: diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 11dac21..fba54a4 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -1513,7 +1513,7 @@ int ip6_route_add(struct fib6_config *cfg) if (!table) goto out; - rt = ip6_dst_alloc(net, NULL, DST_NOCOUNT, table); + rt = ip6_dst_alloc(net, NULL, (cfg->fc_flags & RTF_ADDRCONF) ? 
0 : DST_NOCOUNT, table); if (!rt) { err = -ENOMEM; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index dcab9dd..ab48d41 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -112,7 +112,6 @@ struct l2tp_net { spinlock_t l2tp_session_hlist_lock; }; -static void l2tp_session_set_header_len(struct l2tp_session *session, int version); static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); static inline struct l2tp_tunnel *l2tp_tunnel(struct sock *sk) @@ -1841,7 +1840,7 @@ EXPORT_SYMBOL_GPL(l2tp_session_delete); /* We come here whenever a session's send_seq, cookie_len or * l2specific_len parameters are set. */ -static void l2tp_session_set_header_len(struct l2tp_session *session, int version) +void l2tp_session_set_header_len(struct l2tp_session *session, int version) { if (version == L2TP_HDR_VER_2) { session->hdr_len = 6; @@ -1854,6 +1853,7 @@ static void l2tp_session_set_header_len(struct l2tp_session *session, int versio } } +EXPORT_SYMBOL_GPL(l2tp_session_set_header_len); struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 1f01ba3..3f93ccd 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -263,6 +263,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, int length, int (*payload_hook)(struct sk_buff *skb)); int l2tp_session_queue_purge(struct l2tp_session *session); int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb); +void l2tp_session_set_header_len(struct l2tp_session *session, int version); int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index 4cfd722..bd7387a 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -578,8 +578,10 @@ static int l2tp_nl_cmd_session_modify(struct sk_buff *skb, struct genl_info *inf if (info->attrs[L2TP_ATTR_RECV_SEQ]) session->recv_seq = nla_get_u8(info->attrs[L2TP_ATTR_RECV_SEQ]); - if (info->attrs[L2TP_ATTR_SEND_SEQ]) + if (info->attrs[L2TP_ATTR_SEND_SEQ]) { session->send_seq = nla_get_u8(info->attrs[L2TP_ATTR_SEND_SEQ]); + l2tp_session_set_header_len(session, session->tunnel->version); + } if (info->attrs[L2TP_ATTR_LNS_MODE]) session->lns_mode = nla_get_u8(info->attrs[L2TP_ATTR_LNS_MODE]); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index ec40bc3..d276e2d 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -254,12 +254,14 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int po = pppox_sk(sk); ppp_input(&po->chan, skb); } else { - l2tp_info(session, PPPOL2TP_MSG_DATA, "%s: socket not bound\n", - session->name); + l2tp_dbg(session, PPPOL2TP_MSG_DATA, + "%s: recv %d byte data frame, passing to L2TP socket\n", + session->name, data_len); - /* Not bound. Nothing we can do, so discard. */ - atomic_long_inc(&session->stats.rx_errors); - kfree_skb(skb); + if (sock_queue_rcv_skb(sk, skb) < 0) { + atomic_long_inc(&session->stats.rx_errors); + kfree_skb(skb); + } } return; @@ -1309,6 +1311,7 @@ static int pppol2tp_session_setsockopt(struct sock *sk, po->chan.hdrlen = val ? 
PPPOL2TP_L2TP_HDR_SIZE_SEQ : PPPOL2TP_L2TP_HDR_SIZE_NOSEQ; } + l2tp_session_set_header_len(session, session->tunnel->version); l2tp_info(session, PPPOL2TP_MSG_CONTROL, "%s: set send_seq=%d\n", session->name, session->send_seq); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 42c6592..bd1fd8e 100644 --- a/net/mac80211/chan.c +++ b/net/mac80211/chan.c @@ -100,6 +100,12 @@ ieee80211_get_chanctx_max_required_bw(struct ieee80211_local *local, } max_bw = max(max_bw, width); } + + /* use the configured bandwidth in case of monitor interface */ + sdata = rcu_dereference(local->monitor_sdata); + if (sdata && rcu_access_pointer(sdata->vif.chanctx_conf) == conf) + max_bw = max(max_bw, conf->def.width); + rcu_read_unlock(); return max_bw; diff --git a/net/mac80211/mesh_ps.c b/net/mac80211/mesh_ps.c index 2802f9d..ad8b377 100644 --- a/net/mac80211/mesh_ps.c +++ b/net/mac80211/mesh_ps.c @@ -36,6 +36,7 @@ static struct sk_buff *mps_qos_null_get(struct sta_info *sta) sdata->vif.addr); nullfunc->frame_control = fc; nullfunc->duration_id = 0; + nullfunc->seq_ctrl = 0; /* no address resolution for this frame -> set addr 1 immediately */ memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); memset(skb_put(skb, 2), 0, 2); /* append QoS control field */ diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a023b43..137a192 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -1206,6 +1206,7 @@ static void ieee80211_send_null_response(struct ieee80211_sub_if_data *sdata, memcpy(nullfunc->addr1, sta->sta.addr, ETH_ALEN); memcpy(nullfunc->addr2, sdata->vif.addr, ETH_ALEN); memcpy(nullfunc->addr3, sdata->vif.addr, ETH_ALEN); + nullfunc->seq_ctrl = 0; skb->priority = tid; skb_set_queue_mapping(skb, ieee802_1d_to_ac[tid]); diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c index 0a99d7c..a0b84e0 100644 --- a/net/sched/sch_api.c +++ b/net/sched/sch_api.c @@ -273,11 +273,12 @@ static struct Qdisc *qdisc_match_from_root(struct Qdisc *root, u32 handle) void qdisc_list_add(struct Qdisc *q) { - struct Qdisc *root = qdisc_dev(q)->qdisc; + if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) { + struct Qdisc *root = qdisc_dev(q)->qdisc; - WARN_ON_ONCE(root == &noop_qdisc); - if ((q->parent != TC_H_ROOT) && !(q->flags & TCQ_F_INGRESS)) + WARN_ON_ONCE(root == &noop_qdisc); list_add_tail(&q->list, &root->list); + } } EXPORT_SYMBOL(qdisc_list_add); diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c index 632090b..3a1767e 100644 --- a/net/sctp/sm_make_chunk.c +++ b/net/sctp/sm_make_chunk.c @@ -1421,8 +1421,8 @@ static void sctp_chunk_destroy(struct sctp_chunk *chunk) BUG_ON(!list_empty(&chunk->list)); list_del_init(&chunk->transmitted_list); - /* Free the chunk skb data and the SCTP_chunk stub itself. 
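
The sctp hunk just below swaps kfree_skb() for consume_skb(). Both free the buffer, but kfree_skb() semantically means "packet dropped" (it is what drop monitors and the kfree_skb tracepoint observe) while consume_skb() means "packet fully processed"; to my understanding both tolerate NULL, which matters since chunk->auth_chunk is only sometimes set. Stand-in implementations to make the distinction concrete:

#include <stdio.h>
#include <stdlib.h>

struct sk_buff { void *data; };

static void kfree_skb(struct sk_buff *skb)	/* packet was dropped */
{
	if (!skb)
		return;
	fprintf(stderr, "drop\n");	/* would fire the drop tracepoint */
	free(skb->data);
	free(skb);
}

static void consume_skb(struct sk_buff *skb)	/* normal end of life */
{
	if (!skb)
		return;
	free(skb->data);
	free(skb);
}

int main(void)
{
	struct sk_buff *skb = malloc(sizeof(*skb));

	skb->data = malloc(64);
	consume_skb(skb);	/* no drop accounting for a routine free */
	consume_skb(NULL);	/* safe no-op, as for a missing auth_chunk */
	(void)kfree_skb;	/* reserved for genuine drops */
	return 0;
}
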
*/ - dev_kfree_skb(chunk->skb); + consume_skb(chunk->skb); + consume_skb(chunk->auth_chunk); SCTP_DBG_OBJCNT_DEC(chunk); kmem_cache_free(sctp_chunk_cachep, chunk); diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index ae65b6b..01e0024 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -760,7 +760,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, /* Make sure that we and the peer are AUTH capable */ if (!net->sctp.auth_enable || !new_asoc->peer.auth_capable) { - kfree_skb(chunk->auth_chunk); sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); } @@ -775,10 +774,6 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net, auth.transport = chunk->transport; ret = sctp_sf_authenticate(net, ep, new_asoc, type, &auth); - - /* We can now safely free the auth_chunk clone */ - kfree_skb(chunk->auth_chunk); - if (ret != SCTP_IERROR_NO_ERROR) { sctp_association_free(new_asoc); return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands); diff --git a/net/socket.c b/net/socket.c index 840cffb..f25eaa3 100644 --- a/net/socket.c +++ b/net/socket.c @@ -450,16 +450,17 @@ EXPORT_SYMBOL(sockfd_lookup); static struct socket *sockfd_lookup_light(int fd, int *err, int *fput_needed) { - struct file *file; + struct fd f = fdget(fd); struct socket *sock; *err = -EBADF; - file = fget_light(fd, fput_needed); - if (file) { - sock = sock_from_file(file, err); - if (sock) + if (f.file) { + sock = sock_from_file(f.file, err); + if (likely(sock)) { + *fput_needed = f.flags; return sock; - fput_light(file, *fput_needed); + } + fdput(f); } return NULL; } @@ -1985,6 +1986,10 @@ static int copy_msghdr_from_user(struct msghdr *kmsg, { if (copy_from_user(kmsg, umsg, sizeof(struct msghdr))) return -EFAULT; + + if (kmsg->msg_namelen < 0) + return -EINVAL; + if (kmsg->msg_namelen > sizeof(struct sockaddr_storage)) kmsg->msg_namelen = sizeof(struct sockaddr_storage); return 0; diff --git a/net/tipc/config.c b/net/tipc/config.c index e74eef2..e6d7216 100644 --- a/net/tipc/config.c +++ b/net/tipc/config.c @@ -376,7 +376,6 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, struct tipc_cfg_msg_hdr *req_hdr; struct tipc_cfg_msg_hdr *rep_hdr; struct sk_buff *rep_buf; - int ret; /* Validate configuration message header (ignore invalid message) */ req_hdr = (struct tipc_cfg_msg_hdr *)buf; @@ -398,12 +397,8 @@ static void cfg_conn_msg_event(int conid, struct sockaddr_tipc *addr, memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr)); rep_hdr->tcm_len = htonl(rep_buf->len); rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST); - - ret = tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, - rep_buf->len); - if (ret < 0) - pr_err("Sending cfg reply message failed, no memory\n"); - + tipc_conn_sendmsg(&cfgsrv, conid, addr, rep_buf->data, + rep_buf->len); kfree_skb(rep_buf); } } diff --git a/net/tipc/handler.c b/net/tipc/handler.c index e4bc8a2..1fabf16 100644 --- a/net/tipc/handler.c +++ b/net/tipc/handler.c @@ -58,7 +58,6 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument) spin_lock_bh(&qitem_lock); if (!handler_enabled) { - pr_err("Signal request ignored by handler\n"); spin_unlock_bh(&qitem_lock); return -ENOPROTOOPT; } diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c index 48302be..042e8e3 100644 --- a/net/tipc/name_table.c +++ b/net/tipc/name_table.c @@ -941,17 +941,48 @@ int tipc_nametbl_init(void) return 0; } +/** + * tipc_purge_publications - remove all publications for a given type + * + * tipc_nametbl_lock must be held 
when calling this function + */ +static void tipc_purge_publications(struct name_seq *seq) +{ + struct publication *publ, *safe; + struct sub_seq *sseq; + struct name_info *info; + + if (!seq->sseqs) { + nameseq_delete_empty(seq); + return; + } + sseq = seq->sseqs; + info = sseq->info; + list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) { + tipc_nametbl_remove_publ(publ->type, publ->lower, publ->node, + publ->ref, publ->key); + } +} + void tipc_nametbl_stop(void) { u32 i; + struct name_seq *seq; + struct hlist_head *seq_head; + struct hlist_node *safe; - /* Verify name table is empty, then release it */ + /* Verify name table is empty and purge any lingering + * publications, then release the name table + */ write_lock_bh(&tipc_nametbl_lock); for (i = 0; i < TIPC_NAMETBL_SIZE; i++) { if (hlist_empty(&table.types[i])) continue; - pr_err("nametbl_stop(): orphaned hash chain detected\n"); - break; + seq_head = &table.types[i]; + hlist_for_each_entry_safe(seq, safe, seq_head, ns_list) { + tipc_purge_publications(seq); + } + continue; } kfree(table.types); table.types = NULL; diff --git a/net/tipc/server.c b/net/tipc/server.c index 3739797..646a930 100644 --- a/net/tipc/server.c +++ b/net/tipc/server.c @@ -87,7 +87,6 @@ static void tipc_clean_outqueues(struct tipc_conn *con); static void tipc_conn_kref_release(struct kref *kref) { struct tipc_conn *con = container_of(kref, struct tipc_conn, kref); - struct tipc_server *s = con->server; if (con->sock) { tipc_sock_release_local(con->sock); @@ -95,10 +94,6 @@ static void tipc_conn_kref_release(struct kref *kref) } tipc_clean_outqueues(con); - - if (con->conid) - s->tipc_conn_shutdown(con->conid, con->usr_data); - kfree(con); } @@ -181,6 +176,9 @@ static void tipc_close_conn(struct tipc_conn *con) struct tipc_server *s = con->server; if (test_and_clear_bit(CF_CONNECTED, &con->flags)) { + if (con->conid) + s->tipc_conn_shutdown(con->conid, con->usr_data); + spin_lock_bh(&s->idr_lock); idr_remove(&s->conn_idr, con->conid); s->idr_in_use--; @@ -429,10 +427,12 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid, list_add_tail(&e->list, &con->outqueue); spin_unlock_bh(&con->outqueue_lock); - if (test_bit(CF_CONNECTED, &con->flags)) + if (test_bit(CF_CONNECTED, &con->flags)) { if (!queue_work(s->send_wq, &con->swork)) conn_put(con); - + } else { + conn_put(con); + } return 0; } diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 2a89bdb..29b7f26 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -992,7 +992,7 @@ static int tipc_wait_for_rcvmsg(struct socket *sock, long timeo) for (;;) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - if (skb_queue_empty(&sk->sk_receive_queue)) { + if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { if (sock->state == SS_DISCONNECTING) { err = -ENOTCONN; break; @@ -1609,7 +1609,7 @@ static int tipc_wait_for_accept(struct socket *sock, long timeo) for (;;) { prepare_to_wait_exclusive(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); - if (skb_queue_empty(&sk->sk_receive_queue)) { + if (timeo && skb_queue_empty(&sk->sk_receive_queue)) { release_sock(sk); timeo = schedule_timeout(timeo); lock_sock(sk); diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c index 7cb0bd5..11c9ae0 100644 --- a/net/tipc/subscr.c +++ b/net/tipc/subscr.c @@ -96,20 +96,16 @@ static void subscr_send_event(struct tipc_subscription *sub, u32 found_lower, { struct tipc_subscriber *subscriber = sub->subscriber; struct kvec msg_sect; - int ret; msg_sect.iov_base = (void *)&sub->evt; msg_sect.iov_len = sizeof(struct 
tipc_event); - sub->evt.event = htohl(event, sub->swap); sub->evt.found_lower = htohl(found_lower, sub->swap); sub->evt.found_upper = htohl(found_upper, sub->swap); sub->evt.port.ref = htohl(port_ref, sub->swap); sub->evt.port.node = htohl(node, sub->swap); - ret = tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, - msg_sect.iov_base, msg_sect.iov_len); - if (ret < 0) - pr_err("Sending subscription event failed, no memory\n"); + tipc_conn_sendmsg(&topsrv, subscriber->conid, NULL, msg_sect.iov_base, + msg_sect.iov_len); } /** @@ -153,14 +149,6 @@ static void subscr_timeout(struct tipc_subscription *sub) /* The spin lock per subscriber is used to protect its members */ spin_lock_bh(&subscriber->lock); - /* Validate if the connection related to the subscriber is - * closed (in case subscriber is terminating) - */ - if (subscriber->conid == 0) { - spin_unlock_bh(&subscriber->lock); - return; - } - /* Validate timeout (in case subscription is being cancelled) */ if (sub->timeout == TIPC_WAIT_FOREVER) { spin_unlock_bh(&subscriber->lock); @@ -215,9 +203,6 @@ static void subscr_release(struct tipc_subscriber *subscriber) spin_lock_bh(&subscriber->lock); - /* Invalidate subscriber reference */ - subscriber->conid = 0; - /* Destroy any existing subscriptions for subscriber */ list_for_each_entry_safe(sub, sub_temp, &subscriber->subscription_list, subscription_list) { diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 29fc8be..ce6ec6c 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -163,9 +163,8 @@ static inline void unix_set_secdata(struct scm_cookie *scm, struct sk_buff *skb) static inline unsigned int unix_hash_fold(__wsum n) { - unsigned int hash = (__force unsigned int)n; + unsigned int hash = (__force unsigned int)csum_fold(n); - hash ^= hash>>16; hash ^= hash>>8; return hash&(UNIX_HASH_SIZE-1); } diff --git a/net/wireless/core.c b/net/wireless/core.c index 76ae6a6..eb7f408 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -788,8 +788,6 @@ void cfg80211_leave(struct cfg80211_registered_device *rdev, default: break; } - - wdev->beacon_interval = 0; } static int cfg80211_netdev_notifier_call(struct notifier_block *nb, diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 276e84b..10085de 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -330,7 +330,8 @@ static void write_src(void) printf("\tPTR\t_text + %#llx\n", table[i].addr - _text); else - printf("\tPTR\t%#llx\n", table[i].addr); + printf("\tPTR\t_text - %#llx\n", + _text - table[i].addr); } else { printf("\tPTR\t%#llx\n", table[i].addr); } diff --git a/scripts/mod/modpost.c b/scripts/mod/modpost.c index 4061098..99a45fd 100644 --- a/scripts/mod/modpost.c +++ b/scripts/mod/modpost.c @@ -1502,6 +1502,16 @@ static int addend_386_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) #define R_ARM_JUMP24 29 #endif +#ifndef R_ARM_THM_CALL +#define R_ARM_THM_CALL 10 +#endif +#ifndef R_ARM_THM_JUMP24 +#define R_ARM_THM_JUMP24 30 +#endif +#ifndef R_ARM_THM_JUMP19 +#define R_ARM_THM_JUMP19 51 +#endif + static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) { unsigned int r_typ = ELF_R_TYPE(r->r_info); @@ -1515,6 +1525,9 @@ static int addend_arm_rel(struct elf_info *elf, Elf_Shdr *sechdr, Elf_Rela *r) case R_ARM_PC24: case R_ARM_CALL: case R_ARM_JUMP24: + case R_ARM_THM_CALL: + case R_ARM_THM_JUMP24: + case R_ARM_THM_JUMP19: /* From ARM ABI: ((S + A) | T) - P */ r->r_addend = (int)(long)(elf->hdr + sechdr->sh_offset + diff --git a/security/keys/keyring.c 
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d46cbc5..2fb2576 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1000,7 +1000,11 @@ static int keyring_detect_cycle_iterator(const void *object,
 
 	kenter("{%d}", key->serial);
 
-	BUG_ON(key != ctx->match_data);
+	/* We might get a keyring with matching index-key that is nonetheless a
+	 * different keyring. */
+	if (key != ctx->match_data)
+		return 0;
+
 	ctx->result = ERR_PTR(-EDEADLK);
 	return 1;
 }
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index df3652a..8ed0bcc 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -1026,6 +1026,9 @@ static void ad1884_fixup_thinkpad(struct hda_codec *codec,
 		spec->gen.keep_eapd_on = 1;
 		spec->gen.vmaster_mute.hook = ad_vmaster_eapd_hook;
 		spec->eapd_nid = 0x12;
+		/* Analog PC Beeper - allow firmware/ACPI beeps */
+		spec->beep_amp = HDA_COMPOSE_AMP_VAL(0x20, 3, 3, HDA_INPUT);
+		spec->gen.beep_nid = 0; /* no digital beep */
 	}
 }
 
@@ -1092,6 +1095,7 @@ static int patch_ad1884(struct hda_codec *codec)
 	spec = codec->spec;
 
 	spec->gen.mixer_nid = 0x20;
+	spec->gen.mixer_merge_nid = 0x21;
 	spec->gen.beep_nid = 0x10;
 	set_beep_amp(spec, 0x10, 0, HDA_OUTPUT);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ec304f3..8d0a844 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3616,6 +3616,19 @@ static void alc_fixup_auto_mute_via_amp(struct hda_codec *codec,
 	}
 }
 
+static void alc_no_shutup(struct hda_codec *codec)
+{
+}
+
+static void alc_fixup_no_shutup(struct hda_codec *codec,
+				const struct hda_fixup *fix, int action)
+{
+	if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+		struct alc_spec *spec = codec->spec;
+		spec->shutup = alc_no_shutup;
+	}
+}
+
 static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
 				const struct hda_fixup *fix, int action)
 {
@@ -3844,6 +3857,7 @@ enum {
 	ALC269_FIXUP_HP_GPIO_LED,
 	ALC269_FIXUP_INV_DMIC,
 	ALC269_FIXUP_LENOVO_DOCK,
+	ALC269_FIXUP_NO_SHUTUP,
 	ALC286_FIXUP_SONY_MIC_NO_PRESENCE,
 	ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
 	ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
@@ -4020,6 +4034,10 @@ static const struct hda_fixup alc269_fixups[] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc_fixup_inv_dmic_0x12,
 	},
+	[ALC269_FIXUP_NO_SHUTUP] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_no_shutup,
+	},
 	[ALC269_FIXUP_LENOVO_DOCK] = {
 		.type = HDA_FIXUP_PINS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -4253,6 +4271,7 @@ static const struct hda_fixup alc269_fixups[] = {
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+	SND_PCI_QUIRK(0x1025, 0x0283, "Acer TravelMate 8371", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x029b, "Acer 1810TZ", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x0349, "Acer AOD260", ALC269_FIXUP_INV_DMIC),
 	SND_PCI_QUIRK(0x1025, 0x047c, "Acer AC700", ALC269_FIXUP_ACER_AC700),
@@ -4404,6 +4423,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
+	SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
 	SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
 	SND_PCI_QUIRK(0x17aa, 0x501a, "Thinkpad", ALC283_FIXUP_INT_MIC),
 	SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5163,7 +5183,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
 	SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
 	SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE),
-	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+	SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE),
 	SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
 	SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
 	SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
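Aside on the patch_realtek.c hunks above: the fixup pattern swaps the codec's shutup hook for a no-op at HDA_FIXUP_ACT_PRE_PROBE time, before the generic parser wires anything up, so later suspend paths skip the pin shutdown that misbehaves on this machine. A stripped-down sketch of that pattern with hypothetical types (codec_spec, fixup_no_shutup), not the real HDA structures:

    #include <stdio.h>

    /* Hypothetical, minimal analogue of the HDA fixup machinery. */
    enum { FIXUP_ACT_PRE_PROBE, FIXUP_ACT_INIT };

    struct codec_spec {
    	void (*shutup)(void);	/* called on suspend/shutdown */
    };

    static void default_shutup(void) { puts("muting pins"); }
    static void no_shutup(void) { /* intentionally empty */ }

    /* Mirrors alc_fixup_no_shutup(): install the no-op before probing. */
    static void fixup_no_shutup(struct codec_spec *spec, int action)
    {
    	if (action == FIXUP_ACT_PRE_PROBE)
    		spec->shutup = no_shutup;
    }

    int main(void)
    {
    	struct codec_spec spec = { .shutup = default_shutup };

    	fixup_no_shutup(&spec, FIXUP_ACT_PRE_PROBE);
    	spec.shutup();	/* now a no-op */
    	return 0;
    }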
0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_AUTO_MUTE), - SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), + SND_PCI_QUIRK(0x1028, 0x064e, "Dell", ALC668_FIXUP_AUTO_MUTE), SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP), SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP), diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c index 75d0ad5..647a72c 100644 --- a/sound/soc/codecs/88pm860x-codec.c +++ b/sound/soc/codecs/88pm860x-codec.c @@ -1328,6 +1328,9 @@ static int pm860x_probe(struct snd_soc_codec *codec) pm860x->codec = codec; codec->control_data = pm860x->regmap; + ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); + if (ret) + return ret; for (i = 0; i < 4; i++) { ret = request_threaded_irq(pm860x->irq[i], NULL, diff --git a/sound/soc/codecs/si476x.c b/sound/soc/codecs/si476x.c index 52e7cb0..fa2b8e0 100644 --- a/sound/soc/codecs/si476x.c +++ b/sound/soc/codecs/si476x.c @@ -210,7 +210,7 @@ out: static int si476x_codec_probe(struct snd_soc_codec *codec) { codec->control_data = dev_get_regmap(codec->dev->parent, NULL); - return 0; + return snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); } static struct snd_soc_dai_ops si476x_dai_ops = { diff --git a/sound/soc/omap/n810.c b/sound/soc/omap/n810.c index 3fde9e4..d163e18 100644 --- a/sound/soc/omap/n810.c +++ b/sound/soc/omap/n810.c @@ -305,7 +305,9 @@ static int __init n810_soc_init(void) int err; struct device *dev; - if (!(machine_is_nokia_n810() || machine_is_nokia_n810_wimax())) + if (!of_have_populated_dt() || + (!of_machine_is_compatible("nokia,n810") && + !of_machine_is_compatible("nokia,n810-wimax"))) return -ENODEV; n810_snd_device = platform_device_alloc("soc-audio", -1); diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 47e1ce7..28522bd 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1989,6 +1989,7 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card) paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); if (paths < 0) { + dpcm_path_put(&list); dev_warn(fe->dev, "ASoC: %s no valid %s path\n", fe->dai_link->name, "playback"); mutex_unlock(&card->mutex); @@ -2018,6 +2019,7 @@ capture: paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); if (paths < 0) { + dpcm_path_put(&list); dev_warn(fe->dev, "ASoC: %s no valid %s path\n", fe->dai_link->name, "capture"); mutex_unlock(&card->mutex); @@ -2082,6 +2084,7 @@ static int dpcm_fe_dai_open(struct snd_pcm_substream *fe_substream) fe->dpcm[stream].runtime = fe_substream->runtime; if (dpcm_path_get(fe, stream, &list) <= 0) { + dpcm_path_put(&list); dev_dbg(fe->dev, "ASoC: %s no valid %s route\n", fe->dai_link->name, stream ? 
"capture" : "playback"); } diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index 44b0ba4..1bed780 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -883,6 +883,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval, } break; + case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */ case USB_ID(0x046d, 0x0808): case USB_ID(0x046d, 0x0809): case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */ diff --git a/tools/net/Makefile b/tools/net/Makefile index 004cd74..ee577ea 100644 --- a/tools/net/Makefile +++ b/tools/net/Makefile @@ -12,7 +12,7 @@ YACC = bison all : bpf_jit_disasm bpf_dbg bpf_asm -bpf_jit_disasm : CFLAGS = -Wall -O2 +bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm' bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl bpf_jit_disasm : bpf_jit_disasm.o diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c index d664182..aa290c0 100644 --- a/tools/testing/selftests/ipc/msgque.c +++ b/tools/testing/selftests/ipc/msgque.c @@ -201,6 +201,7 @@ int main(int argc, char **argv) msgque.msq_id = msgget(msgque.key, IPC_CREAT | IPC_EXCL | 0666); if (msgque.msq_id == -1) { + err = -errno; printf("Can't create queue\n"); goto err_out; } |