-rw-r--r--Documentation/DocBook/media/v4l/vidioc-expbuf.xml8
-rw-r--r--Documentation/assoc_array.txt6
-rw-r--r--Documentation/block/null_blk.txt72
-rw-r--r--Documentation/device-mapper/cache.txt10
-rw-r--r--Documentation/devicetree/bindings/clock/exynos5250-clock.txt2
-rw-r--r--Documentation/devicetree/bindings/net/davinci_emac.txt2
-rw-r--r--Documentation/devicetree/bindings/net/smsc-lan91c111.txt4
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/module-signing.txt240
-rw-r--r--Documentation/networking/ip-sysctl.txt8
-rw-r--r--Documentation/networking/packet_mmap.txt10
-rw-r--r--MAINTAINERS84
-rw-r--r--Makefile24
-rw-r--r--arch/arc/Kconfig1
-rw-r--r--arch/arc/include/uapi/asm/unistd.h11
-rw-r--r--arch/arc/kernel/perf_event.c4
-rw-r--r--arch/arm/boot/dts/am3517-evm.dts6
-rw-r--r--arch/arm/boot/dts/am3517.dtsi63
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi2
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts2
-rw-r--r--arch/arm/boot/dts/omap3-n950-n9.dtsi2
-rw-r--r--arch/arm/boot/dts/omap34xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/omap36xx-hs.dtsi16
-rw-r--r--arch/arm/boot/dts/r8a7790.dtsi28
-rw-r--r--arch/arm/boot/dts/sun6i-a31.dtsi27
-rw-r--r--arch/arm/boot/dts/sun7i-a20.dtsi42
-rw-r--r--arch/arm/crypto/aesbs-core.S_shipped2
-rw-r--r--arch/arm/crypto/bsaes-armv7.pl2
-rw-r--r--arch/arm/include/asm/io.h2
-rw-r--r--arch/arm/include/asm/memory.h34
-rw-r--r--arch/arm/include/asm/xen/page.h2
-rw-r--r--arch/arm/kernel/head-nommu.S4
-rw-r--r--arch/arm/kernel/head.S2
-rw-r--r--arch/arm/kernel/process.c7
-rw-r--r--arch/arm/kernel/setup.c3
-rw-r--r--arch/arm/kernel/stacktrace.c2
-rw-r--r--arch/arm/kernel/traps.c11
-rw-r--r--arch/arm/mach-davinci/devices-da8xx.c4
-rw-r--r--arch/arm/mach-davinci/dm355.c3
-rw-r--r--arch/arm/mach-davinci/dm365.c3
-rw-r--r--arch/arm/mach-davinci/dm644x.c3
-rw-r--r--arch/arm/mach-davinci/dm646x.c6
-rw-r--r--arch/arm/mach-footbridge/dc21285-timer.c5
-rw-r--r--arch/arm/mach-highbank/highbank.c23
-rw-r--r--arch/arm/mach-omap2/board-generic.c18
-rw-r--r--arch/arm/mach-omap2/board-ldp.c7
-rw-r--r--arch/arm/mach-omap2/display.c38
-rw-r--r--arch/arm/mach-omap2/omap_device.c24
-rw-r--r--arch/arm/mach-omap2/omap_device.h1
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c143
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c4
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c19
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c12
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c13
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c2
-rw-r--r--arch/arm/mach-pxa/include/mach/lubbock.h2
-rw-r--r--arch/arm/mach-pxa/reset.c8
-rw-r--r--arch/arm/mach-pxa/tosa.c102
-rw-r--r--arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c11
-rw-r--r--arch/arm/mach-shmobile/board-armadillo800eva.c11
-rw-r--r--arch/arm/mach-shmobile/board-bockw.c2
-rw-r--r--arch/arm/mach-shmobile/board-kzm9g.c2
-rw-r--r--arch/arm/mach-shmobile/board-lager.c4
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c4
-rw-r--r--arch/arm/mach-tegra/fuse.c2
-rw-r--r--arch/arm/mm/dma-mapping.c91
-rw-r--r--arch/arm/mm/flush.c6
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/xen/enlighten.c6
-rw-r--r--arch/arm64/include/asm/xen/page-coherent.h4
-rw-r--r--arch/arm64/kernel/ptrace.c38
-rw-r--r--arch/avr32/boards/favr-32/setup.c4
-rw-r--r--arch/avr32/configs/atngw100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atngw100_mrmt_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd100_defconfig1
-rw-r--r--arch/avr32/configs/atngw100mkii_evklcd101_defconfig1
-rw-r--r--arch/avr32/configs/atstk1002_defconfig1
-rw-r--r--arch/avr32/configs/atstk1003_defconfig1
-rw-r--r--arch/avr32/configs/atstk1004_defconfig1
-rw-r--r--arch/avr32/configs/atstk1006_defconfig1
-rw-r--r--arch/avr32/configs/favr-32_defconfig1
-rw-r--r--arch/avr32/configs/hammerhead_defconfig1
-rw-r--r--arch/avr32/configs/merisc_defconfig1
-rw-r--r--arch/avr32/configs/mimc200_defconfig1
-rw-r--r--arch/avr32/kernel/time.c2
-rw-r--r--arch/avr32/mach-at32ap/pm.c2
-rw-r--r--arch/parisc/include/asm/cacheflush.h12
-rw-r--r--arch/parisc/include/asm/page.h5
-rw-r--r--arch/parisc/kernel/cache.c35
-rw-r--r--arch/powerpc/boot/dts/mpc5121.dtsi1
-rw-r--r--arch/powerpc/boot/dts/mpc5125twr.dts6
-rw-r--r--arch/powerpc/configs/52xx/cm5200_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/lite5200b_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/motionpro_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/pcm030_defconfig3
-rw-r--r--arch/powerpc/configs/52xx/tqm5200_defconfig3
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig3
-rw-r--r--arch/powerpc/configs/pasemi_defconfig7
-rw-r--r--arch/powerpc/include/asm/exception-64s.h2
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h4
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h2
-rw-r--r--arch/powerpc/include/asm/opal.h4
-rw-r--r--arch/powerpc/include/asm/pgalloc-32.h6
-rw-r--r--arch/powerpc/include/asm/pgalloc-64.h6
-rw-r--r--arch/powerpc/include/asm/switch_to.h2
-rw-r--r--arch/powerpc/include/asm/unaligned.h7
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/crash_dump.c6
-rw-r--r--arch/powerpc/kernel/head_64.S2
-rw-r--r--arch/powerpc/kernel/machine_kexec.c2
-rw-r--r--arch/powerpc/kernel/misc_64.S5
-rw-r--r--arch/powerpc/kernel/process.c32
-rw-r--r--arch/powerpc/kernel/ptrace.c4
-rw-r--r--arch/powerpc/kernel/setup-common.c4
-rw-r--r--arch/powerpc/kernel/smp.c4
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c18
-rw-r--r--arch/powerpc/kvm/book3s_hv.c24
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c9
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S23
-rw-r--r--arch/powerpc/kvm/book3s_interrupts.S19
-rw-r--r--arch/powerpc/kvm/book3s_pr.c22
-rw-r--r--arch/powerpc/kvm/book3s_rmhandlers.S6
-rw-r--r--arch/powerpc/kvm/booke.c12
-rw-r--r--arch/powerpc/lib/copyuser_64.S53
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c20
-rw-r--r--arch/powerpc/platforms/powernv/opal-lpc.c12
-rw-r--r--arch/powerpc/platforms/powernv/opal-xscom.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c1
-rw-r--r--arch/powerpc/platforms/powernv/pci.h4
-rw-r--r--arch/powerpc/platforms/pseries/lparcfg.c12
-rw-r--r--arch/powerpc/platforms/pseries/msi.c28
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c46
-rw-r--r--arch/powerpc/platforms/pseries/pci.c8
-rw-r--r--arch/powerpc/sysdev/ppc4xx_ocm.c2
-rw-r--r--arch/s390/Kconfig7
-rw-r--r--arch/s390/include/asm/sclp.h3
-rw-r--r--arch/s390/include/asm/smp.h2
-rw-r--r--arch/s390/kernel/asm-offsets.c1
-rw-r--r--arch/s390/kernel/setup.c1
-rw-r--r--arch/s390/kernel/smp.c25
-rw-r--r--arch/s390/kernel/vdso.c2
-rw-r--r--arch/s390/kernel/vdso32/clock_gettime.S9
-rw-r--r--arch/s390/kernel/vdso64/clock_getres.S4
-rw-r--r--arch/s390/kernel/vdso64/clock_gettime.S10
-rw-r--r--arch/s390/pci/pci_event.c2
-rw-r--r--arch/sh/kernel/sh_ksyms_32.c5
-rw-r--r--arch/sh/lib/Makefile2
-rw-r--r--arch/sparc/include/asm/pgtable_64.h4
-rw-r--r--arch/sparc/include/asm/uaccess_64.h4
-rw-r--r--arch/sparc/kernel/iommu.c2
-rw-r--r--arch/sparc/kernel/ioport.c5
-rw-r--r--arch/sparc/kernel/kgdb_64.c1
-rw-r--r--arch/sparc/kernel/smp_64.c3
-rw-r--r--arch/x86/Kconfig1
-rw-r--r--arch/x86/Makefile8
-rw-r--r--arch/x86/boot/Makefile6
-rw-r--r--arch/x86/boot/compressed/Makefile1
-rw-r--r--arch/x86/include/asm/pgtable.h11
-rw-r--r--arch/x86/include/asm/preempt.h11
-rw-r--r--arch/x86/kernel/cpu/intel.c3
-rw-r--r--arch/x86/kernel/cpu/perf_event.h15
-rw-r--r--arch/x86/kvm/lapic.c43
-rw-r--r--arch/x86/kvm/lapic.h4
-rw-r--r--arch/x86/kvm/vmx.c3
-rw-r--r--arch/x86/kvm/x86.c40
-rw-r--r--arch/x86/mm/gup.c13
-rw-r--r--arch/x86/platform/efi/efi.c7
-rw-r--r--arch/x86/platform/uv/tlb_uv.c5
-rw-r--r--arch/x86/realmode/rm/Makefile3
-rw-r--r--block/blk-mq-sysfs.c13
-rw-r--r--drivers/acpi/Kconfig1
-rw-r--r--drivers/acpi/acpi_lpss.c1
-rw-r--r--drivers/acpi/apei/Kconfig1
-rw-r--r--drivers/acpi/apei/erst.c1
-rw-r--r--drivers/acpi/bus.c10
-rw-r--r--drivers/ata/ahci.c21
-rw-r--r--drivers/ata/ahci_imx.c3
-rw-r--r--drivers/ata/libata-core.c19
-rw-r--r--drivers/ata/libata-scsi.c21
-rw-r--r--drivers/ata/sata_sis.c4
-rw-r--r--drivers/base/regmap/regmap-mmio.c11
-rw-r--r--drivers/base/regmap/regmap.c8
-rw-r--r--drivers/block/null_blk.c110
-rw-r--r--drivers/block/skd_main.c4
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/clk/clk-divider.c2
-rw-r--r--drivers/clk/clk-s2mps11.c6
-rw-r--r--drivers/clk/samsung/clk-exynos-audss.c10
-rw-r--r--drivers/clk/samsung/clk-exynos4.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c14
-rw-r--r--drivers/clocksource/Kconfig1
-rw-r--r--drivers/clocksource/clksrc-of.c1
-rw-r--r--drivers/clocksource/dw_apb_timer_of.c7
-rw-r--r--drivers/clocksource/sun4i_timer.c3
-rw-r--r--drivers/clocksource/time-armada-370-xp.c10
-rw-r--r--drivers/cpufreq/at32ap-cpufreq.c2
-rw-r--r--drivers/cpufreq/cpufreq.c104
-rw-r--r--drivers/cpufreq/intel_pstate.c5
-rw-r--r--drivers/cpuidle/cpuidle-calxeda.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c4
-rw-r--r--drivers/dma/Kconfig7
-rw-r--r--drivers/dma/amba-pl08x.c2
-rw-r--r--drivers/dma/at_hdmac_regs.h4
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/dmatest.c8
-rw-r--r--drivers/dma/fsldma.c31
-rw-r--r--drivers/dma/ioat/dma.c11
-rw-r--r--drivers/dma/mmp_pdma.c1
-rw-r--r--drivers/dma/mv_xor.c101
-rw-r--r--drivers/dma/pl330.c5
-rw-r--r--drivers/dma/ppc4xx/adma.c27
-rw-r--r--drivers/dma/s3c24xx-dma.c33
-rw-r--r--drivers/dma/sh/rcar-hpbdma.c11
-rw-r--r--drivers/dma/txx9dmac.c1
-rw-r--r--drivers/edac/sb_edac.c2
-rw-r--r--drivers/firewire/sbp2.c1
-rw-r--r--drivers/firmware/Makefile1
-rw-r--r--drivers/firmware/efi/Kconfig6
-rw-r--r--drivers/firmware/efi/Makefile2
-rw-r--r--drivers/firmware/efi/efi-pstore.c1
-rw-r--r--drivers/gpio/gpio-davinci.c4
-rw-r--r--drivers/gpio/gpio-msm-v2.c4
-rw-r--r--drivers/gpio/gpio-rcar.c3
-rw-r--r--drivers/gpio/gpio-twl4030.c15
-rw-r--r--drivers/gpu/drm/armada/armada_drm.h1
-rw-r--r--drivers/gpu/drm/armada/armada_drv.c7
-rw-r--r--drivers/gpu/drm/armada/armada_fbdev.c20
-rw-r--r--drivers/gpu/drm/armada/armada_gem.c7
-rw-r--r--drivers/gpu/drm/drm_edid.c8
-rw-r--r--drivers/gpu/drm/drm_stub.c6
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c20
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h9
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c34
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c28
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/intel_display.c33
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h1
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c26
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c43
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c1
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/core/subdev.c3
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/base.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/device/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c2
-rw-r--r--drivers/gpu/drm/nouveau/core/include/subdev/fb.h5
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/bios/init.c14
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_abi16.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_acpi.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c6
-rw-r--r--drivers/gpu/drm/qxl/Kconfig1
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c2
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c87
-rw-r--r--drivers/gpu/drm/radeon/cik.c12
-rw-r--r--drivers/gpu/drm/radeon/cik_sdma.c2
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c8
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c4
-rw-r--r--drivers/gpu/drm/radeon/ni.c20
-rw-r--r--drivers/gpu/drm/radeon/radeon.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_atpx_handler.c16
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c2
-rw-r--r--drivers/gpu/drm/radeon/rs690.c10
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c12
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c6
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c3
-rw-r--r--drivers/hid/hid-kye.c1
-rw-r--r--drivers/hid/hid-sensor-hub.c2
-rw-r--r--drivers/hwmon/hih6130.c16
-rw-r--r--drivers/hwmon/lm78.c2
-rw-r--r--drivers/hwmon/lm90.c4
-rw-r--r--drivers/hwmon/sis5595.c2
-rw-r--r--drivers/hwmon/vt8231.c2
-rw-r--r--drivers/hwmon/w83l786ng.c13
-rw-r--r--drivers/i2c/busses/i2c-imx.c4
-rw-r--r--drivers/i2c/i2c-mux.c2
-rw-r--r--drivers/idle/intel_idle.c3
-rw-r--r--drivers/iio/adc/ad7887.c16
-rw-r--r--drivers/iio/imu/adis16400_core.c7
-rw-r--r--drivers/iio/light/cm36651.c2
-rw-r--r--drivers/infiniband/core/iwcm.c11
-rw-r--r--drivers/infiniband/core/uverbs.h10
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c17
-rw-r--r--drivers/infiniband/core/uverbs_main.c27
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c78
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c2
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_netlink.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c22
-rw-r--r--drivers/input/input.c4
-rw-r--r--drivers/input/misc/adxl34x.c2
-rw-r--r--drivers/input/serio/serio.c24
-rw-r--r--drivers/input/touchscreen/zforce_ts.c21
-rw-r--r--drivers/iommu/arm-smmu.c66
-rw-r--r--drivers/irqchip/irq-renesas-intc-irqpin.c8
-rw-r--r--drivers/isdn/hisax/hfc_pci.c4
-rw-r--r--drivers/isdn/hisax/telespci.c4
-rw-r--r--drivers/md/bcache/alloc.c2
-rw-r--r--drivers/md/bcache/bcache.h12
-rw-r--r--drivers/md/bcache/btree.c27
-rw-r--r--drivers/md/bcache/movinggc.c21
-rw-r--r--drivers/md/bcache/super.c2
-rw-r--r--drivers/md/bcache/sysfs.c50
-rw-r--r--drivers/md/bcache/util.c8
-rw-r--r--drivers/md/bcache/util.h2
-rw-r--r--drivers/md/bcache/writeback.c53
-rw-r--r--drivers/md/dm-bufio.c5
-rw-r--r--drivers/md/dm-cache-policy-mq.c13
-rw-r--r--drivers/md/dm-cache-target.c2
-rw-r--r--drivers/md/dm-delay.c23
-rw-r--r--drivers/md/dm-snap.c71
-rw-r--r--drivers/md/dm-stats.c1
-rw-r--r--drivers/md/dm-table.c5
-rw-r--r--drivers/md/dm-thin-metadata.c8
-rw-r--r--drivers/md/dm-thin-metadata.h1
-rw-r--r--drivers/md/dm-thin.c66
-rw-r--r--drivers/md/persistent-data/dm-array.c10
-rw-r--r--drivers/md/persistent-data/dm-block-manager.c6
-rw-r--r--drivers/md/persistent-data/dm-block-manager.h7
-rw-r--r--drivers/md/persistent-data/dm-space-map-common.c32
-rw-r--r--drivers/md/persistent-data/dm-space-map-metadata.c8
-rw-r--r--drivers/media/common/siano/smscoreapi.h4
-rw-r--r--drivers/media/common/siano/smsdvb.h2
-rw-r--r--drivers/media/dvb-core/dvb_demux.c9
-rw-r--r--drivers/media/dvb-frontends/af9033.c12
-rw-r--r--drivers/media/dvb-frontends/cxd2820r_c.c2
-rw-r--r--drivers/media/dvb-frontends/dib8000.c4
-rw-r--r--drivers/media/dvb-frontends/drxk_hard.c18
-rw-r--r--drivers/media/dvb-frontends/rtl2830.c1
-rw-r--r--drivers/media/i2c/adv7183_regs.h6
-rw-r--r--drivers/media/i2c/adv7604.c2
-rw-r--r--drivers/media/i2c/adv7842.c2
-rw-r--r--drivers/media/i2c/ir-kbd-i2c.c2
-rw-r--r--drivers/media/i2c/m5mols/m5mols_controls.c2
-rw-r--r--drivers/media/i2c/mt9p031.c1
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3-core.c2
-rw-r--r--drivers/media/i2c/s5c73m3/s5c73m3.h2
-rw-r--r--drivers/media/i2c/saa7115.c2
-rw-r--r--drivers/media/i2c/soc_camera/ov5642.c2
-rw-r--r--drivers/media/i2c/ths7303.c3
-rw-r--r--drivers/media/i2c/wm8775.c4
-rw-r--r--drivers/media/pci/bt8xx/bttv-driver.c3
-rw-r--r--drivers/media/pci/cx18/cx18-driver.h2
-rw-r--r--drivers/media/pci/cx23885/cx23885-417.c2
-rw-r--r--drivers/media/pci/pluto2/pluto2.c2
-rw-r--r--drivers/media/pci/saa7164/saa7164-core.c4
-rw-r--r--drivers/media/platform/coda.c2
-rw-r--r--drivers/media/platform/exynos4-is/fimc-core.c2
-rw-r--r--drivers/media/platform/exynos4-is/media-dev.c2
-rw-r--r--drivers/media/platform/marvell-ccic/mmp-driver.c46
-rw-r--r--drivers/media/platform/omap3isp/isp.c2
-rw-r--r--drivers/media/platform/omap3isp/ispvideo.c7
-rw-r--r--drivers/media/platform/s5p-mfc/regs-mfc.h2
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc.c12
-rw-r--r--drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c2
-rw-r--r--drivers/media/platform/s5p-tv/mixer.h2
-rw-r--r--drivers/media/platform/s5p-tv/mixer_video.c4
-rw-r--r--drivers/media/platform/soc_camera/omap1_camera.c2
-rw-r--r--drivers/media/platform/vivi.c4
-rw-r--r--drivers/media/platform/vsp1/vsp1_drv.c2
-rw-r--r--drivers/media/platform/vsp1/vsp1_video.c4
-rw-r--r--drivers/media/radio/radio-shark.c4
-rw-r--r--drivers/media/radio/radio-shark2.c4
-rw-r--r--drivers/media/radio/radio-si476x.c4
-rw-r--r--drivers/media/radio/radio-tea5764.c2
-rw-r--r--drivers/media/radio/tef6862.c2
-rw-r--r--drivers/media/rc/imon.c2
-rw-r--r--drivers/media/rc/redrat3.c2
-rw-r--r--drivers/media/tuners/mt2063.c4
-rw-r--r--drivers/media/tuners/tuner-xc2028-types.h2
-rw-r--r--drivers/media/usb/cx231xx/cx231xx-cards.c2
-rw-r--r--drivers/media/usb/dvb-usb-v2/af9035.c17
-rw-r--r--drivers/media/usb/dvb-usb-v2/mxl111sf.c4
-rw-r--r--drivers/media/usb/dvb-usb/technisat-usb2.c2
-rw-r--r--drivers/media/usb/em28xx/em28xx-video.c2
-rw-r--r--drivers/media/usb/gspca/gl860/gl860.c2
-rw-r--r--drivers/media/usb/gspca/pac207.c2
-rw-r--r--drivers/media/usb/gspca/pac7302.c2
-rw-r--r--drivers/media/usb/gspca/stk1135.c3
-rw-r--r--drivers/media/usb/gspca/stv0680.c2
-rw-r--r--drivers/media/usb/gspca/sunplus.c1
-rw-r--r--drivers/media/usb/gspca/zc3xx.c2
-rw-r--r--drivers/media/usb/pwc/pwc-if.c2
-rw-r--r--drivers/media/usb/usbtv/usbtv.c174
-rw-r--r--drivers/media/usb/uvc/uvc_video.c2
-rw-r--r--drivers/media/v4l2-core/v4l2-ctrls.c2
-rw-r--r--drivers/media/v4l2-core/videobuf2-core.c29
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-contig.c4
-rw-r--r--drivers/media/v4l2-core/videobuf2-dma-sg.c3
-rw-r--r--drivers/mfd/sec-core.c30
-rw-r--r--drivers/mfd/sec-irq.c6
-rw-r--r--drivers/mtd/maps/pxa2xx-flash.c2
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/net/bonding/bond_3ad.c45
-rw-r--r--drivers/net/bonding/bond_main.c6
-rw-r--r--drivers/net/bonding/bond_sysfs.c4
-rw-r--r--drivers/net/can/usb/ems_usb.c3
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c3
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c5
-rw-r--r--drivers/net/ethernet/arc/emac_main.c4
-rw-r--r--drivers/net/ethernet/atheros/atl1c/atl1c_main.c8
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c94
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c6
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h7
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c259
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h1
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c28
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c31
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4.h103
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c337
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h9
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.c35
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/l2t.h3
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/sge.c14
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c333
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_regs.h87
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/adapter.h1
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c15
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/sge.c2
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h24
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c4
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.c33
-rw-r--r--drivers/net/ethernet/emulex/benet/be_hw.h3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c70
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c17
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/80003es2lan.c7
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c10
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_phy.c5
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c2
-rw-r--r--drivers/net/ethernet/marvell/mvmdio.c6
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c2
-rw-r--r--drivers/net/ethernet/nvidia/forcedeth.c8
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic.h5
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c54
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h1
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c65
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c19
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c22
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c6
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c3
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge.h2
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c4
-rw-r--r--drivers/net/ethernet/qlogic/qlge/qlge_main.c8
-rw-r--r--drivers/net/ethernet/sfc/efx.c8
-rw-r--r--drivers/net/ethernet/sfc/mcdi.c39
-rw-r--r--drivers/net/ethernet/sfc/net_driver.h3
-rw-r--r--drivers/net/ethernet/sfc/nic.h2
-rw-r--r--drivers/net/ethernet/sfc/ptp.c66
-rw-r--r--drivers/net/ethernet/sfc/rx.c6
-rw-r--r--drivers/net/ethernet/smsc/smc91x.c45
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c20
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c4
-rw-r--r--drivers/net/ethernet/tehuti/tehuti.c1
-rw-r--r--drivers/net/ethernet/ti/cpsw.c23
-rw-r--r--drivers/net/ethernet/ti/davinci_emac.c26
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_axienet_main.c2
-rw-r--r--drivers/net/ethernet/xilinx/xilinx_emaclite.c51
-rw-r--r--drivers/net/hamradio/hdlcdrv.c2
-rw-r--r--drivers/net/hamradio/yam.c1
-rw-r--r--drivers/net/hyperv/netvsc_drv.c21
-rw-r--r--drivers/net/macvlan.c16
-rw-r--r--drivers/net/macvtap.c13
-rw-r--r--drivers/net/phy/micrel.c15
-rw-r--r--drivers/net/phy/phy.c6
-rw-r--r--drivers/net/tun.c18
-rw-r--r--drivers/net/usb/Kconfig6
-rw-r--r--drivers/net/usb/dm9601.c44
-rw-r--r--drivers/net/usb/hso.c13
-rw-r--r--drivers/net/usb/mcs7830.c19
-rw-r--r--drivers/net/virtio_net.c28
-rw-r--r--drivers/net/vxlan.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c52
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c4
-rw-r--r--drivers/net/wireless/ath/wcn36xx/smd.c19
-rw-r--r--drivers/net/wireless/brcm80211/Kconfig2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-7000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-config.h5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/bt-coex.c6
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/d3.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/debugfs.c4
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c21
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h8
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c7
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c3
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c6
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c16
-rw-r--r--drivers/net/wireless/mwifiex/sta_ioctl.c4
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c4
-rw-r--r--drivers/net/xen-netback/common.h19
-rw-r--r--drivers/net/xen-netback/interface.c31
-rw-r--r--drivers/net/xen-netback/netback.c287
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/address.c8
-rw-r--r--drivers/of/fdt.c12
-rw-r--r--drivers/of/irq.c5
-rw-r--r--drivers/pci/host/pci-mvebu.c5
-rw-r--r--drivers/pci/hotplug/acpiphp_glue.c30
-rw-r--r--drivers/pci/pci-acpi.c21
-rw-r--r--drivers/pci/pci-driver.c38
-rw-r--r--drivers/pci/pci.c8
-rw-r--r--drivers/pci/remove.c4
-rw-r--r--drivers/phy/Kconfig4
-rw-r--r--drivers/phy/phy-core.c26
-rw-r--r--drivers/pinctrl/pinctrl-baytrail.c1
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/power/Kconfig1
-rw-r--r--drivers/power/power_supply_core.c12
-rw-r--r--drivers/powercap/intel_rapl.c13
-rw-r--r--drivers/regulator/as3722-regulator.c2
-rw-r--r--drivers/regulator/core.c11
-rw-r--r--drivers/regulator/pfuze100-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c2
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/rtc/rtc-at91rm9200.c2
-rw-r--r--drivers/rtc/rtc-s5m.c118
-rw-r--r--drivers/s390/block/dasd_genhd.c1
-rw-r--r--drivers/s390/char/sclp_early.c5
-rw-r--r--drivers/s390/char/tty3270.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c10
-rw-r--r--drivers/staging/comedi/drivers.c2
-rw-r--r--drivers/staging/comedi/drivers/8255_pci.c15
-rw-r--r--drivers/staging/iio/magnetometer/hmc5843.c7
-rw-r--r--drivers/staging/imx-drm/imx-drm-core.c39
-rw-r--r--drivers/staging/imx-drm/imx-tve.c9
-rw-r--r--drivers/staging/imx-drm/ipu-v3/ipu-common.c32
-rw-r--r--drivers/target/iscsi/iscsi_target.c27
-rw-r--r--drivers/target/iscsi/iscsi_target_configfs.c3
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c6
-rw-r--r--drivers/target/target_core_device.c5
-rw-r--r--drivers/target/target_core_file.c8
-rw-r--r--drivers/target/target_core_file.h5
-rw-r--r--drivers/target/target_core_tpg.c10
-rw-r--r--drivers/tty/n_tty.c7
-rw-r--r--drivers/tty/serial/8250/8250_dw.c8
-rw-r--r--drivers/tty/serial/xilinx_uartps.c2
-rw-r--r--drivers/tty/tty_ldsem.c16
-rw-r--r--drivers/usb/chipidea/core.c4
-rw-r--r--drivers/usb/chipidea/host.c3
-rw-r--r--drivers/usb/chipidea/udc.c3
-rw-r--r--drivers/usb/class/cdc-wdm.c8
-rw-r--r--drivers/usb/dwc3/core.c8
-rw-r--r--drivers/usb/host/ohci-at91.c26
-rw-r--r--drivers/usb/host/xhci-pci.c7
-rw-r--r--drivers/usb/phy/Kconfig4
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c2
-rw-r--r--drivers/usb/phy/phy-twl6030-usb.c3
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/zte_ev.c3
-rw-r--r--drivers/video/offb.c29
-rw-r--r--drivers/virtio/virtio_balloon.c2
-rw-r--r--drivers/watchdog/bcm2835_wdt.c1
-rw-r--r--drivers/watchdog/ep93xx_wdt.c1
-rw-r--r--drivers/watchdog/ie6xx_wdt.c1
-rw-r--r--drivers/watchdog/jz4740_wdt.c1
-rw-r--r--drivers/watchdog/kempld_wdt.c1
-rw-r--r--drivers/watchdog/max63xx_wdt.c1
-rw-r--r--drivers/watchdog/orion_wdt.c1
-rw-r--r--drivers/watchdog/pnx4008_wdt.c1
-rw-r--r--drivers/watchdog/rt2880_wdt.c1
-rw-r--r--drivers/watchdog/sc1200wdt.c3
-rw-r--r--drivers/watchdog/shwdt.c1
-rw-r--r--drivers/watchdog/softdog.c1
-rw-r--r--drivers/watchdog/stmp3xxx_rtc_wdt.c1
-rw-r--r--drivers/watchdog/txx9wdt.c1
-rw-r--r--drivers/watchdog/ux500_wdt.c1
-rw-r--r--drivers/xen/balloon.c63
-rw-r--r--drivers/xen/grant-table.c3
-rw-r--r--drivers/xen/privcmd.c9
-rw-r--r--fs/aio.c113
-rw-r--r--fs/btrfs/extent-tree.c22
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/relocation.c81
-rw-r--r--fs/btrfs/send.c4
-rw-r--r--fs/btrfs/super.c5
-rw-r--r--fs/ceph/addr.c8
-rw-r--r--fs/ceph/inode.c136
-rw-r--r--fs/cifs/cifsproto.h7
-rw-r--r--fs/cifs/cifssmb.c6
-rw-r--r--fs/cifs/dir.c11
-rw-r--r--fs/cifs/inode.c6
-rw-r--r--fs/cifs/link.c26
-rw-r--r--fs/dcache.c2
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/ext2/super.c1
-rw-r--r--fs/ext4/ext4.h10
-rw-r--r--fs/ext4/ext4_jbd2.c9
-rw-r--r--fs/ext4/extents.c45
-rw-r--r--fs/ext4/inode.c12
-rw-r--r--fs/ext4/mballoc.c17
-rw-r--r--fs/ext4/super.c21
-rw-r--r--fs/gfs2/aops.c30
-rw-r--r--fs/gfs2/glock.c2
-rw-r--r--fs/gfs2/glops.c10
-rw-r--r--fs/gfs2/log.c4
-rw-r--r--fs/gfs2/meta_io.c5
-rw-r--r--fs/gfs2/ops_fstype.c12
-rw-r--r--fs/jbd2/journal.c18
-rw-r--r--fs/jbd2/recovery.c2
-rw-r--r--fs/jbd2/transaction.c16
-rw-r--r--fs/namei.c7
-rw-r--r--fs/nfsd/nfscache.c9
-rw-r--r--fs/proc/inode.c14
-rw-r--r--fs/pstore/platform.c7
-rw-r--r--fs/sysfs/file.c8
-rw-r--r--fs/xfs/xfs_bmap.c32
-rw-r--r--fs/xfs/xfs_bmap_util.c14
-rw-r--r--fs/xfs/xfs_buf.c37
-rw-r--r--fs/xfs/xfs_buf.h11
-rw-r--r--fs/xfs/xfs_buf_item.c21
-rw-r--r--fs/xfs/xfs_dir2_node.c26
-rw-r--r--fs/xfs/xfs_discard.c5
-rw-r--r--fs/xfs/xfs_fsops.c6
-rw-r--r--fs/xfs/xfs_ioctl.c3
-rw-r--r--fs/xfs/xfs_ioctl32.c3
-rw-r--r--fs/xfs/xfs_iops.c3
-rw-r--r--fs/xfs/xfs_log_recover.c13
-rw-r--r--fs/xfs/xfs_qm.c80
-rw-r--r--fs/xfs/xfs_trans_buf.c13
-rw-r--r--include/acpi/acpi_bus.h4
-rw-r--r--include/asm-generic/pgtable.h7
-rw-r--r--include/asm-generic/preempt.h35
-rw-r--r--include/asm-generic/word-at-a-time.h8
-rw-r--r--include/drm/drm_pciids.h2
-rw-r--r--include/linux/assoc_array.h6
-rw-r--r--include/linux/auxvec.h2
-rw-r--r--include/linux/compiler-intel.h2
-rw-r--r--include/linux/dcache.h2
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/ipv6.h1
-rw-r--r--include/linux/kernel.h3
-rw-r--r--include/linux/kexec.h3
-rw-r--r--include/linux/libata.h1
-rw-r--r--include/linux/lockref.h2
-rw-r--r--include/linux/math64.h30
-rw-r--r--include/linux/mfd/samsung/core.h3
-rw-r--r--include/linux/micrel_phy.h2
-rw-r--r--include/linux/migrate.h12
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_types.h52
-rw-r--r--include/linux/net.h2
-rw-r--r--include/linux/netdevice.h24
-rw-r--r--include/linux/pci.h31
-rw-r--r--include/linux/percpu-defs.h1
-rw-r--r--include/linux/pstore.h3
-rw-r--r--include/linux/reboot.h1
-rw-r--r--include/linux/rtnetlink.h5
-rw-r--r--include/linux/sched.h5
-rw-r--r--include/linux/shmem_fs.h2
-rw-r--r--include/linux/skbuff.h48
-rw-r--r--include/media/videobuf2-core.h2
-rw-r--r--include/net/ipv6.h3
-rw-r--r--include/net/llc_pdu.h2
-rw-r--r--include/net/sctp/structs.h9
-rw-r--r--include/net/sock.h6
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/target/target_core_base.h5
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/drm/vmwgfx_drm.h1
-rw-r--r--include/uapi/linux/input.h3
-rw-r--r--include/uapi/linux/perf_event.h1
-rw-r--r--include/uapi/sound/compress_offload.h6
-rw-r--r--include/xen/interface/io/blkif.h10
-rw-r--r--init/Kconfig6
-rw-r--r--kernel/.gitignore1
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/bounds.c2
-rw-r--r--kernel/cgroup.c50
-rw-r--r--kernel/events/core.c21
-rw-r--r--kernel/fork.c1
-rw-r--r--kernel/freezer.c6
-rw-r--r--kernel/futex.c7
-rw-r--r--kernel/kexec.c5
-rw-r--r--kernel/power/console.c1
-rw-r--r--kernel/reboot.c2
-rw-r--r--kernel/sched/core.c6
-rw-r--r--kernel/sched/fair.c151
-rw-r--r--kernel/sched/rt.c14
-rw-r--r--kernel/system_certificates.S14
-rw-r--r--kernel/system_keyring.c4
-rw-r--r--kernel/trace/ftrace.c2
-rw-r--r--kernel/user.c6
-rw-r--r--kernel/workqueue.c32
-rw-r--r--lib/assoc_array.c4
-rw-r--r--mm/Kconfig2
-rw-r--r--mm/compaction.c4
-rw-r--r--mm/fremap.c8
-rw-r--r--mm/huge_memory.c54
-rw-r--r--mm/memcontrol.c43
-rw-r--r--mm/memory-failure.c24
-rw-r--r--mm/memory.c2
-rw-r--r--mm/mempolicy.c16
-rw-r--r--mm/migrate.c82
-rw-r--r--mm/mlock.c44
-rw-r--r--mm/mprotect.c13
-rw-r--r--mm/page_alloc.c19
-rw-r--r--mm/pgtable-generic.c8
-rw-r--r--mm/rmap.c4
-rw-r--r--mm/shmem.c36
-rw-r--r--net/8021q/vlan_dev.c19
-rw-r--r--net/batman-adv/bat_iv_ogm.c36
-rw-r--r--net/batman-adv/distributed-arp-table.c6
-rw-r--r--net/batman-adv/fragmentation.c8
-rw-r--r--net/batman-adv/icmp_socket.c6
-rw-r--r--net/batman-adv/main.c16
-rw-r--r--net/batman-adv/network-coding.c22
-rw-r--r--net/batman-adv/packet.h124
-rw-r--r--net/batman-adv/routing.c30
-rw-r--r--net/batman-adv/send.c10
-rw-r--r--net/batman-adv/soft-interface.c18
-rw-r--r--net/batman-adv/translation-table.c6
-rw-r--r--net/bluetooth/hci_sock.c26
-rw-r--r--net/bridge/br_multicast.c4
-rw-r--r--net/bridge/br_private.h10
-rw-r--r--net/bridge/br_stp_bpdu.c2
-rw-r--r--net/core/dev.c2
-rw-r--r--net/core/drop_monitor.c1
-rw-r--r--net/core/neighbour.c3
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/core/skbuff.c1
-rw-r--r--net/core/sock.c2
-rw-r--r--net/dccp/ipv6.c1
-rw-r--r--net/dccp/probe.c19
-rw-r--r--net/ieee802154/6lowpan.c2
-rw-r--r--net/ipv4/fib_rules.c5
-rw-r--r--net/ipv4/gre_offload.c11
-rw-r--r--net/ipv4/inet_diag.c16
-rw-r--r--net/ipv4/ip_gre.c1
-rw-r--r--net/ipv4/ip_output.c5
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c1
-rw-r--r--net/ipv4/netfilter/nft_reject_ipv4.c2
-rw-r--r--net/ipv4/tcp_memcontrol.c7
-rw-r--r--net/ipv4/udp.c46
-rw-r--r--net/ipv4/udp_offload.c37
-rw-r--r--net/ipv6/addrconf.c19
-rw-r--r--net/ipv6/datagram.c1
-rw-r--r--net/ipv6/fib6_rules.c6
-rw-r--r--net/ipv6/ip6_output.c36
-rw-r--r--net/ipv6/ip6_tunnel.c21
-rw-r--r--net/ipv6/ip6_vti.c24
-rw-r--r--net/ipv6/ndisc.c3
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c1
-rw-r--r--net/ipv6/raw.c1
-rw-r--r--net/ipv6/route.c34
-rw-r--r--net/ipv6/sit.c8
-rw-r--r--net/ipv6/tcp_ipv6.c1
-rw-r--r--net/ipv6/udp.c1
-rw-r--r--net/l2tp/l2tp_ip6.c1
-rw-r--r--net/llc/af_llc.c5
-rw-r--r--net/mac80211/cfg.c15
-rw-r--r--net/mac80211/ibss.c4
-rw-r--r--net/mac80211/ieee80211_i.h1
-rw-r--r--net/mac80211/iface.c1
-rw-r--r--net/mac80211/main.c3
-rw-r--r--net/mac80211/mesh.c20
-rw-r--r--net/mac80211/mlme.c2
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c7
-rw-r--r--net/mac80211/rx.c3
-rw-r--r--net/mac80211/scan.c2
-rw-r--r--net/mac80211/spectmgmt.c2
-rw-r--r--net/mac80211/util.c11
-rw-r--r--net/netfilter/ipset/ip_set_hash_netnet.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_nfct.c6
-rw-r--r--net/netfilter/nf_conntrack_seqadj.c5
-rw-r--r--net/netfilter/nf_conntrack_timestamp.c1
-rw-r--r--net/netfilter/nf_tables_api.c72
-rw-r--r--net/netfilter/nfnetlink_log.c1
-rw-r--r--net/netfilter/nft_exthdr.c2
-rw-r--r--net/netfilter/xt_hashlimit.c25
-rw-r--r--net/packet/af_packet.c65
-rw-r--r--net/rds/ib.c3
-rw-r--r--net/rds/ib_send.c5
-rw-r--r--net/rose/af_rose.c16
-rw-r--r--net/sched/act_api.c26
-rw-r--r--net/sched/act_csum.c12
-rw-r--r--net/sched/act_gact.c9
-rw-r--r--net/sched/act_ipt.c12
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_nat.c12
-rw-r--r--net/sched/act_pedit.c10
-rw-r--r--net/sched/act_police.c5
-rw-r--r--net/sched/act_simple.c10
-rw-r--r--net/sched/act_skbedit.c8
-rw-r--r--net/sched/sch_htb.c20
-rw-r--r--net/sched/sch_tbf.c117
-rw-r--r--net/sctp/associola.c5
-rw-r--r--net/sctp/output.c3
-rw-r--r--net/sctp/outqueue.c32
-rw-r--r--net/sctp/probe.c17
-rw-r--r--net/sctp/sm_statefuns.c12
-rw-r--r--net/sctp/socket.c36
-rw-r--r--net/sctp/sysctl.c76
-rw-r--r--net/sctp/transport.c2
-rw-r--r--net/tipc/core.c7
-rw-r--r--net/tipc/handler.c11
-rw-r--r--net/tipc/port.c45
-rw-r--r--net/tipc/port.h6
-rw-r--r--net/tipc/socket.c46
-rw-r--r--net/unix/af_unix.c16
-rw-r--r--net/wireless/core.c9
-rw-r--r--net/wireless/ibss.c18
-rw-r--r--net/wireless/nl80211.c60
-rw-r--r--net/wireless/radiotap.c4
-rw-r--r--net/wireless/sme.c22
-rw-r--r--scripts/link-vmlinux.sh4
-rw-r--r--scripts/sortextable.c5
-rw-r--r--security/keys/big_key.c2
-rw-r--r--security/keys/key.c8
-rw-r--r--security/keys/keyring.c17
-rw-r--r--security/selinux/hooks.c185
-rw-r--r--security/selinux/include/xfrm.h8
-rw-r--r--security/selinux/ss/services.c42
-rw-r--r--security/selinux/xfrm.c62
-rw-r--r--sound/core/pcm_lib.c2
-rw-r--r--sound/pci/hda/hda_generic.c47
-rw-r--r--sound/pci/hda/hda_generic.h3
-rw-r--r--sound/pci/hda/hda_intel.c4
-rw-r--r--sound/pci/hda/patch_analog.c10
-rw-r--r--sound/pci/hda/patch_conexant.c1
-rw-r--r--sound/pci/hda/patch_hdmi.c5
-rw-r--r--sound/pci/hda/patch_realtek.c15
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c30
-rw-r--r--sound/soc/atmel/sam9x5_wm8731.c2
-rw-r--r--sound/soc/codecs/wm5110.c2
-rw-r--r--sound/soc/codecs/wm8904.c2
-rw-r--r--sound/soc/codecs/wm8962.c13
-rw-r--r--sound/soc/codecs/wm_adsp.c10
-rw-r--r--sound/soc/fsl/imx-wm8962.c2
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c24
-rw-r--r--sound/soc/soc-generic-dmaengine-pcm.c38
-rw-r--r--sound/soc/soc-pcm.c5
-rw-r--r--sound/soc/tegra/tegra20_i2s.c6
-rw-r--r--sound/soc/tegra/tegra20_spdif.c10
-rw-r--r--sound/soc/tegra/tegra30_i2s.c6
-rw-r--r--sound/usb/mixer_quirks.c2
-rw-r--r--tools/power/cpupower/utils/cpupower-set.c6
-rw-r--r--virt/kvm/kvm_main.c3
868 files changed, 8265 insertions, 4462 deletions
diff --git a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
index e287c8f..4165e7b 100644
--- a/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
+++ b/Documentation/DocBook/media/v4l/vidioc-expbuf.xml
@@ -73,7 +73,8 @@ range from zero to the maximal number of valid planes for the currently active
format. For the single-planar API, applications must set <structfield> plane
</structfield> to zero. Additional flags may be posted in the <structfield>
flags </structfield> field. Refer to a manual for open() for details.
-Currently only O_CLOEXEC is supported. All other fields must be set to zero.
+Currently only O_CLOEXEC, O_RDONLY, O_WRONLY, and O_RDWR are supported. All
+other fields must be set to zero.
In the case of multi-planar API, every plane is exported separately using
multiple <constant> VIDIOC_EXPBUF </constant> calls. </para>
@@ -170,8 +171,9 @@ multi-planar API. Otherwise this value must be set to zero. </entry>
<entry>__u32</entry>
<entry><structfield>flags</structfield></entry>
<entry>Flags for the newly created file, currently only <constant>
-O_CLOEXEC </constant> is supported, refer to the manual of open() for more
-details.</entry>
+O_CLOEXEC </constant>, <constant>O_RDONLY</constant>, <constant>O_WRONLY
+</constant>, and <constant>O_RDWR</constant> are supported, refer to the manual
+of open() for more details.</entry>
</row>
<row>
<entry>__s32</entry>
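
For illustration only, a minimal user-space sketch of exporting a buffer with the
newly accepted flags might look like the following (v4l2_fd and buf_len are
assumed to have been set up elsewhere, e.g. by REQBUFS/QUERYBUF on an open
capture device; this is not part of the patch itself):

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <linux/videodev2.h>

    /* Export buffer 0 as a DMABUF fd that the importer may map read/write. */
    static int export_buffer(int v4l2_fd, size_t buf_len)
    {
            struct v4l2_exportbuffer expbuf;
            void *mem;

            memset(&expbuf, 0, sizeof(expbuf));
            expbuf.type  = V4L2_BUF_TYPE_VIDEO_CAPTURE;
            expbuf.index = 0;                       /* buffer to export */
            expbuf.flags = O_CLOEXEC | O_RDWR;      /* O_RDWR now accepted as well */

            if (ioctl(v4l2_fd, VIDIOC_EXPBUF, &expbuf) == -1)
                    return -1;

            /* expbuf.fd is a DMABUF fd; O_RDWR permits a read/write mapping. */
            mem = mmap(NULL, buf_len, PROT_READ | PROT_WRITE, MAP_SHARED,
                       expbuf.fd, 0);
            if (mem == MAP_FAILED)
                    return -1;

            return expbuf.fd;
    }
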
diff --git a/Documentation/assoc_array.txt b/Documentation/assoc_array.txt
index f4faec0..2f2c6cd 100644
--- a/Documentation/assoc_array.txt
+++ b/Documentation/assoc_array.txt
@@ -164,10 +164,10 @@ This points to a number of methods, all of which need to be provided:
(4) Diff the index keys of two objects.
- int (*diff_objects)(const void *a, const void *b);
+ int (*diff_objects)(const void *object, const void *index_key);
- Return the bit position at which the index keys of two objects differ or
- -1 if they are the same.
+ Return the bit position at which the index key of the specified object
+ differs from the given index key or -1 if they are the same.
(5) Free an object.
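
As a sketch of the updated method signature above, a caller whose index key is a
single 64-bit value might implement diff_objects roughly as follows (my_object,
my_diff_objects and my_ops are hypothetical names; the bit numbering must agree
with how get_key_chunk lays out the key, which is not shown here):

    #include <linux/assoc_array.h>
    #include <linux/bitops.h>

    /* Hypothetical object type whose index key is a single 64-bit value. */
    struct my_object {
            u64 key;
    };

    static int my_diff_objects(const void *object, const void *index_key)
    {
            const struct my_object *obj = object;
            u64 other = *(const u64 *)index_key;
            u64 diff = obj->key ^ other;

            if (!diff)
                    return -1;      /* keys are identical */

            /* Position of the lowest differing bit. */
            return __ffs64(diff);
    }

    static const struct assoc_array_ops my_ops = {
            /* ... the other methods described above ... */
            .diff_objects = my_diff_objects,
    };
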
diff --git a/Documentation/block/null_blk.txt b/Documentation/block/null_blk.txt
new file mode 100644
index 0000000..b2830b4
--- /dev/null
+++ b/Documentation/block/null_blk.txt
@@ -0,0 +1,72 @@
+Null block device driver
+================================================================================
+
+I. Overview
+
+The null block device (/dev/nullb*) is used for benchmarking the various
+block-layer implementations. It emulates a block device of X gigabytes in size.
+The following instances are possible:
+
+ Single-queue block-layer
+ - Request-based.
+ - Single submission queue per device.
+ - Implements IO scheduling algorithms (CFQ, Deadline, noop).
+ Multi-queue block-layer
+ - Request-based.
+ - Configurable submission queues per device.
+ No block-layer (Known as bio-based)
+ - Bio-based. IO requests are submitted directly to the device driver.
+ - Directly accepts bio data structure and returns them.
+
+All of them have a completion queue for each core in the system.
+
+II. Module parameters applicable for all instances:
+
+queue_mode=[0-2]: Default: 2-Multi-queue
+ Selects which block-layer the module should instantiate with.
+
+ 0: Bio-based.
+ 1: Single-queue.
+ 2: Multi-queue.
+
+home_node=[0--nr_nodes]: Default: NUMA_NO_NODE
+ Selects what CPU node the data structures are allocated from.
+
+gb=[Size in GB]: Default: 250GB
+ The size of the device reported to the system.
+
+bs=[Block size (in bytes)]: Default: 512 bytes
+ The block size reported to the system.
+
+nr_devices=[Number of devices]: Default: 2
+ Number of block devices instantiated. They are instantiated as /dev/nullb0,
+ etc.
+
+irq_mode=[0-2]: Default: 1-Soft-irq
+ The completion mode used for completing IOs to the block-layer.
+
+ 0: None.
+ 1: Soft-irq. Uses IPI to complete IOs across CPU nodes. Simulates the overhead
+ when IOs are issued from another CPU node than the home the device is
+ connected to.
+ 2: Timer: Waits a specific period (completion_nsec) for each IO before
+ completion.
+
+completion_nsec=[ns]: Default: 10.000ns
+ Combined with irq_mode=2 (timer). The time each completion event must wait.
+
+submit_queues=[0..nr_cpus]:
+ The number of submission queues attached to the device driver. If unset, it
+ defaults to 1 on single-queue and bio-based instances. For multi-queue,
+ it is ignored when use_per_node_hctx module parameter is 1.
+
+hw_queue_depth=[0..qdepth]: Default: 64
+ The hardware queue depth of the device.
+
+III: Multi-queue specific parameters
+
+use_per_node_hctx=[0/1]: Default: 0
+ 0: The number of submit queues are set to the value of the submit_queues
+ parameter.
+ 1: The multi-queue block layer is instantiated with a hardware dispatch
+ queue for each CPU node in the system.
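
Once the module has been loaded with the parameters above, one way to
sanity-check the configured capacity and block size from user space is via the
standard block-device ioctls; a minimal sketch, assuming the default
/dev/nullb0 exists:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fs.h>

    int main(void)
    {
            unsigned long long bytes = 0;
            int block_size = 0;
            int fd = open("/dev/nullb0", O_RDONLY);

            if (fd < 0) {
                    perror("open /dev/nullb0");
                    return 1;
            }

            /* Logical block size should match the bs= module parameter. */
            ioctl(fd, BLKSSZGET, &block_size);
            /* Device size in bytes should match the gb= module parameter. */
            ioctl(fd, BLKGETSIZE64, &bytes);

            printf("block size %d bytes, capacity %llu GB\n",
                   block_size, bytes >> 30);
            close(fd);
            return 0;
    }
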
diff --git a/Documentation/device-mapper/cache.txt b/Documentation/device-mapper/cache.txt
index 274752f..719320b 100644
--- a/Documentation/device-mapper/cache.txt
+++ b/Documentation/device-mapper/cache.txt
@@ -266,10 +266,12 @@ E.g.
Invalidation is removing an entry from the cache without writing it
back. Cache blocks can be invalidated via the invalidate_cblocks
message, which takes an arbitrary number of cblock ranges. Each cblock
-must be expressed as a decimal value, in the future a variant message
-that takes cblock ranges expressed in hexidecimal may be needed to
-better support efficient invalidation of larger caches. The cache must
-be in passthrough mode when invalidate_cblocks is used.
+range's end value is "one past the end", meaning 5-10 expresses a range
+of values from 5 to 9. Each cblock must be expressed as a decimal
+value, in the future a variant message that takes cblock ranges
+expressed in hexidecimal may be needed to better support efficient
+invalidation of larger caches. The cache must be in passthrough mode
+when invalidate_cblocks is used.
invalidate_cblocks [<cblock>|<cblock begin>-<cblock end>]*
diff --git a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
index 46f5c79..0f2f920 100644
--- a/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
+++ b/Documentation/devicetree/bindings/clock/exynos5250-clock.txt
@@ -159,6 +159,8 @@ clock which they consume.
mixer 343
hdmi 344
g2d 345
+ mdma0 346
+ smmu_mdma0 347
[Clock Muxes]
diff --git a/Documentation/devicetree/bindings/net/davinci_emac.txt b/Documentation/devicetree/bindings/net/davinci_emac.txt
index 48b259e..bad381f 100644
--- a/Documentation/devicetree/bindings/net/davinci_emac.txt
+++ b/Documentation/devicetree/bindings/net/davinci_emac.txt
@@ -4,7 +4,7 @@ This file provides information, what the device node
for the davinci_emac interface contains.
Required properties:
-- compatible: "ti,davinci-dm6467-emac";
+- compatible: "ti,davinci-dm6467-emac" or "ti,am3517-emac"
- reg: Offset and length of the register set for the device
- ti,davinci-ctrl-reg-offset: offset to control register
- ti,davinci-ctrl-mod-reg-offset: offset to control module register
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 953049b..5a41a865 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -8,3 +8,7 @@ Required properties:
Optional properties:
- phy-device : phandle to Ethernet phy
- local-mac-address : Ethernet mac address to use
+- reg-io-width : Mask of sizes (in bytes) of the IO accesses that
+ are supported on the device. Valid value for SMSC LAN91c111 are
+ 1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
+ 16-bit access only.
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 50680a5..b9e9bd8 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1529,6 +1529,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
* atapi_dmadir: Enable ATAPI DMADIR bridge support
+ * disable: Disable this device.
+
If there are multiple matching configurations changing
the same attribute, the last one is used.
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
new file mode 100644
index 0000000..2b40e04
--- /dev/null
+++ b/Documentation/module-signing.txt
@@ -0,0 +1,240 @@
+ ==============================
+ KERNEL MODULE SIGNING FACILITY
+ ==============================
+
+CONTENTS
+
+ - Overview.
+ - Configuring module signing.
+ - Generating signing keys.
+ - Public keys in the kernel.
+ - Manually signing modules.
+ - Signed modules and stripping.
+ - Loading signed modules.
+ - Non-valid signatures and unsigned modules.
+ - Administering/protecting the private key.
+
+
+========
+OVERVIEW
+========
+
+The kernel module signing facility cryptographically signs modules during
+installation and then checks the signature upon loading the module. This
+allows increased kernel security by disallowing the loading of unsigned modules
+or modules signed with an invalid key. Module signing increases security by
+making it harder to load a malicious module into the kernel. The module
+signature checking is done by the kernel so that it is not necessary to have
+trusted userspace bits.
+
+This facility uses X.509 ITU-T standard certificates to encode the public keys
+involved. The signatures are not themselves encoded in any industrial standard
+type. The facility currently only supports the RSA public key encryption
+standard (though it is pluggable and permits others to be used). The possible
+hash algorithms that can be used are SHA-1, SHA-224, SHA-256, SHA-384, and
+SHA-512 (the algorithm is selected by data in the signature).
+
+
+==========================
+CONFIGURING MODULE SIGNING
+==========================
+
+The module signing facility is enabled by going to the "Enable Loadable Module
+Support" section of the kernel configuration and turning on
+
+ CONFIG_MODULE_SIG "Module signature verification"
+
+This has a number of options available:
+
+ (1) "Require modules to be validly signed" (CONFIG_MODULE_SIG_FORCE)
+
+ This specifies how the kernel should deal with a module that has a
+ signature for which the key is not known or a module that is unsigned.
+
+ If this is off (ie. "permissive"), then modules for which the key is not
+ available and modules that are unsigned are permitted, but the kernel will
+ be marked as being tainted.
+
+ If this is on (ie. "restrictive"), only modules that have a valid
+ signature that can be verified by a public key in the kernel's possession
+ will be loaded. All other modules will generate an error.
+
+ Irrespective of the setting here, if the module has a signature block that
+ cannot be parsed, it will be rejected out of hand.
+
+
+ (2) "Automatically sign all modules" (CONFIG_MODULE_SIG_ALL)
+
+ If this is on then modules will be automatically signed during the
+ modules_install phase of a build. If this is off, then the modules must
+ be signed manually using:
+
+ scripts/sign-file
+
+
+ (3) "Which hash algorithm should modules be signed with?"
+
+ This presents a choice of which hash algorithm the installation phase will
+ sign the modules with:
+
+ CONFIG_SIG_SHA1 "Sign modules with SHA-1"
+ CONFIG_SIG_SHA224 "Sign modules with SHA-224"
+ CONFIG_SIG_SHA256 "Sign modules with SHA-256"
+ CONFIG_SIG_SHA384 "Sign modules with SHA-384"
+ CONFIG_SIG_SHA512 "Sign modules with SHA-512"
+
+ The algorithm selected here will also be built into the kernel (rather
+ than being a module) so that modules signed with that algorithm can have
+ their signatures checked without causing a dependency loop.
+
+
+=======================
+GENERATING SIGNING KEYS
+=======================
+
+Cryptographic keypairs are required to generate and check signatures. A
+private key is used to generate a signature and the corresponding public key is
+used to check it. The private key is only needed during the build, after which
+it can be deleted or stored securely. The public key gets built into the
+kernel so that it can be used to check the signatures as the modules are
+loaded.
+
+Under normal conditions, the kernel build will automatically generate a new
+keypair using openssl if one does not exist in the files:
+
+ signing_key.priv
+ signing_key.x509
+
+during the building of vmlinux (the public part of the key needs to be built
+into vmlinux) using parameters in the:
+
+ x509.genkey
+
+file (which is also generated if it does not already exist).
+
+It is strongly recommended that you provide your own x509.genkey file.
+
+Most notably, in the x509.genkey file, the req_distinguished_name section
+should be altered from the default:
+
+ [ req_distinguished_name ]
+ O = Magrathea
+ CN = Glacier signing key
+ emailAddress = slartibartfast@magrathea.h2g2
+
+The generated RSA key size can also be set with:
+
+ [ req ]
+ default_bits = 4096
+
+
+It is also possible to manually generate the key private/public files using the
+x509.genkey key generation configuration file in the root node of the Linux
+kernel sources tree and the openssl command. The following is an example to
+generate the public/private key files:
+
+ openssl req -new -nodes -utf8 -sha256 -days 36500 -batch -x509 \
+ -config x509.genkey -outform DER -out signing_key.x509 \
+ -keyout signing_key.priv
+
+
+=========================
+PUBLIC KEYS IN THE KERNEL
+=========================
+
+The kernel contains a ring of public keys that can be viewed by root. They're
+in a keyring called ".system_keyring" that can be seen by:
+
+ [root@deneb ~]# cat /proc/keys
+ ...
+ 223c7853 I------ 1 perm 1f030000 0 0 keyring .system_keyring: 1
+ 302d2d52 I------ 1 perm 1f010000 0 0 asymmetri Fedora kernel signing key: d69a84e6bce3d216b979e9505b3e3ef9a7118079: X509.RSA a7118079 []
+ ...
+
+Beyond the public key generated specifically for module signing, any file
+placed in the kernel source root directory or the kernel build root directory
+whose name is suffixed with ".x509" will be assumed to be an X.509 public key
+and will be added to the keyring.
+
+Further, the architecture code may take public keys from a hardware store and
+add those in also (e.g. from the UEFI key database).
+
+Finally, it is possible to add additional public keys by doing:
+
+ keyctl padd asymmetric "" [.system_keyring-ID] <[key-file]
+
+e.g.:
+
+ keyctl padd asymmetric "" 0x223c7853 <my_public_key.x509
+
+Note, however, that the kernel will only permit keys to be added to
+.system_keyring _if_ the new key's X.509 wrapper is validly signed by a key
+that is already resident in the .system_keyring at the time the key was added.
+
+
+=========================
+MANUALLY SIGNING MODULES
+=========================
+
+To manually sign a module, use the scripts/sign-file tool available in
+the Linux kernel source tree. The script requires 4 arguments:
+
+ 1. The hash algorithm (e.g., sha256)
+ 2. The private key filename
+ 3. The public key filename
+ 4. The kernel module to be signed
+
+The following is an example to sign a kernel module:
+
+ scripts/sign-file sha512 kernel-signkey.priv \
+ kernel-signkey.x509 module.ko
+
+The hash algorithm used does not have to match the one configured, but if it
+doesn't, you should make sure that hash algorithm is either built into the
+kernel or can be loaded without requiring itself.
+
+
+============================
+SIGNED MODULES AND STRIPPING
+============================
+
+A signed module has a digital signature simply appended at the end. The string
+"~Module signature appended~." at the end of the module's file confirms that a
+signature is present but it does not confirm that the signature is valid!
+
+Signed modules are BRITTLE as the signature is outside of the defined ELF
+container. Thus they MAY NOT be stripped once the signature is computed and
+attached. Note the entire module is the signed payload, including any and all
+debug information present at the time of signing.
+
+
+======================
+LOADING SIGNED MODULES
+======================
+
+Modules are loaded with insmod, modprobe, init_module() or finit_module(),
+exactly as for unsigned modules as no processing is done in userspace. The
+signature checking is all done within the kernel.
+
+
+=========================================
+NON-VALID SIGNATURES AND UNSIGNED MODULES
+=========================================
+
+If CONFIG_MODULE_SIG_FORCE is enabled or enforcemodulesig=1 is supplied on
+the kernel command line, the kernel will only load validly signed modules
+for which it has a public key. Otherwise, it will also load modules that are
+unsigned. Any module for which the kernel has a key, but which proves to have
+a signature mismatch will not be permitted to load.
+
+Any module that has an unparseable signature will be rejected.
+
+
+=========================================
+ADMINISTERING/PROTECTING THE PRIVATE KEY
+=========================================
+
+Since the private key is used to sign modules, viruses and malware could use
+the private key to sign modules and compromise the operating system. The
+private key must be either destroyed or moved to a secure location and not kept
+in the root node of the kernel source tree.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 3c12d9a..8a984e9 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -16,8 +16,12 @@ ip_default_ttl - INTEGER
Default: 64 (as recommended by RFC1700)
ip_no_pmtu_disc - BOOLEAN
- Disable Path MTU Discovery.
- default FALSE
+ Disable Path MTU Discovery. If enabled and a
+ fragmentation-required ICMP is received, the PMTU to this
+ destination will be set to min_pmtu (see below). You will need
+ to raise min_pmtu to the smallest interface MTU on your system
+ manually if you want to avoid locally generated fragments.
+ Default: FALSE
min_pmtu - INTEGER
default 552 - minimum discovered Path MTU
diff --git a/Documentation/networking/packet_mmap.txt b/Documentation/networking/packet_mmap.txt
index c012236..8e48e3b 100644
--- a/Documentation/networking/packet_mmap.txt
+++ b/Documentation/networking/packet_mmap.txt
@@ -123,6 +123,16 @@ Transmission process is similar to capture as shown below.
[shutdown] close() --------> destruction of the transmission socket and
deallocation of all associated resources.
+Socket creation and destruction is also straight forward, and is done
+the same way as in capturing described in the previous paragraph:
+
+ int fd = socket(PF_PACKET, mode, 0);
+
+The protocol can optionally be 0 in case we only want to transmit
+via this socket, which avoids an expensive call to packet_rcv().
+In this case, you also need to bind(2) the TX_RING with sll_protocol = 0
+set. Otherwise, htons(ETH_P_ALL) or any other protocol, for example.
+
Binding the socket to your network interface is mandatory (with zero copy) to
know the header size of frames used in the circular buffer.
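
To illustrate the TX-only case described in the added paragraph, a minimal
sketch of creating and binding such a socket might look like this (open_tx_only
is a hypothetical helper; setting up the TX_RING itself is omitted and follows
the steps described elsewhere in this document):

    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/if_packet.h>
    #include <net/if.h>

    /* TX-only PF_PACKET socket: protocol 0 at socket() time and
     * sll_protocol = 0 in the bind() address avoid the packet_rcv() path. */
    static int open_tx_only(const char *ifname)
    {
            struct sockaddr_ll ll;
            int fd = socket(PF_PACKET, SOCK_RAW, 0);

            if (fd < 0)
                    return -1;

            memset(&ll, 0, sizeof(ll));
            ll.sll_family   = AF_PACKET;
            ll.sll_protocol = 0;                    /* transmit only */
            ll.sll_ifindex  = if_nametoindex(ifname);

            if (bind(fd, (struct sockaddr *)&ll, sizeof(ll)) < 0) {
                    close(fd);
                    return -1;
            }

            /* setsockopt(fd, SOL_PACKET, PACKET_TX_RING, ...) and mmap()
             * of the ring would follow here. */
            return fd;
    }
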
diff --git a/MAINTAINERS b/MAINTAINERS
index 9486fb6..31a0462 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -783,7 +783,7 @@ F: arch/arm/boot/dts/sama*.dts
F: arch/arm/boot/dts/sama*.dtsi
ARM/CALXEDA HIGHBANK ARCHITECTURE
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh@kernel.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-highbank/
@@ -893,20 +893,15 @@ F: arch/arm/include/asm/hardware/dec21285.h
F: arch/arm/mach-footbridge/
ARM/FREESCALE IMX / MXC ARM ARCHITECTURE
+M: Shawn Guo <shawn.guo@linaro.org>
M: Sascha Hauer <kernel@pengutronix.de>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
-T: git git://git.pengutronix.de/git/imx/linux-2.6.git
+T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
F: arch/arm/mach-imx/
+F: arch/arm/boot/dts/imx*
F: arch/arm/configs/imx*_defconfig
-ARM/FREESCALE IMX6
-M: Shawn Guo <shawn.guo@linaro.org>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Maintained
-T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
-F: arch/arm/mach-imx/*imx6*
-
ARM/FREESCALE MXS ARM ARCHITECTURE
M: Shawn Guo <shawn.guo@linaro.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1013,6 +1008,8 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
F: arch/arm/mach-keystone/
+F: drivers/clk/keystone/
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
ARM/LOGICPD PXA270 MACHINE SUPPORT
M: Lennert Buytenhek <kernel@wantstofly.org>
@@ -1371,6 +1368,9 @@ T: git git://git.xilinx.com/linux-xlnx.git
S: Supported
F: arch/arm/mach-zynq/
F: drivers/cpuidle/cpuidle-zynq.c
+N: zynq
+N: xilinx
+F: drivers/clocksource/cadence_ttc_timer.c
ARM SMMU DRIVER
M: Will Deacon <will.deacon@arm.com>
@@ -2828,8 +2828,10 @@ F: include/uapi/drm/
INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
M: Daniel Vetter <daniel.vetter@ffwll.ch>
+M: Jani Nikula <jani.nikula@linux.intel.com>
L: intel-gfx@lists.freedesktop.org
L: dri-devel@lists.freedesktop.org
+Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://people.freedesktop.org/~danvet/drm-intel
S: Supported
F: drivers/gpu/drm/i915/
@@ -3766,9 +3768,11 @@ F: include/uapi/linux/gigaset_dev.h
GPIO SUBSYSTEM
M: Linus Walleij <linus.walleij@linaro.org>
-S: Maintained
+M: Alexandre Courbot <gnurou@gmail.com>
L: linux-gpio@vger.kernel.org
-F: Documentation/gpio.txt
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+S: Maintained
+F: Documentation/gpio/
F: drivers/gpio/
F: include/linux/gpio*
F: include/asm-generic/gpio.h
@@ -3836,6 +3840,12 @@ T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/usb/gspca/
+GUID PARTITION TABLE (GPT)
+M: Davidlohr Bueso <davidlohr@hp.com>
+L: linux-efi@vger.kernel.org
+S: Maintained
+F: block/partitions/efi.*
+
STK1160 USB VIDEO CAPTURE DRIVER
M: Ezequiel Garcia <elezegarcia@gmail.com>
L: linux-media@vger.kernel.org
@@ -4471,10 +4481,8 @@ M: Bruce Allan <bruce.w.allan@intel.com>
M: Carolyn Wyborny <carolyn.wyborny@intel.com>
M: Don Skidmore <donald.c.skidmore@intel.com>
M: Greg Rose <gregory.v.rose@intel.com>
-M: Peter P Waskiewicz Jr <peter.p.waskiewicz.jr@intel.com>
M: Alex Duyck <alexander.h.duyck@intel.com>
M: John Ronciak <john.ronciak@intel.com>
-M: Tushar Dave <tushar.n.dave@intel.com>
L: e1000-devel@lists.sourceforge.net
W: http://www.intel.com/support/feedback.htm
W: http://e1000.sourceforge.net/
@@ -5918,12 +5926,21 @@ M: Steffen Klassert <steffen.klassert@secunet.com>
M: Herbert Xu <herbert@gondor.apana.org.au>
M: "David S. Miller" <davem@davemloft.net>
L: netdev@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/davem/net.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/klassert/ipsec-next.git
S: Maintained
F: net/xfrm/
F: net/key/
F: net/ipv4/xfrm*
+F: net/ipv4/esp4.c
+F: net/ipv4/ah4.c
+F: net/ipv4/ipcomp.c
+F: net/ipv4/ip_vti.c
F: net/ipv6/xfrm*
+F: net/ipv6/esp6.c
+F: net/ipv6/ah6.c
+F: net/ipv6/ipcomp6.c
+F: net/ipv6/ip6_vti.c
F: include/uapi/linux/xfrm.h
F: include/net/xfrm.h
@@ -6244,7 +6261,7 @@ F: drivers/i2c/busses/i2c-ocores.c
OPEN FIRMWARE AND FLATTENED DEVICE TREE
M: Grant Likely <grant.likely@linaro.org>
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
L: devicetree@vger.kernel.org
W: http://fdt.secretlab.ca
T: git git://git.secretlab.ca/git/linux-2.6.git
@@ -6256,7 +6273,7 @@ K: of_get_property
K: of_match_table
OPEN FIRMWARE AND FLATTENED DEVICE TREE BINDINGS
-M: Rob Herring <rob.herring@calxeda.com>
+M: Rob Herring <robh+dt@kernel.org>
M: Pawel Moll <pawel.moll@arm.com>
M: Mark Rutland <mark.rutland@arm.com>
M: Ian Campbell <ijc+devicetree@hellion.org.uk>
@@ -6470,19 +6487,52 @@ F: drivers/pci/
F: include/linux/pci*
F: arch/x86/pci/
+PCI DRIVER FOR IMX6
+M: Richard Zhu <r65037@freescale.com>
+M: Shawn Guo <shawn.guo@linaro.org>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*imx6*
+
+PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
+M: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+M: Jason Cooper <jason@lakedaemon.net>
+L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+F: drivers/pci/host/*mvebu*
+
PCI DRIVER FOR NVIDIA TEGRA
M: Thierry Reding <thierry.reding@gmail.com>
L: linux-tegra@vger.kernel.org
+L: linux-pci@vger.kernel.org
S: Supported
F: Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
F: drivers/pci/host/pci-tegra.c
+PCI DRIVER FOR RENESAS R-CAR
+M: Simon Horman <horms@verge.net.au>
+L: linux-pci@vger.kernel.org
+L: linux-sh@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*rcar*
+
PCI DRIVER FOR SAMSUNG EXYNOS
M: Jingoo Han <jg1.han@samsung.com>
L: linux-pci@vger.kernel.org
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
S: Maintained
F: drivers/pci/host/pci-exynos.c
+PCI DRIVER FOR SYNOPSYS DESIGNWARE
+M: Mohit Kumar <mohit.kumar@st.com>
+M: Jingoo Han <jg1.han@samsung.com>
+L: linux-pci@vger.kernel.org
+S: Maintained
+F: drivers/pci/host/*designware*
+
PCMCIA SUBSYSTEM
P: Linux PCMCIA Team
L: linux-pcmcia@lists.infradead.org
@@ -9545,7 +9595,7 @@ F: drivers/xen/*swiotlb*
XFS FILESYSTEM
P: Silicon Graphics Inc
-M: Dave Chinner <dchinner@fromorbit.com>
+M: Dave Chinner <david@fromorbit.com>
M: Ben Myers <bpm@sgi.com>
M: xfs@oss.sgi.com
L: xfs@oss.sgi.com
diff --git a/Makefile b/Makefile
index 890392f..ae1a55a 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 3
PATCHLEVEL = 13
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc7
NAME = One Giant Leap for Frogkind
# *DOCUMENTATION*
@@ -732,19 +732,15 @@ export mod_strip_cmd
# Select initial ramdisk compression format, default is gzip(1).
# This shall be used by the dracut(8) tool while creating an initramfs image.
#
-INITRD_COMPRESS=gzip
-ifeq ($(CONFIG_RD_BZIP2), y)
- INITRD_COMPRESS=bzip2
-else ifeq ($(CONFIG_RD_LZMA), y)
- INITRD_COMPRESS=lzma
-else ifeq ($(CONFIG_RD_XZ), y)
- INITRD_COMPRESS=xz
-else ifeq ($(CONFIG_RD_LZO), y)
- INITRD_COMPRESS=lzo
-else ifeq ($(CONFIG_RD_LZ4), y)
- INITRD_COMPRESS=lz4
-endif
-export INITRD_COMPRESS
+INITRD_COMPRESS-y := gzip
+INITRD_COMPRESS-$(CONFIG_RD_BZIP2) := bzip2
+INITRD_COMPRESS-$(CONFIG_RD_LZMA) := lzma
+INITRD_COMPRESS-$(CONFIG_RD_XZ) := xz
+INITRD_COMPRESS-$(CONFIG_RD_LZO) := lzo
+INITRD_COMPRESS-$(CONFIG_RD_LZ4) := lz4
+# do not export INITRD_COMPRESS, since we didn't actually
+# choose a sane default compression above.
+# export INITRD_COMPRESS := $(INITRD_COMPRESS-y)
ifdef CONFIG_MODULE_SIG_ALL
MODSECKEY = ./signing_key.priv
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 2ee0c9b..9063ae6 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -8,6 +8,7 @@
config ARC
def_bool y
+ select BUILDTIME_EXTABLE_SORT
select CLONE_BACKWARDS
# ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
select DEVTMPFS if !INITRAMFS_SOURCE=""
diff --git a/arch/arc/include/uapi/asm/unistd.h b/arch/arc/include/uapi/asm/unistd.h
index 6f30484..39e58d1 100644
--- a/arch/arc/include/uapi/asm/unistd.h
+++ b/arch/arc/include/uapi/asm/unistd.h
@@ -8,6 +8,13 @@
/******** no-legacy-syscalls-ABI *******/
+/*
+ * Non-typical guard macro to enable inclusion twice in ARCH sys.c
+ * That is how the Generic syscall wrapper generator works
+ */
+#if !defined(_UAPI_ASM_ARC_UNISTD_H) || defined(__SYSCALL)
+#define _UAPI_ASM_ARC_UNISTD_H
+
#define __ARCH_WANT_SYS_EXECVE
#define __ARCH_WANT_SYS_CLONE
#define __ARCH_WANT_SYS_VFORK
@@ -32,3 +39,7 @@ __SYSCALL(__NR_arc_gettls, sys_arc_gettls)
/* Generic syscall (fs/filesystems.c - lost in asm-generic/unistd.h */
#define __NR_sysfs (__NR_arch_specific_syscall + 3)
__SYSCALL(__NR_sysfs, sys_sysfs)
+
+#undef __SYSCALL
+
+#endif
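
The guard added above exists so that an arch sys.c can include this header
twice: once normally and once with __SYSCALL defined to emit syscall-table
entries. A simplified sketch of that pattern (our illustration, not the
actual ARC sys.c):

    /* first inclusion: __SYSCALL expands to prototypes */
    #define __SYSCALL(nr, entry)    extern long entry();
    #include <asm/unistd.h>         /* sets the guard, then #undefs __SYSCALL */

    /* second inclusion: allowed again because __SYSCALL is (re)defined */
    #define __SYSCALL(nr, entry)    [nr] = (void *)entry,
    void *sys_call_table[] = {
    #include <asm/unistd.h>
    };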
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index e46d81f..63177e4 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -79,9 +79,9 @@ static int arc_pmu_cache_event(u64 config)
cache_result = (config >> 16) & 0xff;
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
return -EINVAL;
- if (cache_type >= PERF_COUNT_HW_CACHE_OP_MAX)
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
return -EINVAL;
- if (cache_type >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
return -EINVAL;
ret = arc_pmu_cache_map[cache_type][cache_op][cache_result];
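
The fix above corrects a copy-and-paste error: all three bounds checks tested
cache_type. For reference, the perf ABI packs the three fields into a single
config word, so each field needs its own check; a condensed sketch of the
decode (the helper name is ours, mirroring the driver logic):

    #include <linux/errno.h>
    #include <linux/perf_event.h>

    /* attr.config layout for hardware cache events:
     *   bits 0-7: cache type, bits 8-15: operation, bits 16-23: result.
     * Each field must be checked against its own maximum.
     */
    static int decode_cache_config(u64 config, unsigned int *type,
                                   unsigned int *op, unsigned int *result)
    {
            *type   = config & 0xff;
            *op     = (config >> 8) & 0xff;
            *result = (config >> 16) & 0xff;

            if (*type >= PERF_COUNT_HW_CACHE_MAX ||
                *op >= PERF_COUNT_HW_CACHE_OP_MAX ||
                *result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                    return -EINVAL;
            return 0;
    }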
diff --git a/arch/arm/boot/dts/am3517-evm.dts b/arch/arm/boot/dts/am3517-evm.dts
index e99dfaf..03fcbf0 100644
--- a/arch/arm/boot/dts/am3517-evm.dts
+++ b/arch/arm/boot/dts/am3517-evm.dts
@@ -7,11 +7,11 @@
*/
/dts-v1/;
-#include "omap34xx.dtsi"
+#include "am3517.dtsi"
/ {
- model = "TI AM3517 EVM (AM3517/05)";
- compatible = "ti,am3517-evm", "ti,omap3";
+ model = "TI AM3517 EVM (AM3517/05 TMDSEVM3517)";
+ compatible = "ti,am3517-evm", "ti,am3517", "ti,omap3";
memory {
device_type = "memory";
diff --git a/arch/arm/boot/dts/am3517.dtsi b/arch/arm/boot/dts/am3517.dtsi
new file mode 100644
index 0000000..2fbe02f
--- /dev/null
+++ b/arch/arm/boot/dts/am3517.dtsi
@@ -0,0 +1,63 @@
+/*
+ * Device Tree Source for am3517 SoC
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include "omap3.dtsi"
+
+/ {
+ aliases {
+ serial3 = &uart4;
+ };
+
+ ocp {
+ am35x_otg_hs: am35x_otg_hs@5c040000 {
+ compatible = "ti,omap3-musb";
+ ti,hwmods = "am35x_otg_hs";
+ status = "disabled";
+ reg = <0x5c040000 0x1000>;
+ interrupts = <71>;
+ interrupt-names = "mc";
+ };
+
+ davinci_emac: ethernet@0x5c000000 {
+ compatible = "ti,am3517-emac";
+ ti,hwmods = "davinci_emac";
+ status = "disabled";
+ reg = <0x5c000000 0x30000>;
+ interrupts = <67 68 69 70>;
+ ti,davinci-ctrl-reg-offset = <0x10000>;
+ ti,davinci-ctrl-mod-reg-offset = <0>;
+ ti,davinci-ctrl-ram-offset = <0x20000>;
+ ti,davinci-ctrl-ram-size = <0x2000>;
+ ti,davinci-rmii-en = /bits/ 8 <1>;
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+
+ davinci_mdio: ethernet@0x5c030000 {
+ compatible = "ti,davinci_mdio";
+ ti,hwmods = "davinci_mdio";
+ status = "disabled";
+ reg = <0x5c030000 0x1000>;
+ bus_freq = <1000000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ };
+
+ uart4: serial@4809e000 {
+ compatible = "ti,omap3-uart";
+ ti,hwmods = "uart4";
+ status = "disabled";
+ reg = <0x4809e000 0x400>;
+ interrupts = <84>;
+ dmas = <&sdma 55 &sdma 54>;
+ dma-names = "tx", "rx";
+ clock-frequency = <48000000>;
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 9db5047..177becd 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -559,7 +559,7 @@
compatible = "arm,pl330", "arm,primecell";
reg = <0x10800000 0x1000>;
interrupts = <0 33 0>;
- clocks = <&clock 271>;
+ clocks = <&clock 346>;
clock-names = "apb_pclk";
#dma-cells = <1>;
#dma-channels = <8>;
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index c2c306d..6fc85f9 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -9,7 +9,7 @@
/dts-v1/;
-#include "omap34xx.dtsi"
+#include "omap34xx-hs.dtsi"
/ {
model = "Nokia N900";
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 94eb77d..5c26c18 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -8,7 +8,7 @@
* published by the Free Software Foundation.
*/
-#include "omap36xx.dtsi"
+#include "omap36xx-hs.dtsi"
/ {
cpus {
diff --git a/arch/arm/boot/dts/omap34xx-hs.dtsi b/arch/arm/boot/dts/omap34xx-hs.dtsi
new file mode 100644
index 0000000..1ff6264
--- /dev/null
+++ b/arch/arm/boot/dts/omap34xx-hs.dtsi
@@ -0,0 +1,16 @@
+/* Disabled modules for secure omaps */
+
+#include "omap34xx.dtsi"
+
+/* Secure omaps have some devices inaccessible depending on the firmware */
+&aes {
+ status = "disabled";
+};
+
+&sham {
+ status = "disabled";
+};
+
+&timer12 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/omap36xx-hs.dtsi b/arch/arm/boot/dts/omap36xx-hs.dtsi
new file mode 100644
index 0000000..2c7febb
--- /dev/null
+++ b/arch/arm/boot/dts/omap36xx-hs.dtsi
@@ -0,0 +1,16 @@
+/* Disabled modules for secure omaps */
+
+#include "omap36xx.dtsi"
+
+/* Secure omaps have some devices inaccessible depending on the firmware */
+&aes {
+ status = "disabled";
+};
+
+&sham {
+ status = "disabled";
+};
+
+&timer12 {
+ status = "disabled";
+};
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index ee845fa..9987dd0e 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -87,9 +87,9 @@
interrupts = <1 9 0xf04>;
};
- gpio0: gpio@ffc40000 {
+ gpio0: gpio@e6050000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc40000 0 0x2c>;
+ reg = <0 0xe6050000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 4 0x4>;
#gpio-cells = <2>;
@@ -99,9 +99,9 @@
interrupt-controller;
};
- gpio1: gpio@ffc41000 {
+ gpio1: gpio@e6051000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc41000 0 0x2c>;
+ reg = <0 0xe6051000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 5 0x4>;
#gpio-cells = <2>;
@@ -111,9 +111,9 @@
interrupt-controller;
};
- gpio2: gpio@ffc42000 {
+ gpio2: gpio@e6052000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc42000 0 0x2c>;
+ reg = <0 0xe6052000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 6 0x4>;
#gpio-cells = <2>;
@@ -123,9 +123,9 @@
interrupt-controller;
};
- gpio3: gpio@ffc43000 {
+ gpio3: gpio@e6053000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc43000 0 0x2c>;
+ reg = <0 0xe6053000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 7 0x4>;
#gpio-cells = <2>;
@@ -135,9 +135,9 @@
interrupt-controller;
};
- gpio4: gpio@ffc44000 {
+ gpio4: gpio@e6054000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc44000 0 0x2c>;
+ reg = <0 0xe6054000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 8 0x4>;
#gpio-cells = <2>;
@@ -147,9 +147,9 @@
interrupt-controller;
};
- gpio5: gpio@ffc45000 {
+ gpio5: gpio@e6055000 {
compatible = "renesas,gpio-r8a7790", "renesas,gpio-rcar";
- reg = <0 0xffc45000 0 0x2c>;
+ reg = <0 0xe6055000 0 0x50>;
interrupt-parent = <&gic>;
interrupts = <0 9 0x4>;
#gpio-cells = <2>;
@@ -241,7 +241,7 @@
sdhi0: sdhi@ee100000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee100000 0 0x100>;
+ reg = <0 0xee100000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 165 4>;
cap-sd-highspeed;
@@ -250,7 +250,7 @@
sdhi1: sdhi@ee120000 {
compatible = "renesas,sdhi-r8a7790";
- reg = <0 0xee120000 0 0x100>;
+ reg = <0 0xee120000 0 0x200>;
interrupt-parent = <&gic>;
interrupts = <0 166 4>;
cap-sd-highspeed;
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index c1751a6..7f5878c 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -193,7 +193,10 @@
pio: pinctrl@01c20800 {
compatible = "allwinner,sun6i-a31-pinctrl";
reg = <0x01c20800 0x400>;
- interrupts = <0 11 1>, <0 15 1>, <0 16 1>, <0 17 1>;
+ interrupts = <0 11 4>,
+ <0 15 4>,
+ <0 16 4>,
+ <0 17 4>;
clocks = <&apb1_gates 5>;
gpio-controller;
interrupt-controller;
@@ -212,11 +215,11 @@
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
reg = <0x01c20c00 0xa0>;
- interrupts = <0 18 1>,
- <0 19 1>,
- <0 20 1>,
- <0 21 1>,
- <0 22 1>;
+ interrupts = <0 18 4>,
+ <0 19 4>,
+ <0 20 4>,
+ <0 21 4>,
+ <0 22 4>;
clocks = <&osc24M>;
};
@@ -228,7 +231,7 @@
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
- interrupts = <0 0 1>;
+ interrupts = <0 0 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 16>;
@@ -238,7 +241,7 @@
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
- interrupts = <0 1 1>;
+ interrupts = <0 1 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 17>;
@@ -248,7 +251,7 @@
uart2: serial@01c28800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28800 0x400>;
- interrupts = <0 2 1>;
+ interrupts = <0 2 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 18>;
@@ -258,7 +261,7 @@
uart3: serial@01c28c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28c00 0x400>;
- interrupts = <0 3 1>;
+ interrupts = <0 3 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 19>;
@@ -268,7 +271,7 @@
uart4: serial@01c29000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29000 0x400>;
- interrupts = <0 4 1>;
+ interrupts = <0 4 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 20>;
@@ -278,7 +281,7 @@
uart5: serial@01c29400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29400 0x400>;
- interrupts = <0 5 1>;
+ interrupts = <0 5 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb2_gates 21>;
diff --git a/arch/arm/boot/dts/sun7i-a20.dtsi b/arch/arm/boot/dts/sun7i-a20.dtsi
index e46cfed..367611a 100644
--- a/arch/arm/boot/dts/sun7i-a20.dtsi
+++ b/arch/arm/boot/dts/sun7i-a20.dtsi
@@ -170,7 +170,7 @@
emac: ethernet@01c0b000 {
compatible = "allwinner,sun4i-emac";
reg = <0x01c0b000 0x1000>;
- interrupts = <0 55 1>;
+ interrupts = <0 55 4>;
clocks = <&ahb_gates 17>;
status = "disabled";
};
@@ -186,7 +186,7 @@
pio: pinctrl@01c20800 {
compatible = "allwinner,sun7i-a20-pinctrl";
reg = <0x01c20800 0x400>;
- interrupts = <0 28 1>;
+ interrupts = <0 28 4>;
clocks = <&apb0_gates 5>;
gpio-controller;
interrupt-controller;
@@ -251,12 +251,12 @@
timer@01c20c00 {
compatible = "allwinner,sun4i-timer";
reg = <0x01c20c00 0x90>;
- interrupts = <0 22 1>,
- <0 23 1>,
- <0 24 1>,
- <0 25 1>,
- <0 67 1>,
- <0 68 1>;
+ interrupts = <0 22 4>,
+ <0 23 4>,
+ <0 24 4>,
+ <0 25 4>,
+ <0 67 4>,
+ <0 68 4>;
clocks = <&osc24M>;
};
@@ -273,7 +273,7 @@
uart0: serial@01c28000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28000 0x400>;
- interrupts = <0 1 1>;
+ interrupts = <0 1 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 16>;
@@ -283,7 +283,7 @@
uart1: serial@01c28400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28400 0x400>;
- interrupts = <0 2 1>;
+ interrupts = <0 2 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 17>;
@@ -293,7 +293,7 @@
uart2: serial@01c28800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28800 0x400>;
- interrupts = <0 3 1>;
+ interrupts = <0 3 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 18>;
@@ -303,7 +303,7 @@
uart3: serial@01c28c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c28c00 0x400>;
- interrupts = <0 4 1>;
+ interrupts = <0 4 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 19>;
@@ -313,7 +313,7 @@
uart4: serial@01c29000 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29000 0x400>;
- interrupts = <0 17 1>;
+ interrupts = <0 17 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 20>;
@@ -323,7 +323,7 @@
uart5: serial@01c29400 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29400 0x400>;
- interrupts = <0 18 1>;
+ interrupts = <0 18 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 21>;
@@ -333,7 +333,7 @@
uart6: serial@01c29800 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29800 0x400>;
- interrupts = <0 19 1>;
+ interrupts = <0 19 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 22>;
@@ -343,7 +343,7 @@
uart7: serial@01c29c00 {
compatible = "snps,dw-apb-uart";
reg = <0x01c29c00 0x400>;
- interrupts = <0 20 1>;
+ interrupts = <0 20 4>;
reg-shift = <2>;
reg-io-width = <4>;
clocks = <&apb1_gates 23>;
@@ -353,7 +353,7 @@
i2c0: i2c@01c2ac00 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2ac00 0x400>;
- interrupts = <0 7 1>;
+ interrupts = <0 7 4>;
clocks = <&apb1_gates 0>;
clock-frequency = <100000>;
status = "disabled";
@@ -362,7 +362,7 @@
i2c1: i2c@01c2b000 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b000 0x400>;
- interrupts = <0 8 1>;
+ interrupts = <0 8 4>;
clocks = <&apb1_gates 1>;
clock-frequency = <100000>;
status = "disabled";
@@ -371,7 +371,7 @@
i2c2: i2c@01c2b400 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b400 0x400>;
- interrupts = <0 9 1>;
+ interrupts = <0 9 4>;
clocks = <&apb1_gates 2>;
clock-frequency = <100000>;
status = "disabled";
@@ -380,7 +380,7 @@
i2c3: i2c@01c2b800 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2b800 0x400>;
- interrupts = <0 88 1>;
+ interrupts = <0 88 4>;
clocks = <&apb1_gates 3>;
clock-frequency = <100000>;
status = "disabled";
@@ -389,7 +389,7 @@
i2c4: i2c@01c2bc00 {
compatible = "allwinner,sun4i-i2c";
reg = <0x01c2bc00 0x400>;
- interrupts = <0 89 1>;
+ interrupts = <0 89 4>;
clocks = <&apb1_gates 15>;
clock-frequency = <100000>;
status = "disabled";
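
For reference (not part of the patches above), the third cell of these GIC
interrupt specifiers is the trigger type; the edits switch the devices from
edge-triggered (1) to level-triggered (4). The relevant constants, from
include/dt-bindings/interrupt-controller/irq.h, are:

    #define IRQ_TYPE_NONE           0
    #define IRQ_TYPE_EDGE_RISING    1       /* old value in the DTs above */
    #define IRQ_TYPE_EDGE_FALLING   2
    #define IRQ_TYPE_LEVEL_HIGH     4       /* new value: level, active high */
    #define IRQ_TYPE_LEVEL_LOW      8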
diff --git a/arch/arm/crypto/aesbs-core.S_shipped b/arch/arm/crypto/aesbs-core.S_shipped
index 64205d4..71e5fc7 100644
--- a/arch/arm/crypto/aesbs-core.S_shipped
+++ b/arch/arm/crypto/aesbs-core.S_shipped
@@ -58,7 +58,7 @@
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__ 7
#endif
#ifdef __thumb__
diff --git a/arch/arm/crypto/bsaes-armv7.pl b/arch/arm/crypto/bsaes-armv7.pl
index f3d96d9..be068db 100644
--- a/arch/arm/crypto/bsaes-armv7.pl
+++ b/arch/arm/crypto/bsaes-armv7.pl
@@ -701,7 +701,7 @@ $code.=<<___;
# define VFP_ABI_FRAME 0
# define BSAES_ASM_EXTENDED_KEY
# define XTS_CHAIN_TWEAK
-# define __ARM_ARCH__ __LINUX_ARM_ARCH__
+# define __ARM_ARCH__ 7
#endif
#ifdef __thumb__
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 3c597c2..fbeb39c 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -329,7 +329,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
*/
#define ioremap(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE)
-#define ioremap_cached(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
+#define ioremap_cache(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap __arm_iounmap
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index 9ecccc8..8756e4b 100644
--- a/arch/arm/include/asm/memory.h
+++ b/arch/arm/include/asm/memory.h
@@ -100,23 +100,19 @@
#define TASK_UNMAPPED_BASE UL(0x00000000)
#endif
-#ifndef PHYS_OFFSET
-#define PHYS_OFFSET UL(CONFIG_DRAM_BASE)
-#endif
-
#ifndef END_MEM
#define END_MEM (UL(CONFIG_DRAM_BASE) + CONFIG_DRAM_SIZE)
#endif
#ifndef PAGE_OFFSET
-#define PAGE_OFFSET (PHYS_OFFSET)
+#define PAGE_OFFSET PLAT_PHYS_OFFSET
#endif
/*
* The module can be at any place in ram in nommu mode.
*/
#define MODULES_END (END_MEM)
-#define MODULES_VADDR (PHYS_OFFSET)
+#define MODULES_VADDR PAGE_OFFSET
#define XIP_VIRT_ADDR(physaddr) (physaddr)
@@ -157,6 +153,16 @@
#endif
#define ARCH_PGD_MASK ((1 << ARCH_PGD_SHIFT) - 1)
+/*
+ * PLAT_PHYS_OFFSET is the offset (from zero) of the start of physical
+ * memory. This is used for XIP and NoMMU kernels, or by kernels which
+ * have their own mach/memory.h. Assembly code must always use
+ * PLAT_PHYS_OFFSET and not PHYS_OFFSET.
+ */
+#ifndef PLAT_PHYS_OFFSET
+#define PLAT_PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -239,6 +245,8 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#else
+#define PHYS_OFFSET PLAT_PHYS_OFFSET
+
static inline phys_addr_t __virt_to_phys(unsigned long x)
{
return (phys_addr_t)x - PAGE_OFFSET + PHYS_OFFSET;
@@ -251,17 +259,6 @@ static inline unsigned long __phys_to_virt(phys_addr_t x)
#endif
#endif
-#endif /* __ASSEMBLY__ */
-
-#ifndef PHYS_OFFSET
-#ifdef PLAT_PHYS_OFFSET
-#define PHYS_OFFSET PLAT_PHYS_OFFSET
-#else
-#define PHYS_OFFSET UL(CONFIG_PHYS_OFFSET)
-#endif
-#endif
-
-#ifndef __ASSEMBLY__
/*
* PFNs are used to describe any physical page; this means
@@ -350,7 +347,8 @@ static inline __deprecated void *bus_to_virt(unsigned long x)
#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define virt_addr_valid(kaddr) ((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory)
+#define virt_addr_valid(kaddr) (((unsigned long)(kaddr) >= PAGE_OFFSET && (unsigned long)(kaddr) < (unsigned long)high_memory) \
+ && pfn_valid(__pa(kaddr) >> PAGE_SHIFT) )
#endif
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 75579a9..3759cac 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -117,6 +117,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
return __set_phys_to_machine(pfn, mfn);
}
-#define xen_remap(cookie, size) ioremap_cached((cookie), (size));
+#define xen_remap(cookie, size) ioremap_cache((cookie), (size));
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index 14235ba..716249c 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -68,7 +68,7 @@ ENTRY(stext)
#ifdef CONFIG_ARM_MPU
/* Calculate the size of a region covering just the kernel */
- ldr r5, =PHYS_OFFSET @ Region start: PHYS_OFFSET
+ ldr r5, =PLAT_PHYS_OFFSET @ Region start: PHYS_OFFSET
ldr r6, =(_end) @ Cover whole kernel
sub r6, r6, r5 @ Minimum size of region to map
clz r6, r6 @ Region size must be 2^N...
@@ -213,7 +213,7 @@ ENTRY(__setup_mpu)
set_region_nr r0, #MPU_RAM_REGION
isb
/* Full access from PL0, PL1, shared for CONFIG_SMP, cacheable */
- ldr r0, =PHYS_OFFSET @ RAM starts at PHYS_OFFSET
+ ldr r0, =PLAT_PHYS_OFFSET @ RAM starts at PHYS_OFFSET
ldr r5,=(MPU_AP_PL1RW_PL0RW | MPU_RGN_NORMAL)
setup_region r0, r5, r6, MPU_DATA_SIDE @ PHYS_OFFSET, shared, enabled
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index 11d59b3..32f317e 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -110,7 +110,7 @@ ENTRY(stext)
sub r4, r3, r4 @ (PHYS_OFFSET - PAGE_OFFSET)
add r8, r8, r4 @ PHYS_OFFSET
#else
- ldr r8, =PHYS_OFFSET @ always constant in this case
+ ldr r8, =PLAT_PHYS_OFFSET @ always constant in this case
#endif
/*
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 94f6b05..92f7b15 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -404,6 +404,7 @@ EXPORT_SYMBOL(dump_fpu);
unsigned long get_wchan(struct task_struct *p)
{
struct stackframe frame;
+ unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
@@ -412,9 +413,11 @@ unsigned long get_wchan(struct task_struct *p)
frame.sp = thread_saved_sp(p);
frame.lr = 0; /* recovered from the stack */
frame.pc = thread_saved_pc(p);
+ stack_page = (unsigned long)task_stack_page(p);
do {
- int ret = unwind_frame(&frame);
- if (ret < 0)
+ if (frame.sp < stack_page ||
+ frame.sp >= stack_page + THREAD_SIZE ||
+ unwind_frame(&frame) < 0)
return 0;
if (!in_sched_functions(frame.pc))
return frame.pc;
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 6a1b8a8..987a7f5 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -873,8 +873,6 @@ void __init setup_arch(char **cmdline_p)
machine_desc = mdesc;
machine_name = mdesc->name;
- setup_dma_zone(mdesc);
-
if (mdesc->reboot_mode != REBOOT_HARD)
reboot_mode = mdesc->reboot_mode;
@@ -892,6 +890,7 @@ void __init setup_arch(char **cmdline_p)
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
+ setup_dma_zone(mdesc);
sanity_check_meminfo();
arm_memblock_init(&meminfo, mdesc);
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index 00f79e5..af4e8c8 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -31,7 +31,7 @@ int notrace unwind_frame(struct stackframe *frame)
high = ALIGN(low, THREAD_SIZE);
/* check current frame pointer is within bounds */
- if (fp < (low + 12) || fp + 4 >= high)
+ if (fp < low + 12 || fp > high - 4)
return -EINVAL;
/* restore the registers from the stack frame */
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index dbf0923..6eda3bf 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -36,7 +36,13 @@
#include <asm/system_misc.h>
#include <asm/opcodes.h>
-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" };
+static const char *handler[]= {
+ "prefetch abort",
+ "data abort",
+ "address exception",
+ "interrupt",
+ "undefined instruction",
+};
void *vectors_page;
@@ -509,9 +515,10 @@ static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
int ret;
- unsigned long chunk = PAGE_SIZE;
do {
+ unsigned long chunk = min(PAGE_SIZE, end - start);
+
if (signal_pending(current)) {
struct thread_info *ti = current_thread_info();
diff --git a/arch/arm/mach-davinci/devices-da8xx.c b/arch/arm/mach-davinci/devices-da8xx.c
index c46eccb..78829c5 100644
--- a/arch/arm/mach-davinci/devices-da8xx.c
+++ b/arch/arm/mach-davinci/devices-da8xx.c
@@ -487,7 +487,7 @@ int __init da8xx_register_emac(void)
static struct resource da830_mcasp1_resources[] = {
{
- .name = "mcasp1",
+ .name = "mpu",
.start = DAVINCI_DA830_MCASP1_REG_BASE,
.end = DAVINCI_DA830_MCASP1_REG_BASE + (SZ_1K * 12) - 1,
.flags = IORESOURCE_MEM,
@@ -515,7 +515,7 @@ static struct platform_device da830_mcasp1_device = {
static struct resource da850_mcasp_resources[] = {
{
- .name = "mcasp",
+ .name = "mpu",
.start = DAVINCI_DA8XX_MCASP0_REG_BASE,
.end = DAVINCI_DA8XX_MCASP0_REG_BASE + (SZ_1K * 12) - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm355.c b/arch/arm/mach-davinci/dm355.c
index ef9ff1f..6117fc6 100644
--- a/arch/arm/mach-davinci/dm355.c
+++ b/arch/arm/mach-davinci/dm355.c
@@ -641,6 +641,7 @@ static struct platform_device dm355_edma_device = {
static struct resource dm355_asp1_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_ASP1_BASE,
.end = DAVINCI_ASP1_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
@@ -906,7 +907,7 @@ static struct davinci_gpio_platform_data dm355_gpio_platform_data = {
int __init dm355_gpio_register(void)
{
return davinci_gpio_register(dm355_gpio_resources,
- sizeof(dm355_gpio_resources),
+ ARRAY_SIZE(dm355_gpio_resources),
&dm355_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
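
The sizeof-to-ARRAY_SIZE conversions in this and the following DaVinci files
fix the same class of bug: sizeof() yields the array size in bytes, not the
element count the GPIO core expects. A condensed illustration (our example,
not from the patch; the array name is hypothetical):

    #include <linux/kernel.h>       /* ARRAY_SIZE() */
    #include <linux/ioport.h>       /* struct resource */

    static struct resource demo_gpio_resources[4];

    /* sizeof(demo_gpio_resources) == 4 * sizeof(struct resource): far too big.
     * ARRAY_SIZE(demo_gpio_resources) == 4: the element count the caller wants.
     */
    static unsigned int demo_num_resources(void)
    {
            return ARRAY_SIZE(demo_gpio_resources);
    }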
diff --git a/arch/arm/mach-davinci/dm365.c b/arch/arm/mach-davinci/dm365.c
index 1511a06..d7c6f85 100644
--- a/arch/arm/mach-davinci/dm365.c
+++ b/arch/arm/mach-davinci/dm365.c
@@ -720,7 +720,7 @@ static struct davinci_gpio_platform_data dm365_gpio_platform_data = {
int __init dm365_gpio_register(void)
{
return davinci_gpio_register(dm365_gpio_resources,
- sizeof(dm365_gpio_resources),
+ ARRAY_SIZE(dm365_gpio_resources),
&dm365_gpio_platform_data);
}
@@ -942,6 +942,7 @@ static struct platform_device dm365_edma_device = {
static struct resource dm365_asp_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_DM365_ASP0_BASE,
.end = DAVINCI_DM365_ASP0_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
diff --git a/arch/arm/mach-davinci/dm644x.c b/arch/arm/mach-davinci/dm644x.c
index 143a321..3ce4799 100644
--- a/arch/arm/mach-davinci/dm644x.c
+++ b/arch/arm/mach-davinci/dm644x.c
@@ -572,6 +572,7 @@ static struct platform_device dm644x_edma_device = {
/* DM6446 EVM uses ASP0; line-out is a pair of RCA jacks */
static struct resource dm644x_asp_resources[] = {
{
+ .name = "mpu",
.start = DAVINCI_ASP0_BASE,
.end = DAVINCI_ASP0_BASE + SZ_8K - 1,
.flags = IORESOURCE_MEM,
@@ -792,7 +793,7 @@ static struct davinci_gpio_platform_data dm644_gpio_platform_data = {
int __init dm644x_gpio_register(void)
{
return davinci_gpio_register(dm644_gpio_resources,
- sizeof(dm644_gpio_resources),
+ ARRAY_SIZE(dm644_gpio_resources),
&dm644_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-davinci/dm646x.c b/arch/arm/mach-davinci/dm646x.c
index 2a73f29..0e81fea65 100644
--- a/arch/arm/mach-davinci/dm646x.c
+++ b/arch/arm/mach-davinci/dm646x.c
@@ -621,7 +621,7 @@ static struct platform_device dm646x_edma_device = {
static struct resource dm646x_mcasp0_resources[] = {
{
- .name = "mcasp0",
+ .name = "mpu",
.start = DAVINCI_DM646X_MCASP0_REG_BASE,
.end = DAVINCI_DM646X_MCASP0_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
@@ -641,7 +641,7 @@ static struct resource dm646x_mcasp0_resources[] = {
static struct resource dm646x_mcasp1_resources[] = {
{
- .name = "mcasp1",
+ .name = "mpu",
.start = DAVINCI_DM646X_MCASP1_REG_BASE,
.end = DAVINCI_DM646X_MCASP1_REG_BASE + (SZ_1K << 1) - 1,
.flags = IORESOURCE_MEM,
@@ -769,7 +769,7 @@ static struct davinci_gpio_platform_data dm646x_gpio_platform_data = {
int __init dm646x_gpio_register(void)
{
return davinci_gpio_register(dm646x_gpio_resources,
- sizeof(dm646x_gpio_resources),
+ ARRAY_SIZE(dm646x_gpio_resources),
&dm646x_gpio_platform_data);
}
/*----------------------------------------------------------------------*/
diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c
index 9ee78f7..782f6c7 100644
--- a/arch/arm/mach-footbridge/dc21285-timer.c
+++ b/arch/arm/mach-footbridge/dc21285-timer.c
@@ -96,11 +96,12 @@ static struct irqaction footbridge_timer_irq = {
void __init footbridge_timer_init(void)
{
struct clock_event_device *ce = &ckevt_dc21285;
+ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16);
- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16);
+ clocksource_register_hz(&cksrc_dc21285, rate);
setup_irq(ce->irq, &footbridge_timer_irq);
ce->cpumask = cpumask_of(smp_processor_id());
- clockevents_config_and_register(ce, mem_fclk_21285, 0x4, 0xffffff);
+ clockevents_config_and_register(ce, rate, 0x4, 0xffffff);
}
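
The arithmetic is unchanged here: for a divisor of 16, DIV_ROUND_CLOSEST(x, 16)
is (x + 8) / 16, i.e. round-to-nearest. The real change is that the clockevent
is now registered with the timer's divided rate instead of the raw
mem_fclk_21285. A short worked check (the _POS macro name and the 50 MHz
example value are ours; the body is the simplified unsigned form of the
kernel.h helper):

    /* For unsigned operands, DIV_ROUND_CLOSEST() reduces to: */
    #define DIV_ROUND_CLOSEST_POS(x, d)     (((x) + (d) / 2) / (d))

    /* e.g. mem_fclk_21285 = 50000000 Hz:
     *   (50000000 + 8) / 16                  = 3125000
     *   DIV_ROUND_CLOSEST_POS(50000000, 16)  = 3125000
     * so the clocksource rate is unchanged, and the clockevent now uses the
     * same 3.125 MHz rate rather than the 50 MHz fclk.
     */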
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index b3d7e56..bd3bf66 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -17,12 +17,15 @@
#include <linux/clkdev.h>
#include <linux/clocksource.h>
#include <linux/dma-mapping.h>
+#include <linux/input.h>
#include <linux/io.h>
#include <linux/irqchip.h>
+#include <linux/mailbox.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>
+#include <linux/reboot.h>
#include <linux/amba/bus.h>
#include <linux/platform_device.h>
@@ -130,6 +133,24 @@ static struct platform_device highbank_cpuidle_device = {
.name = "cpuidle-calxeda",
};
+static int hb_keys_notifier(struct notifier_block *nb, unsigned long event, void *data)
+{
+ u32 key = *(u32 *)data;
+
+ if (event != 0x1000)
+ return 0;
+
+ if (key == KEY_POWER)
+ orderly_poweroff(false);
+ else if (key == 0xffff)
+ ctrl_alt_del();
+
+ return 0;
+}
+static struct notifier_block hb_keys_nb = {
+ .notifier_call = hb_keys_notifier,
+};
+
static void __init highbank_init(void)
{
struct device_node *np;
@@ -145,6 +166,8 @@ static void __init highbank_init(void)
bus_register_notifier(&platform_bus_type, &highbank_platform_nb);
bus_register_notifier(&amba_bustype, &highbank_amba_nb);
+ pl320_ipc_register_notifier(&hb_keys_nb);
+
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (psci_ops.cpu_suspend)
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 19f1652..8d972ff1 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -131,6 +131,24 @@ DT_MACHINE_START(OMAP3_GP_DT, "Generic OMAP3-GP (Flattened Device Tree)")
.dt_compat = omap3_gp_boards_compat,
.restart = omap3xxx_restart,
MACHINE_END
+
+static const char *am3517_boards_compat[] __initdata = {
+ "ti,am3517",
+ NULL,
+};
+
+DT_MACHINE_START(AM3517_DT, "Generic AM3517 (Flattened Device Tree)")
+ .reserve = omap_reserve,
+ .map_io = omap3_map_io,
+ .init_early = am35xx_init_early,
+ .init_irq = omap_intc_of_init,
+ .handle_irq = omap3_intc_handle_irq,
+ .init_machine = omap_generic_init,
+ .init_late = omap3_init_late,
+ .init_time = omap3_gptimer_timer_init,
+ .dt_compat = am3517_boards_compat,
+ .restart = omap3xxx_restart,
+MACHINE_END
#endif
#ifdef CONFIG_SOC_AM33XX
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 4ec8d82..44a59c3 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -242,12 +242,18 @@ static void __init ldp_display_init(void)
static int ldp_twl_gpio_setup(struct device *dev, unsigned gpio, unsigned ngpio)
{
+ int res;
+
/* LCD enable GPIO */
ldp_lcd_pdata.enable_gpio = gpio + 7;
/* Backlight enable GPIO */
ldp_lcd_pdata.backlight_gpio = gpio + 15;
+ res = platform_device_register(&ldp_lcd_device);
+ if (res)
+ pr_err("Unable to register LCD: %d\n", res);
+
return 0;
}
@@ -346,7 +352,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = {
static struct platform_device *ldp_devices[] __initdata = {
&ldp_gpio_keys_device,
- &ldp_lcd_device,
};
#ifdef CONFIG_OMAP_MUX
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index 58347bb..4cf1655 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -101,13 +101,51 @@ static const struct omap_dss_hwmod_data omap4_dss_hwmod_data[] __initconst = {
{ "dss_hdmi", "omapdss_hdmi", -1 },
};
+static int omap4_dsi_mux_pads(int dsi_id, unsigned lanes)
+{
+ u32 enable_mask, enable_shift;
+ u32 pipd_mask, pipd_shift;
+ u32 reg;
+
+ if (dsi_id == 0) {
+ enable_mask = OMAP4_DSI1_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI1_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI1_PIPD_MASK;
+ pipd_shift = OMAP4_DSI1_PIPD_SHIFT;
+ } else if (dsi_id == 1) {
+ enable_mask = OMAP4_DSI2_LANEENABLE_MASK;
+ enable_shift = OMAP4_DSI2_LANEENABLE_SHIFT;
+ pipd_mask = OMAP4_DSI2_PIPD_MASK;
+ pipd_shift = OMAP4_DSI2_PIPD_SHIFT;
+ } else {
+ return -ENODEV;
+ }
+
+ reg = omap4_ctrl_pad_readl(OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+ reg &= ~enable_mask;
+ reg &= ~pipd_mask;
+
+ reg |= (lanes << enable_shift) & enable_mask;
+ reg |= (lanes << pipd_shift) & pipd_mask;
+
+ omap4_ctrl_pad_writel(reg, OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_DSIPHY);
+
+ return 0;
+}
+
static int omap_dsi_enable_pads(int dsi_id, unsigned lane_mask)
{
+ if (cpu_is_omap44xx())
+ return omap4_dsi_mux_pads(dsi_id, lane_mask);
+
return 0;
}
static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
{
+ if (cpu_is_omap44xx())
+ omap4_dsi_mux_pads(dsi_id, 0);
}
static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 53f0735..e0a398c 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -183,6 +183,10 @@ static int omap_device_build_from_dt(struct platform_device *pdev)
odbfd_exit1:
kfree(hwmods);
odbfd_exit:
+ /* if data/we are at fault.. load up a fail handler */
+ if (ret)
+ pdev->dev.pm_domain = &omap_device_fail_pm_domain;
+
return ret;
}
@@ -604,6 +608,19 @@ static int _od_runtime_resume(struct device *dev)
return pm_generic_runtime_resume(dev);
}
+
+static int _od_fail_runtime_suspend(struct device *dev)
+{
+ dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
+ return -ENODEV;
+}
+
+static int _od_fail_runtime_resume(struct device *dev)
+{
+ dev_warn(dev, "%s: FIXME: missing hwmod/omap_dev info\n", __func__);
+ return -ENODEV;
+}
+
#endif
#ifdef CONFIG_SUSPEND
@@ -657,6 +674,13 @@ static int _od_resume_noirq(struct device *dev)
#define _od_resume_noirq NULL
#endif
+struct dev_pm_domain omap_device_fail_pm_domain = {
+ .ops = {
+ SET_RUNTIME_PM_OPS(_od_fail_runtime_suspend,
+ _od_fail_runtime_resume, NULL)
+ }
+};
+
struct dev_pm_domain omap_device_pm_domain = {
.ops = {
SET_RUNTIME_PM_OPS(_od_runtime_suspend, _od_runtime_resume,
diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
index 17ca1ae..78c02b3 100644
--- a/arch/arm/mach-omap2/omap_device.h
+++ b/arch/arm/mach-omap2/omap_device.h
@@ -29,6 +29,7 @@
#include "omap_hwmod.h"
extern struct dev_pm_domain omap_device_pm_domain;
+extern struct dev_pm_domain omap_device_fail_pm_domain;
/* omap_device._state values */
#define OMAP_DEVICE_STATE_UNKNOWN 0
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index e3f0eca..8a1b5e0 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -399,7 +399,7 @@ static int _set_clockactivity(struct omap_hwmod *oh, u8 clockact, u32 *v)
}
/**
- * _set_softreset: set OCP_SYSCONFIG.CLOCKACTIVITY bits in @v
+ * _set_softreset: set OCP_SYSCONFIG.SOFTRESET bit in @v
* @oh: struct omap_hwmod *
* @v: pointer to register contents to modify
*
@@ -427,6 +427,36 @@ static int _set_softreset(struct omap_hwmod *oh, u32 *v)
}
/**
+ * _clear_softreset: clear OCP_SYSCONFIG.SOFTRESET bit in @v
+ * @oh: struct omap_hwmod *
+ * @v: pointer to register contents to modify
+ *
+ * Clear the SOFTRESET bit in @v for hwmod @oh. Returns -EINVAL upon
+ * error or 0 upon success.
+ */
+static int _clear_softreset(struct omap_hwmod *oh, u32 *v)
+{
+ u32 softrst_mask;
+
+ if (!oh->class->sysc ||
+ !(oh->class->sysc->sysc_flags & SYSC_HAS_SOFTRESET))
+ return -EINVAL;
+
+ if (!oh->class->sysc->sysc_fields) {
+ WARN(1,
+ "omap_hwmod: %s: sysc_fields absent for sysconfig class\n",
+ oh->name);
+ return -EINVAL;
+ }
+
+ softrst_mask = (0x1 << oh->class->sysc->sysc_fields->srst_shift);
+
+ *v &= ~softrst_mask;
+
+ return 0;
+}
+
+/**
* _wait_softreset_complete - wait for an OCP softreset to complete
* @oh: struct omap_hwmod * to wait on
*
@@ -785,6 +815,7 @@ static int _init_interface_clks(struct omap_hwmod *oh)
pr_warning("omap_hwmod: %s: cannot clk_get interface_clk %s\n",
oh->name, os->clk);
ret = -EINVAL;
+ continue;
}
os->_clk = c;
/*
@@ -821,6 +852,7 @@ static int _init_opt_clks(struct omap_hwmod *oh)
pr_warning("omap_hwmod: %s: cannot clk_get opt_clk %s\n",
oh->name, oc->clk);
ret = -EINVAL;
+ continue;
}
oc->_clk = c;
/*
@@ -1911,6 +1943,12 @@ static int _ocp_softreset(struct omap_hwmod *oh)
ret = _set_softreset(oh, &v);
if (ret)
goto dis_opt_clks;
+
+ _write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto dis_opt_clks;
+
_write_sysconfig(v, oh);
if (oh->class->sysc->srst_udelay)
@@ -2326,38 +2364,80 @@ static int _shutdown(struct omap_hwmod *oh)
return 0;
}
+static int of_dev_find_hwmod(struct device_node *np,
+ struct omap_hwmod *oh)
+{
+ int count, i, res;
+ const char *p;
+
+ count = of_property_count_strings(np, "ti,hwmods");
+ if (count < 1)
+ return -ENODEV;
+
+ for (i = 0; i < count; i++) {
+ res = of_property_read_string_index(np, "ti,hwmods",
+ i, &p);
+ if (res)
+ continue;
+ if (!strcmp(p, oh->name)) {
+ pr_debug("omap_hwmod: dt %s[%i] uses hwmod %s\n",
+ np->name, i, oh->name);
+ return i;
+ }
+ }
+
+ return -ENODEV;
+}
+
/**
* of_dev_hwmod_lookup - look up needed hwmod from dt blob
* @np: struct device_node *
* @oh: struct omap_hwmod *
+ * @index: index of the entry found
+ * @found: struct device_node * found or NULL
*
 * Parse the dt blob and find the needed hwmod. A recursive function is
 * implemented to take care of hierarchical dt blob parsing.
- * Return: The device node on success or NULL on failure.
+ * Return: Returns 0 on success, -ENODEV when not found.
*/
-static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
- struct omap_hwmod *oh)
+static int of_dev_hwmod_lookup(struct device_node *np,
+ struct omap_hwmod *oh,
+ int *index,
+ struct device_node **found)
{
- struct device_node *np0 = NULL, *np1 = NULL;
- const char *p;
+ struct device_node *np0 = NULL;
+ int res;
+
+ res = of_dev_find_hwmod(np, oh);
+ if (res >= 0) {
+ *found = np;
+ *index = res;
+ return 0;
+ }
for_each_child_of_node(np, np0) {
- if (of_find_property(np0, "ti,hwmods", NULL)) {
- p = of_get_property(np0, "ti,hwmods", NULL);
- if (!strcmp(p, oh->name))
- return np0;
- np1 = of_dev_hwmod_lookup(np0, oh);
- if (np1)
- return np1;
+ struct device_node *fc;
+ int i;
+
+ res = of_dev_hwmod_lookup(np0, oh, &i, &fc);
+ if (res == 0) {
+ *found = fc;
+ *index = i;
+ return 0;
}
}
- return NULL;
+
+ *found = NULL;
+ *index = 0;
+
+ return -ENODEV;
}
/**
* _init_mpu_rt_base - populate the virtual address for a hwmod
* @oh: struct omap_hwmod * to locate the virtual address
* @data: (unused, caller should pass NULL)
+ * @index: index of the reg entry iospace in device tree
* @np: struct device_node * of the IP block's device node in the DT data
*
* Cache the virtual address used by the MPU to access this IP block's
@@ -2368,7 +2448,7 @@ static struct device_node *of_dev_hwmod_lookup(struct device_node *np,
* -ENXIO on absent or invalid register target address space.
*/
static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
- struct device_node *np)
+ int index, struct device_node *np)
{
struct omap_hwmod_addr_space *mem;
void __iomem *va_start = NULL;
@@ -2390,13 +2470,17 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
if (!np)
return -ENXIO;
- va_start = of_iomap(np, oh->mpu_rt_idx);
+ va_start = of_iomap(np, index + oh->mpu_rt_idx);
} else {
va_start = ioremap(mem->pa_start, mem->pa_end - mem->pa_start);
}
if (!va_start) {
- pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
+ if (mem)
+ pr_err("omap_hwmod: %s: Could not ioremap\n", oh->name);
+ else
+ pr_err("omap_hwmod: %s: Missing dt reg%i for %s\n",
+ oh->name, index, np->full_name);
return -ENXIO;
}
@@ -2422,17 +2506,29 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
*/
static int __init _init(struct omap_hwmod *oh, void *data)
{
- int r;
+ int r, index;
struct device_node *np = NULL;
if (oh->_state != _HWMOD_STATE_REGISTERED)
return 0;
- if (of_have_populated_dt())
- np = of_dev_hwmod_lookup(of_find_node_by_name(NULL, "ocp"), oh);
+ if (of_have_populated_dt()) {
+ struct device_node *bus;
+
+ bus = of_find_node_by_name(NULL, "ocp");
+ if (!bus)
+ return -ENODEV;
+
+ r = of_dev_hwmod_lookup(bus, oh, &index, &np);
+ if (r)
+ pr_debug("omap_hwmod: %s missing dt data\n", oh->name);
+ else if (np && index)
+ pr_warn("omap_hwmod: %s using broken dt data from %s\n",
+ oh->name, np->name);
+ }
if (oh->class->sysc) {
- r = _init_mpu_rt_base(oh, NULL, np);
+ r = _init_mpu_rt_base(oh, NULL, index, np);
if (r < 0) {
WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
oh->name);
@@ -3169,6 +3265,11 @@ int omap_hwmod_softreset(struct omap_hwmod *oh)
goto error;
_write_sysconfig(v, oh);
+ ret = _clear_softreset(oh, &v);
+ if (ret)
+ goto error;
+ _write_sysconfig(v, oh);
+
error:
return ret;
}
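
Taken together with the _ocp_softreset() hunk earlier in this file diff, the
omap_hwmod_softreset() change above makes the soft reset an explicit
set-then-clear pulse: SOFTRESET is written as set and then immediately written
back cleared. A condensed sketch of the resulting sequence (illustrative only,
not a function from the patch, reusing the helpers added above):

    static int softreset_pulse(struct omap_hwmod *oh, u32 v)
    {
            int ret;

            ret = _set_softreset(oh, &v);   /* set OCP_SYSCONFIG.SOFTRESET */
            if (ret)
                    return ret;
            _write_sysconfig(v, oh);        /* kick off the reset */

            ret = _clear_softreset(oh, &v); /* clear the bit again ...    */
            if (ret)
                    return ret;
            _write_sysconfig(v, oh);        /* ... so it does not stay set */

            return 0;
    }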
diff --git a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
index 56cebb0..d23c77f 100644
--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_hwmod = {
/* gpmc */
static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng_hwmod_class = {
};
static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
- { .irq = 52 },
+ { .irq = 52 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index 9e56fab..4c3b1e6 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -1943,7 +1943,8 @@ static struct omap_hwmod_class_sysconfig omap3xxx_usb_host_hs_sysc = {
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_CLOCKACTIVITY |
SYSC_HAS_SIDLEMODE | SYSC_HAS_ENAWAKEUP |
- SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE),
+ SYSC_HAS_SOFTRESET | SYSC_HAS_AUTOIDLE |
+ SYSS_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
MSTANDBY_FORCE | MSTANDBY_NO | MSTANDBY_SMART),
.sysc_fields = &omap_hwmod_sysc_type1,
@@ -2021,15 +2022,7 @@ static struct omap_hwmod omap3xxx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
};
/*
@@ -2172,7 +2165,7 @@ static struct omap_hwmod_class omap3xxx_gpmc_hwmod_class = {
};
static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
- { .irq = 20 },
+ { .irq = 20 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3006,7 +2999,7 @@ static struct omap_mmu_dev_attr mmu_isp_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
- { .irq = 24 },
+ { .irq = 24 + OMAP_INTC_START, },
{ .irq = -1 }
};
@@ -3048,7 +3041,7 @@ static struct omap_mmu_dev_attr mmu_iva_dev_attr = {
static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
- { .irq = 28 },
+ { .irq = 28 + OMAP_INTC_START, },
{ .irq = -1 }
};
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 1e5b12c..3318cae9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -2937,7 +2937,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_usb_host_hs_sysc = {
.sysc_offs = 0x0010,
.syss_offs = 0x0014,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE |
- SYSC_HAS_SOFTRESET),
+ SYSC_HAS_SOFTRESET | SYSC_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -3001,15 +3001,7 @@ static struct omap_hwmod omap44xx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
};
/*
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 9e08d699..e297d62 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -1544,7 +1544,8 @@ static struct omap_hwmod_class_sysconfig omap54xx_usb_host_hs_sysc = {
.rev_offs = 0x0000,
.sysc_offs = 0x0010,
.sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_RESET_STATUS |
- SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET),
+ SYSC_HAS_SIDLEMODE | SYSC_HAS_SOFTRESET |
+ SYSC_HAS_RESET_STATUS),
.idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
@@ -1598,15 +1599,7 @@ static struct omap_hwmod omap54xx_usb_host_hs_hwmod = {
* hence HWMOD_SWSUP_MSTANDBY
*/
- /*
- * During system boot; If the hwmod framework resets the module
- * the module will have smart idle settings; which can lead to deadlock
- * (above Errata Id:i660); so, dont reset the module during boot;
- * Use HWMOD_INIT_NO_RESET.
- */
-
- .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY |
- HWMOD_INIT_NO_RESET,
+ .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
.main_clk = "l3init_60m_fclk",
.prcm = {
.omap4 = {
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index db32d53..18f333c 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1637,7 +1637,7 @@ static struct omap_hwmod dra7xx_uart1_hwmod = {
.class = &dra7xx_uart_hwmod_class,
.clkdm_name = "l4per_clkdm",
.main_clk = "uart1_gfclk_mux",
- .flags = HWMOD_SWSUP_SIDLE_ACT,
+ .flags = HWMOD_SWSUP_SIDLE_ACT | DEBUG_OMAP2UART1_FLAGS,
.prcm = {
.omap4 = {
.clkctrl_offs = DRA7XX_CM_L4PER_UART1_CLKCTRL_OFFSET,
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 2a086e8..958cd6af 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -10,6 +10,8 @@
* published by the Free Software Foundation.
*/
+#include <mach/irqs.h>
+
#define LUBBOCK_ETH_PHYS PXA_CS3_PHYS
#define LUBBOCK_FPGA_PHYS PXA_CS2_PHYS
diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
index 0d5dd64..263b152 100644
--- a/arch/arm/mach-pxa/reset.c
+++ b/arch/arm/mach-pxa/reset.c
@@ -13,6 +13,7 @@
#include <mach/regs-ost.h>
#include <mach/reset.h>
+#include <mach/smemc.h>
unsigned int reset_status;
EXPORT_SYMBOL(reset_status);
@@ -81,6 +82,12 @@ static void do_hw_reset(void)
writel_relaxed(OSSR_M3, OSSR);
/* ... in 100 ms */
writel_relaxed(readl_relaxed(OSCR) + 368640, OSMR3);
+ /*
+	 * SDRAM hangs on watchdog reset on Marvell PXA270 (erratum 71),
+	 * so we put the SDRAM into self-refresh to prevent that
+ */
+ while (1)
+ writel_relaxed(MDREFR_SLFRSH, MDREFR);
}
void pxa_restart(enum reboot_mode mode, const char *cmd)
@@ -104,4 +111,3 @@ void pxa_restart(enum reboot_mode mode, const char *cmd)
break;
}
}
-
diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
index 0206b91..ef5557b 100644
--- a/arch/arm/mach-pxa/tosa.c
+++ b/arch/arm/mach-pxa/tosa.c
@@ -425,57 +425,57 @@ static struct platform_device tosa_power_device = {
* Tosa Keyboard
*/
static const uint32_t tosakbd_keymap[] = {
- KEY(0, 2, KEY_W),
- KEY(0, 6, KEY_K),
- KEY(0, 7, KEY_BACKSPACE),
- KEY(0, 8, KEY_P),
- KEY(1, 1, KEY_Q),
- KEY(1, 2, KEY_E),
- KEY(1, 3, KEY_T),
- KEY(1, 4, KEY_Y),
- KEY(1, 6, KEY_O),
- KEY(1, 7, KEY_I),
- KEY(1, 8, KEY_COMMA),
- KEY(2, 1, KEY_A),
- KEY(2, 2, KEY_D),
- KEY(2, 3, KEY_G),
- KEY(2, 4, KEY_U),
- KEY(2, 6, KEY_L),
- KEY(2, 7, KEY_ENTER),
- KEY(2, 8, KEY_DOT),
- KEY(3, 1, KEY_Z),
- KEY(3, 2, KEY_C),
- KEY(3, 3, KEY_V),
- KEY(3, 4, KEY_J),
- KEY(3, 5, TOSA_KEY_ADDRESSBOOK),
- KEY(3, 6, TOSA_KEY_CANCEL),
- KEY(3, 7, TOSA_KEY_CENTER),
- KEY(3, 8, TOSA_KEY_OK),
- KEY(3, 9, KEY_LEFTSHIFT),
- KEY(4, 1, KEY_S),
- KEY(4, 2, KEY_R),
- KEY(4, 3, KEY_B),
- KEY(4, 4, KEY_N),
- KEY(4, 5, TOSA_KEY_CALENDAR),
- KEY(4, 6, TOSA_KEY_HOMEPAGE),
- KEY(4, 7, KEY_LEFTCTRL),
- KEY(4, 8, TOSA_KEY_LIGHT),
- KEY(4, 10, KEY_RIGHTSHIFT),
- KEY(5, 1, KEY_TAB),
- KEY(5, 2, KEY_SLASH),
- KEY(5, 3, KEY_H),
- KEY(5, 4, KEY_M),
- KEY(5, 5, TOSA_KEY_MENU),
- KEY(5, 7, KEY_UP),
- KEY(5, 11, TOSA_KEY_FN),
- KEY(6, 1, KEY_X),
- KEY(6, 2, KEY_F),
- KEY(6, 3, KEY_SPACE),
- KEY(6, 4, KEY_APOSTROPHE),
- KEY(6, 5, TOSA_KEY_MAIL),
- KEY(6, 6, KEY_LEFT),
- KEY(6, 7, KEY_DOWN),
- KEY(6, 8, KEY_RIGHT),
+ KEY(0, 1, KEY_W),
+ KEY(0, 5, KEY_K),
+ KEY(0, 6, KEY_BACKSPACE),
+ KEY(0, 7, KEY_P),
+ KEY(1, 0, KEY_Q),
+ KEY(1, 1, KEY_E),
+ KEY(1, 2, KEY_T),
+ KEY(1, 3, KEY_Y),
+ KEY(1, 5, KEY_O),
+ KEY(1, 6, KEY_I),
+ KEY(1, 7, KEY_COMMA),
+ KEY(2, 0, KEY_A),
+ KEY(2, 1, KEY_D),
+ KEY(2, 2, KEY_G),
+ KEY(2, 3, KEY_U),
+ KEY(2, 5, KEY_L),
+ KEY(2, 6, KEY_ENTER),
+ KEY(2, 7, KEY_DOT),
+ KEY(3, 0, KEY_Z),
+ KEY(3, 1, KEY_C),
+ KEY(3, 2, KEY_V),
+ KEY(3, 3, KEY_J),
+ KEY(3, 4, TOSA_KEY_ADDRESSBOOK),
+ KEY(3, 5, TOSA_KEY_CANCEL),
+ KEY(3, 6, TOSA_KEY_CENTER),
+ KEY(3, 7, TOSA_KEY_OK),
+ KEY(3, 8, KEY_LEFTSHIFT),
+ KEY(4, 0, KEY_S),
+ KEY(4, 1, KEY_R),
+ KEY(4, 2, KEY_B),
+ KEY(4, 3, KEY_N),
+ KEY(4, 4, TOSA_KEY_CALENDAR),
+ KEY(4, 5, TOSA_KEY_HOMEPAGE),
+ KEY(4, 6, KEY_LEFTCTRL),
+ KEY(4, 7, TOSA_KEY_LIGHT),
+ KEY(4, 9, KEY_RIGHTSHIFT),
+ KEY(5, 0, KEY_TAB),
+ KEY(5, 1, KEY_SLASH),
+ KEY(5, 2, KEY_H),
+ KEY(5, 3, KEY_M),
+ KEY(5, 4, TOSA_KEY_MENU),
+ KEY(5, 6, KEY_UP),
+ KEY(5, 10, TOSA_KEY_FN),
+ KEY(6, 0, KEY_X),
+ KEY(6, 1, KEY_F),
+ KEY(6, 2, KEY_SPACE),
+ KEY(6, 3, KEY_APOSTROPHE),
+ KEY(6, 4, TOSA_KEY_MAIL),
+ KEY(6, 5, KEY_LEFT),
+ KEY(6, 6, KEY_DOWN),
+ KEY(6, 7, KEY_RIGHT),
};
static struct matrix_keymap_data tosakbd_keymap_data = {
diff --git a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
index 7eb9a10..2fddf38 100644
--- a/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
+++ b/arch/arm/mach-s3c64xx/mach-s3c64xx-dt.c
@@ -8,8 +8,6 @@
* published by the Free Software Foundation.
*/
-#include <linux/clk-provider.h>
-#include <linux/irqchip.h>
#include <linux/of_platform.h>
#include <asm/mach/arch.h>
@@ -48,15 +46,9 @@ static void __init s3c64xx_dt_map_io(void)
panic("SoC is not S3C64xx!");
}
-static void __init s3c64xx_dt_init_irq(void)
-{
- of_clk_init(NULL);
- samsung_wdt_reset_of_init();
- irqchip_init();
-};
-
static void __init s3c64xx_dt_init_machine(void)
{
+ samsung_wdt_reset_of_init();
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
@@ -79,7 +71,6 @@ DT_MACHINE_START(S3C6400_DT, "Samsung S3C64xx (Flattened Device Tree)")
/* Maintainer: Tomasz Figa <tomasz.figa@gmail.com> */
.dt_compat = s3c64xx_dt_compat,
.map_io = s3c64xx_dt_map_io,
- .init_irq = s3c64xx_dt_init_irq,
.init_machine = s3c64xx_dt_init_machine,
.restart = s3c64xx_dt_restart,
MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-armadillo800eva.c b/arch/arm/mach-shmobile/board-armadillo800eva.c
index 958e3cb..8ea87bd 100644
--- a/arch/arm/mach-shmobile/board-armadillo800eva.c
+++ b/arch/arm/mach-shmobile/board-armadillo800eva.c
@@ -483,7 +483,7 @@ static struct platform_device lcdc0_device = {
.id = 0,
.dev = {
.platform_data = &lcdc0_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -580,7 +580,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -614,6 +614,11 @@ static struct regulator_consumer_supply fixed3v3_power_consumers[] = {
REGULATOR_SUPPLY("vqmmc", "sh_mmcif"),
};
+/* Fixed 5.0V regulator used by LCD backlight */
+static struct regulator_consumer_supply fixed5v0_power_consumers[] = {
+ REGULATOR_SUPPLY("power", "pwm-backlight.0"),
+};
+
/* Fixed 3.3V regulator to be used by SDHI0 */
static struct regulator_consumer_supply vcc_sdhi0_consumers[] = {
REGULATOR_SUPPLY("vmmc", "sh_mobile_sdhi.0"),
@@ -1196,6 +1201,8 @@ static void __init eva_init(void)
regulator_register_always_on(0, "fixed-3.3V", fixed3v3_power_consumers,
ARRAY_SIZE(fixed3v3_power_consumers), 3300000);
+ regulator_register_always_on(3, "fixed-5.0V", fixed5v0_power_consumers,
+ ARRAY_SIZE(fixed5v0_power_consumers), 5000000);
pinctrl_register_mappings(eva_pinctrl_map, ARRAY_SIZE(eva_pinctrl_map));
pwm_add_table(pwm_lookup, ARRAY_SIZE(pwm_lookup));
diff --git a/arch/arm/mach-shmobile/board-bockw.c b/arch/arm/mach-shmobile/board-bockw.c
index 38611526..3c4995a 100644
--- a/arch/arm/mach-shmobile/board-bockw.c
+++ b/arch/arm/mach-shmobile/board-bockw.c
@@ -679,7 +679,7 @@ static void __init bockw_init(void)
.id = i,
.data = &rsnd_card_info[i],
.size_data = sizeof(struct asoc_simple_card_info),
- .dma_mask = ~0,
+ .dma_mask = DMA_BIT_MASK(32),
};
platform_device_register_full(&cardinfo);
diff --git a/arch/arm/mach-shmobile/board-kzm9g.c b/arch/arm/mach-shmobile/board-kzm9g.c
index fe689b7..bc40b85 100644
--- a/arch/arm/mach-shmobile/board-kzm9g.c
+++ b/arch/arm/mach-shmobile/board-kzm9g.c
@@ -334,7 +334,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-shmobile/board-lager.c b/arch/arm/mach-shmobile/board-lager.c
index a8d3ce6..e0406fd 100644
--- a/arch/arm/mach-shmobile/board-lager.c
+++ b/arch/arm/mach-shmobile/board-lager.c
@@ -245,7 +245,9 @@ static void __init lager_init(void)
{
lager_add_standard_devices();
- phy_register_fixup_for_id("r8a7790-ether-ff:01", lager_ksz8041_fixup);
+ if (IS_ENABLED(CONFIG_PHYLIB))
+ phy_register_fixup_for_id("r8a7790-ether-ff:01",
+ lager_ksz8041_fixup);
}
static const char * const lager_boards_compat_dt[] __initconst = {
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index af06753..e721d2c 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -409,7 +409,7 @@ static struct platform_device lcdc_device = {
.resource = lcdc_resources,
.dev = {
.platform_data = &lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
@@ -499,7 +499,7 @@ static struct platform_device hdmi_lcdc_device = {
.id = 1,
.dev = {
.platform_data = &hdmi_lcdc_info,
- .coherent_dma_mask = ~0,
+ .coherent_dma_mask = DMA_BIT_MASK(32),
},
};
diff --git a/arch/arm/mach-tegra/fuse.c b/arch/arm/mach-tegra/fuse.c
index 9a4e910..3a9c1f1 100644
--- a/arch/arm/mach-tegra/fuse.c
+++ b/arch/arm/mach-tegra/fuse.c
@@ -198,10 +198,12 @@ void __init tegra_init_fuse(void)
switch (tegra_chip_id) {
case TEGRA20:
tegra20_fuse_init_randomness();
+ break;
case TEGRA30:
case TEGRA114:
default:
tegra30_fuse_init_randomness();
+ break;
}
pr_info("Tegra Revision: %s SKU: %d CPU Process: %d Core Process: %d\n",
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index f6b6bfa..f61a570 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -158,13 +158,49 @@ struct dma_map_ops arm_coherent_dma_ops = {
};
EXPORT_SYMBOL(arm_coherent_dma_ops);
+static int __dma_supported(struct device *dev, u64 mask, bool warn)
+{
+ unsigned long max_dma_pfn;
+
+ /*
+ * If the mask allows for more memory than we can address,
+ * and we actually have that much memory, then we must
+ * indicate that DMA to this device is not supported.
+ */
+ if (sizeof(mask) != sizeof(dma_addr_t) &&
+ mask > (dma_addr_t)~0 &&
+ dma_to_pfn(dev, ~0) < max_pfn) {
+ if (warn) {
+ dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
+ mask);
+ dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
+ }
+ return 0;
+ }
+
+ max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
+
+ /*
+ * Translate the device's DMA mask to a PFN limit. This
+ * PFN number includes the page which we can DMA to.
+ */
+ if (dma_to_pfn(dev, mask) < max_dma_pfn) {
+ if (warn)
+ dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
+ mask,
+ dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
+ max_dma_pfn + 1);
+ return 0;
+ }
+
+ return 1;
+}
+
static u64 get_coherent_dma_mask(struct device *dev)
{
u64 mask = (u64)DMA_BIT_MASK(32);
if (dev) {
- unsigned long max_dma_pfn;
-
mask = dev->coherent_dma_mask;
/*
@@ -176,34 +212,8 @@ static u64 get_coherent_dma_mask(struct device *dev)
return 0;
}
- max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then fail the
- * allocation.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > max_dma_pfn) {
- dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
- mask);
- dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
- return 0;
- }
-
- /*
- * Now check that the mask, when translated to a PFN,
- * fits within the allowable addresses which we can
- * allocate.
- */
- if (dma_to_pfn(dev, mask) < max_dma_pfn) {
- dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
- mask,
- dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
- arm_dma_pfn_limit + 1);
+ if (!__dma_supported(dev, mask, true))
return 0;
- }
}
return mask;
@@ -1032,28 +1042,7 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
*/
int dma_supported(struct device *dev, u64 mask)
{
- unsigned long limit;
-
- /*
- * If the mask allows for more memory than we can address,
- * and we actually have that much memory, then we must
- * indicate that DMA to this device is not supported.
- */
- if (sizeof(mask) != sizeof(dma_addr_t) &&
- mask > (dma_addr_t)~0 &&
- dma_to_pfn(dev, ~0) > arm_dma_pfn_limit)
- return 0;
-
- /*
- * Translate the device's DMA mask to a PFN limit. This
- * PFN number includes the page which we can DMA to.
- */
- limit = dma_to_pfn(dev, mask);
-
- if (limit < arm_dma_pfn_limit)
- return 0;
-
- return 1;
+ return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 6d5ba9a..3387e60 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -175,16 +175,16 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
unsigned long i;
if (cache_is_vipt_nonaliasing()) {
for (i = 0; i < (1 << compound_order(page)); i++) {
- void *addr = kmap_atomic(page);
+ void *addr = kmap_atomic(page + i);
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
kunmap_atomic(addr);
}
} else {
for (i = 0; i < (1 << compound_order(page)); i++) {
- void *addr = kmap_high_get(page);
+ void *addr = kmap_high_get(page + i);
if (addr) {
__cpuc_flush_dcache_area(addr, PAGE_SIZE);
- kunmap_high(page);
+ kunmap_high(page + i);
}
}
}
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 3e8f106e..1f7b19a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -229,7 +229,7 @@ void __init setup_dma_zone(const struct machine_desc *mdesc)
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
- arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
+ arm_dma_limit = __pv_phys_offset + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
arm_dma_pfn_limit = arm_dma_limit >> PAGE_SHIFT;
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 83e4f95..8550123 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -96,7 +96,7 @@ static int remap_pte_fn(pte_t *ptep, pgtable_t token, unsigned long addr,
struct remap_data *info = data;
struct page *page = info->pages[info->index++];
unsigned long pfn = page_to_pfn(page);
- pte_t pte = pfn_pte(pfn, info->prot);
+ pte_t pte = pte_mkspecial(pfn_pte(pfn, info->prot));
if (map_foreign_page(pfn, info->fgmfn, info->domid))
return -EFAULT;
@@ -224,10 +224,10 @@ static int __init xen_guest_init(void)
}
if (of_address_to_resource(node, GRANT_TABLE_PHYSADDR, &res))
return 0;
- xen_hvm_resume_frames = res.start >> PAGE_SHIFT;
+ xen_hvm_resume_frames = res.start;
xen_events_irq = irq_of_parse_and_map(node, 0);
pr_info("Xen %s support found, events_irq=%d gnttab_frame_pfn=%lx\n",
- version, xen_events_irq, xen_hvm_resume_frames);
+ version, xen_events_irq, (xen_hvm_resume_frames >> PAGE_SHIFT));
xen_domain_type = XEN_HVM_DOMAIN;
xen_setup_features();
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index 2820f1a..dde3fc9 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -23,25 +23,21 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
}
static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
size_t size, enum dma_data_direction dir,
struct dma_attrs *attrs)
{
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
}
static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
}
static inline void xen_dma_sync_single_for_device(struct device *hwdev,
dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
}
#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index 6777a21..6a8928b 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -214,31 +214,29 @@ static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
{
int err, len, type, disabled = !ctrl.enabled;
- if (disabled) {
- len = 0;
- type = HW_BREAKPOINT_EMPTY;
- } else {
- err = arch_bp_generic_fields(ctrl, &len, &type);
- if (err)
- return err;
-
- switch (note_type) {
- case NT_ARM_HW_BREAK:
- if ((type & HW_BREAKPOINT_X) != type)
- return -EINVAL;
- break;
- case NT_ARM_HW_WATCH:
- if ((type & HW_BREAKPOINT_RW) != type)
- return -EINVAL;
- break;
- default:
+ attr->disabled = disabled;
+ if (disabled)
+ return 0;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_ARM_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
return -EINVAL;
- }
+ break;
+ case NT_ARM_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
}
attr->bp_len = len;
attr->bp_type = type;
- attr->disabled = disabled;
return 0;
}
diff --git a/arch/avr32/boards/favr-32/setup.c b/arch/avr32/boards/favr-32/setup.c
index 7b1f2cd..1f12149 100644
--- a/arch/avr32/boards/favr-32/setup.c
+++ b/arch/avr32/boards/favr-32/setup.c
@@ -298,8 +298,10 @@ static int __init set_abdac_rate(struct platform_device *pdev)
*/
retval = clk_round_rate(pll1,
CONFIG_BOARD_FAVR32_ABDAC_RATE * 256 * 16);
- if (retval < 0)
+ if (retval <= 0) {
+ retval = -EINVAL;
goto out_abdac;
+ }
retval = clk_set_rate(pll1, retval);
if (retval != 0)
diff --git a/arch/avr32/configs/atngw100_defconfig b/arch/avr32/configs/atngw100_defconfig
index d5aff36..4733e38 100644
--- a/arch/avr32/configs/atngw100_defconfig
+++ b/arch/avr32/configs/atngw100_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd100_defconfig b/arch/avr32/configs/atngw100_evklcd100_defconfig
index 4abcf43..1be0ee3 100644
--- a/arch/avr32/configs/atngw100_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd100_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_evklcd101_defconfig b/arch/avr32/configs/atngw100_evklcd101_defconfig
index 18f3fa0..796e536 100644
--- a/arch/avr32/configs/atngw100_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100_evklcd101_defconfig
@@ -60,7 +60,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100_mrmt_defconfig b/arch/avr32/configs/atngw100_mrmt_defconfig
index 06e389c..9a57da4 100644
--- a/arch/avr32/configs/atngw100_mrmt_defconfig
+++ b/arch/avr32/configs/atngw100_mrmt_defconfig
@@ -48,7 +48,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_defconfig b/arch/avr32/configs/atngw100mkii_defconfig
index 2518a13..97fe1b3 100644
--- a/arch/avr32/configs/atngw100mkii_defconfig
+++ b/arch/avr32/configs/atngw100mkii_defconfig
@@ -59,7 +59,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
index 245ef6b..a176d24 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd100_defconfig
@@ -62,7 +62,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
index fa6cbac..d1bf6dcf 100644
--- a/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
+++ b/arch/avr32/configs/atngw100mkii_evklcd101_defconfig
@@ -61,7 +61,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1002_defconfig b/arch/avr32/configs/atstk1002_defconfig
index bbd5131..2813dd2 100644
--- a/arch/avr32/configs/atstk1002_defconfig
+++ b/arch/avr32/configs/atstk1002_defconfig
@@ -53,7 +53,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1003_defconfig b/arch/avr32/configs/atstk1003_defconfig
index c1cd726..f8ff3a3 100644
--- a/arch/avr32/configs/atstk1003_defconfig
+++ b/arch/avr32/configs/atstk1003_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1004_defconfig b/arch/avr32/configs/atstk1004_defconfig
index 754ae56..992228e 100644
--- a/arch/avr32/configs/atstk1004_defconfig
+++ b/arch/avr32/configs/atstk1004_defconfig
@@ -42,7 +42,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/atstk1006_defconfig b/arch/avr32/configs/atstk1006_defconfig
index 58589d8..b8e698b 100644
--- a/arch/avr32/configs/atstk1006_defconfig
+++ b/arch/avr32/configs/atstk1006_defconfig
@@ -54,7 +54,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/favr-32_defconfig b/arch/avr32/configs/favr-32_defconfig
index c90fbf6..07bed3f 100644
--- a/arch/avr32/configs/favr-32_defconfig
+++ b/arch/avr32/configs/favr-32_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/hammerhead_defconfig b/arch/avr32/configs/hammerhead_defconfig
index ba7c31e..18db853 100644
--- a/arch/avr32/configs/hammerhead_defconfig
+++ b/arch/avr32/configs/hammerhead_defconfig
@@ -58,7 +58,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/configs/merisc_defconfig b/arch/avr32/configs/merisc_defconfig
index 65de443..91df6b2 100644
--- a/arch/avr32/configs/merisc_defconfig
+++ b/arch/avr32/configs/merisc_defconfig
@@ -46,7 +46,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_CONCAT=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_CFI=y
diff --git a/arch/avr32/configs/mimc200_defconfig b/arch/avr32/configs/mimc200_defconfig
index 0a8bfdc..d630e08 100644
--- a/arch/avr32/configs/mimc200_defconfig
+++ b/arch/avr32/configs/mimc200_defconfig
@@ -49,7 +49,6 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
-CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CMDLINE_PARTS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index 12f828a..d0f771b 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -59,7 +59,7 @@ static irqreturn_t timer_interrupt(int irq, void *dev_id)
static struct irqaction timer_irqaction = {
.handler = timer_interrupt,
/* Oprofile uses the same irq as the timer, so allow it to be shared */
- .flags = IRQF_TIMER | IRQF_DISABLED | IRQF_SHARED,
+ .flags = IRQF_TIMER | IRQF_SHARED,
.name = "avr32_comparator",
};
diff --git a/arch/avr32/mach-at32ap/pm.c b/arch/avr32/mach-at32ap/pm.c
index 32d680e..db19084 100644
--- a/arch/avr32/mach-at32ap/pm.c
+++ b/arch/avr32/mach-at32ap/pm.c
@@ -181,7 +181,7 @@ static const struct platform_suspend_ops avr32_pm_ops = {
.enter = avr32_pm_enter,
};
-static unsigned long avr32_pm_offset(void *symbol)
+static unsigned long __init avr32_pm_offset(void *symbol)
{
extern u8 pm_exception[];
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index f0e2784..2f9b751 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -125,42 +125,38 @@ flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vma
void mark_rodata_ro(void);
#endif
-#ifdef CONFIG_PA8X00
-/* Only pa8800, pa8900 needs this */
-
#include <asm/kmap_types.h>
#define ARCH_HAS_KMAP
-void kunmap_parisc(void *addr);
-
static inline void *kmap(struct page *page)
{
might_sleep();
+ flush_dcache_page(page);
return page_address(page);
}
static inline void kunmap(struct page *page)
{
- kunmap_parisc(page_address(page));
+ flush_kernel_dcache_page_addr(page_address(page));
}
static inline void *kmap_atomic(struct page *page)
{
pagefault_disable();
+ flush_dcache_page(page);
return page_address(page);
}
static inline void __kunmap_atomic(void *addr)
{
- kunmap_parisc(addr);
+ flush_kernel_dcache_page_addr(addr);
pagefault_enable();
}
#define kmap_atomic_prot(page, prot) kmap_atomic(page)
#define kmap_atomic_pfn(pfn) kmap_atomic(pfn_to_page(pfn))
#define kmap_atomic_to_page(ptr) virt_to_page(ptr)
-#endif
#endif /* _PARISC_CACHEFLUSH_H */
diff --git a/arch/parisc/include/asm/page.h b/arch/parisc/include/asm/page.h
index b7adb2a..c53fc63 100644
--- a/arch/parisc/include/asm/page.h
+++ b/arch/parisc/include/asm/page.h
@@ -28,9 +28,8 @@ struct page;
void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *pg);
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg);
+#define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
+#define copy_user_page(vto, vfrom, vaddr, page) copy_page_asm(vto, vfrom)
/* #define CONFIG_PARISC_TMPALIAS */
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index c035673..a725455 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -388,41 +388,6 @@ void flush_kernel_dcache_page_addr(void *addr)
}
EXPORT_SYMBOL(flush_kernel_dcache_page_addr);
-void clear_user_page(void *vto, unsigned long vaddr, struct page *page)
-{
- clear_page_asm(vto);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(clear_user_page);
-
-void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
- struct page *pg)
-{
- /* Copy using kernel mapping. No coherency is needed
- (all in kmap/kunmap) on machines that don't support
- non-equivalent aliasing. However, the `from' page
- needs to be flushed before it can be accessed through
- the kernel mapping. */
- preempt_disable();
- flush_dcache_page_asm(__pa(vfrom), vaddr);
- preempt_enable();
- copy_page_asm(vto, vfrom);
- if (!parisc_requires_coherency())
- flush_kernel_dcache_page_asm(vto);
-}
-EXPORT_SYMBOL(copy_user_page);
-
-#ifdef CONFIG_PA8X00
-
-void kunmap_parisc(void *addr)
-{
- if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(addr);
-}
-EXPORT_SYMBOL(kunmap_parisc);
-#endif
-
void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
{
unsigned long flags;
diff --git a/arch/powerpc/boot/dts/mpc5121.dtsi b/arch/powerpc/boot/dts/mpc5121.dtsi
index bd14c00..2d7cb04 100644
--- a/arch/powerpc/boot/dts/mpc5121.dtsi
+++ b/arch/powerpc/boot/dts/mpc5121.dtsi
@@ -77,7 +77,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
- #interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; /* 66 MHz ips bus */
diff --git a/arch/powerpc/boot/dts/mpc5125twr.dts b/arch/powerpc/boot/dts/mpc5125twr.dts
index 4177b62..a618dfc 100644
--- a/arch/powerpc/boot/dts/mpc5125twr.dts
+++ b/arch/powerpc/boot/dts/mpc5125twr.dts
@@ -58,7 +58,6 @@
compatible = "fsl,mpc5121-immr";
#address-cells = <1>;
#size-cells = <1>;
- #interrupt-cells = <2>;
ranges = <0x0 0x80000000 0x400000>;
reg = <0x80000000 0x400000>;
bus-frequency = <66000000>; // 66 MHz ips bus
@@ -189,6 +188,10 @@
reg = <0xA000 0x1000>;
};
+	// The USB1 port is disabled.
+	// TODO: correct the pinmux config and fix the USB3320 ULPI
+	// dependency before re-enabling it.
usb@3000 {
compatible = "fsl,mpc5121-usb2-dr";
reg = <0x3000 0x400>;
@@ -197,6 +200,7 @@
interrupts = <43 0x8>;
dr_mode = "host";
phy_type = "ulpi";
+ status = "disabled";
};
// 5125 PSCs are not 52xx or 5121 PSC compatible
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig
index 69b57da..0b88c7b 100644
--- a/arch/powerpc/configs/52xx/cm5200_defconfig
+++ b/arch/powerpc/configs/52xx/cm5200_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
@@ -71,6 +70,8 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig
index f3638ae..104a332 100644
--- a/arch/powerpc/configs/52xx/lite5200b_defconfig
+++ b/arch/powerpc/configs/52xx/lite5200b_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_LITE5200=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_SPARSE_IRQ=y
@@ -59,6 +58,8 @@ CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MPC=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig
index 0c7de96..0d13ad7 100644
--- a/arch/powerpc/configs/52xx/motionpro_defconfig
+++ b/arch/powerpc/configs/52xx/motionpro_defconfig
@@ -12,7 +12,6 @@ CONFIG_EXPERT=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SPARSE_IRQ=y
CONFIG_PM=y
# CONFIG_PCI is not set
@@ -84,6 +83,8 @@ CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig
index 22e7195..430aa18 100644
--- a/arch/powerpc/configs/52xx/pcm030_defconfig
+++ b/arch/powerpc/configs/52xx/pcm030_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_HZ_100=y
@@ -87,6 +86,8 @@ CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
CONFIG_USB_STORAGE=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_PCF8563=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=m
CONFIG_EXT3_FS=m
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig
index 716a37b..7af4c5b 100644
--- a/arch/powerpc/configs/52xx/tqm5200_defconfig
+++ b/arch/powerpc/configs/52xx/tqm5200_defconfig
@@ -17,7 +17,6 @@ CONFIG_PPC_MPC52xx=y
CONFIG_PPC_MPC5200_SIMPLE=y
CONFIG_PPC_MPC5200_BUGFIX=y
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_PM=y
# CONFIG_PCI is not set
CONFIG_NET=y
@@ -86,6 +85,8 @@ CONFIG_USB_STORAGE=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 6640a35..8b682d1c 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -15,7 +15,6 @@ CONFIG_PPC_MEDIA5200=y
CONFIG_PPC_MPC5200_BUGFIX=y
CONFIG_PPC_MPC5200_LPBFIFO=m
# CONFIG_PPC_PMAC is not set
-CONFIG_PPC_BESTCOMM=y
CONFIG_SIMPLE_GPIO=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -125,6 +124,8 @@ CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_RTC_DRV_DS1374=y
CONFIG_RTC_DRV_PCF8563=m
+CONFIG_DMADEVICES=y
+CONFIG_PPC_BESTCOMM=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig
index bd8a6f7..cec044a 100644
--- a/arch/powerpc/configs/pasemi_defconfig
+++ b/arch/powerpc/configs/pasemi_defconfig
@@ -2,7 +2,6 @@ CONFIG_PPC64=y
CONFIG_ALTIVEC=y
CONFIG_SMP=y
CONFIG_NR_CPUS=2
-CONFIG_EXPERIMENTAL=y
CONFIG_SYSVIPC=y
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
@@ -45,8 +44,9 @@ CONFIG_INET_AH=y
CONFIG_INET_ESP=y
# CONFIG_IPV6 is not set
CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_MTD=y
-CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_SLRAM=y
CONFIG_MTD_PHRAM=y
@@ -88,7 +88,6 @@ CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
CONFIG_NETDEVICES=y
CONFIG_DUMMY=y
-CONFIG_MII=y
CONFIG_TIGON3=y
CONFIG_E1000=y
CONFIG_PASEMI_MAC=y
@@ -174,8 +173,8 @@ CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_CRC_CCITT=y
CONFIG_PRINTK_TIME=y
-CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_FS=y
+CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_KERNEL=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 894662a..243ce69 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -284,7 +284,7 @@ do_kvm_##n: \
subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \
beq- 1f; \
ld r1,PACAKSAVE(r13); /* kernel stack to use */ \
-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \
+1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \
blt+ cr1,3f; /* abort if it is */ \
li r1,(n); /* will be reloaded later */ \
sth r1,PACA_TRAP_SAVE(r13); \
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 4a594b7..bc23b1b 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -192,6 +192,10 @@ extern void kvmppc_load_up_vsx(void);
extern u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst);
extern ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst);
extern int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd);
+extern void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
+ struct kvm_vcpu *vcpu);
+extern void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
+ struct kvmppc_book3s_shadow_vcpu *svcpu);
static inline struct kvmppc_vcpu_book3s *to_book3s(struct kvm_vcpu *vcpu)
{
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 0bd9348..192917d 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,7 @@ struct kvmppc_host_state {
ulong vmhandler;
ulong scratch0;
ulong scratch1;
+ ulong scratch2;
u8 in_guest;
u8 restore_hid5;
u8 napping;
@@ -106,6 +107,7 @@ struct kvmppc_host_state {
};
struct kvmppc_book3s_shadow_vcpu {
+ bool in_use;
ulong gpr[14];
u32 cr;
u32 xer;
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 033c06b..7bdcf34 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -720,13 +720,13 @@ int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe,
int64_t opal_pci_poll(uint64_t phb_id);
int64_t opal_return_cpu(void);
-int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, uint64_t *val);
+int64_t opal_xscom_read(uint32_t gcid, uint32_t pcb_addr, __be64 *val);
int64_t opal_xscom_write(uint32_t gcid, uint32_t pcb_addr, uint64_t val);
int64_t opal_lpc_write(uint32_t chip_id, enum OpalLPCAddressType addr_type,
uint32_t addr, uint32_t data, uint32_t sz);
int64_t opal_lpc_read(uint32_t chip_id, enum OpalLPCAddressType addr_type,
- uint32_t addr, uint32_t *data, uint32_t sz);
+ uint32_t addr, __be32 *data, uint32_t sz);
int64_t opal_validate_flash(uint64_t buffer, uint32_t *size, uint32_t *result);
int64_t opal_manage_flash(uint8_t op);
int64_t opal_update_flash(uint64_t blk_list);
diff --git a/arch/powerpc/include/asm/pgalloc-32.h b/arch/powerpc/include/asm/pgalloc-32.h
index 27b2386..842846c 100644
--- a/arch/powerpc/include/asm/pgalloc-32.h
+++ b/arch/powerpc/include/asm/pgalloc-32.h
@@ -84,10 +84,8 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */
diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
index 6940128..4b0be20 100644
--- a/arch/powerpc/include/asm/pgalloc-64.h
+++ b/arch/powerpc/include/asm/pgalloc-64.h
@@ -148,11 +148,9 @@ static inline void pgtable_free_tlb(struct mmu_gather *tlb,
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
unsigned long address)
{
- struct page *page = page_address(table);
-
tlb_flush_pgtable(tlb, address);
- pgtable_page_dtor(page);
- pgtable_free_tlb(tlb, page, 0);
+ pgtable_page_dtor(table);
+ pgtable_free_tlb(tlb, page_address(table), 0);
}
#else /* if CONFIG_PPC_64K_PAGES */
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 9ee1261..aace905 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -35,7 +35,7 @@ extern void giveup_vsx(struct task_struct *);
extern void enable_kernel_spe(void);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
-extern void switch_booke_debug_regs(struct thread_struct *new_thread);
+extern void switch_booke_debug_regs(struct debug_reg *new_debug);
#ifndef CONFIG_SMP
extern void discard_lazy_cpu_state(void);
diff --git a/arch/powerpc/include/asm/unaligned.h b/arch/powerpc/include/asm/unaligned.h
index 5f1b1e3..8296381 100644
--- a/arch/powerpc/include/asm/unaligned.h
+++ b/arch/powerpc/include/asm/unaligned.h
@@ -4,13 +4,18 @@
#ifdef __KERNEL__
/*
- * The PowerPC can do unaligned accesses itself in big endian mode.
+ * The PowerPC can do unaligned accesses itself based on its endian mode.
*/
#include <linux/unaligned/access_ok.h>
#include <linux/unaligned/generic.h>
+#ifdef __LITTLE_ENDIAN__
+#define get_unaligned __get_unaligned_le
+#define put_unaligned __put_unaligned_le
+#else
#define get_unaligned __get_unaligned_be
#define put_unaligned __put_unaligned_be
+#endif
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_UNALIGNED_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 2ea5cc0..d3de010 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -576,6 +576,7 @@ int main(void)
HSTATE_FIELD(HSTATE_VMHANDLER, vmhandler);
HSTATE_FIELD(HSTATE_SCRATCH0, scratch0);
HSTATE_FIELD(HSTATE_SCRATCH1, scratch1);
+ HSTATE_FIELD(HSTATE_SCRATCH2, scratch2);
HSTATE_FIELD(HSTATE_IN_GUEST, in_guest);
HSTATE_FIELD(HSTATE_RESTORE_HID5, restore_hid5);
HSTATE_FIELD(HSTATE_NAPPING, napping);
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index 779a78c..11c1d06 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -124,15 +124,15 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
{
unsigned long addr;
- const u32 *basep, *sizep;
+ const __be32 *basep, *sizep;
unsigned int rtas_start = 0, rtas_end = 0;
basep = of_get_property(rtas.dev, "linux,rtas-base", NULL);
sizep = of_get_property(rtas.dev, "rtas-size", NULL);
if (basep && sizep) {
- rtas_start = *basep;
- rtas_end = *basep + *sizep;
+ rtas_start = be32_to_cpup(basep);
+ rtas_end = rtas_start + be32_to_cpup(sizep);
}
for (addr = begin; addr < end; addr += PAGE_SIZE) {
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 2ae41ab..4f0946d 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -80,6 +80,7 @@ END_FTR_SECTION(0, 1)
* of the function that the cpu should jump to to continue
* initialization.
*/
+ .balign 8
.globl __secondary_hold_spinloop
__secondary_hold_spinloop:
.llong 0x0
@@ -470,6 +471,7 @@ _STATIC(__after_prom_start)
mtctr r8
bctr
+.balign 8
p_end: .llong _end - _stext
4: /* Now copy the rest of the kernel up to _end */
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 88a7fb4..75d4f73 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -148,7 +148,7 @@ void __init reserve_crashkernel(void)
* a small SLB (128MB) since the crash kernel needs to place
* itself and some stacks to be in the first segment.
*/
- crashk_res.start = min(0x80000000ULL, (ppc64_rma_size / 2));
+ crashk_res.start = min(0x8000000ULL, (ppc64_rma_size / 2));
#else
crashk_res.start = KDUMP_KERNELBASE;
#endif
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index e59caf8..64bf8db 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -246,8 +246,8 @@ _GLOBAL(__bswapdi2)
or r3,r7,r9
blr
-#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
+#ifdef CONFIG_PPC_EARLY_DEBUG_BOOTX
_GLOBAL(rmci_on)
sync
isync
@@ -277,6 +277,9 @@ _GLOBAL(rmci_off)
isync
sync
blr
+#endif /* CONFIG_PPC_EARLY_DEBUG_BOOTX */
+
+#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
* Do an IO access in real mode
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 3386d8a..4a96556 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -339,7 +339,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread)
#endif
}
-static void prime_debug_regs(struct thread_struct *thread)
+static void prime_debug_regs(struct debug_reg *debug)
{
/*
* We could have inherited MSR_DE from userspace, since
@@ -348,22 +348,22 @@ static void prime_debug_regs(struct thread_struct *thread)
*/
mtmsr(mfmsr() & ~MSR_DE);
- mtspr(SPRN_IAC1, thread->debug.iac1);
- mtspr(SPRN_IAC2, thread->debug.iac2);
+ mtspr(SPRN_IAC1, debug->iac1);
+ mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
- mtspr(SPRN_IAC3, thread->debug.iac3);
- mtspr(SPRN_IAC4, thread->debug.iac4);
+ mtspr(SPRN_IAC3, debug->iac3);
+ mtspr(SPRN_IAC4, debug->iac4);
#endif
- mtspr(SPRN_DAC1, thread->debug.dac1);
- mtspr(SPRN_DAC2, thread->debug.dac2);
+ mtspr(SPRN_DAC1, debug->dac1);
+ mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
- mtspr(SPRN_DVC1, thread->debug.dvc1);
- mtspr(SPRN_DVC2, thread->debug.dvc2);
+ mtspr(SPRN_DVC1, debug->dvc1);
+ mtspr(SPRN_DVC2, debug->dvc2);
#endif
- mtspr(SPRN_DBCR0, thread->debug.dbcr0);
- mtspr(SPRN_DBCR1, thread->debug.dbcr1);
+ mtspr(SPRN_DBCR0, debug->dbcr0);
+ mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
- mtspr(SPRN_DBCR2, thread->debug.dbcr2);
+ mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}
/*
@@ -371,11 +371,11 @@ static void prime_debug_regs(struct thread_struct *thread)
* debug registers, set the debug registers from the values
* stored in the new thread.
*/
-void switch_booke_debug_regs(struct thread_struct *new_thread)
+void switch_booke_debug_regs(struct debug_reg *new_debug)
{
if ((current->thread.debug.dbcr0 & DBCR0_IDM)
- || (new_thread->debug.dbcr0 & DBCR0_IDM))
- prime_debug_regs(new_thread);
+ || (new_debug->dbcr0 & DBCR0_IDM))
+ prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else /* !CONFIG_PPC_ADV_DEBUG_REGS */
@@ -683,7 +683,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
- switch_booke_debug_regs(&new->thread);
+ switch_booke_debug_regs(&new->thread.debug);
#else
/*
* For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index 75fb404..2e3d2bf 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -1555,7 +1555,7 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&tmp, &child->thread.fp_state.fpr,
+ memcpy(&tmp, &child->thread.TS_FPR(fpidx),
sizeof(long));
else
tmp = child->thread.fp_state.fpscr;
@@ -1588,7 +1588,7 @@ long arch_ptrace(struct task_struct *child, long request,
flush_fp_to_thread(child);
if (fpidx < (PT_FPSCR - PT_FPR0))
- memcpy(&child->thread.fp_state.fpr, &data,
+ memcpy(&child->thread.TS_FPR(fpidx), &data,
sizeof(long));
else
child->thread.fp_state.fpscr = data;
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index febc804..bc76cc6 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -479,7 +479,7 @@ void __init smp_setup_cpu_maps(void)
if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR) &&
(dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
- const unsigned int *ireg;
+ const __be32 *ireg;
num_addr_cell = of_n_addr_cells(dn);
num_size_cell = of_n_size_cells(dn);
@@ -489,7 +489,7 @@ void __init smp_setup_cpu_maps(void)
if (!ireg)
goto out;
- maxcpus = ireg[num_addr_cell + num_size_cell];
+ maxcpus = be32_to_cpup(ireg + num_addr_cell + num_size_cell);
/* Double maxcpus for processors which have SMT capability */
if (cpu_has_feature(CPU_FTR_SMT))
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index a3b64f3..c1cf4a1 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -580,7 +580,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
int cpu_to_core_id(int cpu)
{
struct device_node *np;
- const int *reg;
+ const __be32 *reg;
int id = -1;
np = of_get_cpu_node(cpu, NULL);
@@ -591,7 +591,7 @@ int cpu_to_core_id(int cpu)
if (!reg)
goto out;
- id = *reg;
+ id = be32_to_cpup(reg);
out:
of_node_put(np);
return id;
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index f3ff587..c5d1484 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -469,11 +469,14 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
slb_v = vcpu->kvm->arch.vrma_slb_v;
}
+ preempt_disable();
/* Find the HPTE in the hash table */
index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
HPTE_V_VALID | HPTE_V_ABSENT);
- if (index < 0)
+ if (index < 0) {
+ preempt_enable();
return -ENOENT;
+ }
hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
v = hptep[0] & ~HPTE_V_HVLOCK;
gr = kvm->arch.revmap[index].guest_rpte;
@@ -481,6 +484,7 @@ static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
/* Unlock the HPTE */
asm volatile("lwsync" : : : "memory");
hptep[0] = v;
+ preempt_enable();
gpte->eaddr = eaddr;
gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
@@ -665,6 +669,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
return -EFAULT;
} else {
page = pages[0];
+ pfn = page_to_pfn(page);
if (PageHuge(page)) {
page = compound_head(page);
pte_size <<= compound_order(page);
@@ -689,7 +694,6 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
}
rcu_read_unlock_sched();
}
- pfn = page_to_pfn(page);
}
ret = -EFAULT;
@@ -707,8 +711,14 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
}
- /* Set the HPTE to point to pfn */
- r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
+ /*
+ * Set the HPTE to point to pfn.
+ * Since the pfn is at PAGE_SIZE granularity, make sure we
+ * don't mask out lower-order bits if psize < PAGE_SIZE.
+ */
+ if (psize < PAGE_SIZE)
+ psize = PAGE_SIZE;
+ r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
if (hpte_is_writable(r) && !write_ok)
r = hpte_make_readonly(r);
ret = RESUME_GUEST;
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 072287f..b51d5db 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -131,8 +131,9 @@ static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long flags;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
vc->preempt_tb != TB_NIL) {
vc->stolen_tb += mftb() - vc->preempt_tb;
@@ -143,19 +144,20 @@ static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
vcpu->arch.busy_preempt = TB_NIL;
}
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
struct kvmppc_vcore *vc = vcpu->arch.vcore;
+ unsigned long flags;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
vc->preempt_tb = mftb();
if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
vcpu->arch.busy_preempt = mftb();
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}
static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
@@ -486,11 +488,11 @@ static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
*/
if (vc->vcore_state != VCORE_INACTIVE &&
vc->runner->arch.run_task != current) {
- spin_lock(&vc->runner->arch.tbacct_lock);
+ spin_lock_irq(&vc->runner->arch.tbacct_lock);
p = vc->stolen_tb;
if (vc->preempt_tb != TB_NIL)
p += now - vc->preempt_tb;
- spin_unlock(&vc->runner->arch.tbacct_lock);
+ spin_unlock_irq(&vc->runner->arch.tbacct_lock);
} else {
p = vc->stolen_tb;
}
@@ -512,10 +514,10 @@ static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
core_stolen = vcore_stolen_time(vc, now);
stolen = core_stolen - vcpu->arch.stolen_logged;
vcpu->arch.stolen_logged = core_stolen;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irq(&vcpu->arch.tbacct_lock);
stolen += vcpu->arch.busy_stolen;
vcpu->arch.busy_stolen = 0;
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irq(&vcpu->arch.tbacct_lock);
if (!dt || !vpa)
return;
memset(dt, 0, sizeof(struct dtl_entry));
@@ -589,7 +591,9 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
if (list_empty(&vcpu->kvm->arch.rtas_tokens))
return RESUME_HOST;
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
rc = kvmppc_rtas_hcall(vcpu);
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
if (rc == -ENOENT)
return RESUME_HOST;
@@ -1115,13 +1119,13 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
return;
- spin_lock(&vcpu->arch.tbacct_lock);
+ spin_lock_irq(&vcpu->arch.tbacct_lock);
now = mftb();
vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
vcpu->arch.stolen_logged;
vcpu->arch.busy_preempt = now;
vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
- spin_unlock(&vcpu->arch.tbacct_lock);
+ spin_unlock_irq(&vcpu->arch.tbacct_lock);
--vc->n_runnable;
list_del(&vcpu->arch.run_list);
}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 9c51544..8689e2e 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -225,6 +225,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
is_io = pa & (HPTE_R_I | HPTE_R_W);
pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
pa &= PAGE_MASK;
+ pa |= gpa & ~PAGE_MASK;
} else {
/* Translate to host virtual address */
hva = __gfn_to_hva_memslot(memslot, gfn);
@@ -238,13 +239,13 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
ptel = hpte_make_readonly(ptel);
is_io = hpte_cache_bits(pte_val(pte));
pa = pte_pfn(pte) << PAGE_SHIFT;
+ pa |= hva & (pte_size - 1);
+ pa |= gpa & ~PAGE_MASK;
}
}
if (pte_size < psize)
return H_PARAMETER;
- if (pa && pte_size > psize)
- pa |= gpa & (pte_size - 1);
ptel &= ~(HPTE_R_PP0 - psize);
ptel |= pa;
@@ -749,6 +750,10 @@ static int slb_base_page_shift[4] = {
20, /* 1M, unsupported */
};
+/* When called from virtmode, this function should be protected by
+ * preempt_disable(); otherwise, holding HPTE_V_HVLOCK can trigger
+ * a deadlock.
+ */
long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
unsigned long valid)
{
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index bc8de75..be4fa04a 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -153,7 +153,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
13: b machine_check_fwnmi
-
/*
* We come in here when wakened from nap mode on a secondary hw thread.
* Relocation is off and most register values are lost.
@@ -224,6 +223,11 @@ kvm_start_guest:
/* Clear our vcpu pointer so we don't come back in early */
li r0, 0
std r0, HSTATE_KVM_VCPU(r13)
+ /*
+ * Make sure we clear HSTATE_KVM_VCPU(r13) before incrementing
+ * the nap_count, because once the increment to nap_count is
+ * visible we could be given another vcpu.
+ */
lwsync
/* Clear any pending IPI - we're an offline thread */
ld r5, HSTATE_XICS_PHYS(r13)
@@ -241,7 +245,6 @@ kvm_start_guest:
/* increment the nap count and then go to nap mode */
ld r4, HSTATE_KVM_VCORE(r13)
addi r4, r4, VCORE_NAP_COUNT
- lwsync /* make previous updates visible */
51: lwarx r3, 0, r4
addi r3, r3, 1
stwcx. r3, 0, r4
@@ -751,15 +754,14 @@ kvmppc_interrupt_hv:
* guest CR, R12 saved in shadow VCPU SCRATCH1/0
* guest R13 saved in SPRN_SCRATCH0
*/
- /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
- std r9, HSTATE_HOST_R2(r13)
+ std r9, HSTATE_SCRATCH2(r13)
lbz r9, HSTATE_IN_GUEST(r13)
cmpwi r9, KVM_GUEST_MODE_HOST_HV
beq kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
cmpwi r9, KVM_GUEST_MODE_GUEST
- ld r9, HSTATE_HOST_R2(r13)
+ ld r9, HSTATE_SCRATCH2(r13)
beq kvmppc_interrupt_pr
#endif
/* We're now back in the host but in guest MMU context */
@@ -779,7 +781,7 @@ kvmppc_interrupt_hv:
std r6, VCPU_GPR(R6)(r9)
std r7, VCPU_GPR(R7)(r9)
std r8, VCPU_GPR(R8)(r9)
- ld r0, HSTATE_HOST_R2(r13)
+ ld r0, HSTATE_SCRATCH2(r13)
std r0, VCPU_GPR(R9)(r9)
std r10, VCPU_GPR(R10)(r9)
std r11, VCPU_GPR(R11)(r9)
@@ -990,14 +992,13 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
*/
/* Increment the threads-exiting-guest count in the 0xff00
bits of vcore->entry_exit_count */
- lwsync
ld r5,HSTATE_KVM_VCORE(r13)
addi r6,r5,VCORE_ENTRY_EXIT
41: lwarx r3,0,r6
addi r0,r3,0x100
stwcx. r0,0,r6
bne 41b
- lwsync
+ isync /* order stwcx. vs. reading napping_threads */
/*
* At this point we have an interrupt that we have to pass
@@ -1030,6 +1031,8 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
sld r0,r0,r4
andc. r3,r3,r0 /* no sense IPI'ing ourselves */
beq 43f
+ /* Order entry/exit update vs. IPIs */
+ sync
mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
subf r6,r4,r13
42: andi. r0,r3,1
@@ -1638,10 +1641,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
bge kvm_cede_exit
stwcx. r4,0,r6
bne 31b
+ /* order napping_threads update vs testing entry_exit_count */
+ isync
li r0,1
stb r0,HSTATE_NAPPING(r13)
- /* order napping_threads update vs testing entry_exit_count */
- lwsync
mr r4,r3
lwz r7,VCORE_ENTRY_EXIT(r5)
cmpwi r7,0x100
diff --git a/arch/powerpc/kvm/book3s_interrupts.S b/arch/powerpc/kvm/book3s_interrupts.S
index f4dd041..f779450 100644
--- a/arch/powerpc/kvm/book3s_interrupts.S
+++ b/arch/powerpc/kvm/book3s_interrupts.S
@@ -129,29 +129,32 @@ kvm_start_lightweight:
* R12 = exit handler id
* R13 = PACA
* SVCPU.* = guest *
+ * MSR.EE = 1
*
*/
+ PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
+ /*
+	 * kvmppc_copy_from_svcpu can clobber volatile registers, so save
+	 * the exit handler id to the vcpu and restore it from there later.
+ */
+ stw r12, VCPU_TRAP(r3)
+
/* Transfer reg values from shadow vcpu back to vcpu struct */
/* On 64-bit, interrupts are still off at this point */
- PPC_LL r3, GPR4(r1) /* vcpu pointer */
+
GET_SHADOW_VCPU(r4)
bl FUNC(kvmppc_copy_from_svcpu)
nop
#ifdef CONFIG_PPC_BOOK3S_64
- /* Re-enable interrupts */
- ld r3, HSTATE_HOST_MSR(r13)
- ori r3, r3, MSR_EE
- MTMSR_EERI(r3)
-
/*
* Reload kernel SPRG3 value.
* No need to save guest value as usermode can't modify SPRG3.
*/
ld r3, PACA_SPRG3(r13)
mtspr SPRN_SPRG3, r3
-
#endif /* CONFIG_PPC_BOOK3S_64 */
/* R7 = vcpu */
@@ -177,7 +180,7 @@ kvm_start_lightweight:
PPC_STL r31, VCPU_GPR(R31)(r7)
/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
- mr r5, r12
+ lwz r5, VCPU_TRAP(r7)
/* Restore r3 (kvm_run) and r4 (vcpu) */
REST_2GPRS(3, r1)
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index fe14ca3..5b9e906 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -66,6 +66,7 @@ static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
+ svcpu->in_use = 0;
svcpu_put(svcpu);
#endif
vcpu->cpu = smp_processor_id();
@@ -78,6 +79,9 @@ static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
+ if (svcpu->in_use) {
+ kvmppc_copy_from_svcpu(vcpu, svcpu);
+ }
memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
svcpu_put(svcpu);
@@ -110,12 +114,26 @@ void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
svcpu->ctr = vcpu->arch.ctr;
svcpu->lr = vcpu->arch.lr;
svcpu->pc = vcpu->arch.pc;
+ svcpu->in_use = true;
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
struct kvmppc_book3s_shadow_vcpu *svcpu)
{
+ /*
+ * vcpu_put would just call us again because in_use hasn't
+ * been updated yet.
+ */
+ preempt_disable();
+
+ /*
+ * Maybe we were already preempted and synced the svcpu from
+ * our preempt notifiers. Don't bother touching this svcpu then.
+ */
+ if (!svcpu->in_use)
+ goto out;
+
vcpu->arch.gpr[0] = svcpu->gpr[0];
vcpu->arch.gpr[1] = svcpu->gpr[1];
vcpu->arch.gpr[2] = svcpu->gpr[2];
@@ -139,6 +157,10 @@ void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
vcpu->arch.fault_dar = svcpu->fault_dar;
vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
vcpu->arch.last_inst = svcpu->last_inst;
+ svcpu->in_use = false;
+
+out:
+ preempt_enable();
}
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S
index a38c4c9..c3c5231 100644
--- a/arch/powerpc/kvm/book3s_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_rmhandlers.S
@@ -153,15 +153,11 @@ _GLOBAL(kvmppc_entry_trampoline)
li r6, MSR_IR | MSR_DR
andc r6, r5, r6 /* Clear DR and IR in MSR value */
-#ifdef CONFIG_PPC_BOOK3S_32
/*
* Set EE in HOST_MSR so that it's enabled when we get into our
- * C exit handler function. On 64-bit we delay enabling
- * interrupts until we have finished transferring stuff
- * to or from the PACA.
+ * C exit handler function.
*/
ori r5, r5, MSR_EE
-#endif
mtsrr0 r7
mtsrr1 r6
RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index 53e65a2..0591e05 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -681,7 +681,7 @@ int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
int ret, s;
- struct thread_struct thread;
+ struct debug_reg debug;
#ifdef CONFIG_PPC_FPU
struct thread_fp_state fp;
int fpexc_mode;
@@ -723,9 +723,9 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
#endif
/* Switch to guest debug context */
- thread.debug = vcpu->arch.shadow_dbg_reg;
- switch_booke_debug_regs(&thread);
- thread.debug = current->thread.debug;
+ debug = vcpu->arch.shadow_dbg_reg;
+ switch_booke_debug_regs(&debug);
+ debug = current->thread.debug;
current->thread.debug = vcpu->arch.shadow_dbg_reg;
kvmppc_fix_ee_before_entry();
@@ -736,8 +736,8 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
We also get here with interrupts enabled. */
/* Switch back to user space debug context */
- switch_booke_debug_regs(&thread);
- current->thread.debug = thread.debug;
+ switch_booke_debug_regs(&debug);
+ current->thread.debug = debug;
#ifdef CONFIG_PPC_FPU
kvmppc_save_guest_fp(vcpu);
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index d73a590..596a285 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -9,6 +9,14 @@
#include <asm/processor.h>
#include <asm/ppc_asm.h>
+#ifdef __BIG_ENDIAN__
+#define sLd sld /* Shift towards low-numbered address. */
+#define sHd srd /* Shift towards high-numbered address. */
+#else
+#define sLd srd /* Shift towards low-numbered address. */
+#define sHd sld /* Shift towards high-numbered address. */
+#endif
+
.align 7
_GLOBAL(__copy_tofrom_user)
BEGIN_FTR_SECTION
@@ -118,10 +126,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
- sld r6,r9,r10
+ sLd r6,r9,r10
26: ldu r9,16(r4)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
@@ -129,35 +137,35 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
- sld r8,r0,r10
+ sLd r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
- srd r12,r9,r11
- sld r6,r9,r10
+ sHd r12,r9,r11
+ sLd r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
-2: srd r12,r9,r11
- sld r6,r9,r10
+2: sHd r12,r9,r11
+ sLd r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
- srd r7,r0,r11
- sld r8,r0,r10
+ sHd r7,r0,r11
+ sLd r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
-5: srd r12,r9,r11
+5: sHd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
@@ -165,23 +173,38 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
- sld r9,r9,r10
+ sLd r9,r9,r10
ble cr1,7f
34: ld r0,8(r4)
- srd r7,r0,r11
+ sHd r7,r0,r11
or r9,r7,r9
7:
bf cr7*4+1,1f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,32
+#endif
94: stw r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,32
+#endif
addi r3,r3,4
1: bf cr7*4+2,2f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,16
+#endif
95: sth r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,16
+#endif
addi r3,r3,2
2: bf cr7*4+3,3f
+#ifdef __BIG_ENDIAN__
rotldi r9,r9,8
+#endif
96: stb r9,0(r3)
+#ifdef __LITTLE_ENDIAN__
+ rotrdi r9,r9,8
+#endif
3: li r3,0
blr
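The sLd/sHd macros defined in this hunk encode the fact that "shift towards the low-numbered address" is a left shift on big-endian but a right shift on little-endian. A small, hypothetical C illustration of that equivalence (take_leading() and the byte values are made up; the real routine does this on doublewords in assembly):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Drop the first 'byte_offset' bytes of a doubleword, endian-agnostically. */
static uint64_t take_leading(uint64_t word, unsigned int byte_offset)
{
	unsigned int bits = byte_offset * 8;

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return word << bits;	/* bytes move towards the low address */
#else
	return word >> bits;	/* same movement in little-endian layout */
#endif
}

int main(void)
{
	unsigned char bytes[8] = { 0x10, 0x11, 0x12, 0x13,
				   0x14, 0x15, 0x16, 0x17 };
	unsigned char out[8];
	uint64_t src, shifted;

	memcpy(&src, bytes, sizeof(src));
	shifted = take_leading(src, 3);
	memcpy(out, &shifted, sizeof(out));

	/* out[0..4] hold 0x13..0x17 on either endianness. */
	for (int i = 0; i < 8; i++)
		printf("%02x ", out[i]);
	printf("\n");
	return 0;
}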
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 02245ce..d7ddcee 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -36,7 +36,6 @@
#include "powernv.h"
#include "pci.h"
-static char *hub_diag = NULL;
static int ioda_eeh_nb_init = 0;
static int ioda_eeh_event(struct notifier_block *nb,
@@ -140,15 +139,6 @@ static int ioda_eeh_post_init(struct pci_controller *hose)
ioda_eeh_nb_init = 1;
}
- /* We needn't HUB diag-data on PHB3 */
- if (phb->type == PNV_PHB_IODA1 && !hub_diag) {
- hub_diag = (char *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
- if (!hub_diag) {
- pr_err("%s: Out of memory !\n", __func__);
- return -ENOMEM;
- }
- }
-
#ifdef CONFIG_DEBUG_FS
if (phb->dbgfs) {
debugfs_create_file("err_injct_outbound", 0600,
@@ -633,11 +623,10 @@ static void ioda_eeh_hub_diag_common(struct OpalIoP7IOCErrorData *data)
static void ioda_eeh_hub_diag(struct pci_controller *hose)
{
struct pnv_phb *phb = hose->private_data;
- struct OpalIoP7IOCErrorData *data;
+ struct OpalIoP7IOCErrorData *data = &phb->diag.hub_diag;
long rc;
- data = (struct OpalIoP7IOCErrorData *)ioda_eeh_hub_diag;
- rc = opal_pci_get_hub_diag_data(phb->hub_id, data, PAGE_SIZE);
+ rc = opal_pci_get_hub_diag_data(phb->hub_id, data, sizeof(*data));
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get HUB#%llx diag-data (%ld)\n",
__func__, phb->hub_id, rc);
@@ -820,14 +809,15 @@ static void ioda_eeh_phb_diag(struct pci_controller *hose)
struct OpalIoPhbErrorCommon *common;
long rc;
- common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
- rc = opal_pci_get_phb_diag_data2(phb->opal_id, common, PAGE_SIZE);
+ rc = opal_pci_get_phb_diag_data2(phb->opal_id, phb->diag.blob,
+ PNV_PCI_DIAG_BUF_SIZE);
if (rc != OPAL_SUCCESS) {
pr_warning("%s: Failed to get diag-data for PHB#%x (%ld)\n",
__func__, hose->global_number, rc);
return;
}
+ common = (struct OpalIoPhbErrorCommon *)phb->diag.blob;
switch (common->ioType) {
case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
ioda_eeh_p7ioc_phb_diag(hose, common);
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index e7e59e4..79d83ca 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -24,25 +24,25 @@ static int opal_lpc_chip_id = -1;
static u8 opal_lpc_inb(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xffff)
return 0xff;
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 1);
- return rc ? 0xff : data;
+ return rc ? 0xff : be32_to_cpu(data);
}
static __le16 __opal_lpc_inw(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xfffe)
return 0xffff;
if (port & 1)
return (__le16)opal_lpc_inb(port) << 8 | opal_lpc_inb(port + 1);
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 2);
- return rc ? 0xffff : data;
+ return rc ? 0xffff : be32_to_cpu(data);
}
static u16 opal_lpc_inw(unsigned long port)
{
@@ -52,7 +52,7 @@ static u16 opal_lpc_inw(unsigned long port)
static __le32 __opal_lpc_inl(unsigned long port)
{
int64_t rc;
- uint32_t data;
+ __be32 data;
if (opal_lpc_chip_id < 0 || port > 0xfffc)
return 0xffffffff;
@@ -62,7 +62,7 @@ static __le32 __opal_lpc_inl(unsigned long port)
(__le32)opal_lpc_inb(port + 2) << 8 |
opal_lpc_inb(port + 3);
rc = opal_lpc_read(opal_lpc_chip_id, OPAL_LPC_IO, port, &data, 4);
- return rc ? 0xffffffff : data;
+ return rc ? 0xffffffff : be32_to_cpu(data);
}
static u32 opal_lpc_inl(unsigned long port)
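The pattern in this hunk recurs through the rest of the series: the buffer OPAL fills is declared __be32 and converted with be32_to_cpu() before use, which is a no-op on big-endian hosts and a byte swap on little-endian ones. A tiny userspace analogue using ntohl() as a stand-in for be32_to_cpu() (the wire[] bytes are invented):

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>	/* ntohl() as a stand-in for be32_to_cpu() */

int main(void)
{
	const unsigned char wire[4] = { 0x00, 0x00, 0x00, 0x2a };
	uint32_t be_data;

	/* Pretend the firmware call stored these big-endian bytes. */
	memcpy(&be_data, wire, sizeof(be_data));

	printf("raw      : %#x\n", be_data);		/* 0x2a000000 on LE */
	printf("converted: %u\n", ntohl(be_data));	/* 42 on either host */
	return 0;
}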
diff --git a/arch/powerpc/platforms/powernv/opal-xscom.c b/arch/powerpc/platforms/powernv/opal-xscom.c
index 4d99a8f..4fbf276 100644
--- a/arch/powerpc/platforms/powernv/opal-xscom.c
+++ b/arch/powerpc/platforms/powernv/opal-xscom.c
@@ -96,9 +96,11 @@ static int opal_scom_read(scom_map_t map, u64 reg, u64 *value)
{
struct opal_scom_map *m = map;
int64_t rc;
+ __be64 v;
reg = opal_scom_unmangle(reg);
- rc = opal_xscom_read(m->chip, m->addr + reg, (uint64_t *)__pa(value));
+ rc = opal_xscom_read(m->chip, m->addr + reg, (__be64 *)__pa(&v));
+ *value = be64_to_cpu(v);
return opal_xscom_err_xlate(rc);
}
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 084cdfa..2c6d173 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -720,6 +720,7 @@ static void pnv_pci_ioda2_setup_dma_pe(struct pnv_phb *phb,
tbl->it_type = TCE_PCI_SWINV_CREATE | TCE_PCI_SWINV_FREE;
}
iommu_init_table(tbl, phb->hose->node);
+ iommu_register_group(tbl, pci_domain_nr(pe->pbus), pe->pe_number);
if (pe->pdev)
set_iommu_table_base(&pe->pdev->dev, tbl);
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 911c24e..1ed8d5f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -172,11 +172,13 @@ struct pnv_phb {
} ioda;
};
- /* PHB status structure */
+ /* PHB and hub status structure */
union {
unsigned char blob[PNV_PCI_DIAG_BUF_SIZE];
struct OpalIoP7IOCPhbErrorData p7ioc;
+ struct OpalIoP7IOCErrorData hub_diag;
} diag;
+
};
extern struct pci_ops pnv_pci_ops;
diff --git a/arch/powerpc/platforms/pseries/lparcfg.c b/arch/powerpc/platforms/pseries/lparcfg.c
index e738007..c9fecf0 100644
--- a/arch/powerpc/platforms/pseries/lparcfg.c
+++ b/arch/powerpc/platforms/pseries/lparcfg.c
@@ -157,7 +157,7 @@ static void parse_ppp_data(struct seq_file *m)
{
struct hvcall_ppp_data ppp_data;
struct device_node *root;
- const int *perf_level;
+ const __be32 *perf_level;
int rc;
rc = h_get_ppp(&ppp_data);
@@ -201,7 +201,7 @@ static void parse_ppp_data(struct seq_file *m)
perf_level = of_get_property(root,
"ibm,partition-performance-parameters-level",
NULL);
- if (perf_level && (*perf_level >= 1)) {
+ if (perf_level && (be32_to_cpup(perf_level) >= 1)) {
seq_printf(m,
"physical_procs_allocated_to_virtualization=%d\n",
ppp_data.phys_platform_procs);
@@ -435,7 +435,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
int partition_potential_processors;
int partition_active_processors;
struct device_node *rtas_node;
- const int *lrdrp = NULL;
+ const __be32 *lrdrp = NULL;
rtas_node = of_find_node_by_path("/rtas");
if (rtas_node)
@@ -444,7 +444,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v)
if (lrdrp == NULL) {
partition_potential_processors = vdso_data->processorCount;
} else {
- partition_potential_processors = *(lrdrp + 4);
+ partition_potential_processors = be32_to_cpup(lrdrp + 4);
}
of_node_put(rtas_node);
@@ -654,7 +654,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
const char *model = "";
const char *system_id = "";
const char *tmp;
- const unsigned int *lp_index_ptr;
+ const __be32 *lp_index_ptr;
unsigned int lp_index = 0;
seq_printf(m, "%s %s\n", MODULE_NAME, MODULE_VERS);
@@ -670,7 +670,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
NULL);
if (lp_index_ptr)
- lp_index = *lp_index_ptr;
+ lp_index = be32_to_cpup(lp_index_ptr);
of_node_put(rootdn);
}
seq_printf(m, "serial_number=%s\n", system_id);
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 6d2f0ab..0c882e8 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -130,7 +130,8 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
{
struct device_node *dn;
struct pci_dn *pdn;
- const u32 *req_msi;
+ const __be32 *p;
+ u32 req_msi;
pdn = pci_get_pdn(pdev);
if (!pdn)
@@ -138,19 +139,20 @@ static int check_req(struct pci_dev *pdev, int nvec, char *prop_name)
dn = pdn->node;
- req_msi = of_get_property(dn, prop_name, NULL);
- if (!req_msi) {
+ p = of_get_property(dn, prop_name, NULL);
+ if (!p) {
pr_debug("rtas_msi: No %s on %s\n", prop_name, dn->full_name);
return -ENOENT;
}
- if (*req_msi < nvec) {
+ req_msi = be32_to_cpup(p);
+ if (req_msi < nvec) {
pr_debug("rtas_msi: %s requests < %d MSIs\n", prop_name, nvec);
- if (*req_msi == 0) /* Be paranoid */
+ if (req_msi == 0) /* Be paranoid */
return -ENOSPC;
- return *req_msi;
+ return req_msi;
}
return 0;
@@ -171,7 +173,7 @@ static int check_req_msix(struct pci_dev *pdev, int nvec)
static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
{
struct device_node *dn;
- const u32 *p;
+ const __be32 *p;
dn = of_node_get(pci_device_to_OF_node(dev));
while (dn) {
@@ -179,7 +181,7 @@ static struct device_node *find_pe_total_msi(struct pci_dev *dev, int *total)
if (p) {
pr_debug("rtas_msi: found prop on dn %s\n",
dn->full_name);
- *total = *p;
+ *total = be32_to_cpup(p);
return dn;
}
@@ -232,13 +234,13 @@ struct msi_counts {
static void *count_non_bridge_devices(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
u32 class;
pr_debug("rtas_msi: counting %s\n", dn->full_name);
p = of_get_property(dn, "class-code", NULL);
- class = p ? *p : 0;
+ class = p ? be32_to_cpup(p) : 0;
if ((class >> 8) != PCI_CLASS_BRIDGE_PCI)
counts->num_devices++;
@@ -249,7 +251,7 @@ static void *count_non_bridge_devices(struct device_node *dn, void *data)
static void *count_spare_msis(struct device_node *dn, void *data)
{
struct msi_counts *counts = data;
- const u32 *p;
+ const __be32 *p;
int req;
if (dn == counts->requestor)
@@ -260,11 +262,11 @@ static void *count_spare_msis(struct device_node *dn, void *data)
req = 0;
p = of_get_property(dn, "ibm,req#msi", NULL);
if (p)
- req = *p;
+ req = be32_to_cpup(p);
p = of_get_property(dn, "ibm,req#msi-x", NULL);
if (p)
- req = max(req, (int)*p);
+ req = max(req, (int)be32_to_cpup(p));
}
if (req < counts->quota)
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 7bfaf58..d7096f2 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -43,8 +43,8 @@ static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */
static DEFINE_SPINLOCK(nvram_lock);
struct err_log_info {
- int error_type;
- unsigned int seq_num;
+ __be32 error_type;
+ __be32 seq_num;
};
struct nvram_os_partition {
@@ -79,9 +79,9 @@ static const char *pseries_nvram_os_partitions[] = {
};
struct oops_log_info {
- u16 version;
- u16 report_length;
- u64 timestamp;
+ __be16 version;
+ __be16 report_length;
+ __be64 timestamp;
} __attribute__((packed));
static void oops_to_nvram(struct kmsg_dumper *dumper,
@@ -291,8 +291,8 @@ int nvram_write_os_partition(struct nvram_os_partition *part, char * buff,
length = part->size;
}
- info.error_type = err_type;
- info.seq_num = error_log_cnt;
+ info.error_type = cpu_to_be32(err_type);
+ info.seq_num = cpu_to_be32(error_log_cnt);
tmp_index = part->index;
@@ -364,8 +364,8 @@ int nvram_read_partition(struct nvram_os_partition *part, char *buff,
}
if (part->os_partition) {
- *error_log_cnt = info.seq_num;
- *err_type = info.error_type;
+ *error_log_cnt = be32_to_cpu(info.seq_num);
+ *err_type = be32_to_cpu(info.error_type);
}
return 0;
@@ -529,9 +529,9 @@ static int zip_oops(size_t text_len)
pr_err("nvram: logging uncompressed oops/panic report\n");
return -1;
}
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) zipped_len;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(zipped_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
return 0;
}
@@ -574,9 +574,9 @@ static int nvram_pstore_write(enum pstore_type_id type,
clobbering_unread_rtas_event())
return -1;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) size;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(size);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
if (compressed)
err_type = ERR_TYPE_KERNEL_PANIC_GZ;
@@ -670,16 +670,16 @@ static ssize_t nvram_pstore_read(u64 *id, enum pstore_type_id *type,
size_t length, hdr_size;
oops_hdr = (struct oops_log_info *)buff;
- if (oops_hdr->version < OOPS_HDR_VERSION) {
+ if (be16_to_cpu(oops_hdr->version) < OOPS_HDR_VERSION) {
/* Old format oops header had 2-byte record size */
hdr_size = sizeof(u16);
- length = oops_hdr->version;
+ length = be16_to_cpu(oops_hdr->version);
time->tv_sec = 0;
time->tv_nsec = 0;
} else {
hdr_size = sizeof(*oops_hdr);
- length = oops_hdr->report_length;
- time->tv_sec = oops_hdr->timestamp;
+ length = be16_to_cpu(oops_hdr->report_length);
+ time->tv_sec = be64_to_cpu(oops_hdr->timestamp);
time->tv_nsec = 0;
}
*buf = kmalloc(length, GFP_KERNEL);
@@ -889,13 +889,13 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
kmsg_dump_get_buffer(dumper, false,
oops_data, oops_data_sz, &text_len);
err_type = ERR_TYPE_KERNEL_PANIC;
- oops_hdr->version = OOPS_HDR_VERSION;
- oops_hdr->report_length = (u16) text_len;
- oops_hdr->timestamp = get_seconds();
+ oops_hdr->version = cpu_to_be16(OOPS_HDR_VERSION);
+ oops_hdr->report_length = cpu_to_be16(text_len);
+ oops_hdr->timestamp = cpu_to_be64(get_seconds());
}
(void) nvram_write_os_partition(&oops_log_partition, oops_buf,
- (int) (sizeof(*oops_hdr) + oops_hdr->report_length), err_type,
+ (int) (sizeof(*oops_hdr) + text_len), err_type,
++oops_count);
spin_unlock_irqrestore(&lock, flags);
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 5f93856..70670a2 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -113,7 +113,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
{
struct device_node *dn, *pdn;
struct pci_bus *bus;
- const uint32_t *pcie_link_speed_stats;
+ const __be32 *pcie_link_speed_stats;
bus = bridge->bus;
@@ -122,7 +122,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
for (pdn = dn; pdn != NULL; pdn = of_get_next_parent(pdn)) {
- pcie_link_speed_stats = (const uint32_t *) of_get_property(pdn,
+ pcie_link_speed_stats = of_get_property(pdn,
"ibm,pcie-link-speed-stats", NULL);
if (pcie_link_speed_stats)
break;
@@ -135,7 +135,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
return 0;
}
- switch (pcie_link_speed_stats[0]) {
+ switch (be32_to_cpup(pcie_link_speed_stats)) {
case 0x01:
bus->max_bus_speed = PCIE_SPEED_2_5GT;
break;
@@ -147,7 +147,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
break;
}
- switch (pcie_link_speed_stats[1]) {
+ switch (be32_to_cpup(pcie_link_speed_stats + 1)) {
case 0x01:
bus->cur_bus_speed = PCIE_SPEED_2_5GT;
break;
diff --git a/arch/powerpc/sysdev/ppc4xx_ocm.c b/arch/powerpc/sysdev/ppc4xx_ocm.c
index b7c4345..85d9e37 100644
--- a/arch/powerpc/sysdev/ppc4xx_ocm.c
+++ b/arch/powerpc/sysdev/ppc4xx_ocm.c
@@ -339,7 +339,7 @@ void *ppc4xx_ocm_alloc(phys_addr_t *phys, int size, int align,
if (IS_ERR_VALUE(offset))
continue;
- ocm_blk = kzalloc(sizeof(struct ocm_block *), GFP_KERNEL);
+ ocm_blk = kzalloc(sizeof(struct ocm_block), GFP_KERNEL);
if (!ocm_blk) {
printk(KERN_ERR "PPC4XX OCM: could not allocate ocm block");
rh_free(ocm_reg->rh, offset);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 5877e71..e9f3125 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -135,7 +135,6 @@ config S390
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_UID16 if 32BIT
select HAVE_VIRT_CPU_ACCOUNTING
- select INIT_ALL_POSSIBLE
select KTIME_SCALAR if 32BIT
select MODULES_USE_ELF_RELA
select OLD_SIGACTION
@@ -347,14 +346,14 @@ config SMP
Even if you don't know what to do here, say Y.
config NR_CPUS
- int "Maximum number of CPUs (2-64)"
- range 2 64
+ int "Maximum number of CPUs (2-256)"
+ range 2 256
depends on SMP
default "32" if !64BIT
default "64" if 64BIT
help
This allows you to specify the maximum number of CPUs which this
- kernel will support. The maximum supported value is 64 and the
+ kernel will support. The maximum supported value is 256 and the
minimum value which makes sense is 2.
This is purely to save memory - each supported CPU adds
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index 30ef748..2f39095 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <asm/chpid.h>
+#include <asm/cpu.h>
#define SCLP_CHP_INFO_MASK_SIZE 32
@@ -37,7 +38,7 @@ struct sclp_cpu_info {
unsigned int standby;
unsigned int combined;
int has_cpu_type;
- struct sclp_cpu_entry cpu[255];
+ struct sclp_cpu_entry cpu[MAX_CPU_ADDRESS + 1];
};
int sclp_get_cpu_info(struct sclp_cpu_info *info);
diff --git a/arch/s390/include/asm/smp.h b/arch/s390/include/asm/smp.h
index ac9bed8..1607793 100644
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -31,6 +31,7 @@ extern void smp_yield(void);
extern void smp_stop_cpu(void);
extern void smp_cpu_set_polarization(int cpu, int val);
extern int smp_cpu_get_polarization(int cpu);
+extern void smp_fill_possible_mask(void);
#else /* CONFIG_SMP */
@@ -50,6 +51,7 @@ static inline int smp_vcpu_scheduled(int cpu) { return 1; }
static inline void smp_yield_cpu(int cpu) { }
static inline void smp_yield(void) { }
static inline void smp_stop_cpu(void) { }
+static inline void smp_fill_possible_mask(void) { }
#endif /* CONFIG_SMP */
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index 496116c..e4c99a1 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -72,6 +72,7 @@ int main(void)
/* constants used by the vdso */
DEFINE(__CLOCK_REALTIME, CLOCK_REALTIME);
DEFINE(__CLOCK_MONOTONIC, CLOCK_MONOTONIC);
+ DEFINE(__CLOCK_THREAD_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID);
DEFINE(__CLOCK_REALTIME_RES, MONOTONIC_RES_NSEC);
BLANK();
/* idle data offsets */
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 4444875..0f3d44e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -1023,6 +1023,7 @@ void __init setup_arch(char **cmdline_p)
setup_vmcoreinfo();
setup_lowcore();
+ smp_fill_possible_mask();
cpu_init();
s390_init_cpu_topology();
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index dc4a534..9587047 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -721,18 +721,14 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
return 0;
}
-static int __init setup_possible_cpus(char *s)
-{
- int max, cpu;
+static unsigned int setup_possible_cpus __initdata;
- if (kstrtoint(s, 0, &max) < 0)
- return 0;
- init_cpu_possible(cpumask_of(0));
- for (cpu = 1; cpu < max && cpu < nr_cpu_ids; cpu++)
- set_cpu_possible(cpu, true);
+static int __init _setup_possible_cpus(char *s)
+{
+ get_option(&s, &setup_possible_cpus);
return 0;
}
-early_param("possible_cpus", setup_possible_cpus);
+early_param("possible_cpus", _setup_possible_cpus);
#ifdef CONFIG_HOTPLUG_CPU
@@ -775,6 +771,17 @@ void __noreturn cpu_die(void)
#endif /* CONFIG_HOTPLUG_CPU */
+void __init smp_fill_possible_mask(void)
+{
+ unsigned int possible, cpu;
+
+ possible = setup_possible_cpus;
+ if (!possible)
+ possible = MACHINE_IS_VM ? 64 : nr_cpu_ids;
+ for (cpu = 0; cpu < possible && cpu < nr_cpu_ids; cpu++)
+ set_cpu_possible(cpu, true);
+}
+
void __init smp_prepare_cpus(unsigned int max_cpus)
{
/* request the 0x1201 emergency signal external interrupt */
diff --git a/arch/s390/kernel/vdso.c b/arch/s390/kernel/vdso.c
index a84476f..6136490 100644
--- a/arch/s390/kernel/vdso.c
+++ b/arch/s390/kernel/vdso.c
@@ -125,7 +125,7 @@ int vdso_alloc_per_cpu(struct _lowcore *lowcore)
psal[i] = 0x80000000;
lowcore->paste[4] = (u32)(addr_t) psal;
- psal[0] = 0x20000000;
+ psal[0] = 0x02000000;
psal[2] = (u32)(addr_t) aste;
*(unsigned long *) (aste + 2) = segment_table +
_ASCE_TABLE_LENGTH + _ASCE_USER_BITS + _ASCE_TYPE_SEGMENT;
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 5be8e47..65fc397 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -46,18 +46,13 @@ __kernel_clock_gettime:
jnm 3f
a %r0,__VDSO_TK_MULT(%r5)
3: alr %r0,%r2
- al %r0,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
- al %r1,__VDSO_XTIME_NSEC+4(%r5)
- brc 12,4f
- ahi %r0,1
-4: al %r0,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ al %r0,__VDSO_WTOM_NSEC(%r5)
al %r1,__VDSO_WTOM_NSEC+4(%r5)
brc 12,5f
ahi %r0,1
5: l %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
srdl %r0,0(%r2) /* >> tk->shift */
- l %r2,__VDSO_XTIME_SEC+4(%r5)
- al %r2,__VDSO_WTOM_SEC+4(%r5)
+ l %r2,__VDSO_WTOM_SEC+4(%r5)
cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
jne 1b
basr %r5,0
diff --git a/arch/s390/kernel/vdso64/clock_getres.S b/arch/s390/kernel/vdso64/clock_getres.S
index 176e1f7..34deba7 100644
--- a/arch/s390/kernel/vdso64/clock_getres.S
+++ b/arch/s390/kernel/vdso64/clock_getres.S
@@ -23,7 +23,9 @@ __kernel_clock_getres:
je 0f
cghi %r2,__CLOCK_MONOTONIC
je 0f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 0f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
jne 2f
larl %r5,_vdso_data
icm %r0,15,__LC_ECTG_OK(%r5)
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 0add107..91940ed 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -22,7 +22,9 @@ __kernel_clock_gettime:
larl %r5,_vdso_data
cghi %r2,__CLOCK_REALTIME
je 4f
- cghi %r2,-2 /* CLOCK_THREAD_CPUTIME_ID for this thread */
+ cghi %r2,__CLOCK_THREAD_CPUTIME_ID
+ je 9f
+ cghi %r2,-2 /* Per-thread CPUCLOCK with PID=0, VIRT=1 */
je 9f
cghi %r2,__CLOCK_MONOTONIC
jne 12f
@@ -35,13 +37,11 @@ __kernel_clock_gettime:
jnz 0b
stck 48(%r15) /* Store TOD clock */
lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
- lg %r0,__VDSO_XTIME_SEC(%r5) /* tk->xtime_sec */
- alg %r0,__VDSO_WTOM_SEC(%r5) /* + wall_to_monotonic.sec */
+ lg %r0,__VDSO_WTOM_SEC(%r5)
lg %r1,48(%r15)
sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
- alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
- alg %r1,__VDSO_WTOM_NSEC(%r5) /* + wall_to_monotonic.nsec */
+ alg %r1,__VDSO_WTOM_NSEC(%r5)
srlg %r1,%r1,0(%r2) /* >> tk->shift */
clg %r4,__VDSO_UPD_COUNT(%r5) /* check update counter */
jne 0b
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
index 800f064..0696072 100644
--- a/arch/s390/pci/pci_event.c
+++ b/arch/s390/pci/pci_event.c
@@ -75,6 +75,7 @@ void zpci_event_availability(void *data)
if (!zdev || zdev->state == ZPCI_FN_STATE_CONFIGURED)
break;
zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ zdev->fh = ccdf->fh;
ret = zpci_enable_device(zdev);
if (ret)
break;
@@ -101,6 +102,7 @@ void zpci_event_availability(void *data)
if (pdev)
pci_stop_and_remove_bus_device(pdev);
+ zdev->fh = ccdf->fh;
zpci_disable_device(zdev);
zdev->state = ZPCI_FN_STATE_STANDBY;
break;
diff --git a/arch/sh/kernel/sh_ksyms_32.c b/arch/sh/kernel/sh_ksyms_32.c
index 2a0a596..d77f2f6 100644
--- a/arch/sh/kernel/sh_ksyms_32.c
+++ b/arch/sh/kernel/sh_ksyms_32.c
@@ -20,6 +20,11 @@ EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(copy_page);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_FLATMEM
+/* need in pfn_valid macro */
+EXPORT_SYMBOL(min_low_pfn);
+EXPORT_SYMBOL(max_low_pfn);
+#endif
#define DECLARE_EXPORT(name) \
extern void name(void);EXPORT_SYMBOL(name)
diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile
index 7b95f29..3baff31 100644
--- a/arch/sh/lib/Makefile
+++ b/arch/sh/lib/Makefile
@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \
checksum.o strlen.o div64.o div64-generic.o
# Extracted from libgcc
-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
+obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \
ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \
udiv_qrnnd.o
diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h
index 8358dc1..0f9e945 100644
--- a/arch/sparc/include/asm/pgtable_64.h
+++ b/arch/sparc/include/asm/pgtable_64.h
@@ -619,7 +619,7 @@ static inline unsigned long pte_present(pte_t pte)
}
#define pte_accessible pte_accessible
-static inline unsigned long pte_accessible(pte_t a)
+static inline unsigned long pte_accessible(struct mm_struct *mm, pte_t a)
{
return pte_val(a) & _PAGE_VALID;
}
@@ -847,7 +847,7 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
* SUN4V NOTE: _PAGE_VALID is the same value in both the SUN4U
* and SUN4V pte layout, so this inline test is fine.
*/
- if (likely(mm != &init_mm) && pte_accessible(orig))
+ if (likely(mm != &init_mm) && pte_accessible(mm, orig))
tlb_batch_add(mm, addr, ptep, orig, fullmm);
}
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index e562d3c..ad7e178 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -262,8 +262,8 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
extern __must_check long strlen_user(const char __user *str);
extern __must_check long strnlen_user(const char __user *str, long n);
-#define __copy_to_user_inatomic ___copy_to_user
-#define __copy_from_user_inatomic ___copy_from_user
+#define __copy_to_user_inatomic __copy_to_user
+#define __copy_from_user_inatomic __copy_from_user
struct pt_regs;
extern unsigned long compute_effective_address(struct pt_regs *,
diff --git a/arch/sparc/kernel/iommu.c b/arch/sparc/kernel/iommu.c
index 070ed14..76663b0 100644
--- a/arch/sparc/kernel/iommu.c
+++ b/arch/sparc/kernel/iommu.c
@@ -854,7 +854,7 @@ int dma_supported(struct device *dev, u64 device_mask)
return 1;
#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
+ if (dev_is_pci(dev))
return pci64_dma_supported(to_pci_dev(dev), device_mask);
#endif
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index 2096468..e7e215d 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -666,10 +666,9 @@ EXPORT_SYMBOL(dma_ops);
*/
int dma_supported(struct device *dev, u64 mask)
{
-#ifdef CONFIG_PCI
- if (dev->bus == &pci_bus_type)
+ if (dev_is_pci(dev))
return 1;
-#endif
+
return 0;
}
EXPORT_SYMBOL(dma_supported);
diff --git a/arch/sparc/kernel/kgdb_64.c b/arch/sparc/kernel/kgdb_64.c
index 60b19f5..b45fe3f 100644
--- a/arch/sparc/kernel/kgdb_64.c
+++ b/arch/sparc/kernel/kgdb_64.c
@@ -6,6 +6,7 @@
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <linux/ftrace.h>
+#include <linux/context_tracking.h>
#include <asm/cacheflush.h>
#include <asm/kdebug.h>
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index b66a533..b085311 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -123,11 +123,12 @@ void smp_callin(void)
rmb();
set_cpu_online(cpuid, true);
- local_irq_enable();
/* idle thread is expected to have preempt disabled */
preempt_disable();
+ local_irq_enable();
+
cpu_startup_entry(CPUHP_ONLINE);
}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index e903c71..0952ecd 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -26,6 +26,7 @@ config X86
select HAVE_AOUT if X86_32
select HAVE_UNSTABLE_SCHED_CLOCK
select ARCH_SUPPORTS_NUMA_BALANCING
+ select ARCH_SUPPORTS_INT128 if X86_64
select ARCH_WANTS_PROT_NUMA_PROT_NONE
select HAVE_IDE
select HAVE_OPROFILE
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index eda00f9..57d0215 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -31,8 +31,8 @@ ifeq ($(CONFIG_X86_32),y)
KBUILD_CFLAGS += -msoft-float -mregparm=3 -freg-struct-return
- # Don't autogenerate SSE instructions
- KBUILD_CFLAGS += -mno-sse
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
# Never want PIC in a 32-bit kernel, prevent breakage with GCC built
# with nonstandard options
@@ -60,8 +60,8 @@ else
KBUILD_AFLAGS += -m64
KBUILD_CFLAGS += -m64
- # Don't autogenerate SSE instructions
- KBUILD_CFLAGS += -mno-sse
+ # Don't autogenerate MMX or SSE instructions
+ KBUILD_CFLAGS += -mno-mmx -mno-sse
# Use -mpreferred-stack-boundary=3 if supported.
KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
index dce69a2..d9c1195 100644
--- a/arch/x86/boot/Makefile
+++ b/arch/x86/boot/Makefile
@@ -53,18 +53,18 @@ $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
# How to compile the 16-bit code. Note we always compile for -march=i386,
# that way we can complain to the user if the CPU is insufficient.
-KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
+KBUILD_CFLAGS := $(USERINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ \
-DDISABLE_BRANCH_PROFILING \
-Wall -Wstrict-prototypes \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
-KBUILD_CFLAGS += $(call cc-option, -m32)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
GCOV_PROFILE := n
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index dcd90df..c8a6792 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -13,6 +13,7 @@ KBUILD_CFLAGS += -DDISABLE_BRANCH_PROFILING
cflags-$(CONFIG_X86_32) := -march=i386
cflags-$(CONFIG_X86_64) := -mcmodel=small
KBUILD_CFLAGS += $(cflags-y)
+KBUILD_CFLAGS += -mno-mmx -mno-sse
KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
index 3d19994..bbc8b12 100644
--- a/arch/x86/include/asm/pgtable.h
+++ b/arch/x86/include/asm/pgtable.h
@@ -452,9 +452,16 @@ static inline int pte_present(pte_t a)
}
#define pte_accessible pte_accessible
-static inline int pte_accessible(pte_t a)
+static inline bool pte_accessible(struct mm_struct *mm, pte_t a)
{
- return pte_flags(a) & _PAGE_PRESENT;
+ if (pte_flags(a) & _PAGE_PRESENT)
+ return true;
+
+ if ((pte_flags(a) & (_PAGE_PROTNONE | _PAGE_NUMA)) &&
+ mm_tlb_flush_pending(mm))
+ return true;
+
+ return false;
}
static inline int pte_hidden(pte_t pte)
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 8729723..c8b0519 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -8,6 +8,12 @@
DECLARE_PER_CPU(int, __preempt_count);
/*
+ * We use the PREEMPT_NEED_RESCHED bit as an inverted NEED_RESCHED such
+ * that a decrement hitting 0 means we can and should reschedule.
+ */
+#define PREEMPT_ENABLED (0 + PREEMPT_NEED_RESCHED)
+
+/*
* We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
* that think a non-zero value indicates we cannot preempt.
*/
@@ -74,6 +80,11 @@ static __always_inline void __preempt_count_sub(int val)
__this_cpu_add_4(__preempt_count, -val);
}
+/*
+ * Because we keep PREEMPT_NEED_RESCHED set when we do _not_ need to reschedule
+ * a decrement which hits zero means we have no preempt_count and should
+ * reschedule.
+ */
static __always_inline bool __preempt_count_dec_and_test(void)
{
GEN_UNARY_RMWcc("decl", __preempt_count, __percpu_arg(0), "e");
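The PREEMPT_ENABLED definition added above only works because the NEED_RESCHED bit is kept inverted inside the preempt count: it stays set while no reschedule is wanted, so a single decrement that reaches zero means both "no preempt_count left" and "reschedule pending". A toy model of the trick (TOY_* names are invented; the real code operates on a per-cpu variable in asm):

#include <stdio.h>
#include <stdbool.h>

#define TOY_NEED_RESCHED	0x80000000u
#define TOY_ENABLED		(0 + TOY_NEED_RESCHED)

static unsigned int toy_preempt_count = TOY_ENABLED;

static void toy_set_need_resched(bool need)
{
	/* Inverted: the bit is *cleared* when a reschedule is pending. */
	if (need)
		toy_preempt_count &= ~TOY_NEED_RESCHED;
	else
		toy_preempt_count |= TOY_NEED_RESCHED;
}

static bool toy_dec_and_test(void)
{
	/* Zero only if the count is gone *and* the resched bit is clear. */
	return --toy_preempt_count == 0;
}

int main(void)
{
	toy_preempt_count += 1;		/* preempt_disable() */
	toy_set_need_resched(true);	/* e.g. an interrupt wants a resched */

	if (toy_dec_and_test())		/* preempt_enable() */
		printf("would call preempt_schedule()\n");
	else
		printf("no reschedule needed\n");
	return 0;
}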
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index dc1ec0d..ea04b34 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -387,7 +387,8 @@ static void init_intel(struct cpuinfo_x86 *c)
set_cpu_cap(c, X86_FEATURE_PEBS);
}
- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
+ if (c->x86 == 6 && cpu_has_clflush &&
+ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
#ifdef CONFIG_X86_64
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index fd00bb2..c1a8618 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -262,11 +262,20 @@ struct cpu_hw_events {
__EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK, \
HWEIGHT(n), 0, PERF_X86_EVENT_PEBS_ST_HSW)
-#define EVENT_CONSTRAINT_END \
- EVENT_CONSTRAINT(0, 0, 0)
+/*
+ * We define the end marker as having a weight of -1
+ * to enable blacklisting of events using a counter bitmask
+ * of zero and thus a weight of zero.
+ * The end marker has a weight that cannot possibly be
+ * obtained from counting the bits in the bitmask.
+ */
+#define EVENT_CONSTRAINT_END { .weight = -1 }
+/*
+ * Check for end marker with weight == -1
+ */
#define for_each_event_constraint(e, c) \
- for ((e) = (c); (e)->weight; (e)++)
+ for ((e) = (c); (e)->weight != -1; (e)++)
/*
* Extra registers for specific events.
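The hunk above changes the table sentinel so that a constraint with an all-zero counter bitmask (weight 0, i.e. a blacklisted event) is no longer mistaken for the end of the table. A compact illustration with a made-up struct (toy_constraint is not the kernel's event_constraint):

#include <stdio.h>

struct toy_constraint {
	unsigned long idxmsk;	/* allowed-counter bitmask */
	int weight;		/* popcount of idxmsk, or -1 for the end */
};

#define TOY_CONSTRAINT(mask, w)	{ .idxmsk = (mask), .weight = (w) }
#define TOY_CONSTRAINT_END	{ .weight = -1 }

static const struct toy_constraint table[] = {
	TOY_CONSTRAINT(0x3, 2),
	TOY_CONSTRAINT(0x0, 0),	/* blacklisted event: weight 0, still a valid entry */
	TOY_CONSTRAINT(0xf, 4),
	TOY_CONSTRAINT_END,
};

int main(void)
{
	const struct toy_constraint *c;

	/* Stopping on weight == 0 would wrongly cut the table short. */
	for (c = table; c->weight != -1; c++)
		printf("mask=%#lx weight=%d\n", c->idxmsk, c->weight);
	return 0;
}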
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5439117..1673940 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -143,6 +143,8 @@ static inline int kvm_apic_id(struct kvm_lapic *apic)
return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
}
+#define KVM_X2APIC_CID_BITS 0
+
static void recalculate_apic_map(struct kvm *kvm)
{
struct kvm_apic_map *new, *old = NULL;
@@ -180,7 +182,8 @@ static void recalculate_apic_map(struct kvm *kvm)
if (apic_x2apic_mode(apic)) {
new->ldr_bits = 32;
new->cid_shift = 16;
- new->cid_mask = new->lid_mask = 0xffff;
+ new->cid_mask = (1 << KVM_X2APIC_CID_BITS) - 1;
+ new->lid_mask = 0xffff;
} else if (kvm_apic_sw_enabled(apic) &&
!new->cid_mask /* flat mode */ &&
kvm_apic_get_reg(apic, APIC_DFR) == APIC_DFR_CLUSTER) {
@@ -841,7 +844,8 @@ static u32 apic_get_tmcct(struct kvm_lapic *apic)
ASSERT(apic != NULL);
/* if initial count is 0, current count should also be 0 */
- if (kvm_apic_get_reg(apic, APIC_TMICT) == 0)
+ if (kvm_apic_get_reg(apic, APIC_TMICT) == 0 ||
+ apic->lapic_timer.period == 0)
return 0;
remaining = hrtimer_get_remaining(&apic->lapic_timer.timer);
@@ -1346,6 +1350,10 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
return;
}
+ if (!kvm_vcpu_is_bsp(apic->vcpu))
+ value &= ~MSR_IA32_APICBASE_BSP;
+ vcpu->arch.apic_base = value;
+
/* update jump label if enable bit changes */
if ((vcpu->arch.apic_base ^ value) & MSR_IA32_APICBASE_ENABLE) {
if (value & MSR_IA32_APICBASE_ENABLE)
@@ -1355,10 +1363,6 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
recalculate_apic_map(vcpu->kvm);
}
- if (!kvm_vcpu_is_bsp(apic->vcpu))
- value &= ~MSR_IA32_APICBASE_BSP;
-
- vcpu->arch.apic_base = value;
if ((old_value ^ value) & X2APIC_ENABLE) {
if (value & X2APIC_ENABLE) {
u32 id = kvm_apic_id(apic);
@@ -1691,7 +1695,6 @@ static void apic_sync_pv_eoi_from_guest(struct kvm_vcpu *vcpu,
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
{
u32 data;
- void *vapic;
if (test_bit(KVM_APIC_PV_EOI_PENDING, &vcpu->arch.apic_attention))
apic_sync_pv_eoi_from_guest(vcpu, vcpu->arch.apic);
@@ -1699,9 +1702,8 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
return;
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
- kunmap_atomic(vapic);
+ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
apic_set_tpr(vcpu->arch.apic, data & 0xff);
}
@@ -1737,7 +1739,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
u32 data, tpr;
int max_irr, max_isr;
struct kvm_lapic *apic = vcpu->arch.apic;
- void *vapic;
apic_sync_pv_eoi_to_guest(vcpu, apic);
@@ -1753,18 +1754,24 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
max_isr = 0;
data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
- vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
- kunmap_atomic(vapic);
+ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data,
+ sizeof(u32));
}
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
{
- vcpu->arch.apic->vapic_addr = vapic_addr;
- if (vapic_addr)
+ if (vapic_addr) {
+ if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+ &vcpu->arch.apic->vapic_cache,
+ vapic_addr, sizeof(u32)))
+ return -EINVAL;
__set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
- else
+ } else {
__clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
+ }
+
+ vcpu->arch.apic->vapic_addr = vapic_addr;
+ return 0;
}
int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index c730ac9..c8b0d0d 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -34,7 +34,7 @@ struct kvm_lapic {
*/
void *regs;
gpa_t vapic_addr;
- struct page *vapic_page;
+ struct gfn_to_hva_cache vapic_cache;
unsigned long pending_events;
unsigned int sipi_vector;
};
@@ -76,7 +76,7 @@ void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
void kvm_apic_write_nodecode(struct kvm_vcpu *vcpu, u32 offset);
void kvm_apic_set_eoi_accelerated(struct kvm_vcpu *vcpu, int vector);
-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
+int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b2fe1c2..da7837e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8283,8 +8283,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
vcpu->arch.cr4_guest_owned_bits = ~vmcs_readl(CR4_GUEST_HOST_MASK);
kvm_set_cr4(vcpu, vmcs12->host_cr4);
- if (nested_cpu_has_ept(vmcs12))
- nested_ept_uninit_mmu_context(vcpu);
+ nested_ept_uninit_mmu_context(vcpu);
kvm_set_cr3(vcpu, vmcs12->host_cr3);
kvm_mmu_reset_context(vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 21ef1ba..5d004da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3214,8 +3214,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
r = -EFAULT;
if (copy_from_user(&va, argp, sizeof va))
goto out;
- r = 0;
- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
+ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr);
break;
}
case KVM_X86_SETUP_MCE: {
@@ -5739,36 +5738,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
!kvm_event_needs_reinjection(vcpu);
}
-static int vapic_enter(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- struct page *page;
-
- if (!apic || !apic->vapic_addr)
- return 0;
-
- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- if (is_error_page(page))
- return -EFAULT;
-
- vcpu->arch.apic->vapic_page = page;
- return 0;
-}
-
-static void vapic_exit(struct kvm_vcpu *vcpu)
-{
- struct kvm_lapic *apic = vcpu->arch.apic;
- int idx;
-
- if (!apic || !apic->vapic_addr)
- return;
-
- idx = srcu_read_lock(&vcpu->kvm->srcu);
- kvm_release_page_dirty(apic->vapic_page);
- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
- srcu_read_unlock(&vcpu->kvm->srcu, idx);
-}
-
static void update_cr8_intercept(struct kvm_vcpu *vcpu)
{
int max_irr, tpr;
@@ -6069,11 +6038,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
struct kvm *kvm = vcpu->kvm;
vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
- r = vapic_enter(vcpu);
- if (r) {
- srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- return r;
- }
r = 1;
while (r > 0) {
@@ -6132,8 +6096,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
- vapic_exit(vcpu);
-
return r;
}
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index dd74e46..0596e8e 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -83,6 +83,12 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
pte_t pte = gup_get_pte(ptep);
struct page *page;
+ /* Similar to the PMD case, NUMA hinting must take slow path */
+ if (pte_numa(pte)) {
+ pte_unmap(ptep);
+ return 0;
+ }
+
if ((pte_flags(pte) & (mask | _PAGE_SPECIAL)) != mask) {
pte_unmap(ptep);
return 0;
@@ -167,6 +173,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
if (pmd_none(pmd) || pmd_trans_splitting(pmd))
return 0;
if (unlikely(pmd_large(pmd))) {
+ /*
+ * NUMA hinting faults need to be handled in the GUP
+ * slowpath for accounting purposes and so that they
+ * can be serialised against THP migration.
+ */
+ if (pmd_numa(pmd))
+ return 0;
if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
return 0;
} else {
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 92c0234..cceb813 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -690,13 +690,6 @@ void __init efi_init(void)
set_bit(EFI_MEMMAP, &x86_efi_facility);
-#ifdef CONFIG_X86_32
- if (efi_is_native()) {
- x86_platform.get_wallclock = efi_get_time;
- x86_platform.set_wallclock = efi_set_rtc_mmss;
- }
-#endif
-
#if EFI_DEBUG
print_efi_memmap();
#endif
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 0f92173..efe4d72 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1070,12 +1070,13 @@ const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
unsigned long status;
bcp = &per_cpu(bau_control, cpu);
- stat = bcp->statp;
- stat->s_enters++;
if (bcp->nobau)
return cpumask;
+ stat = bcp->statp;
+ stat->s_enters++;
+
if (bcp->busy) {
descriptor_status =
read_lmmr(UVH_LB_BAU_SB_ACTIVATION_STATUS_0);
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
index 8869287..9cac825 100644
--- a/arch/x86/realmode/rm/Makefile
+++ b/arch/x86/realmode/rm/Makefile
@@ -73,9 +73,10 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
-march=i386 -mregparm=3 \
-include $(srctree)/$(src)/../../boot/code16gcc.h \
-fno-strict-aliasing -fomit-frame-pointer -fno-pic \
+ -mno-mmx -mno-sse \
$(call cc-option, -ffreestanding) \
$(call cc-option, -fno-toplevel-reorder,\
- $(call cc-option, -fno-unit-at-a-time)) \
+ $(call cc-option, -fno-unit-at-a-time)) \
$(call cc-option, -fno-stack-protector) \
$(call cc-option, -mpreferred-stack-boundary=2)
KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
diff --git a/block/blk-mq-sysfs.c b/block/blk-mq-sysfs.c
index ba6cf8e..b91ce75 100644
--- a/block/blk-mq-sysfs.c
+++ b/block/blk-mq-sysfs.c
@@ -335,9 +335,22 @@ static struct kobj_type blk_mq_hw_ktype = {
void blk_mq_unregister_disk(struct gendisk *disk)
{
struct request_queue *q = disk->queue;
+ struct blk_mq_hw_ctx *hctx;
+ struct blk_mq_ctx *ctx;
+ int i, j;
+
+ queue_for_each_hw_ctx(q, hctx, i) {
+ hctx_for_each_ctx(hctx, ctx, j) {
+ kobject_del(&ctx->kobj);
+ kobject_put(&ctx->kobj);
+ }
+ kobject_del(&hctx->kobj);
+ kobject_put(&hctx->kobj);
+ }
kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
kobject_del(&q->mq_kobj);
+ kobject_put(&q->mq_kobj);
kobject_put(&disk_to_dev(disk)->kobj);
}
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 5d92485..4770de5 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -348,7 +348,6 @@ source "drivers/acpi/apei/Kconfig"
config ACPI_EXTLOG
tristate "Extended Error Log support"
depends on X86_MCE && X86_LOCAL_APIC
- select EFI
select UEFI_CPER
default n
help
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 6745fe1..e603905 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -162,6 +162,7 @@ static const struct acpi_device_id acpi_lpss_device_ids[] = {
{ "80860F14", (unsigned long)&byt_sdio_dev_desc },
{ "80860F41", (unsigned long)&byt_i2c_dev_desc },
{ "INT33B2", },
+ { "INT33FC", },
{ "INT3430", (unsigned long)&lpt_dev_desc },
{ "INT3431", (unsigned long)&lpt_dev_desc },
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index 786294b..3650b21 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -2,7 +2,6 @@ config ACPI_APEI
bool "ACPI Platform Error Interface (APEI)"
select MISC_FILESYSTEMS
select PSTORE
- select EFI
select UEFI_CPER
depends on X86
help
diff --git a/drivers/acpi/apei/erst.c b/drivers/acpi/apei/erst.c
index 26311f2..cb1d557 100644
--- a/drivers/acpi/apei/erst.c
+++ b/drivers/acpi/apei/erst.c
@@ -942,6 +942,7 @@ static int erst_clearer(enum pstore_type_id type, u64 id, int count,
static struct pstore_info erst_info = {
.owner = THIS_MODULE,
.name = "erst",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = erst_open_pstore,
.close = erst_close_pstore,
.read = erst_reader,
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index bba9b72..0710004 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -156,6 +156,16 @@ int acpi_bus_get_private_data(acpi_handle handle, void **data)
}
EXPORT_SYMBOL(acpi_bus_get_private_data);
+void acpi_bus_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ if (adev)
+ adev->flags.no_hotplug = true;
+}
+EXPORT_SYMBOL_GPL(acpi_bus_no_hotplug);
+
static void acpi_print_osc_error(acpi_handle handle,
struct acpi_osc_context *context, char *error)
{
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 14f1e95..e3a92a6 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -427,6 +427,9 @@ static const struct pci_device_id ahci_pci_tbl[] = {
.driver_data = board_ahci_yes_fbs }, /* 88se9128 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125),
.driver_data = board_ahci_yes_fbs }, /* 88se9125 */
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178,
+ PCI_VENDOR_ID_MARVELL_EXT, 0x9170),
+ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a),
.driver_data = board_ahci_yes_fbs }, /* 88se9172 */
{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172),
@@ -1238,15 +1241,6 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (rc)
return rc;
- /* AHCI controllers often implement SFF compatible interface.
- * Grab all PCI BARs just in case.
- */
- rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
- if (rc == -EBUSY)
- pcim_pin_device(pdev);
- if (rc)
- return rc;
-
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == 0x2652 || pdev->device == 0x2653)) {
u8 map;
@@ -1263,6 +1257,15 @@ static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
}
+ /* AHCI controllers often implement SFF compatible interface.
+ * Grab all PCI BARs just in case.
+ */
+ rc = pcim_iomap_regions_request_all(pdev, 1 << ahci_pci_bar, DRV_NAME);
+ if (rc == -EBUSY)
+ pcim_pin_device(pdev);
+ if (rc)
+ return rc;
+
hpriv = devm_kzalloc(dev, sizeof(*hpriv), GFP_KERNEL);
if (!hpriv)
return -ENOMEM;
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index ae2d73f..3e23e99 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -113,7 +113,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
/*
* set PHY Paremeters, two steps to configure the GPR13,
* one write for rest of parameters, mask of first write
- * is 0x07fffffd, and the other one write for setting
+ * is 0x07ffffff, and the other one write for setting
* the mpll_clk_en.
*/
regmap_update_bits(imxpriv->gpr, 0x34, IMX6Q_GPR13_SATA_RX_EQ_VAL_MASK
@@ -124,6 +124,7 @@ static int imx6q_sata_init(struct device *dev, void __iomem *mmio)
| IMX6Q_GPR13_SATA_TX_ATTEN_MASK
| IMX6Q_GPR13_SATA_TX_BOOST_MASK
| IMX6Q_GPR13_SATA_TX_LVL_MASK
+ | IMX6Q_GPR13_SATA_MPLL_CLK_EN
| IMX6Q_GPR13_SATA_TX_EDGE_RATE
, IMX6Q_GPR13_SATA_RX_EQ_VAL_3_0_DB
| IMX6Q_GPR13_SATA_RX_LOS_LVL_SATA2M
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index 75b9367..1393a58 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2149,9 +2149,16 @@ static int ata_dev_config_ncq(struct ata_device *dev,
"failed to get NCQ Send/Recv Log Emask 0x%x\n",
err_mask);
} else {
+ u8 *cmds = dev->ncq_send_recv_cmds;
+
dev->flags |= ATA_DFLAG_NCQ_SEND_RECV;
- memcpy(dev->ncq_send_recv_cmds, ap->sector_buf,
- ATA_LOG_NCQ_SEND_RECV_SIZE);
+ memcpy(cmds, ap->sector_buf, ATA_LOG_NCQ_SEND_RECV_SIZE);
+
+ if (dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) {
+ ata_dev_dbg(dev, "disabling queued TRIM support\n");
+ cmds[ATA_LOG_NCQ_SEND_RECV_DSM_OFFSET] &=
+ ~ATA_LOG_NCQ_SEND_RECV_DSM_TRIM;
+ }
}
}
@@ -4156,6 +4163,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
ATA_HORKAGE_FIRMWARE_WARN },
+ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */
+ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
+
/* Blacklist entries taken from Silicon Image 3124/3132
Windows driver .inf file - also several Linux problem reports */
{ "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
@@ -4202,6 +4212,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
{ "PIONEER DVD-RW DVR-212D", NULL, ATA_HORKAGE_NOSETXFER },
{ "PIONEER DVD-RW DVR-216D", NULL, ATA_HORKAGE_NOSETXFER },
+ /* devices that don't properly handle queued TRIM commands */
+ { "Micron_M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+ { "Crucial_CT???M500SSD1", NULL, ATA_HORKAGE_NO_NCQ_TRIM, },
+
/* End Marker */
{ }
};
@@ -6519,6 +6533,7 @@ static int __init ata_parse_force_one(char **cur,
{ "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
{ "rstonce", .lflags = ATA_LFLAG_RST_ONCE },
{ "atapi_dmadir", .horkage_on = ATA_HORKAGE_ATAPI_DMADIR },
+ { "disable", .horkage_on = ATA_HORKAGE_DISABLE },
};
char *start = *cur, *p = *cur;
char *id, *val, *endp;
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index ab58556..377eb88 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -3872,6 +3872,27 @@ void ata_scsi_hotplug(struct work_struct *work)
return;
}
+ /*
+ * XXX - UGLY HACK
+ *
+ * The block layer suspend/resume path is fundamentally broken due
+ * to freezable kthreads and workqueue and may deadlock if a block
+ * device gets removed while resume is in progress. I don't know
+ * what the solution is short of removing freezable kthreads and
+ * workqueues altogether.
+ *
+ * The following is an ugly hack to avoid kicking off device
+ * removal while freezer is active. This is a joke but does avoid
+ * this particular deadlock scenario.
+ *
+ * https://bugzilla.kernel.org/show_bug.cgi?id=62801
+ * http://marc.info/?l=linux-kernel&m=138695698516487
+ */
+#ifdef CONFIG_FREEZER
+ while (pm_freezing)
+ msleep(10);
+#endif
+
DPRINTK("ENTER\n");
mutex_lock(&ap->scsi_scan_mutex);
diff --git a/drivers/ata/sata_sis.c b/drivers/ata/sata_sis.c
index fe3ca09..1ad2f62 100644
--- a/drivers/ata/sata_sis.c
+++ b/drivers/ata/sata_sis.c
@@ -83,6 +83,10 @@ static struct pci_driver sis_pci_driver = {
.id_table = sis_pci_tbl,
.probe = sis_init_one,
.remove = ata_pci_remove_one,
+#ifdef CONFIG_PM
+ .suspend = ata_pci_device_suspend,
+ .resume = ata_pci_device_resume,
+#endif
};
static struct scsi_host_template sis_sht = {
diff --git a/drivers/base/regmap/regmap-mmio.c b/drivers/base/regmap/regmap-mmio.c
index 98745dd..81f9775 100644
--- a/drivers/base/regmap/regmap-mmio.c
+++ b/drivers/base/regmap/regmap-mmio.c
@@ -40,7 +40,7 @@ static int regmap_mmio_gather_write(void *context,
BUG_ON(reg_size != 4);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
@@ -73,7 +73,7 @@ static int regmap_mmio_gather_write(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -96,7 +96,7 @@ static int regmap_mmio_read(void *context,
BUG_ON(reg_size != 4);
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
ret = clk_enable(ctx->clk);
if (ret < 0)
return ret;
@@ -129,7 +129,7 @@ static int regmap_mmio_read(void *context,
offset += ctx->val_bytes;
}
- if (ctx->clk)
+ if (!IS_ERR(ctx->clk))
clk_disable(ctx->clk);
return 0;
@@ -139,7 +139,7 @@ static void regmap_mmio_free_context(void *context)
{
struct regmap_mmio_context *ctx = context;
- if (ctx->clk) {
+ if (!IS_ERR(ctx->clk)) {
clk_unprepare(ctx->clk);
clk_put(ctx->clk);
}
@@ -209,6 +209,7 @@ static struct regmap_mmio_context *regmap_mmio_gen_context(struct device *dev,
ctx->regs = regs;
ctx->val_bytes = config->val_bits / 8;
+ ctx->clk = ERR_PTR(-ENODEV);
if (clk_id == NULL)
return ctx;
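Setting ctx->clk to ERR_PTR(-ENODEV) up front lets every later check use !IS_ERR() instead of a NULL test, so "no clock was requested" and "clk_get() failed" are handled the same way. A self-contained sketch of the ERR_PTR convention (a simplified re-implementation for illustration, not the kernel's err.h):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define TOY_MAX_ERRNO	4095

static inline void *toy_err_ptr(long error) { return (void *)error; }
static inline int toy_is_err(const void *ptr)
{
	/* Error pointers occupy the top TOY_MAX_ERRNO values of the address space. */
	return (uintptr_t)ptr >= (uintptr_t)-TOY_MAX_ERRNO;
}

struct toy_ctx {
	void *clk;	/* optional clock handle */
};

int main(void)
{
	struct toy_ctx ctx = { .clk = toy_err_ptr(-ENODEV) };

	if (!toy_is_err(ctx.clk))
		printf("enable the clock\n");
	else
		printf("no clock provided, carry on\n");	/* taken here */
	return 0;
}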
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 9c021d9..c2e0021 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -1549,7 +1549,7 @@ int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
val + (i * val_bytes),
val_bytes);
if (ret != 0)
- return ret;
+ goto out;
}
} else {
ret = _regmap_raw_write(map, reg, wval, val_bytes * val_count);
@@ -1743,7 +1743,7 @@ static int _regmap_read(struct regmap *map, unsigned int reg,
/**
* regmap_read(): Read a value from a single register
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: Register to be read from
* @val: Pointer to store read value
*
@@ -1770,7 +1770,7 @@ EXPORT_SYMBOL_GPL(regmap_read);
/**
* regmap_raw_read(): Read raw data from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value
* @val_len: Size of data to read
@@ -1882,7 +1882,7 @@ EXPORT_SYMBOL_GPL(regmap_fields_read);
/**
* regmap_bulk_read(): Read multiple registers from the device
*
- * @map: Register map to write to
+ * @map: Register map to read from
* @reg: First register to be read from
* @val: Pointer to store read value, in native register size for device
* @val_count: Number of registers to read
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index ea192ec..a2e69d2 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1,4 +1,5 @@
#include <linux/module.h>
+
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/fs.h>
@@ -65,7 +66,7 @@ enum {
NULL_Q_MQ = 2,
};
-static int submit_queues = 1;
+static int submit_queues;
module_param(submit_queues, int, S_IRUGO);
MODULE_PARM_DESC(submit_queues, "Number of submission queues");
@@ -101,9 +102,9 @@ static int hw_queue_depth = 64;
module_param(hw_queue_depth, int, S_IRUGO);
MODULE_PARM_DESC(hw_queue_depth, "Queue depth for each hardware queue. Default: 64");
-static bool use_per_node_hctx = true;
+static bool use_per_node_hctx = false;
module_param(use_per_node_hctx, bool, S_IRUGO);
-MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: true");
+MODULE_PARM_DESC(use_per_node_hctx, "Use per-node allocation for hardware context queues. Default: false");
static void put_tag(struct nullb_queue *nq, unsigned int tag)
{
@@ -346,8 +347,37 @@ static int null_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *rq)
static struct blk_mq_hw_ctx *null_alloc_hctx(struct blk_mq_reg *reg, unsigned int hctx_index)
{
- return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL,
- hctx_index);
+ int b_size = DIV_ROUND_UP(reg->nr_hw_queues, nr_online_nodes);
+ int tip = (reg->nr_hw_queues % nr_online_nodes);
+ int node = 0, i, n;
+
+ /*
+ * Split submit queues evenly wrt to the number of nodes. If uneven,
+ * fill the first buckets with one extra, until the rest is filled with
+ * no extra.
+ */
+ for (i = 0, n = 1; i < hctx_index; i++, n++) {
+ if (n % b_size == 0) {
+ n = 0;
+ node++;
+
+ tip--;
+ if (!tip)
+ b_size = reg->nr_hw_queues / nr_online_nodes;
+ }
+ }
+
+ /*
+ * A node might not be online, therefore map the relative node id to the
+ * real node id.
+ */
+ for_each_online_node(n) {
+ if (!node)
+ break;
+ node--;
+ }
+
+ return kzalloc_node(sizeof(struct blk_mq_hw_ctx), GFP_KERNEL, n);
}
static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
@@ -355,16 +385,24 @@ static void null_free_hctx(struct blk_mq_hw_ctx *hctx, unsigned int hctx_index)
kfree(hctx);
}
+static void null_init_queue(struct nullb *nullb, struct nullb_queue *nq)
+{
+ BUG_ON(!nullb);
+ BUG_ON(!nq);
+
+ init_waitqueue_head(&nq->wait);
+ nq->queue_depth = nullb->queue_depth;
+}
+
static int null_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
unsigned int index)
{
struct nullb *nullb = data;
struct nullb_queue *nq = &nullb->queues[index];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = nullb->queue_depth;
- nullb->nr_queues++;
hctx->driver_data = nq;
+ null_init_queue(nullb, nq);
+ nullb->nr_queues++;
return 0;
}
@@ -417,13 +455,13 @@ static int setup_commands(struct nullb_queue *nq)
nq->cmds = kzalloc(nq->queue_depth * sizeof(*cmd), GFP_KERNEL);
if (!nq->cmds)
- return 1;
+ return -ENOMEM;
tag_size = ALIGN(nq->queue_depth, BITS_PER_LONG) / BITS_PER_LONG;
nq->tag_map = kzalloc(tag_size * sizeof(unsigned long), GFP_KERNEL);
if (!nq->tag_map) {
kfree(nq->cmds);
- return 1;
+ return -ENOMEM;
}
for (i = 0; i < nq->queue_depth; i++) {
@@ -454,33 +492,37 @@ static void cleanup_queues(struct nullb *nullb)
static int setup_queues(struct nullb *nullb)
{
- struct nullb_queue *nq;
- int i;
-
- nullb->queues = kzalloc(submit_queues * sizeof(*nq), GFP_KERNEL);
+ nullb->queues = kzalloc(submit_queues * sizeof(struct nullb_queue),
+ GFP_KERNEL);
if (!nullb->queues)
- return 1;
+ return -ENOMEM;
nullb->nr_queues = 0;
nullb->queue_depth = hw_queue_depth;
- if (queue_mode == NULL_Q_MQ)
- return 0;
+ return 0;
+}
+
+static int init_driver_queues(struct nullb *nullb)
+{
+ struct nullb_queue *nq;
+ int i, ret = 0;
for (i = 0; i < submit_queues; i++) {
nq = &nullb->queues[i];
- init_waitqueue_head(&nq->wait);
- nq->queue_depth = hw_queue_depth;
- if (setup_commands(nq))
- break;
+
+ null_init_queue(nullb, nq);
+
+ ret = setup_commands(nq);
+ if (ret)
+ goto err_queue;
nullb->nr_queues++;
}
- if (i == submit_queues)
- return 0;
-
+ return 0;
+err_queue:
cleanup_queues(nullb);
- return 1;
+ return ret;
}
static int null_add_dev(void)
@@ -495,34 +537,36 @@ static int null_add_dev(void)
spin_lock_init(&nullb->lock);
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx)
+ submit_queues = nr_online_nodes;
+
if (setup_queues(nullb))
goto err;
if (queue_mode == NULL_Q_MQ) {
null_mq_reg.numa_node = home_node;
null_mq_reg.queue_depth = hw_queue_depth;
+ null_mq_reg.nr_hw_queues = submit_queues;
if (use_per_node_hctx) {
null_mq_reg.ops->alloc_hctx = null_alloc_hctx;
null_mq_reg.ops->free_hctx = null_free_hctx;
-
- null_mq_reg.nr_hw_queues = nr_online_nodes;
} else {
null_mq_reg.ops->alloc_hctx = blk_mq_alloc_single_hw_queue;
null_mq_reg.ops->free_hctx = blk_mq_free_single_hw_queue;
-
- null_mq_reg.nr_hw_queues = submit_queues;
}
nullb->q = blk_mq_init_queue(&null_mq_reg, nullb);
} else if (queue_mode == NULL_Q_BIO) {
nullb->q = blk_alloc_queue_node(GFP_KERNEL, home_node);
blk_queue_make_request(nullb->q, null_queue_bio);
+ init_driver_queues(nullb);
} else {
nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
if (nullb->q)
blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
+ init_driver_queues(nullb);
}
if (!nullb->q)
@@ -579,7 +623,13 @@ static int __init null_init(void)
}
#endif
- if (submit_queues > nr_cpu_ids)
+ if (queue_mode == NULL_Q_MQ && use_per_node_hctx) {
+ if (submit_queues < nr_online_nodes) {
+ pr_warn("null_blk: submit_queues param is set to %u.",
+ nr_online_nodes);
+ submit_queues = nr_online_nodes;
+ }
+ } else if (submit_queues > nr_cpu_ids)
submit_queues = nr_cpu_ids;
else if (!submit_queues)
submit_queues = 1;
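
For the per-node hardware-context allocation added above, a stand-alone sketch of the queue-to-node mapping may help. With nr_hw_queues = 6 and nr_online_nodes = 4, b_size starts at DIV_ROUND_UP(6, 4) = 2 and tip = 6 % 4 = 2, so queues 0-1 land on node 0, queues 2-3 on node 1, queue 4 on node 2 and queue 5 on node 3. The helper below mirrors the loop from null_alloc_hctx() outside the driver, with a local DIV_ROUND_UP in place of the kernel macro:

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Return the relative node index that hctx_index should be placed on. */
	static int hctx_to_node(int hctx_index, int nr_hw_queues, int nr_nodes)
	{
		int b_size = DIV_ROUND_UP(nr_hw_queues, nr_nodes);
		int tip = nr_hw_queues % nr_nodes;
		int node = 0, i, n;

		for (i = 0, n = 1; i < hctx_index; i++, n++) {
			if (n % b_size == 0) {
				n = 0;
				node++;

				tip--;
				if (!tip)
					b_size = nr_hw_queues / nr_nodes;
			}
		}
		return node;
	}
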
diff --git a/drivers/block/skd_main.c b/drivers/block/skd_main.c
index 9199c93..eb6e1e0 100644
--- a/drivers/block/skd_main.c
+++ b/drivers/block/skd_main.c
@@ -5269,7 +5269,7 @@ const char *skd_skdev_state_to_str(enum skd_drvr_state state)
}
}
-const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
+static const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
{
switch (state) {
case SKD_MSG_STATE_IDLE:
@@ -5281,7 +5281,7 @@ const char *skd_skmsg_state_to_str(enum skd_fit_msg_state state)
}
}
-const char *skd_skreq_state_to_str(enum skd_req_state state)
+static const char *skd_skreq_state_to_str(enum skd_req_state state)
{
switch (state) {
case SKD_REQ_STATE_IDLE:
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index 6bfc1bb..dceb85f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -87,6 +87,7 @@ static const struct usb_device_id ath3k_table[] = {
{ USB_DEVICE(0x0CF3, 0xE004) },
{ USB_DEVICE(0x0CF3, 0xE005) },
{ USB_DEVICE(0x0930, 0x0219) },
+ { USB_DEVICE(0x0930, 0x0220) },
{ USB_DEVICE(0x0489, 0xe057) },
{ USB_DEVICE(0x13d3, 0x3393) },
{ USB_DEVICE(0x0489, 0xe04e) },
@@ -129,6 +130,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index c0ff34f..3980fd1 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -154,6 +154,7 @@ static const struct usb_device_id blacklist_table[] = {
{ USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
+ { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
{ USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 8d3009e..5543b7d 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -87,7 +87,7 @@ static unsigned int _get_table_val(const struct clk_div_table *table,
return 0;
}
-static unsigned int _get_val(struct clk_divider *divider, u8 div)
+static unsigned int _get_val(struct clk_divider *divider, unsigned int div)
{
if (divider->flags & CLK_DIVIDER_ONE_BASED)
return div;
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 7be41e6..00a3abe 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -60,7 +60,7 @@ static int s2mps11_clk_prepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL,
s2mps11->mask, s2mps11->mask);
if (!ret)
@@ -74,7 +74,7 @@ static void s2mps11_clk_unprepare(struct clk_hw *hw)
struct s2mps11_clk *s2mps11 = to_s2mps11_clk(hw);
int ret;
- ret = regmap_update_bits(s2mps11->iodev->regmap, S2MPS11_REG_RTC_CTRL,
+ ret = regmap_update_bits(s2mps11->iodev->regmap_pmic, S2MPS11_REG_RTC_CTRL,
s2mps11->mask, ~s2mps11->mask);
if (!ret)
@@ -174,7 +174,7 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
s2mps11_clk->hw.init = &s2mps11_clks_init[i];
s2mps11_clk->mask = 1 << i;
- ret = regmap_read(s2mps11_clk->iodev->regmap,
+ ret = regmap_read(s2mps11_clk->iodev->regmap_pmic,
S2MPS11_REG_RTC_CTRL, &val);
if (ret < 0)
goto err_reg;
diff --git a/drivers/clk/samsung/clk-exynos-audss.c b/drivers/clk/samsung/clk-exynos-audss.c
index 39b40aa..68e515d 100644
--- a/drivers/clk/samsung/clk-exynos-audss.c
+++ b/drivers/clk/samsung/clk-exynos-audss.c
@@ -26,17 +26,17 @@ static struct clk_onecell_data clk_data;
#define ASS_CLK_DIV 0x4
#define ASS_CLK_GATE 0x8
+/* list of all parent clock list */
+static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
+static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
+
+#ifdef CONFIG_PM_SLEEP
static unsigned long reg_save[][2] = {
{ASS_CLK_SRC, 0},
{ASS_CLK_DIV, 0},
{ASS_CLK_GATE, 0},
};
-/* list of all parent clock list */
-static const char *mout_audss_p[] = { "fin_pll", "fout_epll" };
-static const char *mout_i2s_p[] = { "mout_audss", "cdclk0", "sclk_audio0" };
-
-#ifdef CONFIG_PM_SLEEP
static int exynos_audss_clk_suspend(void)
{
int i;
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index ad5ff50..1a7c1b9 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -39,7 +39,7 @@
#define SRC_TOP1 0xc214
#define SRC_CAM 0xc220
#define SRC_TV 0xc224
-#define SRC_MFC 0xcc28
+#define SRC_MFC 0xc228
#define SRC_G3D 0xc22c
#define E4210_SRC_IMAGE 0xc230
#define SRC_LCD0 0xc234
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index adf3234..e52359c 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -25,6 +25,7 @@
#define MPLL_LOCK 0x4000
#define MPLL_CON0 0x4100
#define SRC_CORE1 0x4204
+#define GATE_IP_ACP 0x8800
#define CPLL_LOCK 0x10020
#define EPLL_LOCK 0x10030
#define VPLL_LOCK 0x10040
@@ -75,7 +76,6 @@
#define SRC_CDREX 0x20200
#define PLL_DIV2_SEL 0x20a24
#define GATE_IP_DISP1 0x10928
-#define GATE_IP_ACP 0x10000
/* list of PLLs to be registered */
enum exynos5250_plls {
@@ -120,7 +120,8 @@ enum exynos5250_clks {
spi2, i2s1, i2s2, pcm1, pcm2, pwm, spdif, ac97, hsi2c0, hsi2c1, hsi2c2,
hsi2c3, chipid, sysreg, pmu, cmu_top, cmu_core, cmu_mem, tzpc0, tzpc1,
tzpc2, tzpc3, tzpc4, tzpc5, tzpc6, tzpc7, tzpc8, tzpc9, hdmi_cec, mct,
- wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d,
+ wdt, rtc, tmu, fimd1, mie1, dsim0, dp, mixer, hdmi, g2d, mdma0,
+ smmu_mdma0,
/* mux clocks */
mout_hdmi = 1024,
@@ -354,8 +355,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(smmu_gscl2, "smmu_gscl2", "aclk266", GATE_IP_GSCL, 9, 0, 0),
GATE(smmu_gscl3, "smmu_gscl3", "aclk266", GATE_IP_GSCL, 10, 0, 0),
GATE(mfc, "mfc", "aclk333", GATE_IP_MFC, 0, 0, 0),
- GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 1, 0, 0),
- GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 2, 0, 0),
+ GATE(smmu_mfcl, "smmu_mfcl", "aclk333", GATE_IP_MFC, 2, 0, 0),
+ GATE(smmu_mfcr, "smmu_mfcr", "aclk333", GATE_IP_MFC, 1, 0, 0),
GATE(rotator, "rotator", "aclk266", GATE_IP_GEN, 1, 0, 0),
GATE(jpeg, "jpeg", "aclk166", GATE_IP_GEN, 2, 0, 0),
GATE(mdma1, "mdma1", "aclk266", GATE_IP_GEN, 4, 0, 0),
@@ -406,7 +407,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(hsi2c2, "hsi2c2", "aclk66", GATE_IP_PERIC, 30, 0, 0),
GATE(hsi2c3, "hsi2c3", "aclk66", GATE_IP_PERIC, 31, 0, 0),
GATE(chipid, "chipid", "aclk66", GATE_IP_PERIS, 0, 0, 0),
- GATE(sysreg, "sysreg", "aclk66", GATE_IP_PERIS, 1, 0, 0),
+ GATE(sysreg, "sysreg", "aclk66",
+ GATE_IP_PERIS, 1, CLK_IGNORE_UNUSED, 0),
GATE(pmu, "pmu", "aclk66", GATE_IP_PERIS, 2, CLK_IGNORE_UNUSED, 0),
GATE(tzpc0, "tzpc0", "aclk66", GATE_IP_PERIS, 6, 0, 0),
GATE(tzpc1, "tzpc1", "aclk66", GATE_IP_PERIS, 7, 0, 0),
@@ -492,6 +494,8 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
GATE(mixer, "mixer", "mout_aclk200_disp1", GATE_IP_DISP1, 5, 0, 0),
GATE(hdmi, "hdmi", "mout_aclk200_disp1", GATE_IP_DISP1, 6, 0, 0),
GATE(g2d, "g2d", "aclk200", GATE_IP_ACP, 3, 0, 0),
+ GATE(mdma0, "mdma0", "aclk266", GATE_IP_ACP, 1, 0, 0),
+ GATE(smmu_mdma0, "smmu_mdma0", "aclk266", GATE_IP_ACP, 5, 0, 0),
};
static struct samsung_pll_rate_table vpll_24mhz_tbl[] __initdata = {
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index 5c07a56..634c4d6 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -75,6 +75,7 @@ config CLKSRC_DBX500_PRCMU_SCHED_CLOCK
config CLKSRC_EFM32
bool "Clocksource for Energy Micro's EFM32 SoCs" if !ARCH_EFM32
depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select CLKSRC_MMIO
default ARCH_EFM32
help
Support to use the timers of EFM32 SoCs as clock source and clock
diff --git a/drivers/clocksource/clksrc-of.c b/drivers/clocksource/clksrc-of.c
index 35639cf4..b9ddd9e 100644
--- a/drivers/clocksource/clksrc-of.c
+++ b/drivers/clocksource/clksrc-of.c
@@ -35,6 +35,5 @@ void __init clocksource_of_init(void)
init_func = match->data;
init_func(np);
- of_node_put(np);
}
}
diff --git a/drivers/clocksource/dw_apb_timer_of.c b/drivers/clocksource/dw_apb_timer_of.c
index 45ba8ae..2a2ea27 100644
--- a/drivers/clocksource/dw_apb_timer_of.c
+++ b/drivers/clocksource/dw_apb_timer_of.c
@@ -108,12 +108,11 @@ static void __init add_clocksource(struct device_node *source_timer)
static u64 read_sched_clock(void)
{
- return __raw_readl(sched_io_base);
+ return ~__raw_readl(sched_io_base);
}
static const struct of_device_id sptimer_ids[] __initconst = {
{ .compatible = "picochip,pc3x2-rtc" },
- { .compatible = "snps,dw-apb-timer-sp" },
{ /* Sentinel */ },
};
@@ -151,4 +150,6 @@ static void __init dw_apb_timer_init(struct device_node *timer)
num_called++;
}
CLOCKSOURCE_OF_DECLARE(pc3x2_timer, "picochip,pc3x2-timer", dw_apb_timer_init);
-CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_osc, "snps,dw-apb-timer-osc", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer_sp, "snps,dw-apb-timer-sp", dw_apb_timer_init);
+CLOCKSOURCE_OF_DECLARE(apb_timer, "snps,dw-apb-timer", dw_apb_timer_init);
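
The read_sched_clock() change above returns the bitwise complement of the counter: the DW APB timer counts down, so inverting the raw value yields the monotonically increasing quantity sched_clock expects (0xffffffff becomes 0, 0xfffffffe becomes 1, and so on). As a generic illustration, separate from the driver function itself:

	#include <linux/types.h>

	/* Convert a free-running 32-bit down-counter reading into an up-count. */
	static inline u32 downcount_to_upcount(u32 raw)
	{
		return ~raw;	/* ~0xffffffff == 0, ~0xfffffffe == 1, ... */
	}
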
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index 2fb4695..a4f6119 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -179,6 +179,9 @@ static void __init sun4i_timer_init(struct device_node *node)
writel(TIMER_CTL_CLK_SRC(TIMER_CTL_CLK_SRC_OSC24M),
timer_base + TIMER_CTL_REG(0));
+ /* Make sure timer is stopped before playing with interrupts */
+ sun4i_clkevt_time_stop(0);
+
ret = setup_irq(irq, &sun4i_timer_irq);
if (ret)
pr_warn("failed to setup irq %d\n", irq);
diff --git a/drivers/clocksource/time-armada-370-xp.c b/drivers/clocksource/time-armada-370-xp.c
index d8e47e5..4e7f680 100644
--- a/drivers/clocksource/time-armada-370-xp.c
+++ b/drivers/clocksource/time-armada-370-xp.c
@@ -256,11 +256,6 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
ticks_per_jiffy = (timer_clk + HZ / 2) / HZ;
/*
- * Set scale and timer for sched_clock.
- */
- sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
-
- /*
* Setup free-running clocksource timer (interrupts
* disabled).
*/
@@ -270,6 +265,11 @@ static void __init armada_370_xp_timer_common_init(struct device_node *np)
timer_ctrl_clrset(0, TIMER0_EN | TIMER0_RELOAD_EN |
TIMER0_DIV(TIMER_DIVIDER_SHIFT));
+ /*
+ * Set scale and timer for sched_clock.
+ */
+ sched_clock_register(armada_370_xp_read_sched_clock, 32, timer_clk);
+
clocksource_mmio_init(timer_base + TIMER0_VAL_OFF,
"armada_370_xp_clocksource",
timer_clk, 300, 32, clocksource_mmio_readl_down);
diff --git a/drivers/cpufreq/at32ap-cpufreq.c b/drivers/cpufreq/at32ap-cpufreq.c
index 856ad80..7c03dd8 100644
--- a/drivers/cpufreq/at32ap-cpufreq.c
+++ b/drivers/cpufreq/at32ap-cpufreq.c
@@ -58,7 +58,7 @@ static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
return 0;
}
-static int __init at32_cpufreq_driver_init(struct cpufreq_policy *policy)
+static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
{
unsigned int frequency, rate, min_freq;
int retval, steps, i;
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 02d534d..8d19f7c 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -828,14 +828,17 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
int ret = 0;
memcpy(&new_policy, policy, sizeof(*policy));
+
+ /* Use the default policy if its valid. */
+ if (cpufreq_driver->setpolicy)
+ cpufreq_parse_governor(policy->governor->name,
+ &new_policy.policy, NULL);
+
/* assure that the starting sequence is run in cpufreq_set_policy */
policy->governor = NULL;
/* set default policy */
ret = cpufreq_set_policy(policy, &new_policy);
- policy->user_policy.policy = policy->policy;
- policy->user_policy.governor = policy->governor;
-
if (ret) {
pr_debug("setting policy failed\n");
if (cpufreq_driver->exit)
@@ -845,8 +848,7 @@ static void cpufreq_init_policy(struct cpufreq_policy *policy)
#ifdef CONFIG_HOTPLUG_CPU
static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
- unsigned int cpu, struct device *dev,
- bool frozen)
+ unsigned int cpu, struct device *dev)
{
int ret = 0;
unsigned long flags;
@@ -877,11 +879,7 @@ static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
}
}
- /* Don't touch sysfs links during light-weight init */
- if (!frozen)
- ret = sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
-
- return ret;
+ return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
}
#endif
@@ -926,6 +924,27 @@ err_free_policy:
return NULL;
}
+static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
+{
+ struct kobject *kobj;
+ struct completion *cmp;
+
+ down_read(&policy->rwsem);
+ kobj = &policy->kobj;
+ cmp = &policy->kobj_unregister;
+ up_read(&policy->rwsem);
+ kobject_put(kobj);
+
+ /*
+ * We need to make sure that the underlying kobj is
+ * actually not referenced anymore by anybody before we
+ * proceed with unloading.
+ */
+ pr_debug("waiting for dropping of refcount\n");
+ wait_for_completion(cmp);
+ pr_debug("wait complete\n");
+}
+
static void cpufreq_policy_free(struct cpufreq_policy *policy)
{
free_cpumask_var(policy->related_cpus);
@@ -986,7 +1005,7 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
list_for_each_entry(tpolicy, &cpufreq_policy_list, policy_list) {
if (cpumask_test_cpu(cpu, tpolicy->related_cpus)) {
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
- ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev, frozen);
+ ret = cpufreq_add_policy_cpu(tpolicy, cpu, dev);
up_read(&cpufreq_rwsem);
return ret;
}
@@ -994,15 +1013,17 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
read_unlock_irqrestore(&cpufreq_driver_lock, flags);
#endif
- if (frozen)
- /* Restore the saved policy when doing light-weight init */
- policy = cpufreq_policy_restore(cpu);
- else
+ /*
+ * Restore the saved policy when doing light-weight init and fall back
+ * to the full init if that fails.
+ */
+ policy = frozen ? cpufreq_policy_restore(cpu) : NULL;
+ if (!policy) {
+ frozen = false;
policy = cpufreq_policy_alloc();
-
- if (!policy)
- goto nomem_out;
-
+ if (!policy)
+ goto nomem_out;
+ }
/*
* In the resume path, since we restore a saved policy, the assignment
@@ -1047,8 +1068,10 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
*/
cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
- policy->user_policy.min = policy->min;
- policy->user_policy.max = policy->max;
+ if (!frozen) {
+ policy->user_policy.min = policy->min;
+ policy->user_policy.max = policy->max;
+ }
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_START, policy);
@@ -1079,6 +1102,11 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif,
cpufreq_init_policy(policy);
+ if (!frozen) {
+ policy->user_policy.policy = policy->policy;
+ policy->user_policy.governor = policy->governor;
+ }
+
kobject_uevent(&policy->kobj, KOBJ_ADD);
up_read(&cpufreq_rwsem);
@@ -1096,7 +1124,13 @@ err_get_freq:
if (cpufreq_driver->exit)
cpufreq_driver->exit(policy);
err_set_policy_cpu:
+ if (frozen) {
+ /* Do not leave stale fallback data behind. */
+ per_cpu(cpufreq_cpu_data_fallback, cpu) = NULL;
+ cpufreq_policy_put_kobj(policy);
+ }
cpufreq_policy_free(policy);
+
nomem_out:
up_read(&cpufreq_rwsem);
@@ -1118,7 +1152,7 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
}
static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
- unsigned int old_cpu, bool frozen)
+ unsigned int old_cpu)
{
struct device *cpu_dev;
int ret;
@@ -1126,10 +1160,6 @@ static int cpufreq_nominate_new_policy_cpu(struct cpufreq_policy *policy,
/* first sibling now owns the new sysfs dir */
cpu_dev = get_cpu_device(cpumask_any_but(policy->cpus, old_cpu));
- /* Don't touch sysfs files during light-weight tear-down */
- if (frozen)
- return cpu_dev->id;
-
sysfs_remove_link(&cpu_dev->kobj, "cpufreq");
ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
if (ret) {
@@ -1196,7 +1226,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
if (!frozen)
sysfs_remove_link(&dev->kobj, "cpufreq");
} else if (cpus > 1) {
- new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu, frozen);
+ new_cpu = cpufreq_nominate_new_policy_cpu(policy, cpu);
if (new_cpu >= 0) {
update_policy_cpu(policy, new_cpu);
@@ -1218,8 +1248,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
int ret;
unsigned long flags;
struct cpufreq_policy *policy;
- struct kobject *kobj;
- struct completion *cmp;
read_lock_irqsave(&cpufreq_driver_lock, flags);
policy = per_cpu(cpufreq_cpu_data, cpu);
@@ -1249,22 +1277,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
}
}
- if (!frozen) {
- down_read(&policy->rwsem);
- kobj = &policy->kobj;
- cmp = &policy->kobj_unregister;
- up_read(&policy->rwsem);
- kobject_put(kobj);
-
- /*
- * We need to make sure that the underlying kobj is
- * actually not referenced anymore by anybody before we
- * proceed with unloading.
- */
- pr_debug("waiting for dropping of refcount\n");
- wait_for_completion(cmp);
- pr_debug("wait complete\n");
- }
+ if (!frozen)
+ cpufreq_policy_put_kobj(policy);
/*
* Perform the ->exit() even during light-weight tear-down,
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 5f1cbae..f9d561e 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -614,6 +614,11 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
cpu = all_cpu_data[cpunum];
intel_pstate_get_cpu_pstates(cpu);
+ if (!cpu->pstate.current_pstate) {
+ all_cpu_data[cpunum] = NULL;
+ kfree(cpu);
+ return -ENODATA;
+ }
cpu->cpu = cpunum;
diff --git a/drivers/cpuidle/cpuidle-calxeda.c b/drivers/cpuidle/cpuidle-calxeda.c
index 3679563..6e51114 100644
--- a/drivers/cpuidle/cpuidle-calxeda.c
+++ b/drivers/cpuidle/cpuidle-calxeda.c
@@ -65,7 +65,7 @@ static struct cpuidle_driver calxeda_idle_driver = {
.state_count = 2,
};
-static int __init calxeda_cpuidle_probe(struct platform_device *pdev)
+static int calxeda_cpuidle_probe(struct platform_device *pdev)
{
return cpuidle_register(&calxeda_idle_driver, NULL);
}
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 9dd6e01..f757a0f 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -1410,14 +1410,12 @@ static const struct platform_device_info ixp_dev_info __initdata = {
static int __init ixp_module_init(void)
{
int num = ARRAY_SIZE(ixp4xx_algos);
- int i, err ;
+ int i, err;
pdev = platform_device_register_full(&ixp_dev_info);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
- dev = &pdev->dev;
-
spin_lock_init(&desc_lock);
spin_lock_init(&emerg_lock);
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 446687c..c823daa 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -62,6 +62,7 @@ config INTEL_IOATDMA
tristate "Intel I/OAT DMA support"
depends on PCI && X86
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select DCA
help
Enable support for the Intel(R) I/OAT DMA engine present
@@ -112,6 +113,7 @@ config MV_XOR
bool "Marvell XOR engine support"
depends on PLAT_ORION
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
---help---
Enable support for the Marvell XOR engine.
@@ -187,6 +189,7 @@ config AMCC_PPC440SPE_ADMA
tristate "AMCC PPC440SPe ADMA support"
depends on 440SPe || 440SP
select DMA_ENGINE
+ select DMA_ENGINE_RAID
select ARCH_HAS_ASYNC_TX_FIND_CHANNEL
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
help
@@ -352,6 +355,7 @@ config NET_DMA
bool "Network: TCP receive copy offload"
depends on DMA_ENGINE && NET
default (INTEL_IOATDMA || FSL_DMA)
+ depends on BROKEN
help
This enables the use of DMA engines in the network stack to
offload receive copy-to-user operations, freeing CPU cycles.
@@ -377,4 +381,7 @@ config DMATEST
Simple DMA test client. Say N unless you're debugging a
DMA Device driver.
+config DMA_ENGINE_RAID
+ bool
+
endif
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 16a2aa2..ec4ee5c 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -1169,7 +1169,7 @@ static void pl08x_desc_free(struct virt_dma_desc *vd)
struct pl08x_txd *txd = to_pl08x_txd(&vd->tx);
struct pl08x_dma_chan *plchan = to_pl08x_chan(vd->tx.chan);
- dma_descriptor_unmap(txd);
+ dma_descriptor_unmap(&vd->tx);
if (!txd->done)
pl08x_release_mux(plchan);
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index f31d647..2787aba 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -347,10 +347,6 @@ static struct device *chan2dev(struct dma_chan *chan)
{
return &chan->dev->device;
}
-static struct device *chan2parent(struct dma_chan *chan)
-{
- return chan->dev->device.parent;
-}
#if defined(VERBOSE_DEBUG)
static void vdbg_dump_regs(struct at_dma_chan *atchan)
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index ea806bd..ef63b90 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -912,7 +912,7 @@ struct dmaengine_unmap_pool {
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
__UNMAP_POOL(2),
- #if IS_ENABLED(CONFIG_ASYNC_TX_DMA)
+ #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
__UNMAP_POOL(16),
__UNMAP_POOL(128),
__UNMAP_POOL(256),
@@ -1054,7 +1054,7 @@ dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
dma_cookie_t cookie;
unsigned long flags;
- unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOIO);
+ unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
if (!unmap)
return -ENOMEM;
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index 20f9a3a..9dfcaf5c 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -539,9 +539,9 @@ static int dmatest_func(void *data)
um->len = params->buf_size;
for (i = 0; i < src_cnt; i++) {
- unsigned long buf = (unsigned long) thread->srcs[i];
+ void *buf = thread->srcs[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = buf & ~PAGE_MASK;
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
um->addr[i] = dma_map_page(dev->dev, pg, pg_off,
um->len, DMA_TO_DEVICE);
@@ -559,9 +559,9 @@ static int dmatest_func(void *data)
/* map with DMA_BIDIRECTIONAL to force writeback/invalidate */
dsts = &um->addr[src_cnt];
for (i = 0; i < dst_cnt; i++) {
- unsigned long buf = (unsigned long) thread->dsts[i];
+ void *buf = thread->dsts[i];
struct page *pg = virt_to_page(buf);
- unsigned pg_off = buf & ~PAGE_MASK;
+ unsigned pg_off = (unsigned long) buf & ~PAGE_MASK;
dsts[i] = dma_map_page(dev->dev, pg, pg_off, um->len,
DMA_BIDIRECTIONAL);
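
The dmatest hunks above keep the buffer as a void pointer and only cast to unsigned long for the page-offset mask, instead of doing all of the arithmetic on an integer. A generic sketch of that page/offset split, using a hypothetical helper name:

	#include <linux/mm.h>

	static void buf_to_page_and_offset(void *buf, struct page **pg,
					   unsigned int *off)
	{
		*pg = virt_to_page(buf);			/* page backing the address */
		*off = (unsigned long)buf & ~PAGE_MASK;		/* offset within that page */
	}
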
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 7086a16..f157c6f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -86,11 +86,6 @@ static void set_desc_cnt(struct fsldma_chan *chan,
hw->count = CPU_TO_DMA(chan, count, 32);
}
-static u32 get_desc_cnt(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
-{
- return DMA_TO_CPU(chan, desc->hw.count, 32);
-}
-
static void set_desc_src(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t src)
{
@@ -101,16 +96,6 @@ static void set_desc_src(struct fsldma_chan *chan,
hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
}
-static dma_addr_t get_desc_src(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.src_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_dst(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t dst)
{
@@ -121,16 +106,6 @@ static void set_desc_dst(struct fsldma_chan *chan,
hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
}
-static dma_addr_t get_desc_dst(struct fsldma_chan *chan,
- struct fsl_desc_sw *desc)
-{
- u64 snoop_bits;
-
- snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
- ? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
- return DMA_TO_CPU(chan, desc->hw.dst_addr, 64) & ~snoop_bits;
-}
-
static void set_desc_next(struct fsldma_chan *chan,
struct fsl_dma_ld_hw *hw, dma_addr_t next)
{
@@ -408,7 +383,7 @@ static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
struct fsl_desc_sw *child;
unsigned long flags;
- dma_cookie_t cookie;
+ dma_cookie_t cookie = -EINVAL;
spin_lock_irqsave(&chan->desc_lock, flags);
@@ -854,10 +829,6 @@ static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
struct fsl_desc_sw *desc)
{
struct dma_async_tx_descriptor *txd = &desc->async_tx;
- struct device *dev = chan->common.device->dev;
- dma_addr_t src = get_desc_src(chan, desc);
- dma_addr_t dst = get_desc_dst(chan, desc);
- u32 len = get_desc_cnt(chan, desc);
/* Run the link descriptor callback function */
if (txd->callback) {
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1a49c7776..8752918 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -817,7 +817,15 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
dma_src = dma_map_single(dev, src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, dma_src)) {
+ dev_err(dev, "mapping src buffer failed\n");
+ goto free_resources;
+ }
dma_dest = dma_map_single(dev, dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, dma_dest)) {
+ dev_err(dev, "mapping dest buffer failed\n");
+ goto unmap_src;
+ }
flags = DMA_PREP_INTERRUPT;
tx = device->common.device_prep_dma_memcpy(dma_chan, dma_dest, dma_src,
IOAT_TEST_SIZE, flags);
@@ -855,8 +863,9 @@ int ioat_dma_self_test(struct ioatdma_device *device)
}
unmap_dma:
- dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
dma_unmap_single(dev, dma_dest, IOAT_TEST_SIZE, DMA_FROM_DEVICE);
+unmap_src:
+ dma_unmap_single(dev, dma_src, IOAT_TEST_SIZE, DMA_TO_DEVICE);
free_resources:
dma->device_free_chan_resources(dma_chan);
out:
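
The ioat self-test hunk adds dma_mapping_error() checks and a dedicated unwind label for the source mapping. A compact sketch of the same pattern with made-up names, mapping two buffers and unwinding the first if the second mapping fails:

	#include <linux/dma-mapping.h>

	static int foo_map_pair(struct device *dev, void *src, void *dst, size_t len,
				dma_addr_t *dma_src, dma_addr_t *dma_dst)
	{
		*dma_src = dma_map_single(dev, src, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, *dma_src))
			return -ENOMEM;

		*dma_dst = dma_map_single(dev, dst, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, *dma_dst)) {
			/* unmap in reverse order on failure */
			dma_unmap_single(dev, *dma_src, len, DMA_TO_DEVICE);
			return -ENOMEM;
		}

		return 0;
	}
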
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index dcb1e05..8869500 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1017,6 +1017,7 @@ static int mmp_pdma_probe(struct platform_device *op)
}
}
+ platform_set_drvdata(op, pdev);
dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
return 0;
}
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 7807f0e..53fb0c8 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -54,12 +54,6 @@ static void mv_desc_init(struct mv_xor_desc_slot *desc, unsigned long flags)
hw_desc->desc_command = (1 << 31);
}
-static u32 mv_desc_get_dest_addr(struct mv_xor_desc_slot *desc)
-{
- struct mv_xor_desc *hw_desc = desc->hw_desc;
- return hw_desc->phy_dest_addr;
-}
-
static void mv_desc_set_byte_count(struct mv_xor_desc_slot *desc,
u32 byte_count)
{
@@ -787,7 +781,6 @@ static void mv_xor_issue_pending(struct dma_chan *chan)
/*
* Perform a transaction to verify the HW works.
*/
-#define MV_XOR_TEST_SIZE 2000
static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
{
@@ -797,20 +790,21 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
struct dma_chan *dma_chan;
dma_cookie_t cookie;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
int err = 0;
- src = kmalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ src = kmalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!src)
return -ENOMEM;
- dest = kzalloc(sizeof(u8) * MV_XOR_TEST_SIZE, GFP_KERNEL);
+ dest = kzalloc(sizeof(u8) * PAGE_SIZE, GFP_KERNEL);
if (!dest) {
kfree(src);
return -ENOMEM;
}
/* Fill in src buffer */
- for (i = 0; i < MV_XOR_TEST_SIZE; i++)
+ for (i = 0; i < PAGE_SIZE; i++)
((u8 *) src)[i] = (u8)i;
dma_chan = &mv_chan->dmachan;
@@ -819,14 +813,26 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
- dest_dma = dma_map_single(dma_chan->device->dev, dest,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, 2, GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
+ src_dma = dma_map_page(dma_chan->device->dev, virt_to_page(src), 0,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->to_cnt = 1;
+ unmap->addr[0] = src_dma;
- src_dma = dma_map_single(dma_chan->device->dev, src,
- MV_XOR_TEST_SIZE, DMA_TO_DEVICE);
+ dest_dma = dma_map_page(dma_chan->device->dev, virt_to_page(dest), 0,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ unmap->from_cnt = 1;
+ unmap->addr[1] = dest_dma;
+
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_memcpy(dma_chan, dest_dma, src_dma,
- MV_XOR_TEST_SIZE, 0);
+ PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
async_tx_ack(tx);
@@ -841,8 +847,8 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
dma_sync_single_for_cpu(dma_chan->device->dev, dest_dma,
- MV_XOR_TEST_SIZE, DMA_FROM_DEVICE);
- if (memcmp(src, dest, MV_XOR_TEST_SIZE)) {
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ if (memcmp(src, dest, PAGE_SIZE)) {
dev_err(dma_chan->device->dev,
"Self-test copy failed compare, disabling\n");
err = -ENODEV;
@@ -850,6 +856,7 @@ static int mv_xor_memcpy_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
kfree(src);
@@ -867,13 +874,15 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
dma_addr_t dma_srcs[MV_XOR_NUM_SRC_TEST];
dma_addr_t dest_dma;
struct dma_async_tx_descriptor *tx;
+ struct dmaengine_unmap_data *unmap;
struct dma_chan *dma_chan;
dma_cookie_t cookie;
u8 cmp_byte = 0;
u32 cmp_word;
int err = 0;
+ int src_count = MV_XOR_NUM_SRC_TEST;
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
xor_srcs[src_idx] = alloc_page(GFP_KERNEL);
if (!xor_srcs[src_idx]) {
while (src_idx--)
@@ -890,13 +899,13 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
/* Fill in src buffers */
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++) {
+ for (src_idx = 0; src_idx < src_count; src_idx++) {
u8 *ptr = page_address(xor_srcs[src_idx]);
for (i = 0; i < PAGE_SIZE; i++)
ptr[i] = (1 << src_idx);
}
- for (src_idx = 0; src_idx < MV_XOR_NUM_SRC_TEST; src_idx++)
+ for (src_idx = 0; src_idx < src_count; src_idx++)
cmp_byte ^= (u8) (1 << src_idx);
cmp_word = (cmp_byte << 24) | (cmp_byte << 16) |
@@ -910,16 +919,29 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
goto out;
}
+ unmap = dmaengine_get_unmap_data(dma_chan->device->dev, src_count + 1,
+ GFP_KERNEL);
+ if (!unmap) {
+ err = -ENOMEM;
+ goto free_resources;
+ }
+
/* test xor */
- dest_dma = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
- DMA_FROM_DEVICE);
+ for (i = 0; i < src_count; i++) {
+ unmap->addr[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
+ 0, PAGE_SIZE, DMA_TO_DEVICE);
+ dma_srcs[i] = unmap->addr[i];
+ unmap->to_cnt++;
+ }
- for (i = 0; i < MV_XOR_NUM_SRC_TEST; i++)
- dma_srcs[i] = dma_map_page(dma_chan->device->dev, xor_srcs[i],
- 0, PAGE_SIZE, DMA_TO_DEVICE);
+ unmap->addr[src_count] = dma_map_page(dma_chan->device->dev, dest, 0, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ dest_dma = unmap->addr[src_count];
+ unmap->from_cnt = 1;
+ unmap->len = PAGE_SIZE;
tx = mv_xor_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
- MV_XOR_NUM_SRC_TEST, PAGE_SIZE, 0);
+ src_count, PAGE_SIZE, 0);
cookie = mv_xor_tx_submit(tx);
mv_xor_issue_pending(dma_chan);
@@ -948,9 +970,10 @@ mv_xor_xor_self_test(struct mv_xor_chan *mv_chan)
}
free_resources:
+ dmaengine_unmap_put(unmap);
mv_xor_free_chan_resources(dma_chan);
out:
- src_idx = MV_XOR_NUM_SRC_TEST;
+ src_idx = src_count;
while (src_idx--)
__free_page(xor_srcs[src_idx]);
__free_page(dest);
@@ -1176,6 +1199,7 @@ static int mv_xor_probe(struct platform_device *pdev)
int i = 0;
for_each_child_of_node(pdev->dev.of_node, np) {
+ struct mv_xor_chan *chan;
dma_cap_mask_t cap_mask;
int irq;
@@ -1193,21 +1217,21 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
- xordev->channels[i] = NULL;
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
irq_dispose_mapping(irq);
goto err_channel_add;
}
+ xordev->channels[i] = chan;
i++;
}
} else if (pdata && pdata->channels) {
for (i = 0; i < MV_XOR_MAX_CHANNELS; i++) {
struct mv_xor_channel_data *cd;
+ struct mv_xor_chan *chan;
int irq;
cd = &pdata->channels[i];
@@ -1222,13 +1246,14 @@ static int mv_xor_probe(struct platform_device *pdev)
goto err_channel_add;
}
- xordev->channels[i] =
- mv_xor_channel_add(xordev, pdev, i,
- cd->cap_mask, irq);
- if (IS_ERR(xordev->channels[i])) {
- ret = PTR_ERR(xordev->channels[i]);
+ chan = mv_xor_channel_add(xordev, pdev, i,
+ cd->cap_mask, irq);
+ if (IS_ERR(chan)) {
+ ret = PTR_ERR(chan);
goto err_channel_add;
}
+
+ xordev->channels[i] = chan;
}
}
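
The mv_xor self-tests above are converted from ad-hoc DMA mappings to the dmaengine unmap-data bookkeeping, where every mapping is recorded in a dmaengine_unmap_data descriptor and released with a single dmaengine_unmap_put(). A rough sketch of that bookkeeping with illustrative names; mapping-error checks and the actual transfer are omitted for brevity:

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	static int foo_selftest_map(struct dma_chan *chan, struct page *src,
				    struct page *dst)
	{
		struct device *dev = chan->device->dev;
		struct dmaengine_unmap_data *unmap;

		unmap = dmaengine_get_unmap_data(dev, 2, GFP_KERNEL);
		if (!unmap)
			return -ENOMEM;

		unmap->addr[0] = dma_map_page(dev, src, 0, PAGE_SIZE, DMA_TO_DEVICE);
		unmap->to_cnt = 1;
		unmap->addr[1] = dma_map_page(dev, dst, 0, PAGE_SIZE, DMA_FROM_DEVICE);
		unmap->from_cnt = 1;
		unmap->len = PAGE_SIZE;

		/* ... prepare, submit and wait for the copy here ... */

		dmaengine_unmap_put(unmap);	/* drops all recorded mappings */
		return 0;
	}
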
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cdf0483..536632f 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2492,12 +2492,9 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
static inline void _init_desc(struct dma_pl330_desc *desc)
{
- desc->pchan = NULL;
desc->req.x = &desc->px;
desc->req.token = desc;
desc->rqcfg.swap = SWAP_NO;
- desc->rqcfg.privileged = 0;
- desc->rqcfg.insnaccess = 0;
desc->rqcfg.scctl = SCCTRL0;
desc->rqcfg.dcctl = DCCTRL0;
desc->req.cfg = &desc->rqcfg;
@@ -2517,7 +2514,7 @@ static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
if (!pdmac)
return 0;
- desc = kmalloc(count * sizeof(*desc), flg);
+ desc = kcalloc(count, sizeof(*desc), flg);
if (!desc)
return 0;
diff --git a/drivers/dma/ppc4xx/adma.c b/drivers/dma/ppc4xx/adma.c
index 8da48c6..8bba298 100644
--- a/drivers/dma/ppc4xx/adma.c
+++ b/drivers/dma/ppc4xx/adma.c
@@ -533,29 +533,6 @@ static void ppc440spe_desc_init_memcpy(struct ppc440spe_adma_desc_slot *desc,
}
/**
- * ppc440spe_desc_init_memset - initialize the descriptor for MEMSET operation
- */
-static void ppc440spe_desc_init_memset(struct ppc440spe_adma_desc_slot *desc,
- int value, unsigned long flags)
-{
- struct dma_cdb *hw_desc = desc->hw_desc;
-
- memset(desc->hw_desc, 0, sizeof(struct dma_cdb));
- desc->hw_next = NULL;
- desc->src_cnt = 1;
- desc->dst_cnt = 1;
-
- if (flags & DMA_PREP_INTERRUPT)
- set_bit(PPC440SPE_DESC_INT, &desc->flags);
- else
- clear_bit(PPC440SPE_DESC_INT, &desc->flags);
-
- hw_desc->sg1u = hw_desc->sg1l = cpu_to_le32((u32)value);
- hw_desc->sg3u = hw_desc->sg3l = cpu_to_le32((u32)value);
- hw_desc->opc = DMA_CDB_OPC_DFILL128;
-}
-
-/**
* ppc440spe_desc_set_src_addr - set source address into the descriptor
*/
static void ppc440spe_desc_set_src_addr(struct ppc440spe_adma_desc_slot *desc,
@@ -1504,8 +1481,6 @@ static dma_cookie_t ppc440spe_adma_run_tx_complete_actions(
struct ppc440spe_adma_chan *chan,
dma_cookie_t cookie)
{
- int i;
-
BUG_ON(desc->async_tx.cookie < 0);
if (desc->async_tx.cookie > 0) {
cookie = desc->async_tx.cookie;
@@ -3898,7 +3873,7 @@ static void ppc440spe_adma_init_capabilities(struct ppc440spe_adma_device *adev)
ppc440spe_adma_prep_dma_interrupt;
}
pr_info("%s: AMCC(R) PPC440SP(E) ADMA Engine: "
- "( %s%s%s%s%s%s%s)\n",
+ "( %s%s%s%s%s%s)\n",
dev_name(adev->dev),
dma_has_cap(DMA_PQ, adev->common.cap_mask) ? "pq " : "",
dma_has_cap(DMA_PQ_VAL, adev->common.cap_mask) ? "pq_val " : "",
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 4cb1279..4eddedb 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -628,42 +628,13 @@ retry:
s3cchan->state = S3C24XX_DMA_CHAN_IDLE;
}
-static void s3c24xx_dma_unmap_buffers(struct s3c24xx_txd *txd)
-{
- struct device *dev = txd->vd.tx.chan->device->dev;
- struct s3c24xx_sg *dsg;
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- else {
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->src_addr, dsg->len,
- DMA_TO_DEVICE);
- }
- }
-
- if (!(txd->vd.tx.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
- if (txd->vd.tx.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_single(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- else
- list_for_each_entry(dsg, &txd->dsg_list, node)
- dma_unmap_page(dev, dsg->dst_addr, dsg->len,
- DMA_FROM_DEVICE);
- }
-}
-
static void s3c24xx_dma_desc_free(struct virt_dma_desc *vd)
{
struct s3c24xx_txd *txd = to_s3c24xx_txd(&vd->tx);
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(vd->tx.chan);
if (!s3cchan->slave)
- s3c24xx_dma_unmap_buffers(txd);
+ dma_descriptor_unmap(&vd->tx);
s3c24xx_dma_free_txd(txd);
}
@@ -795,7 +766,7 @@ static enum dma_status s3c24xx_dma_tx_status(struct dma_chan *chan,
spin_lock_irqsave(&s3cchan->vc.lock, flags);
ret = dma_cookie_status(chan, cookie, txstate);
- if (ret == DMA_SUCCESS) {
+ if (ret == DMA_COMPLETE) {
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
return ret;
}
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index ebad845..3083d90 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -60,6 +60,7 @@
#define HPB_DMAE_DSTPR_DMSTP BIT(0)
/* DMA status register (DSTSR) bits */
+#define HPB_DMAE_DSTSR_DQSTS BIT(2)
#define HPB_DMAE_DSTSR_DMSTS BIT(0)
/* DMA common registers */
@@ -286,6 +287,9 @@ static void hpb_dmae_halt(struct shdma_chan *schan)
ch_reg_write(chan, HPB_DMAE_DCMDR_DQEND, HPB_DMAE_DCMDR);
ch_reg_write(chan, HPB_DMAE_DSTPR_DMSTP, HPB_DMAE_DSTPR);
+
+ chan->plane_idx = 0;
+ chan->first_desc = true;
}
static const struct hpb_dmae_slave_config *
@@ -385,7 +389,10 @@ static bool hpb_dmae_channel_busy(struct shdma_chan *schan)
struct hpb_dmae_chan *chan = to_chan(schan);
u32 dstsr = ch_reg_read(chan, HPB_DMAE_DSTSR);
- return (dstsr & HPB_DMAE_DSTSR_DMSTS) == HPB_DMAE_DSTSR_DMSTS;
+ if (chan->xfer_mode == XFER_DOUBLE)
+ return dstsr & HPB_DMAE_DSTSR_DQSTS;
+ else
+ return dstsr & HPB_DMAE_DSTSR_DMSTS;
}
static int
@@ -510,6 +517,8 @@ static int hpb_dmae_chan_probe(struct hpb_dmae_device *hpbdev, int id)
}
schan = &new_hpb_chan->shdma_chan;
+ schan->max_xfer_len = HPB_DMA_TCR_MAX;
+
shdma_chan_probe(sdev, schan, id);
if (pdev->id >= 0)
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index bae6c29..17686ca 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -406,7 +406,6 @@ txx9dmac_descriptor_complete(struct txx9dmac_chan *dc,
dma_async_tx_callback callback;
void *param;
struct dma_async_tx_descriptor *txd = &desc->txd;
- struct txx9dmac_slave *ds = dc->chan.private;
dev_vdbg(chan2dev(&dc->chan), "descriptor %u %p complete\n",
txd->cookie, desc);
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 8472405..d7f1b57 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -945,7 +945,7 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
u32 tad_offset;
u32 rir_way;
u32 mb, kb;
- u64 ch_addr, offset, limit, prv = 0;
+ u64 ch_addr, offset, limit = 0, prv = 0;
/*
diff --git a/drivers/firewire/sbp2.c b/drivers/firewire/sbp2.c
index b0bb056..281029d 100644
--- a/drivers/firewire/sbp2.c
+++ b/drivers/firewire/sbp2.c
@@ -1623,7 +1623,6 @@ static struct scsi_host_template scsi_driver_template = {
.cmd_per_lun = 1,
.can_queue = 1,
.sdev_attrs = sbp2_scsi_sysfs_attrs,
- .no_write_same = 1,
};
MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile
index 299fad6b5..5373dc5 100644
--- a/drivers/firmware/Makefile
+++ b/drivers/firmware/Makefile
@@ -14,3 +14,4 @@ obj-$(CONFIG_FIRMWARE_MEMMAP) += memmap.o
obj-$(CONFIG_GOOGLE_FIRMWARE) += google/
obj-$(CONFIG_EFI) += efi/
+obj-$(CONFIG_UEFI_CPER) += efi/
diff --git a/drivers/firmware/efi/Kconfig b/drivers/firmware/efi/Kconfig
index 3150aa4..6aecbc8 100644
--- a/drivers/firmware/efi/Kconfig
+++ b/drivers/firmware/efi/Kconfig
@@ -36,7 +36,7 @@ config EFI_VARS_PSTORE_DEFAULT_DISABLE
backend for pstore by default. This setting can be overridden
using the efivars module's pstore_disable parameter.
-config UEFI_CPER
- def_bool n
-
endmenu
+
+config UEFI_CPER
+ bool
diff --git a/drivers/firmware/efi/Makefile b/drivers/firmware/efi/Makefile
index 9ba156d..6c2a41e 100644
--- a/drivers/firmware/efi/Makefile
+++ b/drivers/firmware/efi/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for linux kernel
#
-obj-y += efi.o vars.o
+obj-$(CONFIG_EFI) += efi.o vars.o
obj-$(CONFIG_EFI_VARS) += efivars.o
obj-$(CONFIG_EFI_VARS_PSTORE) += efi-pstore.o
obj-$(CONFIG_UEFI_CPER) += cper.o
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 743fd42..4b9dc83 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -356,6 +356,7 @@ static int efi_pstore_erase(enum pstore_type_id type, u64 id, int count,
static struct pstore_info efi_pstore_info = {
.owner = THIS_MODULE,
.name = "efi",
+ .flags = PSTORE_FLAGS_FRAGILE,
.open = efi_pstore_open,
.close = efi_pstore_close,
.read = efi_pstore_read,
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index 8847adf..84be701 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -327,7 +327,7 @@ static int gpio_to_irq_unbanked(struct gpio_chip *chip, unsigned offset)
* NOTE: we assume for now that only irqs in the first gpio_chip
* can provide direct-mapped IRQs to AINTC (up to 32 GPIOs).
*/
- if (offset < d->irq_base)
+ if (offset < d->gpio_unbanked)
return d->gpio_irq + offset;
else
return -ENODEV;
@@ -419,6 +419,8 @@ static int davinci_gpio_irq_setup(struct platform_device *pdev)
/* pass "bank 0" GPIO IRQs to AINTC */
chips[0].chip.to_irq = gpio_to_irq_unbanked;
+ chips[0].gpio_irq = bank_irq;
+ chips[0].gpio_unbanked = pdata->gpio_unbanked;
binten = BIT(0);
/* AINTC handles mask/unmask; GPIO handles triggering */
diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c
index 7b37300..2baf0dd 100644
--- a/drivers/gpio/gpio-msm-v2.c
+++ b/drivers/gpio/gpio-msm-v2.c
@@ -252,7 +252,7 @@ static void msm_gpio_irq_mask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio));
- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
__clear_bit(gpio, msm_gpio.enabled_irqs);
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
@@ -264,7 +264,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d)
spin_lock_irqsave(&tlmm_lock, irq_flags);
__set_bit(gpio, msm_gpio.enabled_irqs);
- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio));
+ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio));
writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio));
spin_unlock_irqrestore(&tlmm_lock, irq_flags);
}
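
The gpio-msm-v2 change above only makes sense if INTR_RAW_STATUS_EN and INTR_ENABLE are bit positions rather than masks: OR-ing two positions gives the wrong value, while wrapping them in BIT() builds the intended register mask. With made-up positions 0 and 4, the difference looks like this:

	#include <linux/bitops.h>
	#include <linux/types.h>

	#define FOO_INTR_ENABLE		0	/* bit position, hypothetical value */
	#define FOO_INTR_RAW_STATUS_EN	4	/* bit position, hypothetical value */

	/*
	 * (FOO_INTR_RAW_STATUS_EN | FOO_INTR_ENABLE) is just 4, but the
	 * register wants BIT(4) | BIT(0) == 0x11, which is what this returns.
	 */
	static inline u32 foo_intr_mask(void)
	{
		return BIT(FOO_INTR_RAW_STATUS_EN) | BIT(FOO_INTR_ENABLE);
	}
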
diff --git a/drivers/gpio/gpio-rcar.c b/drivers/gpio/gpio-rcar.c
index fe088a3..8b7e719 100644
--- a/drivers/gpio/gpio-rcar.c
+++ b/drivers/gpio/gpio-rcar.c
@@ -169,7 +169,8 @@ static irqreturn_t gpio_rcar_irq_handler(int irq, void *dev_id)
u32 pending;
unsigned int offset, irqs_handled = 0;
- while ((pending = gpio_rcar_read(p, INTDT))) {
+ while ((pending = gpio_rcar_read(p, INTDT) &
+ gpio_rcar_read(p, INTMSK))) {
offset = __ffs(pending);
gpio_rcar_write(p, INTCLR, BIT(offset));
generic_handle_irq(irq_find_mapping(p->irq_domain, offset));
diff --git a/drivers/gpio/gpio-twl4030.c b/drivers/gpio/gpio-twl4030.c
index b97d6a6..f999689 100644
--- a/drivers/gpio/gpio-twl4030.c
+++ b/drivers/gpio/gpio-twl4030.c
@@ -300,7 +300,7 @@ static int twl_direction_in(struct gpio_chip *chip, unsigned offset)
if (offset < TWL4030_GPIO_MAX)
ret = twl4030_set_gpio_direction(offset, 1);
else
- ret = -EINVAL;
+ ret = -EINVAL; /* LED outputs can't be set as input */
if (!ret)
priv->direction &= ~BIT(offset);
@@ -354,11 +354,20 @@ static void twl_set(struct gpio_chip *chip, unsigned offset, int value)
static int twl_direction_out(struct gpio_chip *chip, unsigned offset, int value)
{
struct gpio_twl4030_priv *priv = to_gpio_twl4030(chip);
- int ret = -EINVAL;
+ int ret = 0;
mutex_lock(&priv->mutex);
- if (offset < TWL4030_GPIO_MAX)
+ if (offset < TWL4030_GPIO_MAX) {
ret = twl4030_set_gpio_direction(offset, 0);
+ if (ret) {
+ mutex_unlock(&priv->mutex);
+ return ret;
+ }
+ }
+
+ /*
+ * LED gpios i.e. offset >= TWL4030_GPIO_MAX are always output
+ */
priv->direction |= BIT(offset);
mutex_unlock(&priv->mutex);
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index eef09ec..a72cae0 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -103,6 +103,7 @@ void armada_drm_queue_unref_work(struct drm_device *,
extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
int armada_fbdev_init(struct drm_device *);
+void armada_fbdev_lastclose(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
int armada_overlay_plane_create(struct drm_device *, unsigned long);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4f2b283..62d0ff3 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -321,6 +321,11 @@ static struct drm_ioctl_desc armada_ioctls[] = {
DRM_UNLOCKED),
};
+static void armada_drm_lastclose(struct drm_device *dev)
+{
+ armada_fbdev_lastclose(dev);
+}
+
static const struct file_operations armada_drm_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
@@ -337,7 +342,7 @@ static struct drm_driver armada_drm_driver = {
.open = NULL,
.preclose = NULL,
.postclose = NULL,
- .lastclose = NULL,
+ .lastclose = armada_drm_lastclose,
.unload = armada_drm_unload,
.get_vblank_counter = drm_vblank_count,
.enable_vblank = armada_drm_enable_vblank,
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index dd5ea77..948cb14 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -105,9 +105,9 @@ static int armada_fb_create(struct drm_fb_helper *fbh,
drm_fb_helper_fill_fix(info, dfb->fb.pitches[0], dfb->fb.depth);
drm_fb_helper_fill_var(info, fbh, sizes->fb_width, sizes->fb_height);
- DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08x\n",
- dfb->fb.width, dfb->fb.height,
- dfb->fb.bits_per_pixel, obj->phys_addr);
+ DRM_DEBUG_KMS("allocated %dx%d %dbpp fb: 0x%08llx\n",
+ dfb->fb.width, dfb->fb.height, dfb->fb.bits_per_pixel,
+ (unsigned long long)obj->phys_addr);
return 0;
@@ -177,6 +177,16 @@ int armada_fbdev_init(struct drm_device *dev)
return ret;
}
+void armada_fbdev_lastclose(struct drm_device *dev)
+{
+ struct armada_private *priv = dev->dev_private;
+
+ drm_modeset_lock_all(dev);
+ if (priv->fbdev)
+ drm_fb_helper_restore_fbdev_mode(priv->fbdev);
+ drm_modeset_unlock_all(dev);
+}
+
void armada_fbdev_fini(struct drm_device *dev)
{
struct armada_private *priv = dev->dev_private;
@@ -192,11 +202,11 @@ void armada_fbdev_fini(struct drm_device *dev)
framebuffer_release(info);
}
+ drm_fb_helper_fini(fbh);
+
if (fbh->fb)
fbh->fb->funcs->destroy(fbh->fb);
- drm_fb_helper_fini(fbh);
-
priv->fbdev = NULL;
}
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 9f2356b..887816f 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -172,8 +172,9 @@ armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
obj->dev_addr = obj->linear->start;
}
- DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
- obj, obj->phys_addr, obj->dev_addr);
+ DRM_DEBUG_DRIVER("obj %p phys %#llx dev %#llx\n", obj,
+ (unsigned long long)obj->phys_addr,
+ (unsigned long long)obj->dev_addr);
return 0;
}
@@ -557,7 +558,6 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
* refcount on the gem object itself.
*/
drm_gem_object_reference(obj);
- dma_buf_put(buf);
return obj;
}
}
@@ -573,6 +573,7 @@ armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
}
dobj->obj.import_attach = attach;
+ get_dma_buf(buf);
/*
* Don't call dma_buf_map_attachment() here - it maps the
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 0a1e4a5..8835dcd 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -68,6 +68,8 @@
#define EDID_QUIRK_DETAILED_SYNC_PP (1 << 6)
/* Force reduced-blanking timings for detailed modes */
#define EDID_QUIRK_FORCE_REDUCED_BLANKING (1 << 7)
+/* Force 8bpc */
+#define EDID_QUIRK_FORCE_8BPC (1 << 8)
struct detailed_mode_closure {
struct drm_connector *connector;
@@ -128,6 +130,9 @@ static struct edid_quirk {
/* Medion MD 30217 PG */
{ "MED", 0x7b8, EDID_QUIRK_PREFER_LARGE_75 },
+
+ /* Panel in Samsung NP700G7A-S01PL notebook reports 6bpc */
+ { "SEC", 0xd033, EDID_QUIRK_FORCE_8BPC },
};
/*
@@ -3435,6 +3440,9 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid)
drm_add_display_info(edid, &connector->display_info);
+ if (quirks & EDID_QUIRK_FORCE_8BPC)
+ connector->display_info.bpc = 8;
+
return num_modes;
}
EXPORT_SYMBOL(drm_add_edid_modes);
diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
index f53d524..66dd3a0 100644
--- a/drivers/gpu/drm/drm_stub.c
+++ b/drivers/gpu/drm/drm_stub.c
@@ -566,11 +566,11 @@ err_unload:
if (dev->driver->unload)
dev->driver->unload(dev);
err_primary_node:
- drm_put_minor(dev->primary);
+ drm_unplug_minor(dev->primary);
err_render_node:
- drm_put_minor(dev->render);
+ drm_unplug_minor(dev->render);
err_control_node:
- drm_put_minor(dev->control);
+ drm_unplug_minor(dev->control);
err_agp:
if (dev->driver->bus->agp_destroy)
dev->driver->bus->agp_destroy(dev);
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 0cab2d0..5c64842 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -83,6 +83,14 @@ void i915_update_dri1_breadcrumb(struct drm_device *dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
+ /*
+ * The dri breadcrumb update races against the drm master disappearing.
+ * Instead of trying to fix this (this is by far not the only ums issue)
+ * just don't do the update in kms mode.
+ */
+ if (drm_core_check_feature(dev, DRIVER_MODESET))
+ return;
+
if (dev->primary->master) {
master_priv = dev->primary->master->driver_priv;
if (master_priv->sarea_priv)
@@ -1490,16 +1498,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
spin_lock_init(&dev_priv->uncore.lock);
spin_lock_init(&dev_priv->mm.object_stat_lock);
mutex_init(&dev_priv->dpio_lock);
- mutex_init(&dev_priv->rps.hw_lock);
mutex_init(&dev_priv->modeset_restore_lock);
- mutex_init(&dev_priv->pc8.lock);
- dev_priv->pc8.requirements_met = false;
- dev_priv->pc8.gpu_idle = false;
- dev_priv->pc8.irqs_disabled = false;
- dev_priv->pc8.enabled = false;
- dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
- INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
+ intel_pm_setup(dev);
intel_display_crc_init(dev);
@@ -1603,7 +1604,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
}
intel_irq_init(dev);
- intel_pm_init(dev);
intel_uncore_sanitize(dev);
/* Try to make sure MCHBAR is enabled before poking at it */
@@ -1848,8 +1848,10 @@ void i915_driver_lastclose(struct drm_device * dev)
void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
{
+ mutex_lock(&dev->struct_mutex);
i915_gem_context_close(dev, file_priv);
i915_gem_release(dev, file_priv);
+ mutex_unlock(&dev->struct_mutex);
}
void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 2e367a1..5b7b7e0 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -651,6 +651,7 @@ static int __i915_drm_thaw(struct drm_device *dev, bool restore_gtt_mappings)
intel_modeset_init_hw(dev);
drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, true);
drm_modeset_unlock_all(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index ccdbecc..90fcccb 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1755,8 +1755,13 @@ struct drm_i915_file_private {
#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
#define IS_HSW_EARLY_SDV(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0C00)
-#define IS_ULT(dev) (IS_HASWELL(dev) && \
+#define IS_BDW_ULT(dev) (IS_BROADWELL(dev) && \
+ (((dev)->pdev->device & 0xf) == 0x2 || \
+ ((dev)->pdev->device & 0xf) == 0x6 || \
+ ((dev)->pdev->device & 0xf) == 0xe))
+#define IS_HSW_ULT(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0xFF00) == 0x0A00)
+#define IS_ULT(dev) (IS_HSW_ULT(dev) || IS_BDW_ULT(dev))
#define IS_HSW_GT3(dev) (IS_HASWELL(dev) && \
((dev)->pdev->device & 0x00F0) == 0x0020)
#define IS_PRELIMINARY_HW(intel_info) ((intel_info)->is_preliminary)
@@ -1901,9 +1906,7 @@ void i915_queue_hangcheck(struct drm_device *dev);
void i915_handle_error(struct drm_device *dev, bool wedged);
extern void intel_irq_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_hpd_init(struct drm_device *dev);
-extern void intel_pm_init(struct drm_device *dev);
extern void intel_uncore_sanitize(struct drm_device *dev);
extern void intel_uncore_early_sanitize(struct drm_device *dev);
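The new IS_BDW_ULT() macro classifies Broadwell ULT parts purely by the low nibble of the PCI device ID (0x2, 0x6 or 0xe), while IS_HSW_ULT() keeps the old 0x0Axx range test; IS_ULT() is then just the OR of the two. The same bit tests restated as a standalone sketch, assuming the device ID is already known:

#include <stdbool.h>
#include <stdint.h>

/* Broadwell ULT: PCI device IDs whose low nibble is 0x2, 0x6 or 0xe. */
static bool is_bdw_ult_devid(uint16_t devid)
{
	uint8_t nibble = devid & 0xf;
	return nibble == 0x2 || nibble == 0x6 || nibble == 0xe;
}

/* Haswell ULT: device IDs in the 0x0Axx range. */
static bool is_hsw_ult_devid(uint16_t devid)
{
	return (devid & 0xff00) == 0x0a00;
}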
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 621c7c6..76d3d1a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
kfree(request);
}
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
- struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
{
- u32 completed_seqno;
- u32 acthd;
+ u32 completed_seqno = ring->get_seqno(ring, false);
+ u32 acthd = intel_ring_get_active_head(ring);
+ struct drm_i915_gem_request *request;
+
+ list_for_each_entry(request, &ring->request_list, list) {
+ if (i915_seqno_passed(completed_seqno, request->seqno))
+ continue;
- acthd = intel_ring_get_active_head(ring);
- completed_seqno = ring->get_seqno(ring, false);
+ i915_set_reset_status(ring, request, acthd);
+ }
+}
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+ struct intel_ring_buffer *ring)
+{
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
struct drm_i915_gem_request,
list);
- if (request->seqno > completed_seqno)
- i915_set_reset_status(ring, request, acthd);
-
i915_gem_free_request(request);
}
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
struct intel_ring_buffer *ring;
int i;
+ /*
+ * Before we free the objects from the requests, we need to inspect
+ * them to find the guilty party. As the requests only borrow
+ * their reference to the objects, the inspection must be done first.
+ */
+ for_each_ring(ring, dev_priv, i)
+ i915_gem_reset_ring_status(dev_priv, ring);
+
for_each_ring(ring, dev_priv, i)
- i915_gem_reset_ring_lists(dev_priv, ring);
+ i915_gem_reset_ring_cleanup(dev_priv, ring);
i915_gem_cleanup_ringbuffer(dev);
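The split above turns one destructive walk into two passes over each ring's request list: a read-only pass that records which in-flight requests were guilty (their seqno had not completed), then a second pass that frees everything. A minimal standalone model of that ordering, with a plain singly-linked list standing in for the ring's request_list:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct request { unsigned seqno; struct request *next; };

static bool seqno_passed(unsigned completed, unsigned seqno)
{
	return (int)(completed - seqno) >= 0;   /* wrap-safe comparison */
}

/* Pass 1: inspect only - the requests still own their data here. */
static void mark_guilty(struct request *head, unsigned completed)
{
	for (struct request *r = head; r; r = r->next)
		if (!seqno_passed(completed, r->seqno))
			printf("request %u was still pending at reset\n", r->seqno);
}

/* Pass 2: tear the list down once nothing needs to be inspected. */
static void cleanup(struct request *head)
{
	while (head) {
		struct request *next = head->next;
		free(head);
		head = next;
	}
}

/* mark_guilty(list, completed_seqno); cleanup(list); - never the other way round. */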
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 72a3df3..b0f42b9 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -347,10 +347,8 @@ void i915_gem_context_close(struct drm_device *dev, struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
idr_for_each(&file_priv->context_idr, context_idr_cleanup, NULL);
idr_destroy(&file_priv->context_idr);
- mutex_unlock(&dev->struct_mutex);
}
static struct i915_hw_context *
@@ -423,11 +421,21 @@ static int do_switch(struct i915_hw_context *to)
if (ret)
return ret;
- /* Clear this page out of any CPU caches for coherent swap-in/out. Note
+ /*
+ * Pin can switch back to the default context if we end up calling into
+ * evict_everything - as a last-ditch GTT defrag effort that also
+ * switches to the default context. Hence we need to reload from here.
+ */
+ from = ring->last_context;
+
+ /*
+ * Clear this page out of any CPU caches for coherent swap-in/out. Note
* that thanks to write = false in this call and us not setting any gpu
* write domains when putting a context object onto the active list
* (when switching away from it), this won't block.
- * XXX: We need a real interface to do this instead of trickery. */
+ *
+ * XXX: We need a real interface to do this instead of trickery.
+ */
ret = i915_gem_object_set_to_gtt_domain(to->obj, false);
if (ret) {
i915_gem_object_unpin(to->obj);
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index b737653..8f3adc7 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -88,6 +88,7 @@ i915_gem_evict_something(struct drm_device *dev, struct i915_address_space *vm,
} else
drm_mm_init_scan(&vm->mm, min_size, alignment, cache_level);
+search_again:
/* First see if there is a large enough contiguous idle region... */
list_for_each_entry(vma, &vm->inactive_list, mm_list) {
if (mark_free(vma, &unwind_list))
@@ -115,10 +116,17 @@ none:
list_del_init(&vma->exec_list);
}
- /* We expect the caller to unpin, evict all and try again, or give up.
- * So calling i915_gem_evict_vm() is unnecessary.
+ /* Can we unpin some objects such as idle hw contexts,
+ * or pending flips?
*/
- return -ENOSPC;
+ ret = nonblocking ? -ENOSPC : i915_gpu_idle(dev);
+ if (ret)
+ return ret;
+
+ /* Only idle the GPU and repeat the search once */
+ i915_gem_retire_requests(dev);
+ nonblocking = true;
+ goto search_again;
found:
/* drm_mm doesn't allow any other operations while
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b7e787f..a3ba9a8 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
{
struct drm_i915_gem_object *obj;
struct list_head objects;
- int i, ret = 0;
+ int i, ret;
INIT_LIST_HEAD(&objects);
spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Invalid object handle %d at index %d\n",
exec[i].handle, i);
ret = -ENOENT;
- goto out;
+ goto err;
}
if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
obj, exec[i].handle, i);
ret = -EINVAL;
- goto out;
+ goto err;
}
drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
spin_unlock(&file->table_lock);
i = 0;
- list_for_each_entry(obj, &objects, obj_exec_link) {
+ while (!list_empty(&objects)) {
struct i915_vma *vma;
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ obj_exec_link);
+
/*
* NOTE: We can leak any vmas created here when something fails
* later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
if (IS_ERR(vma)) {
DRM_DEBUG("Failed to lookup VMA\n");
ret = PTR_ERR(vma);
- goto out;
+ goto err;
}
+ /* Transfer ownership from the objects list to the vmas list. */
list_add_tail(&vma->exec_list, &eb->vmas);
+ list_del_init(&obj->obj_exec_link);
vma->exec_entry = &exec[i];
if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
++i;
}
+ return 0;
+
-out:
+err:
while (!list_empty(&objects)) {
obj = list_first_entry(&objects,
struct drm_i915_gem_object,
obj_exec_link);
list_del_init(&obj->obj_exec_link);
- if (ret)
- drm_gem_object_unreference(&obj->base);
+ drm_gem_object_unreference(&obj->base);
}
+ /*
+ * Objects already transferred to the vmas list will be unreferenced by
+ * eb_destroy.
+ */
+
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index 38cb8d4..d3c3b5b 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -337,8 +337,8 @@ static void gen8_ppgtt_cleanup(struct i915_address_space *vm)
kfree(ppgtt->gen8_pt_dma_addr[i]);
}
- __free_pages(ppgtt->gen8_pt_pages, ppgtt->num_pt_pages << PAGE_SHIFT);
- __free_pages(ppgtt->pd_pages, ppgtt->num_pd_pages << PAGE_SHIFT);
+ __free_pages(ppgtt->gen8_pt_pages, get_order(ppgtt->num_pt_pages << PAGE_SHIFT));
+ __free_pages(ppgtt->pd_pages, get_order(ppgtt->num_pd_pages << PAGE_SHIFT));
}
/**
@@ -906,14 +906,12 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
WARN_ON(readq(&gtt_entries[i-1])
!= gen8_pte_encode(addr, level, true));
-#if 0 /* TODO: Still needed on GEN8? */
/* This next bit makes the above posting read even more important. We
* want to flush the TLBs only after we're certain all the PTE updates
* have finished.
*/
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
-#endif
}
/*
@@ -1241,6 +1239,11 @@ static inline unsigned int gen8_get_total_gtt_size(u16 bdw_gmch_ctl)
bdw_gmch_ctl &= BDW_GMCH_GGMS_MASK;
if (bdw_gmch_ctl)
bdw_gmch_ctl = 1 << bdw_gmch_ctl;
+ if (bdw_gmch_ctl > 4) {
+ WARN_ON(!i915_preliminary_hw_support);
+ return 4<<20;
+ }
+
return bdw_gmch_ctl << 20;
}
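The cleanup fix above matters because __free_pages() takes an allocation order, not a byte count: get_order(n) is the smallest order such that PAGE_SIZE << order covers n bytes. A standalone userspace equivalent, assuming 4 KiB pages for the demonstration:

#include <stdio.h>

#define PAGE_SHIFT 12                      /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Smallest order o such that (PAGE_SIZE << o) >= size, as get_order() does. */
static unsigned int order_for_size(unsigned long size)
{
	unsigned int order = 0;
	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

int main(void)
{
	unsigned long pages = 512;                    /* e.g. num_pt_pages */
	unsigned long bytes = pages << PAGE_SHIFT;    /* 2 MiB */
	/* Passing 'bytes' where an order is expected would be wildly wrong;
	 * the correct argument is the order, here 9 (512 pages). */
	printf("bytes=%lu order=%u\n", bytes, order_for_size(bytes));
	return 0;
}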
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 080f6fd..769b864 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
uint32_t val;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
- WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+ WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -9135,7 +9135,7 @@ intel_pipe_config_compare(struct drm_device *dev,
if (IS_G4X(dev) || INTEL_INFO(dev)->gen >= 5)
PIPE_CONF_CHECK_I(pipe_bpp);
- if (!IS_HASWELL(dev)) {
+ if (!HAS_DDI(dev)) {
PIPE_CONF_CHECK_CLOCK_FUZZY(adjusted_mode.crtc_clock);
PIPE_CONF_CHECK_CLOCK_FUZZY(port_clock);
}
@@ -10541,11 +10541,20 @@ static struct intel_quirk intel_quirks[] = {
/* Sony Vaio Y cannot use SSC on LVDS */
{ 0x0046, 0x104d, 0x9076, quirk_ssc_force_disable },
- /*
- * All GM45 Acer (and its brands eMachines and Packard Bell) laptops
- * seem to use inverted backlight PWM.
- */
- { 0x2a42, 0x1025, PCI_ANY_ID, quirk_invert_brightness },
+ /* Acer Aspire 5734Z must invert backlight brightness */
+ { 0x2a42, 0x1025, 0x0459, quirk_invert_brightness },
+
+ /* Acer/eMachines G725 */
+ { 0x2a42, 0x1025, 0x0210, quirk_invert_brightness },
+
+ /* Acer/eMachines e725 */
+ { 0x2a42, 0x1025, 0x0212, quirk_invert_brightness },
+
+ /* Acer/Packard Bell NCL20 */
+ { 0x2a42, 0x1025, 0x034b, quirk_invert_brightness },
+
+ /* Acer Aspire 4736Z */
+ { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
/* Dell XPS13 HD Sandy Bridge */
{ 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
@@ -11036,8 +11045,6 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
}
intel_modeset_check_state(dev);
-
- drm_mode_config_reset(dev);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -11046,7 +11053,10 @@ void intel_modeset_gem_init(struct drm_device *dev)
intel_setup_overlay(dev);
+ drm_modeset_lock_all(dev);
+ drm_mode_config_reset(dev);
intel_modeset_setup_hw_state(dev, false);
+ drm_modeset_unlock_all(dev);
}
void intel_modeset_cleanup(struct drm_device *dev)
@@ -11125,14 +11135,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
u16 gmch_ctrl;
- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
if (state)
gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
else
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
return 0;
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index a18e88b..79f91f2 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -821,6 +821,7 @@ void intel_update_sprite_watermarks(struct drm_plane *plane,
uint32_t sprite_width, int pixel_size,
bool enabled, bool scaled);
void intel_init_pm(struct drm_device *dev);
+void intel_pm_setup(struct drm_device *dev);
bool intel_fbc_enabled(struct drm_device *dev);
void intel_update_fbc(struct drm_device *dev);
void intel_gpu_ips_init(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index f161ac0..e6f782d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -451,7 +451,9 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
spin_lock_irqsave(&dev_priv->backlight.lock, flags);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_BROADWELL(dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ } else if (HAS_PCH_SPLIT(dev)) {
val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
} else {
if (IS_VALLEYVIEW(dev))
@@ -479,6 +481,13 @@ static u32 intel_panel_get_backlight(struct drm_device *dev,
return val;
}
+static void intel_bdw_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+}
+
static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -496,7 +505,9 @@ static void intel_panel_actually_set_backlight(struct drm_device *dev,
DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
level = intel_panel_compute_brightness(dev, pipe, level);
- if (HAS_PCH_SPLIT(dev))
+ if (IS_BROADWELL(dev))
+ return intel_bdw_panel_set_backlight(dev, level);
+ else if (HAS_PCH_SPLIT(dev))
return intel_pch_panel_set_backlight(dev, level);
if (is_backlight_combination_mode(dev)) {
@@ -666,7 +677,16 @@ void intel_panel_enable_backlight(struct intel_connector *connector)
POSTING_READ(reg);
I915_WRITE(reg, tmp | BLM_PWM_ENABLE);
- if (HAS_PCH_SPLIT(dev) &&
+ if (IS_BROADWELL(dev)) {
+ /*
+ * Broadwell requires PCH override to drive the PCH
+ * backlight pin. The above will configure the CPU
+ * backlight pin, which we don't plan to use.
+ */
+ tmp = I915_READ(BLC_PWM_PCH_CTL1);
+ tmp |= BLM_PCH_OVERRIDE_ENABLE | BLM_PCH_PWM_ENABLE;
+ I915_WRITE(BLC_PWM_PCH_CTL1, tmp);
+ } else if (HAS_PCH_SPLIT(dev) &&
!(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
tmp = I915_READ(BLC_PWM_PCH_CTL1);
tmp |= BLM_PCH_PWM_ENABLE;
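The Broadwell paths added above are all read-modify-write updates of a duty-cycle field inside a wider control register, so only the masked bits change and the rest of the register is preserved. The pattern in isolation, with a placeholder mask standing in for the hardware's BACKLIGHT_DUTY_CYCLE_MASK:

#include <stdint.h>

#define DUTY_CYCLE_MASK 0x0000ffffu   /* placeholder, not the real register layout */

/* Replace only the duty-cycle field; leave every other bit untouched. */
static uint32_t set_duty_cycle(uint32_t reg, uint32_t level)
{
	return (reg & ~DUTY_CYCLE_MASK) | (level & DUTY_CYCLE_MASK);
}

/* In the driver this becomes:
 *   val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
 *   I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
 */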
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 6e0d5e0..26c29c1 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5685,8 +5685,11 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
{
struct drm_i915_private *dev_priv = dev->dev_private;
bool is_enabled, enable_requested;
+ unsigned long irqflags;
uint32_t tmp;
+ WARN_ON(dev_priv->pc8.enabled);
+
tmp = I915_READ(HSW_PWR_WELL_DRIVER);
is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5702,9 +5705,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
HSW_PWR_WELL_STATE_ENABLED), 20))
DRM_ERROR("Timeout enabling power well\n");
}
+
+ if (IS_BROADWELL(dev)) {
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_B),
+ dev_priv->de_irq_mask[PIPE_B]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_B),
+ ~dev_priv->de_irq_mask[PIPE_B] |
+ GEN8_PIPE_VBLANK);
+ I915_WRITE(GEN8_DE_PIPE_IMR(PIPE_C),
+ dev_priv->de_irq_mask[PIPE_C]);
+ I915_WRITE(GEN8_DE_PIPE_IER(PIPE_C),
+ ~dev_priv->de_irq_mask[PIPE_C] |
+ GEN8_PIPE_VBLANK);
+ POSTING_READ(GEN8_DE_PIPE_IER(PIPE_C));
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
} else {
if (enable_requested) {
- unsigned long irqflags;
enum pipe p;
I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
@@ -5731,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
static void __intel_power_well_get(struct drm_device *dev,
struct i915_power_well *power_well)
{
- if (!power_well->count++)
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (!power_well->count++) {
+ hsw_disable_package_c8(dev_priv);
__intel_set_power_well(dev, true);
+ }
}
static void __intel_power_well_put(struct drm_device *dev,
struct i915_power_well *power_well)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
WARN_ON(!power_well->count);
- if (!--power_well->count && i915_disable_power_well)
+ if (!--power_well->count && i915_disable_power_well) {
__intel_set_power_well(dev, false);
+ hsw_enable_package_c8(dev_priv);
+ }
}
void intel_display_power_get(struct drm_device *dev,
@@ -6130,10 +6156,19 @@ int vlv_freq_opcode(int ddr_freq, int val)
return val;
}
-void intel_pm_init(struct drm_device *dev)
+void intel_pm_setup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ mutex_init(&dev_priv->rps.hw_lock);
+
+ mutex_init(&dev_priv->pc8.lock);
+ dev_priv->pc8.requirements_met = false;
+ dev_priv->pc8.gpu_idle = false;
+ dev_priv->pc8.irqs_disabled = false;
+ dev_priv->pc8.enabled = false;
+ dev_priv->pc8.disable_count = 2; /* requirements_met + gpu_idle */
+ INIT_DELAYED_WORK(&dev_priv->pc8.enable_work, hsw_enable_pc8_work);
INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
intel_gen6_powersave_work);
}
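__intel_power_well_get()/__intel_power_well_put() use the usual zero-crossing refcount idiom: the expensive enable/disable work only happens on the 0->1 and 1->0 transitions, and the new package-C8 calls are nested just inside those transitions. The bare idiom as a sketch:

#include <assert.h>

struct power_well { int count; };

static void well_enable(void)  { /* expensive hardware enable  */ }
static void well_disable(void) { /* expensive hardware disable */ }

static void power_well_get(struct power_well *pw)
{
	if (!pw->count++)          /* 0 -> 1: actually power it up */
		well_enable();
}

static void power_well_put(struct power_well *pw)
{
	assert(pw->count > 0);     /* mirrors the WARN_ON(!power_well->count) */
	if (!--pw->count)          /* 1 -> 0: last user gone, power it down */
		well_disable();
}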
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b620337..c2f09d45 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -965,6 +965,7 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
} else if (IS_GEN6(ring->dev)) {
mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
} else {
+ /* XXX: gen8 returns to sanity */
mmio = RING_HWS_PGA(ring->mmio_base);
}
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 0b02078..25cbe07 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -784,6 +784,7 @@ static int gen6_do_reset(struct drm_device *dev)
int intel_gpu_reset(struct drm_device *dev)
{
switch (INTEL_INFO(dev)->gen) {
+ case 8:
case 7:
case 6: return gen6_do_reset(dev);
case 5: return ironlake_do_reset(dev);
diff --git a/drivers/gpu/drm/nouveau/core/core/subdev.c b/drivers/gpu/drm/nouveau/core/core/subdev.c
index 48f0637..2ea5568 100644
--- a/drivers/gpu/drm/nouveau/core/core/subdev.c
+++ b/drivers/gpu/drm/nouveau/core/core/subdev.c
@@ -104,11 +104,8 @@ nouveau_subdev_create_(struct nouveau_object *parent,
if (parent) {
struct nouveau_device *device = nv_device(parent);
- int subidx = nv_hclass(subdev) & 0xff;
-
subdev->debug = nouveau_dbgopt(device->dbgopt, subname);
subdev->mmio = nv_subdev(device)->mmio;
- device->subdev[subidx] = *pobject;
}
return 0;
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/base.c b/drivers/gpu/drm/nouveau/core/engine/device/base.c
index 9135b25a..dd01c6c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/base.c
@@ -268,6 +268,8 @@ nouveau_devobj_ctor(struct nouveau_object *parent,
if (ret)
return ret;
+ device->subdev[i] = devobj->subdev[i];
+
/* note: can't init *any* subdevs until devinit has been run
* due to not knowing exactly what the vbios init tables will
* mess with. devinit also can't be run until all of its
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index 8d06eef..dbc5e33 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -161,7 +161,7 @@ nvc0_identify(struct nouveau_device *device)
device->oclass[NVDEV_SUBDEV_THERM ] = &nva3_therm_oclass;
device->oclass[NVDEV_SUBDEV_MXM ] = &nv50_mxm_oclass;
device->oclass[NVDEV_SUBDEV_DEVINIT] = &nvc0_devinit_oclass;
- device->oclass[NVDEV_SUBDEV_MC ] = nvc3_mc_oclass;
+ device->oclass[NVDEV_SUBDEV_MC ] = nvc0_mc_oclass;
device->oclass[NVDEV_SUBDEV_BUS ] = nvc0_bus_oclass;
device->oclass[NVDEV_SUBDEV_TIMER ] = &nv04_timer_oclass;
device->oclass[NVDEV_SUBDEV_FB ] = nvc0_fb_oclass;
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
index 434bb4b..5c8a63d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
@@ -334,7 +334,7 @@ nvc0_graph_mthd(struct nvc0_graph_priv *priv, struct nvc0_graph_mthd *mthds)
while ((mthd = &mthds[i++]) && (init = mthd->init)) {
u32 addr = 0x80000000 | mthd->oclass;
for (data = 0; init->count; init++) {
- if (data != init->data) {
+ if (init == mthd->init || data != init->data) {
nv_wr32(priv, 0x40448c, init->data);
data = init->data;
}
diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
index 8541aa3..d89dbdf 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/fb.h
@@ -75,6 +75,11 @@ struct nouveau_fb {
static inline struct nouveau_fb *
nouveau_fb(void *obj)
{
+ /* fbram uses this before the device subdev pointer is valid */
+ if (nv_iclass(obj, NV_SUBDEV_CLASS) &&
+ nv_subidx(obj) == NVDEV_SUBDEV_FB)
+ return obj;
+
return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_FB];
}
diff --git a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
index 420908c..df1b1b4 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/bios/init.c
@@ -365,13 +365,13 @@ static u16
init_script(struct nouveau_bios *bios, int index)
{
struct nvbios_init init = { .bios = bios };
- u16 data;
+ u16 bmp_ver = bmp_version(bios), data;
- if (bmp_version(bios) && bmp_version(bios) < 0x0510) {
- if (index > 1)
+ if (bmp_ver && bmp_ver < 0x0510) {
+ if (index > 1 || bmp_ver < 0x0100)
return 0x0000;
- data = bios->bmp_offset + (bios->version.major < 2 ? 14 : 18);
+ data = bios->bmp_offset + (bmp_ver < 0x0200 ? 14 : 18);
return nv_ro16(bios, data + (index * 2));
}
@@ -1294,7 +1294,11 @@ init_jump(struct nvbios_init *init)
u16 offset = nv_ro16(bios, init->offset + 1);
trace("JUMP\t0x%04x\n", offset);
- init->offset = offset;
+
+ if (init_exec(init))
+ init->offset = offset;
+ else
+ init->offset += 3;
}
/**
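The init_jump() fix makes the VBIOS script interpreter honour its execute flag: when a preceding condition disabled execution, JUMP must be skipped like any other opcode (advance past its 3-byte encoding) rather than followed. A toy interpreter step showing the two cases, under the assumption of the 1-byte opcode + 2-byte target layout used above:

#include <stdbool.h>
#include <stdint.h>

/* One JUMP opcode: 1 byte opcode + 2 byte target offset = 3 bytes total. */
static uint16_t step_jump(uint16_t offset, uint16_t target, bool executing)
{
	if (executing)
		return target;      /* follow the jump */
	return offset + 3;          /* skip over the opcode, keep parsing */
}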
diff --git a/drivers/gpu/drm/nouveau/nouveau_abi16.c b/drivers/gpu/drm/nouveau/nouveau_abi16.c
index 6828d81..900fae0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_abi16.c
+++ b/drivers/gpu/drm/nouveau/nouveau_abi16.c
@@ -447,6 +447,8 @@ nouveau_abi16_ioctl_notifierobj_alloc(ABI16_IOCTL_ARGS)
if (ret)
goto done;
+ info->offset = ntfy->node->offset;
+
done:
if (ret)
nouveau_abi16_ntfy_fini(chan, ntfy);
diff --git a/drivers/gpu/drm/nouveau/nouveau_acpi.c b/drivers/gpu/drm/nouveau/nouveau_acpi.c
index 95c7404..ba0183f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_acpi.c
+++ b/drivers/gpu/drm/nouveau/nouveau_acpi.c
@@ -51,6 +51,7 @@ static struct nouveau_dsm_priv {
bool dsm_detected;
bool optimus_detected;
acpi_handle dhandle;
+ acpi_handle other_handle;
acpi_handle rom_handle;
} nouveau_dsm_priv;
@@ -260,9 +261,10 @@ static int nouveau_dsm_pci_probe(struct pci_dev *pdev)
if (!dhandle)
return false;
- if (!acpi_has_method(dhandle, "_DSM"))
+ if (!acpi_has_method(dhandle, "_DSM")) {
+ nouveau_dsm_priv.other_handle = dhandle;
return false;
-
+ }
if (nouveau_test_dsm(dhandle, nouveau_dsm, NOUVEAU_DSM_POWER))
retval |= NOUVEAU_DSM_HAS_MUX;
@@ -338,6 +340,16 @@ static bool nouveau_dsm_detect(void)
printk(KERN_INFO "VGA switcheroo: detected DSM switching method %s handle\n",
acpi_method_name);
nouveau_dsm_priv.dsm_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when _DSM is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(nouveau_dsm_priv.dhandle);
+ acpi_bus_no_hotplug(nouveau_dsm_priv.other_handle);
ret = true;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 29c3efd..25ea82f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -610,7 +610,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
ret = nouveau_fence_sync(fence, chan);
nouveau_fence_unref(&fence);
if (ret)
- return ret;
+ goto fail_free;
if (new_bo != old_bo) {
ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 7a3759f..98a22e6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -858,6 +858,12 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
if (nouveau_runtime_pm == 0)
return -EINVAL;
+ /* are we optimus enabled? */
+ if (nouveau_runtime_pm == -1 && !nouveau_is_optimus() && !nouveau_is_v1_dsm()) {
+ DRM_DEBUG_DRIVER("failing to power off - not optimus\n");
+ return -EINVAL;
+ }
+
nv_debug_level(SILENT);
drm_kms_helper_poll_disable(drm_dev);
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_OFF);
diff --git a/drivers/gpu/drm/qxl/Kconfig b/drivers/gpu/drm/qxl/Kconfig
index 037d324..66ac0ff 100644
--- a/drivers/gpu/drm/qxl/Kconfig
+++ b/drivers/gpu/drm/qxl/Kconfig
@@ -8,5 +8,6 @@ config DRM_QXL
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select DRM_TTM
+ select CRC32
help
QXL virtual GPU for Spice virtualization desktop integration.
Do not enable this driver unless your distro ships a corresponding
X.org QXL driver that can handle kernel modesetting.
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 5e827c2..d70aafb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -24,7 +24,7 @@
*/
-#include "linux/crc32.h"
+#include <linux/crc32.h>
#include "qxl_drv.h"
#include "qxl_object.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 80a2012..0b9621c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1143,31 +1143,53 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
}
if (tiling_flags & RADEON_TILING_MACRO) {
- if (rdev->family >= CHIP_BONAIRE)
- tmp = rdev->config.cik.tile_config;
- else if (rdev->family >= CHIP_TAHITI)
- tmp = rdev->config.si.tile_config;
- else if (rdev->family >= CHIP_CAYMAN)
- tmp = rdev->config.cayman.tile_config;
- else
- tmp = rdev->config.evergreen.tile_config;
+ evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
- switch ((tmp & 0xf0) >> 4) {
- case 0: /* 4 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
- break;
- case 1: /* 8 banks */
- default:
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
- break;
- case 2: /* 16 banks */
- fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
- break;
+ /* Set NUM_BANKS. */
+ if (rdev->family >= CHIP_BONAIRE) {
+ unsigned tileb, index, num_banks, tile_split_bytes;
+
+ /* Calculate the macrotile mode index. */
+ tile_split_bytes = 64 << tile_split;
+ tileb = 8 * 8 * target_fb->bits_per_pixel / 8;
+ tileb = min(tile_split_bytes, tileb);
+
+ for (index = 0; tileb > 64; index++) {
+ tileb >>= 1;
+ }
+
+ if (index >= 16) {
+ DRM_ERROR("Wrong screen bpp (%u) or tile split (%u)\n",
+ target_fb->bits_per_pixel, tile_split);
+ return -EINVAL;
+ }
+
+ num_banks = (rdev->config.cik.macrotile_mode_array[index] >> 6) & 0x3;
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(num_banks);
+ } else {
+ /* SI and older. */
+ if (rdev->family >= CHIP_TAHITI)
+ tmp = rdev->config.si.tile_config;
+ else if (rdev->family >= CHIP_CAYMAN)
+ tmp = rdev->config.cayman.tile_config;
+ else
+ tmp = rdev->config.evergreen.tile_config;
+
+ switch ((tmp & 0xf0) >> 4) {
+ case 0: /* 4 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_4_BANK);
+ break;
+ case 1: /* 8 banks */
+ default:
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_8_BANK);
+ break;
+ case 2: /* 16 banks */
+ fb_format |= EVERGREEN_GRPH_NUM_BANKS(EVERGREEN_ADDR_SURF_16_BANK);
+ break;
+ }
}
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1);
-
- evergreen_tiling_fields(tiling_flags, &bankw, &bankh, &mtaspect, &tile_split);
fb_format |= EVERGREEN_GRPH_TILE_SPLIT(tile_split);
fb_format |= EVERGREEN_GRPH_BANK_WIDTH(bankw);
fb_format |= EVERGREEN_GRPH_BANK_HEIGHT(bankh);
@@ -1180,23 +1202,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
if (rdev->family >= CHIP_BONAIRE) {
- u32 num_pipe_configs = rdev->config.cik.max_tile_pipes;
- u32 num_rb = rdev->config.cik.max_backends_per_se;
- if (num_pipe_configs > 8)
- num_pipe_configs = 8;
- if (num_pipe_configs == 8)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P8_32x32_16x16);
- else if (num_pipe_configs == 4) {
- if (num_rb == 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_16x16);
- else if (num_rb < 4)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P4_8x16);
- } else if (num_pipe_configs == 2)
- fb_format |= CIK_GRPH_PIPE_CONFIG(CIK_ADDR_SURF_P2);
+ /* Read the pipe config from the 2D TILED SCANOUT mode.
+ * It should be the same for the other modes too, but not all
+ * modes set the pipe config field. */
+ u32 pipe_config = (rdev->config.cik.tile_mode_array[10] >> 6) & 0x1f;
+
+ fb_format |= CIK_GRPH_PIPE_CONFIG(pipe_config);
} else if ((rdev->family == CHIP_TAHITI) ||
(rdev->family == CHIP_PITCAIRN))
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
- else if (rdev->family == CHIP_VERDE)
+ else if ((rdev->family == CHIP_VERDE) ||
+ (rdev->family == CHIP_OLAND) ||
+ (rdev->family == CHIP_HAINAN)) /* for completeness. HAINAN has no display hw */
fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
switch (radeon_crtc->crtc_id) {
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index b43a3a3..e950fab 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -3057,7 +3057,7 @@ static u32 cik_create_bitmask(u32 bit_width)
* Returns the disabled RB bitmask.
*/
static u32 cik_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -3071,7 +3071,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = cik_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = cik_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
@@ -3088,7 +3088,7 @@ static u32 cik_get_rb_disabled(struct radeon_device *rdev,
*/
static void cik_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -3098,7 +3098,7 @@ static void cik_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
cik_select_se_sh(rdev, i, j);
- data = cik_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = cik_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
if (rdev->family == CHIP_HAWAII)
disabled_rbs |= data << ((i * sh_per_se + j) * HAWAII_RB_BITMAP_WIDTH_PER_SH);
else
@@ -3108,12 +3108,14 @@ static void cik_setup_rb(struct radeon_device *rdev,
cik_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.cik.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
cik_select_se_sh(rdev, i, 0xffffffff);
data = 0;
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index 0300727..d08b83c 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -458,7 +458,7 @@ int cik_copy_dma(struct radeon_device *rdev,
radeon_ring_write(ring, 0); /* src/dst endian swap */
radeon_ring_write(ring, src_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(src_offset) & 0xffffffff);
- radeon_ring_write(ring, dst_offset & 0xfffffffc);
+ radeon_ring_write(ring, dst_offset & 0xffffffff);
radeon_ring_write(ring, upper_32_bits(dst_offset) & 0xffffffff);
src_offset += cur_size_in_bytes;
dst_offset += cur_size_in_bytes;
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index de86493..713a5d3 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
rdev->audio.enabled = true;
if (ASIC_IS_DCE8(rdev))
- rdev->audio.num_pins = 7;
+ rdev->audio.num_pins = 6;
+ else if (ASIC_IS_DCE61(rdev))
+ rdev->audio.num_pins = 4;
else
rdev->audio.num_pins = 6;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index aa695c4..0c6d5ce 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
return;
}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
}
sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
- if (sad_count < 0) {
+ if (sad_count <= 0) {
DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
return;
}
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index 11aab2a..f59a9e9 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999C)) {
rdev->config.cayman.max_simds_per_se = 6;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9903) ||
(rdev->pdev->device == 0x9904) ||
(rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x999D)) {
rdev->config.cayman.max_simds_per_se = 4;
rdev->config.cayman.max_backends_per_se = 2;
+ rdev->config.cayman.max_hw_contexts = 8;
+ rdev->config.cayman.sx_max_export_size = 256;
+ rdev->config.cayman.sx_max_export_pos_size = 64;
+ rdev->config.cayman.sx_max_export_smx_size = 192;
} else if ((rdev->pdev->device == 0x9919) ||
(rdev->pdev->device == 0x9990) ||
(rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
(rdev->pdev->device == 0x99A0)) {
rdev->config.cayman.max_simds_per_se = 3;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
} else {
rdev->config.cayman.max_simds_per_se = 2;
rdev->config.cayman.max_backends_per_se = 1;
+ rdev->config.cayman.max_hw_contexts = 4;
+ rdev->config.cayman.sx_max_export_size = 128;
+ rdev->config.cayman.sx_max_export_pos_size = 32;
+ rdev->config.cayman.sx_max_export_smx_size = 96;
}
rdev->config.cayman.max_texture_channel_caches = 2;
rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
rdev->config.cayman.max_gs_threads = 32;
rdev->config.cayman.max_stack_entries = 512;
rdev->config.cayman.sx_num_of_sets = 8;
- rdev->config.cayman.sx_max_export_size = 256;
- rdev->config.cayman.sx_max_export_pos_size = 64;
- rdev->config.cayman.sx_max_export_smx_size = 192;
- rdev->config.cayman.max_hw_contexts = 8;
rdev->config.cayman.sq_num_cf_insts = 2;
rdev->config.cayman.sc_prim_fifo_size = 0x40;
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index b1f990d..45e1f44 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1940,7 +1940,7 @@ struct si_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
@@ -1970,7 +1970,7 @@ struct cik_asic {
unsigned sc_earlyz_tile_fifo_size;
unsigned num_tile_pipes;
- unsigned num_backends_per_se;
+ unsigned backend_enable_mask;
unsigned backend_disable_mask_per_asic;
unsigned backend_map;
unsigned num_texture_channel_caches;
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index e354ce9..c0425bb 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -2021,7 +2021,7 @@ static struct radeon_asic ci_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
@@ -2122,7 +2122,7 @@ static struct radeon_asic kv_asic = {
.hdmi_setmode = &evergreen_hdmi_setmode,
},
.copy = {
- .blit = NULL,
+ .blit = &cik_copy_cpdma,
.blit_ring_index = RADEON_RING_TYPE_GFX_INDEX,
.dma = &cik_copy_dma,
.dma_ring_index = R600_RING_TYPE_DMA_INDEX,
diff --git a/drivers/gpu/drm/radeon/radeon_atpx_handler.c b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
index 9d302ea..485848f 100644
--- a/drivers/gpu/drm/radeon/radeon_atpx_handler.c
+++ b/drivers/gpu/drm/radeon/radeon_atpx_handler.c
@@ -33,6 +33,7 @@ static struct radeon_atpx_priv {
bool atpx_detected;
/* handle for device - and atpx */
acpi_handle dhandle;
+ acpi_handle other_handle;
struct radeon_atpx atpx;
} radeon_atpx_priv;
@@ -451,9 +452,10 @@ static bool radeon_atpx_pci_probe_handle(struct pci_dev *pdev)
return false;
status = acpi_get_handle(dhandle, "ATPX", &atpx_handle);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ radeon_atpx_priv.other_handle = dhandle;
return false;
-
+ }
radeon_atpx_priv.dhandle = dhandle;
radeon_atpx_priv.atpx.handle = atpx_handle;
return true;
@@ -530,6 +532,16 @@ static bool radeon_atpx_detect(void)
printk(KERN_INFO "VGA switcheroo: detected switching method %s handle\n",
acpi_method_name);
radeon_atpx_priv.atpx_detected = true;
+ /*
+ * On some systems hotplug events are generated for the device
+ * being switched off when ATPX is executed. They cause ACPI
+ * hotplug to trigger and attempt to remove the device from
+ * the system, which causes it to break down. Prevent that from
+ * happening by setting the no_hotplug flag for the involved
+ * ACPI device objects.
+ */
+ acpi_bus_no_hotplug(radeon_atpx_priv.dhandle);
+ acpi_bus_no_hotplug(radeon_atpx_priv.other_handle);
return true;
}
return false;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 9f5ff28..db39ea3 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -77,9 +77,10 @@
* 2.33.0 - Add SI tiling mode array query
* 2.34.0 - Add CIK tiling mode array query
* 2.35.0 - Add CIK macrotile mode array query
+ * 2.36.0 - Fix CIK DCE tiling setup
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 35
+#define KMS_DRIVER_MINOR 36
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -508,15 +509,6 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-
-static void
-radeon_pci_shutdown(struct pci_dev *pdev)
-{
- struct drm_device *dev = pci_get_drvdata(pdev);
-
- radeon_driver_unload_kms(dev);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
DRIVER_USE_AGP |
@@ -586,7 +578,6 @@ static struct pci_driver radeon_kms_pci_driver = {
.probe = radeon_pci_probe,
.remove = radeon_pci_remove,
.driver.pm = &radeon_pm_ops,
- .shutdown = radeon_pci_shutdown,
};
static int __init radeon_init(void)
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 55d0b47..21d593c 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -461,6 +461,15 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
case RADEON_INFO_SI_CP_DMA_COMPUTE:
*value = 1;
break;
+ case RADEON_INFO_SI_BACKEND_ENABLED_MASK:
+ if (rdev->family >= CHIP_BONAIRE) {
+ *value = rdev->config.cik.backend_enable_mask;
+ } else if (rdev->family >= CHIP_TAHITI) {
+ *value = rdev->config.si.backend_enable_mask;
+ } else {
+ DRM_DEBUG_KMS("BACKEND_ENABLED_MASK is si+ only!\n");
+ }
+ break;
default:
DRM_DEBUG_KMS("Invalid request %d\n", info->request);
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index dc75bb6..984097b 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -552,8 +552,7 @@ static ssize_t radeon_hwmon_show_temp_thresh(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
int hyst = to_sensor_dev_attr(attr)->index;
int temp;
@@ -580,8 +579,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
struct attribute *attr, int index)
{
struct device *dev = container_of(kobj, struct device, kobj);
- struct drm_device *ddev = dev_get_drvdata(dev);
- struct radeon_device *rdev = ddev->dev_private;
+ struct radeon_device *rdev = dev_get_drvdata(dev);
/* Skip limit attributes if DPM is not enabled */
if (rdev->pm.pm_method != PM_METHOD_DPM &&
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 373d088..b9c0529 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -473,7 +473,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
return -EINVAL;
}
- if ((start >> 28) != (end >> 28)) {
+ if ((start >> 28) != ((end - 1) >> 28)) {
DRM_ERROR("reloc %LX-%LX crossing 256MB boundary!\n",
start, end);
return -EINVAL;
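Here 'end' is an exclusive bound, so the old test (start >> 28) != (end >> 28) rejected buffers that finish exactly on a 256 MB boundary even though they never cross it; comparing against end - 1 fixes that. A worked standalone check:

#include <stdint.h>
#include <stdio.h>

/* A buffer [start, end) crosses a 256 MB boundary iff its first and last
 * byte live in different 256 MB segments (1 << 28 bytes each). */
static int crosses_256mb(uint64_t start, uint64_t end)
{
	return (start >> 28) != ((end - 1) >> 28);
}

int main(void)
{
	uint64_t start = 0x0ff00000, end = 0x10000000;  /* ends exactly on the boundary */
	printf("old test: %d, fixed test: %d\n",
	       (int)((start >> 28) != (end >> 28)), crosses_256mb(start, end));
	return 0;   /* prints "old test: 1, fixed test: 0" */
}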
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 1c56062..e7dab06 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -162,6 +162,16 @@ static void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ /* Some boards seem to be configured for 128MB of sideport memory,
+ * but really only have 64MB. Just skip the sideport and use
+ * UMA memory.
+ */
+ if (rdev->mc.igp_sideport_enabled &&
+ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) {
+ base += 128 * 1024 * 1024;
+ rdev->mc.real_vram_size -= 128 * 1024 * 1024;
+ rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+ }
/* Use K8 direct mapping for fast fb access. */
rdev->fastfb_working = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 913b025..374499d 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
ASIC_INTERNAL_MEMORY_SS, 0);
+ /* disable ss, causes hangs on some cayman boards */
+ if (rdev->family == CHIP_CAYMAN) {
+ pi->sclk_ss = false;
+ pi->mclk_ss = false;
+ }
+
if (pi->sclk_ss || pi->mclk_ss)
pi->dynamic_ss = true;
else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index a36736d..85e1edf 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2811,7 +2811,7 @@ static void si_setup_spi(struct radeon_device *rdev,
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
- u32 max_rb_num, u32 se_num,
+ u32 max_rb_num_per_se,
u32 sh_per_se)
{
u32 data, mask;
@@ -2825,14 +2825,14 @@ static u32 si_get_rb_disabled(struct radeon_device *rdev,
data >>= BACKEND_DISABLE_SHIFT;
- mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
+ mask = si_create_bitmask(max_rb_num_per_se / sh_per_se);
return data & mask;
}
static void si_setup_rb(struct radeon_device *rdev,
u32 se_num, u32 sh_per_se,
- u32 max_rb_num)
+ u32 max_rb_num_per_se)
{
int i, j;
u32 data, mask;
@@ -2842,19 +2842,21 @@ static void si_setup_rb(struct radeon_device *rdev,
for (i = 0; i < se_num; i++) {
for (j = 0; j < sh_per_se; j++) {
si_select_se_sh(rdev, i, j);
- data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
+ data = si_get_rb_disabled(rdev, max_rb_num_per_se, sh_per_se);
disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
}
}
si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
mask = 1;
- for (i = 0; i < max_rb_num; i++) {
+ for (i = 0; i < max_rb_num_per_se * se_num; i++) {
if (!(disabled_rbs & mask))
enabled_rbs |= mask;
mask <<= 1;
}
+ rdev->config.si.backend_enable_mask = enabled_rbs;
+
for (i = 0; i < se_num; i++) {
si_select_se_sh(rdev, i, 0xffffffff);
data = 0;
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 15b86a9..4061521 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
* Don't move nonexistent data. Clear destination instead.
*/
if (old_iomap == NULL &&
- (ttm == NULL || ttm->state == tt_unpopulated)) {
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
}
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index b249ab9..6440eea 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -169,9 +169,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
}
page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
- page_last = vma_pages(vma) +
- drm_vma_node_start(&bo->vma_node) - vma->vm_pgoff;
+ vma->vm_pgoff - drm_vma_node_start(&bo->vma_node);
+ page_last = vma_pages(vma) + vma->vm_pgoff -
+ drm_vma_node_start(&bo->vma_node);
if (unlikely(page_offset >= bo->num_pages)) {
retval = VM_FAULT_SIGBUS;
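The fault offset inside the buffer object is the page index within the VMA plus how far into the object the mapping starts, i.e. vm_pgoff - drm_vma_node_start(); the old code had the two terms the wrong way round, which only happened to work when the mapping started at the object's beginning. A worked example with made-up page numbers:

#include <stdio.h>

int main(void)
{
	unsigned long node_start = 1000;  /* object's base offset in the address space (pages) */
	unsigned long vm_pgoff   = 1004;  /* user mapped the object starting 4 pages in */
	unsigned long fault_page = 2;     /* page index of the fault within the VMA */

	unsigned long old = fault_page + node_start - vm_pgoff;  /* underflows to a huge value */
	unsigned long fix = fault_page + vm_pgoff - node_start;  /* 6: the 7th page of the object */

	printf("old=%lu fixed=%lu\n", old, fix);
	return 0;
}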
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
index a51f48e..45d5b5a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
@@ -68,6 +68,9 @@ int vmw_getparam_ioctl(struct drm_device *dev, void *data,
SVGA_FIFO_3D_HWVERSION));
break;
}
+ case DRM_VMW_PARAM_MAX_SURF_MEMORY:
+ param->value = dev_priv->memory_size;
+ break;
default:
DRM_ERROR("Illegal vmwgfx get param request: %d\n",
param->param);
diff --git a/drivers/hid/hid-kye.c b/drivers/hid/hid-kye.c
index ecb5ca6..e776963 100644
--- a/drivers/hid/hid-kye.c
+++ b/drivers/hid/hid-kye.c
@@ -341,6 +341,7 @@ static __u8 *kye_report_fixup(struct hid_device *hdev, __u8 *rdesc,
case USB_DEVICE_ID_GENIUS_GX_IMPERATOR:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 83,
"Genius Gx Imperator Keyboard");
+ break;
case USB_DEVICE_ID_GENIUS_MANTICORE:
rdesc = kye_consumer_control_fixup(hdev, rdesc, rsize, 104,
"Genius Manticore Keyboard");
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index d87f7cb..8fab828 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -569,6 +569,8 @@ static int sensor_hub_probe(struct hid_device *hdev,
goto err_free_names;
}
sd->hid_sensor_hub_client_devs[
+ sd->hid_sensor_client_cnt].id = PLATFORM_DEVID_AUTO;
+ sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].name = name;
sd->hid_sensor_hub_client_devs[
sd->hid_sensor_client_cnt].platform_data =
diff --git a/drivers/hwmon/hih6130.c b/drivers/hwmon/hih6130.c
index 2dc37c7..7d68a08 100644
--- a/drivers/hwmon/hih6130.c
+++ b/drivers/hwmon/hih6130.c
@@ -43,6 +43,7 @@
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
* @humidity: cached humidity measurement value
+ * @write_length: length for I2C measurement request
*/
struct hih6130 {
struct device *hwmon_dev;
@@ -51,6 +52,7 @@ struct hih6130 {
unsigned long last_update;
int temperature;
int humidity;
+ size_t write_length;
};
/**
@@ -121,8 +123,15 @@ static int hih6130_update_measurements(struct i2c_client *client)
*/
if (time_after(jiffies, hih6130->last_update + HZ) || !hih6130->valid) {
- /* write to slave address, no data, to request a measurement */
- ret = i2c_master_send(client, tmp, 0);
+ /*
+ * Write to slave address to request a measurement.
+ * According to the datasheet the request should carry no data, but
+ * for systems with I2C bus drivers that do not allow zero-length
+ * packets we write one dummy byte so that sensor measurements
+ * still work on them.
+ */
+ tmp[0] = 0;
+ ret = i2c_master_send(client, tmp, hih6130->write_length);
if (ret < 0)
goto out;
@@ -252,6 +261,9 @@ static int hih6130_probe(struct i2c_client *client,
goto fail_remove_sysfs;
}
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_QUICK))
+ hih6130->write_length = 1;
+
return 0;
fail_remove_sysfs:
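The probe-time check above simply records whether the bus can issue zero-length writes (I2C_FUNC_SMBUS_QUICK); the measurement request then sends either 0 bytes, as the datasheet describes, or 1 dummy byte as a fallback. Reduced to the decision itself, as a sketch:

#include <stdbool.h>
#include <stddef.h>

/* Datasheet: a write with no payload triggers a measurement. Adapters that
 * cannot send zero-length messages get one dummy 0x00 byte instead. */
static size_t measurement_request_len(bool adapter_supports_zero_len_write)
{
	return adapter_supports_zero_len_write ? 0 : 1;
}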
diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
index 6cf6bff..a2f3b4a 100644
--- a/drivers/hwmon/lm78.c
+++ b/drivers/hwmon/lm78.c
@@ -94,6 +94,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
+ if (rpm > 1350000)
+ return 1;
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
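FAN_TO_REG() divides 1350000 by rpm * div, so once rpm exceeds 1350000 the result could only clamp to 1 anyway; bailing out early also keeps the later rpm * div multiplication from overflowing when userspace writes an absurdly large limit. The same guard in isolation (the constants mirror the driver):

#include <stdio.h>

static long clamp_long(long v, long lo, long hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* RPM-to-register conversion with the overflow guard from the hunk above. */
static unsigned char fan_to_reg(long rpm, int div)
{
	if (rpm <= 0)
		return 255;
	if (rpm > 1350000)        /* result would clamp to 1 anyway; also keeps */
		return 1;         /* rpm * div below from overflowing a long    */
	return (unsigned char)clamp_long((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}

int main(void)
{
	printf("%u %u %u\n", fan_to_reg(4500, 2), fan_to_reg(2000000, 2), fan_to_reg(0, 2));
	return 0;   /* prints "150 1 255" */
}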
diff --git a/drivers/hwmon/lm90.c b/drivers/hwmon/lm90.c
index 4c4c142..8b8f3aa 100644
--- a/drivers/hwmon/lm90.c
+++ b/drivers/hwmon/lm90.c
@@ -1610,12 +1610,14 @@ static int lm90_probe(struct i2c_client *client,
"lm90", client);
if (err < 0) {
dev_err(dev, "cannot request IRQ %d\n", client->irq);
- goto exit_remove_files;
+ goto exit_unregister;
}
}
return 0;
+exit_unregister:
+ hwmon_device_unregister(data->hwmon_dev);
exit_remove_files:
lm90_remove_files(client, data);
exit_restore:
diff --git a/drivers/hwmon/sis5595.c b/drivers/hwmon/sis5595.c
index 1404e63..72a8897 100644
--- a/drivers/hwmon/sis5595.c
+++ b/drivers/hwmon/sis5595.c
@@ -141,6 +141,8 @@ static inline u8 FAN_TO_REG(long rpm, int div)
{
if (rpm <= 0)
return 255;
+ if (rpm > 1350000)
+ return 1;
return clamp_val((1350000 + rpm * div / 2) / (rpm * div), 1, 254);
}
diff --git a/drivers/hwmon/vt8231.c b/drivers/hwmon/vt8231.c
index 0e70178..aee14e2 100644
--- a/drivers/hwmon/vt8231.c
+++ b/drivers/hwmon/vt8231.c
@@ -145,7 +145,7 @@ static const u8 regtempmin[] = { 0x3a, 0x3e, 0x2c, 0x2e, 0x30, 0x32 };
*/
static inline u8 FAN_TO_REG(long rpm, int div)
{
- if (rpm == 0)
+ if (rpm <= 0 || rpm > 1310720)
return 0;
return clamp_val(1310720 / (rpm * div), 1, 255);
}
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index edb06cd..6ed76ce 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -481,9 +481,11 @@ store_pwm(struct device *dev, struct device_attribute *attr,
if (err)
return err;
val = clamp_val(val, 0, 255);
+ val = DIV_ROUND_CLOSEST(val, 0x11);
mutex_lock(&data->update_lock);
- data->pwm[nr] = val;
+ data->pwm[nr] = val * 0x11;
+ val |= w83l786ng_read_value(client, W83L786NG_REG_PWM[nr]) & 0xf0;
w83l786ng_write_value(client, W83L786NG_REG_PWM[nr], val);
mutex_unlock(&data->update_lock);
return count;
@@ -510,7 +512,7 @@ store_pwm_enable(struct device *dev, struct device_attribute *attr,
mutex_lock(&data->update_lock);
reg = w83l786ng_read_value(client, W83L786NG_REG_FAN_CFG);
data->pwm_enable[nr] = val;
- reg &= ~(0x02 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
+ reg &= ~(0x03 << W83L786NG_PWM_ENABLE_SHIFT[nr]);
reg |= (val - 1) << W83L786NG_PWM_ENABLE_SHIFT[nr];
w83l786ng_write_value(client, W83L786NG_REG_FAN_CFG, reg);
mutex_unlock(&data->update_lock);
@@ -776,9 +778,10 @@ static struct w83l786ng_data *w83l786ng_update_device(struct device *dev)
((pwmcfg >> W83L786NG_PWM_MODE_SHIFT[i]) & 1)
? 0 : 1;
data->pwm_enable[i] =
- ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 2) + 1;
- data->pwm[i] = w83l786ng_read_value(client,
- W83L786NG_REG_PWM[i]);
+ ((pwmcfg >> W83L786NG_PWM_ENABLE_SHIFT[i]) & 3) + 1;
+ data->pwm[i] =
+ (w83l786ng_read_value(client, W83L786NG_REG_PWM[i])
+ & 0x0f) * 0x11;
}
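The PWM register on this chip only holds a 4-bit duty cycle in its low nibble, so the sysfs value 0..255 is first rounded to 0..15 with DIV_ROUND_CLOSEST(val, 0x11) and the cached value scaled back with * 0x11, giving the 17-step ladder 0, 17, 34, ..., 255. The round trip as plain arithmetic:

#include <stdio.h>

/* Round-to-nearest division, as DIV_ROUND_CLOSEST() does for positive values. */
static unsigned div_round_closest(unsigned n, unsigned d)
{
	return (n + d / 2) / d;
}

int main(void)
{
	for (unsigned val = 0; val <= 255; val += 51) {
		unsigned nibble = div_round_closest(val, 0x11);  /* what goes into the register */
		unsigned cached = nibble * 0x11;                 /* what the driver reports back */
		printf("sysfs %3u -> reg nibble %2u -> reported %3u\n", val, nibble, cached);
	}
	return 0;
}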
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1d7efa3..d0cfbb4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -312,7 +312,9 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx)
dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__);
- clk_prepare_enable(i2c_imx->clk);
+ result = clk_prepare_enable(i2c_imx->clk);
+ if (result)
+ return result;
imx_i2c_write_reg(i2c_imx->ifdr, i2c_imx, IMX_I2C_IFDR);
/* Enable I2C controller */
imx_i2c_write_reg(i2c_imx->hwdata->i2sr_clr_opcode, i2c_imx, IMX_I2C_I2SR);
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index 797e311..2d0847b 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -139,6 +139,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
priv->adap.algo = &priv->algo;
priv->adap.algo_data = priv;
priv->adap.dev.parent = &parent->dev;
+ priv->adap.retries = parent->retries;
+ priv->adap.timeout = parent->timeout;
/* Sanity check on class */
if (i2c_mux_parent_classes(parent) & class)
diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c
index 92d1206..f80b700 100644
--- a/drivers/idle/intel_idle.c
+++ b/drivers/idle/intel_idle.c
@@ -377,6 +377,9 @@ static int intel_idle(struct cpuidle_device *dev,
if (!current_set_polling_and_test()) {
+ if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
+ clflush((void *)&current_thread_info()->flags);
+
__monitor((void *)&current_thread_info()->flags, 0, 0);
smp_mb();
if (!need_resched())
diff --git a/drivers/iio/adc/ad7887.c b/drivers/iio/adc/ad7887.c
index acb7f90..749a6ca 100644
--- a/drivers/iio/adc/ad7887.c
+++ b/drivers/iio/adc/ad7887.c
@@ -200,7 +200,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 1,
.scan_index = 1,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[1] = {
.type = IIO_VOLTAGE,
@@ -210,7 +216,13 @@ static const struct ad7887_chip_info ad7887_chip_info_tbl[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = 0,
.scan_index = 0,
- .scan_type = IIO_ST('u', 12, 16, 0),
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 12,
+ .storagebits = 16,
+ .shift = 0,
+ .endianness = IIO_BE,
+ },
},
.channel[2] = IIO_CHAN_SOFT_TIMESTAMP(2),
.int_vref_mv = 2500,
diff --git a/drivers/iio/imu/adis16400_core.c b/drivers/iio/imu/adis16400_core.c
index 3fb7757..368660d 100644
--- a/drivers/iio/imu/adis16400_core.c
+++ b/drivers/iio/imu/adis16400_core.c
@@ -651,7 +651,12 @@ static const struct iio_chan_spec adis16448_channels[] = {
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
.address = ADIS16448_BARO_OUT,
.scan_index = ADIS16400_SCAN_BARO,
- .scan_type = IIO_ST('s', 16, 16, 0),
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_BE,
+ },
},
ADIS16400_TEMP_CHAN(ADIS16448_TEMP_OUT, 12),
IIO_CHAN_SOFT_TIMESTAMP(11)
diff --git a/drivers/iio/light/cm36651.c b/drivers/iio/light/cm36651.c
index 21df571..0922e39 100644
--- a/drivers/iio/light/cm36651.c
+++ b/drivers/iio/light/cm36651.c
@@ -387,7 +387,7 @@ static int cm36651_read_int_time(struct cm36651_data *cm36651,
return -EINVAL;
}
- return IIO_VAL_INT_PLUS_MICRO;
+ return IIO_VAL_INT;
}
static int cm36651_write_int_time(struct cm36651_data *cm36651,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index c47c203..0717940 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -181,9 +181,16 @@ static void add_ref(struct iw_cm_id *cm_id)
static void rem_ref(struct iw_cm_id *cm_id)
{
struct iwcm_id_private *cm_id_priv;
+ int cb_destroy;
+
cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
- if (iwcm_deref_id(cm_id_priv) &&
- test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags)) {
+
+ /*
+ * Test bit before deref in case the cm_id gets freed on another
+ * thread.
+ */
+ cb_destroy = test_bit(IWCM_F_CALLBACK_DESTROY, &cm_id_priv->flags);
+ if (iwcm_deref_id(cm_id_priv) && cb_destroy) {
BUG_ON(!list_empty(&cm_id_priv->work_list));
free_cm_id(cm_id_priv);
}
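
The rem_ref() fix above reads the destroy flag before dropping the reference, because dropping the last reference can let another thread free the cm_id. A minimal single-threaded userspace sketch of that ordering, with hypothetical obj/deref() stand-ins for the iwcm types (not the driver's actual helpers):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
        int refcount;
        bool callback_destroy;  /* set by an event-handler path */
};

/* Returns true when this call dropped the last reference. */
static bool deref(struct obj *o)
{
        return --o->refcount == 0;
}

static void rem_ref(struct obj *o)
{
        /*
         * Read the flag before dropping the reference: once the last
         * reference is gone another thread may free *o, and touching it
         * afterwards would be a use-after-free.
         */
        bool cb_destroy = o->callback_destroy;

        if (deref(o) && cb_destroy) {
                printf("last ref dropped, freeing for the callback\n");
                free(o);
        }
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        o->callback_destroy = true;
        rem_ref(o);
        return 0;
}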
diff --git a/drivers/infiniband/core/uverbs.h b/drivers/infiniband/core/uverbs.h
index bdc842e..a283274 100644
--- a/drivers/infiniband/core/uverbs.h
+++ b/drivers/infiniband/core/uverbs.h
@@ -49,12 +49,20 @@
#define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
do { \
- (udata)->inbuf = (void __user *) (ibuf); \
+ (udata)->inbuf = (const void __user *) (ibuf); \
(udata)->outbuf = (void __user *) (obuf); \
(udata)->inlen = (ilen); \
(udata)->outlen = (olen); \
} while (0)
+#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen) \
+ do { \
+ (udata)->inbuf = (ilen) ? (const void __user *) (ibuf) : NULL; \
+ (udata)->outbuf = (olen) ? (void __user *) (obuf) : NULL; \
+ (udata)->inlen = (ilen); \
+ (udata)->outlen = (olen); \
+ } while (0)
+
/*
* Our lifetime rules for these structs are the following:
*
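
INIT_UDATA_BUF_OR_NULL() leaves a user buffer pointer NULL whenever the corresponding length is zero, so later copy helpers can tell "no buffer supplied" from "zero-length buffer at some address". A userspace sketch of that convention with a simplified udata struct (the __user annotations and real uverbs types are dropped):

#include <stddef.h>
#include <stdio.h>

struct udata {
        const void *inbuf;
        void *outbuf;
        size_t inlen;
        size_t outlen;
};

#define INIT_UDATA_BUF_OR_NULL(udata, ibuf, obuf, ilen, olen)                \
        do {                                                                 \
                (udata)->inbuf  = (ilen) ? (const void *)(ibuf) : NULL;      \
                (udata)->outbuf = (olen) ? (void *)(obuf) : NULL;            \
                (udata)->inlen  = (ilen);                                    \
                (udata)->outlen = (olen);                                    \
        } while (0)

int main(void)
{
        char in[16], out[16];
        struct udata u;

        /* A command with an input payload but no response buffer. */
        INIT_UDATA_BUF_OR_NULL(&u, in, out, sizeof(in), 0);

        printf("inlen=%zu (inbuf %s), outlen=%zu (outbuf %s)\n",
               u.inlen, u.inbuf ? "set" : "NULL",
               u.outlen, u.outbuf ? "set" : "NULL");
        return 0;
}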
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 65f6e7d..f1cc838 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -2593,6 +2593,9 @@ out_put:
static int kern_spec_to_ib_spec(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
+ if (kern_spec->reserved)
+ return -EINVAL;
+
ib_spec->type = kern_spec->type;
switch (ib_spec->type) {
@@ -2646,6 +2649,9 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
void *ib_spec;
int i;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
if (ucore->outlen < sizeof(resp))
return -ENOSPC;
@@ -2671,6 +2677,10 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
(cmd.flow_attr.num_of_specs * sizeof(struct ib_uverbs_flow_spec)))
return -EINVAL;
+ if (cmd.flow_attr.reserved[0] ||
+ cmd.flow_attr.reserved[1])
+ return -EINVAL;
+
if (cmd.flow_attr.num_of_specs) {
kern_flow_attr = kmalloc(sizeof(*kern_flow_attr) + cmd.flow_attr.size,
GFP_KERNEL);
@@ -2731,6 +2741,7 @@ int ib_uverbs_ex_create_flow(struct ib_uverbs_file *file,
if (cmd.flow_attr.size || (i != flow_attr->num_of_specs)) {
pr_warn("create flow failed, flow %d: %d bytes left from uverb cmd\n",
i, cmd.flow_attr.size);
+ err = -EINVAL;
goto err_free;
}
flow_id = ib_create_flow(qp, flow_attr, IB_FLOW_DOMAIN_USER);
@@ -2791,10 +2802,16 @@ int ib_uverbs_ex_destroy_flow(struct ib_uverbs_file *file,
struct ib_uobject *uobj;
int ret;
+ if (ucore->inlen < sizeof(cmd))
+ return -EINVAL;
+
ret = ib_copy_from_udata(&cmd, ucore, sizeof(cmd));
if (ret)
return ret;
+ if (cmd.comp_mask)
+ return -EINVAL;
+
uobj = idr_write_uobj(&ib_uverbs_rule_idr, cmd.flow_handle,
file->ucontext);
if (!uobj)
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index 3438694..08219fb 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -668,25 +668,30 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
if ((hdr.in_words + ex_hdr.provider_in_words) * 8 != count)
return -EINVAL;
+ if (ex_hdr.cmd_hdr_reserved)
+ return -EINVAL;
+
if (ex_hdr.response) {
if (!hdr.out_words && !ex_hdr.provider_out_words)
return -EINVAL;
+
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *) (unsigned long) ex_hdr.response,
+ (hdr.out_words + ex_hdr.provider_out_words) * 8))
+ return -EFAULT;
} else {
if (hdr.out_words || ex_hdr.provider_out_words)
return -EINVAL;
}
- INIT_UDATA(&ucore,
- (hdr.in_words) ? buf : 0,
- (unsigned long)ex_hdr.response,
- hdr.in_words * 8,
- hdr.out_words * 8);
-
- INIT_UDATA(&uhw,
- (ex_hdr.provider_in_words) ? buf + ucore.inlen : 0,
- (ex_hdr.provider_out_words) ? (unsigned long)ex_hdr.response + ucore.outlen : 0,
- ex_hdr.provider_in_words * 8,
- ex_hdr.provider_out_words * 8);
+ INIT_UDATA_BUF_OR_NULL(&ucore, buf, (unsigned long) ex_hdr.response,
+ hdr.in_words * 8, hdr.out_words * 8);
+
+ INIT_UDATA_BUF_OR_NULL(&uhw,
+ buf + ucore.inlen,
+ (unsigned long) ex_hdr.response + ucore.outlen,
+ ex_hdr.provider_in_words * 8,
+ ex_hdr.provider_out_words * 8);
err = uverbs_ex_cmd_table[command](file,
&ucore,
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 12fef76..4512687 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -524,50 +524,6 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
}
-#define VLAN_NONE 0xfff
-#define FILTER_SEL_VLAN_NONE 0xffff
-#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
-#define FILTER_SEL_WIDTH_VIN_P_FC \
- (6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
-#define FILTER_SEL_WIDTH_TAG_P_FC \
- (3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
-#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
-
-static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
- struct l2t_entry *l2t)
-{
- unsigned int ntuple = 0;
- u32 viid;
-
- switch (dev->rdev.lldi.filt_mode) {
-
- /* default filter mode */
- case HW_TPL_FR_MT_PR_IV_P_FC:
- if (l2t->vlan == VLAN_NONE)
- ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
- else {
- ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
- ntuple |= 1 << FILTER_SEL_WIDTH_TAG_P_FC;
- }
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- case HW_TPL_FR_MT_PR_OV_P_FC: {
- viid = cxgb4_port_viid(l2t->neigh->dev);
-
- ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
- ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
- ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
- ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
- FILTER_SEL_WIDTH_VLD_TAG_P_FC;
- break;
- }
- default:
- break;
- }
- return ntuple;
-}
-
static int send_connect(struct c4iw_ep *ep)
{
struct cpl_act_open_req *req;
@@ -641,8 +597,9 @@ static int send_connect(struct c4iw_ep *ep)
req->local_ip = la->sin_addr.s_addr;
req->peer_ip = ra->sin_addr.s_addr;
req->opt0 = cpu_to_be64(opt0);
- req->params = cpu_to_be32(select_ntuple(ep->com.dev,
- ep->dst, ep->l2t));
+ req->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req->opt2 = cpu_to_be32(opt2);
} else {
req6 = (struct cpl_act_open_req6 *)skb_put(skb, wrlen);
@@ -662,9 +619,9 @@ static int send_connect(struct c4iw_ep *ep)
req6->peer_ip_lo = *((__be64 *)
(ra6->sin6_addr.s6_addr + 8));
req6->opt0 = cpu_to_be64(opt0);
- req6->params = cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst,
- ep->l2t));
+ req6->params = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
req6->opt2 = cpu_to_be32(opt2);
}
} else {
@@ -681,8 +638,9 @@ static int send_connect(struct c4iw_ep *ep)
t5_req->peer_ip = ra->sin_addr.s_addr;
t5_req->opt0 = cpu_to_be64(opt0);
t5_req->params = cpu_to_be64(V_FILTER_TUPLE(
- select_ntuple(ep->com.dev,
- ep->dst, ep->l2t)));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t)));
t5_req->opt2 = cpu_to_be32(opt2);
} else {
t5_req6 = (struct cpl_t5_act_open_req6 *)
@@ -703,7 +661,9 @@ static int send_connect(struct c4iw_ep *ep)
(ra6->sin6_addr.s6_addr + 8));
t5_req6->opt0 = cpu_to_be64(opt0);
t5_req6->params = (__force __be64)cpu_to_be32(
- select_ntuple(ep->com.dev, ep->dst, ep->l2t));
+ cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
+ ep->l2t));
t5_req6->opt2 = cpu_to_be32(opt2);
}
}
@@ -1630,7 +1590,8 @@ static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
memset(req, 0, sizeof(*req));
req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
- req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+ req->le.filter = cpu_to_be32(cxgb4_select_ntuple(
+ ep->com.dev->rdev.lldi.ports[0],
ep->l2t));
sin = (struct sockaddr_in *)&ep->com.local_addr;
req->le.lport = sin->sin_port;
@@ -2938,7 +2899,8 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
/*
* Allocate a server TID.
*/
- if (dev->rdev.lldi.enable_fw_ofld_conn)
+ if (dev->rdev.lldi.enable_fw_ofld_conn &&
+ ep->com.local_addr.ss_family == AF_INET)
ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids,
cm_id->local_addr.ss_family, ep);
else
@@ -3323,9 +3285,7 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
/*
* Calculate the server tid from filter hit index from cpl_rx_pkt.
*/
- stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
- - dev->rdev.lldi.tids->sftid_base
- + dev->rdev.lldi.tids->nstids;
+ stid = (__force int) cpu_to_be32((__force u32) rss->hash_val);
lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
if (!lep) {
@@ -3397,7 +3357,9 @@ static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
window = (__force u16) htons((__force u16)tcph->window);
/* Calcuate filter portion for LE region. */
- filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+ filter = (__force unsigned int) cpu_to_be32(cxgb4_select_ntuple(
+ dev->rdev.lldi.ports[0],
+ e));
/*
* Synthesize the cpl_pass_accept_req. We have everything except the
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 4cb8eb2..84e4500 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -173,7 +173,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
return ret;
}
-int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
+static int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data)
{
u32 remain = len;
u32 dmalen;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
index c29b5c8..cdc7df4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_netlink.c
@@ -31,6 +31,7 @@
*/
#include <linux/netdevice.h>
+#include <linux/if_arp.h> /* For ARPHRD_xxx */
#include <linux/module.h>
#include <net/rtnetlink.h>
#include "ipoib.h"
@@ -103,7 +104,7 @@ static int ipoib_new_child_link(struct net *src_net, struct net_device *dev,
return -EINVAL;
pdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
- if (!pdev)
+ if (!pdev || pdev->type != ARPHRD_INFINIBAND)
return -ENODEV;
ppriv = netdev_priv(pdev);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 6be57c3..9804fca 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -207,7 +207,9 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
isert_conn->conn_rx_descs = NULL;
}
+static void isert_cq_tx_work(struct work_struct *);
static void isert_cq_tx_callback(struct ib_cq *, void *);
+static void isert_cq_rx_work(struct work_struct *);
static void isert_cq_rx_callback(struct ib_cq *, void *);
static int
@@ -259,26 +261,36 @@ isert_create_device_ib_res(struct isert_device *device)
cq_desc[i].device = device;
cq_desc[i].cq_index = i;
+ INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_rx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_RX_CQ_LEN, i);
- if (IS_ERR(device->dev_rx_cq[i]))
+ if (IS_ERR(device->dev_rx_cq[i])) {
+ ret = PTR_ERR(device->dev_rx_cq[i]);
+ device->dev_rx_cq[i] = NULL;
goto out_cq;
+ }
+ INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
isert_cq_tx_callback,
isert_cq_event_callback,
(void *)&cq_desc[i],
ISER_MAX_TX_CQ_LEN, i);
- if (IS_ERR(device->dev_tx_cq[i]))
+ if (IS_ERR(device->dev_tx_cq[i])) {
+ ret = PTR_ERR(device->dev_tx_cq[i]);
+ device->dev_tx_cq[i] = NULL;
goto out_cq;
+ }
- if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
- if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
+ ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
+ if (ret)
goto out_cq;
}
@@ -1724,7 +1736,6 @@ isert_cq_tx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
}
@@ -1768,7 +1779,6 @@ isert_cq_rx_callback(struct ib_cq *cq, void *context)
{
struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
- INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
}
diff --git a/drivers/input/input.c b/drivers/input/input.c
index 846ccdd..d2965e4 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -1871,6 +1871,10 @@ void input_set_capability(struct input_dev *dev, unsigned int type, unsigned int
break;
case EV_ABS:
+ input_alloc_absinfo(dev);
+ if (!dev->absinfo)
+ return;
+
__set_bit(code, dev->absbit);
break;
diff --git a/drivers/input/misc/adxl34x.c b/drivers/input/misc/adxl34x.c
index 0735de3..1cb1da2 100644
--- a/drivers/input/misc/adxl34x.c
+++ b/drivers/input/misc/adxl34x.c
@@ -158,7 +158,7 @@
/* ORIENT ADXL346 only */
#define ADXL346_2D_VALID (1 << 6)
-#define ADXL346_2D_ORIENT(x) (((x) & 0x3) >> 4)
+#define ADXL346_2D_ORIENT(x) (((x) & 0x30) >> 4)
#define ADXL346_3D_VALID (1 << 3)
#define ADXL346_3D_ORIENT(x) ((x) & 0x7)
#define ADXL346_2D_PORTRAIT_POS 0 /* +X */
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 98707fb..8f4c4ab 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -455,16 +455,26 @@ static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(proto);
static DEVICE_ATTR_RO(id);
static DEVICE_ATTR_RO(extra);
-static DEVICE_ATTR_RO(modalias);
-static DEVICE_ATTR_WO(drvctl);
-static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
-static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
static struct attribute *serio_device_id_attrs[] = {
&dev_attr_type.attr,
&dev_attr_proto.attr,
&dev_attr_id.attr,
&dev_attr_extra.attr,
+ NULL
+};
+
+static struct attribute_group serio_id_attr_group = {
+ .name = "id",
+ .attrs = serio_device_id_attrs,
+};
+
+static DEVICE_ATTR_RO(modalias);
+static DEVICE_ATTR_WO(drvctl);
+static DEVICE_ATTR(description, S_IRUGO, serio_show_description, NULL);
+static DEVICE_ATTR(bind_mode, S_IWUSR | S_IRUGO, serio_show_bind_mode, serio_set_bind_mode);
+
+static struct attribute *serio_device_attrs[] = {
&dev_attr_modalias.attr,
&dev_attr_description.attr,
&dev_attr_drvctl.attr,
@@ -472,13 +482,13 @@ static struct attribute *serio_device_id_attrs[] = {
NULL
};
-static struct attribute_group serio_id_attr_group = {
- .name = "id",
- .attrs = serio_device_id_attrs,
+static struct attribute_group serio_device_attr_group = {
+ .attrs = serio_device_attrs,
};
static const struct attribute_group *serio_device_attr_groups[] = {
&serio_id_attr_group,
+ &serio_device_attr_group,
NULL
};
diff --git a/drivers/input/touchscreen/zforce_ts.c b/drivers/input/touchscreen/zforce_ts.c
index 75762d6..aa127ba 100644
--- a/drivers/input/touchscreen/zforce_ts.c
+++ b/drivers/input/touchscreen/zforce_ts.c
@@ -455,7 +455,18 @@ static void zforce_complete(struct zforce_ts *ts, int cmd, int result)
}
}
-static irqreturn_t zforce_interrupt(int irq, void *dev_id)
+static irqreturn_t zforce_irq(int irq, void *dev_id)
+{
+ struct zforce_ts *ts = dev_id;
+ struct i2c_client *client = ts->client;
+
+ if (ts->suspended && device_may_wakeup(&client->dev))
+ pm_wakeup_event(&client->dev, 500);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static irqreturn_t zforce_irq_thread(int irq, void *dev_id)
{
struct zforce_ts *ts = dev_id;
struct i2c_client *client = ts->client;
@@ -465,12 +476,10 @@ static irqreturn_t zforce_interrupt(int irq, void *dev_id)
u8 *payload;
/*
- * When suspended, emit a wakeup signal if necessary and return.
+ * When still suspended, return.
* Due to the level-interrupt we will get re-triggered later.
*/
if (ts->suspended) {
- if (device_may_wakeup(&client->dev))
- pm_wakeup_event(&client->dev, 500);
msleep(20);
return IRQ_HANDLED;
}
@@ -763,8 +772,8 @@ static int zforce_probe(struct i2c_client *client,
* Therefore we can trigger the interrupt anytime it is low and do
* not need to limit it to the interrupt edge.
*/
- ret = devm_request_threaded_irq(&client->dev, client->irq, NULL,
- zforce_interrupt,
+ ret = devm_request_threaded_irq(&client->dev, client->irq,
+ zforce_irq, zforce_irq_thread,
IRQF_TRIGGER_LOW | IRQF_ONESHOT,
input_dev->name, ts);
if (ret) {
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index 1abfb56..e46a8870 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -392,7 +392,7 @@ struct arm_smmu_domain {
struct arm_smmu_cfg root_cfg;
phys_addr_t output_mask;
- spinlock_t lock;
+ struct mutex lock;
};
static DEFINE_SPINLOCK(arm_smmu_devices_lock);
@@ -900,7 +900,7 @@ static int arm_smmu_domain_init(struct iommu_domain *domain)
goto out_free_domain;
smmu_domain->root_cfg.pgd = pgd;
- spin_lock_init(&smmu_domain->lock);
+ mutex_init(&smmu_domain->lock);
domain->priv = smmu_domain;
return 0;
@@ -1137,7 +1137,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
* Sanity check the domain. We don't currently support domains
* that cross between different SMMU chains.
*/
- spin_lock(&smmu_domain->lock);
+ mutex_lock(&smmu_domain->lock);
if (!smmu_domain->leaf_smmu) {
/* Now that we have a master, we can finalise the domain */
ret = arm_smmu_init_domain_context(domain, dev);
@@ -1152,7 +1152,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
dev_name(device_smmu->dev));
goto err_unlock;
}
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
/* Looks ok, so add the device to the domain */
master = find_smmu_master(smmu_domain->leaf_smmu, dev->of_node);
@@ -1162,7 +1162,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
return arm_smmu_domain_add_master(smmu_domain, master);
err_unlock:
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
return ret;
}
@@ -1394,7 +1394,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
if (paddr & ~output_mask)
return -ERANGE;
- spin_lock(&smmu_domain->lock);
+ mutex_lock(&smmu_domain->lock);
pgd += pgd_index(iova);
end = iova + size;
do {
@@ -1410,7 +1410,7 @@ static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain,
} while (pgd++, iova != end);
out_unlock:
- spin_unlock(&smmu_domain->lock);
+ mutex_unlock(&smmu_domain->lock);
/* Ensure new page tables are visible to the hardware walker */
if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
@@ -1423,9 +1423,8 @@ static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int flags)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
- struct arm_smmu_device *smmu = smmu_domain->leaf_smmu;
- if (!smmu_domain || !smmu)
+ if (!smmu_domain)
return -ENODEV;
/* Check for silent address truncation up the SMMU chain. */
@@ -1449,44 +1448,34 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain,
dma_addr_t iova)
{
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
+ pgd_t *pgdp, pgd;
+ pud_t pud;
+ pmd_t pmd;
+ pte_t pte;
struct arm_smmu_domain *smmu_domain = domain->priv;
struct arm_smmu_cfg *root_cfg = &smmu_domain->root_cfg;
- struct arm_smmu_device *smmu = root_cfg->smmu;
- spin_lock(&smmu_domain->lock);
- pgd = root_cfg->pgd;
- if (!pgd)
- goto err_unlock;
+ pgdp = root_cfg->pgd;
+ if (!pgdp)
+ return 0;
- pgd += pgd_index(iova);
- if (pgd_none_or_clear_bad(pgd))
- goto err_unlock;
+ pgd = *(pgdp + pgd_index(iova));
+ if (pgd_none(pgd))
+ return 0;
- pud = pud_offset(pgd, iova);
- if (pud_none_or_clear_bad(pud))
- goto err_unlock;
+ pud = *pud_offset(&pgd, iova);
+ if (pud_none(pud))
+ return 0;
- pmd = pmd_offset(pud, iova);
- if (pmd_none_or_clear_bad(pmd))
- goto err_unlock;
+ pmd = *pmd_offset(&pud, iova);
+ if (pmd_none(pmd))
+ return 0;
- pte = pmd_page_vaddr(*pmd) + pte_index(iova);
+ pte = *(pmd_page_vaddr(pmd) + pte_index(iova));
if (pte_none(pte))
- goto err_unlock;
-
- spin_unlock(&smmu_domain->lock);
- return __pfn_to_phys(pte_pfn(*pte)) | (iova & ~PAGE_MASK);
+ return 0;
-err_unlock:
- spin_unlock(&smmu_domain->lock);
- dev_warn(smmu->dev,
- "invalid (corrupt?) page tables detected for iova 0x%llx\n",
- (unsigned long long)iova);
- return -EINVAL;
+ return __pfn_to_phys(pte_pfn(pte)) | (iova & ~PAGE_MASK);
}
static int arm_smmu_domain_has_cap(struct iommu_domain *domain,
@@ -1863,6 +1852,7 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
dev_err(dev,
"found only %d context interrupt(s) but %d required\n",
smmu->num_context_irqs, smmu->num_context_banks);
+ err = -ENODEV;
goto out_put_parent;
}
diff --git a/drivers/irqchip/irq-renesas-intc-irqpin.c b/drivers/irqchip/irq-renesas-intc-irqpin.c
index 82cec63..3ee78f0 100644
--- a/drivers/irqchip/irq-renesas-intc-irqpin.c
+++ b/drivers/irqchip/irq-renesas-intc-irqpin.c
@@ -149,8 +149,9 @@ static void intc_irqpin_read_modify_write(struct intc_irqpin_priv *p,
static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
int irq, int do_mask)
{
- int bitfield_width = 4; /* PRIO assumed to have fixed bitfield width */
- int shift = (7 - irq) * bitfield_width; /* PRIO assumed to be 32-bit */
+ /* The PRIO register is assumed to be 32-bit with fixed 4-bit fields. */
+ int bitfield_width = 4;
+ int shift = 32 - (irq + 1) * bitfield_width;
intc_irqpin_read_modify_write(p, INTC_IRQPIN_REG_PRIO,
shift, bitfield_width,
@@ -159,8 +160,9 @@ static void intc_irqpin_mask_unmask_prio(struct intc_irqpin_priv *p,
static int intc_irqpin_set_sense(struct intc_irqpin_priv *p, int irq, int value)
{
+ /* The SENSE register is assumed to be 32-bit. */
int bitfield_width = p->config.sense_bitfield_width;
- int shift = (7 - irq) * bitfield_width; /* SENSE assumed to be 32-bit */
+ int shift = 32 - (irq + 1) * bitfield_width;
dev_dbg(&p->pdev->dev, "sense irq = %d, mode = %d\n", irq, value);
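
The shift fix above matters because the old formula (7 - irq) * width only packs a 32-bit register correctly when the field width is 4 bits; 32 - (irq + 1) * width packs from the most-significant end for any width. A tiny sketch that prints both for a 2-bit sense field:

/* Compare the old and new bitfield shift formulas for a 32-bit register
 * split into equal per-IRQ fields (the SENSE field width varies per SoC). */
#include <stdio.h>

int main(void)
{
        int width = 2;  /* e.g. a 2-bit sense field */
        int irq;

        for (irq = 0; irq < 8; irq++) {
                int old_shift = (7 - irq) * width;          /* wrong unless width == 4 */
                int new_shift = 32 - (irq + 1) * width;     /* packs from the MSB end */

                printf("irq %d: old shift %2d, new shift %2d\n",
                       irq, old_shift, new_shift);
        }
        return 0;
}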
diff --git a/drivers/isdn/hisax/hfc_pci.c b/drivers/isdn/hisax/hfc_pci.c
index 497bd02..4a48255 100644
--- a/drivers/isdn/hisax/hfc_pci.c
+++ b/drivers/isdn/hisax/hfc_pci.c
@@ -1643,10 +1643,6 @@ setup_hfcpci(struct IsdnCard *card)
int i;
struct pci_dev *tmp_hfcpci = NULL;
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, hfcpci_revision);
printk(KERN_INFO "HiSax: HFC-PCI driver Rev. %s\n", HiSax_getrev(tmp));
diff --git a/drivers/isdn/hisax/telespci.c b/drivers/isdn/hisax/telespci.c
index f6ab63a..33eeb46 100644
--- a/drivers/isdn/hisax/telespci.c
+++ b/drivers/isdn/hisax/telespci.c
@@ -290,10 +290,6 @@ int setup_telespci(struct IsdnCard *card)
struct IsdnCardState *cs = card->cs;
char tmp[64];
-#ifdef __BIG_ENDIAN
-#error "not running on big endian machines now"
-#endif
-
strcpy(tmp, telespci_revision);
printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ != ISDN_CTYPE_TELESPCI)
diff --git a/drivers/md/bcache/alloc.c b/drivers/md/bcache/alloc.c
index 2b46bf1..4c9852d 100644
--- a/drivers/md/bcache/alloc.c
+++ b/drivers/md/bcache/alloc.c
@@ -421,9 +421,11 @@ out:
if (watermark <= WATERMARK_METADATA) {
SET_GC_MARK(b, GC_MARK_METADATA);
+ SET_GC_MOVE(b, 0);
b->prio = BTREE_PRIO;
} else {
SET_GC_MARK(b, GC_MARK_RECLAIMABLE);
+ SET_GC_MOVE(b, 0);
b->prio = INITIAL_PRIO;
}
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 4beb55a..754f4317 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -197,7 +197,7 @@ struct bucket {
uint8_t disk_gen;
uint8_t last_gc; /* Most out of date gen in the btree */
uint8_t gc_gen;
- uint16_t gc_mark;
+ uint16_t gc_mark; /* Bitfield used by GC. See below for field */
};
/*
@@ -209,7 +209,8 @@ BITMASK(GC_MARK, struct bucket, gc_mark, 0, 2);
#define GC_MARK_RECLAIMABLE 0
#define GC_MARK_DIRTY 1
#define GC_MARK_METADATA 2
-BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 14);
+BITMASK(GC_SECTORS_USED, struct bucket, gc_mark, 2, 13);
+BITMASK(GC_MOVE, struct bucket, gc_mark, 15, 1);
#include "journal.h"
#include "stats.h"
@@ -372,14 +373,14 @@ struct cached_dev {
unsigned char writeback_percent;
unsigned writeback_delay;
- int writeback_rate_change;
- int64_t writeback_rate_derivative;
uint64_t writeback_rate_target;
+ int64_t writeback_rate_proportional;
+ int64_t writeback_rate_derivative;
+ int64_t writeback_rate_change;
unsigned writeback_rate_update_seconds;
unsigned writeback_rate_d_term;
unsigned writeback_rate_p_term_inverse;
- unsigned writeback_rate_d_smooth;
};
enum alloc_watermarks {
@@ -445,7 +446,6 @@ struct cache {
* call prio_write() to keep gens from wrapping.
*/
uint8_t need_save_prio;
- unsigned gc_move_threshold;
/*
* If nonzero, we know we aren't going to find any buckets to invalidate
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765a..31bb53f 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -1561,6 +1561,28 @@ size_t bch_btree_gc_finish(struct cache_set *c)
SET_GC_MARK(PTR_BUCKET(c, &c->uuid_bucket, i),
GC_MARK_METADATA);
+ /* don't reclaim buckets to which writeback keys point */
+ rcu_read_lock();
+ for (i = 0; i < c->nr_uuids; i++) {
+ struct bcache_device *d = c->devices[i];
+ struct cached_dev *dc;
+ struct keybuf_key *w, *n;
+ unsigned j;
+
+ if (!d || UUID_FLASH_ONLY(&c->uuids[i]))
+ continue;
+ dc = container_of(d, struct cached_dev, disk);
+
+ spin_lock(&dc->writeback_keys.lock);
+ rbtree_postorder_for_each_entry_safe(w, n,
+ &dc->writeback_keys.keys, node)
+ for (j = 0; j < KEY_PTRS(&w->key); j++)
+ SET_GC_MARK(PTR_BUCKET(c, &w->key, j),
+ GC_MARK_DIRTY);
+ spin_unlock(&dc->writeback_keys.lock);
+ }
+ rcu_read_unlock();
+
for_each_cache(ca, c, i) {
uint64_t *i;
@@ -1817,7 +1839,8 @@ static bool fix_overlapping_extents(struct btree *b, struct bkey *insert,
if (KEY_START(k) > KEY_START(insert) + sectors_found)
goto check_failed;
- if (KEY_PTRS(replace_key) != KEY_PTRS(k))
+ if (KEY_PTRS(k) != KEY_PTRS(replace_key) ||
+ KEY_DIRTY(k) != KEY_DIRTY(replace_key))
goto check_failed;
/* skip past gen */
@@ -2217,7 +2240,7 @@ struct btree_insert_op {
struct bkey *replace_key;
};
-int btree_insert_fn(struct btree_op *b_op, struct btree *b)
+static int btree_insert_fn(struct btree_op *b_op, struct btree *b)
{
struct btree_insert_op *op = container_of(b_op,
struct btree_insert_op, op);
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e..f2f0998 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -25,10 +25,9 @@ static bool moving_pred(struct keybuf *buf, struct bkey *k)
unsigned i;
for (i = 0; i < KEY_PTRS(k); i++) {
- struct cache *ca = PTR_CACHE(c, k, i);
struct bucket *g = PTR_BUCKET(c, k, i);
- if (GC_SECTORS_USED(g) < ca->gc_move_threshold)
+ if (GC_MOVE(g))
return true;
}
@@ -65,11 +64,16 @@ static void write_moving_finish(struct closure *cl)
static void read_moving_endio(struct bio *bio, int error)
{
+ struct bbio *b = container_of(bio, struct bbio, bio);
struct moving_io *io = container_of(bio->bi_private,
struct moving_io, cl);
if (error)
io->op.error = error;
+ else if (!KEY_DIRTY(&b->key) &&
+ ptr_stale(io->op.c, &b->key, 0)) {
+ io->op.error = -EINTR;
+ }
bch_bbio_endio(io->op.c, bio, error, "reading data to move");
}
@@ -141,6 +145,11 @@ static void read_moving(struct cache_set *c)
if (!w)
break;
+ if (ptr_stale(c, &w->key, 0)) {
+ bch_keybuf_del(&c->moving_gc_keys, w);
+ continue;
+ }
+
io = kzalloc(sizeof(struct moving_io) + sizeof(struct bio_vec)
* DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS),
GFP_KERNEL);
@@ -184,7 +193,8 @@ static bool bucket_cmp(struct bucket *l, struct bucket *r)
static unsigned bucket_heap_top(struct cache *ca)
{
- return GC_SECTORS_USED(heap_peek(&ca->heap));
+ struct bucket *b;
+ return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
}
void bch_moving_gc(struct cache_set *c)
@@ -226,9 +236,8 @@ void bch_moving_gc(struct cache_set *c)
sectors_to_move -= GC_SECTORS_USED(b);
}
- ca->gc_move_threshold = bucket_heap_top(ca);
-
- pr_debug("threshold %u", ca->gc_move_threshold);
+ while (heap_pop(&ca->heap, b, bucket_cmp))
+ SET_GC_MOVE(b, 1);
}
mutex_unlock(&c->bucket_lock);
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index dec15cd..c57bfa0 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1676,7 +1676,7 @@ err:
static bool can_attach_cache(struct cache *ca, struct cache_set *c)
{
return ca->sb.block_size == c->sb.block_size &&
- ca->sb.bucket_size == c->sb.block_size &&
+ ca->sb.bucket_size == c->sb.bucket_size &&
ca->sb.nr_in_set == c->sb.nr_in_set;
}
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 80d4c2b..a1f8561 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -83,7 +83,6 @@ rw_attribute(writeback_rate);
rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_d_term);
rw_attribute(writeback_rate_p_term_inverse);
-rw_attribute(writeback_rate_d_smooth);
read_attribute(writeback_rate_debug);
read_attribute(stripe_size);
@@ -129,31 +128,41 @@ SHOW(__bch_cached_dev)
var_printf(writeback_running, "%i");
var_print(writeback_delay);
var_print(writeback_percent);
- sysfs_print(writeback_rate, dc->writeback_rate.rate);
+ sysfs_hprint(writeback_rate, dc->writeback_rate.rate << 9);
var_print(writeback_rate_update_seconds);
var_print(writeback_rate_d_term);
var_print(writeback_rate_p_term_inverse);
- var_print(writeback_rate_d_smooth);
if (attr == &sysfs_writeback_rate_debug) {
+ char rate[20];
char dirty[20];
- char derivative[20];
char target[20];
- bch_hprint(dirty,
- bcache_dev_sectors_dirty(&dc->disk) << 9);
- bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ char proportional[20];
+ char derivative[20];
+ char change[20];
+ s64 next_io;
+
+ bch_hprint(rate, dc->writeback_rate.rate << 9);
+ bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
bch_hprint(target, dc->writeback_rate_target << 9);
+ bch_hprint(proportional,dc->writeback_rate_proportional << 9);
+ bch_hprint(derivative, dc->writeback_rate_derivative << 9);
+ bch_hprint(change, dc->writeback_rate_change << 9);
+
+ next_io = div64_s64(dc->writeback_rate.next - local_clock(),
+ NSEC_PER_MSEC);
return sprintf(buf,
- "rate:\t\t%u\n"
- "change:\t\t%i\n"
+ "rate:\t\t%s/sec\n"
"dirty:\t\t%s\n"
+ "target:\t\t%s\n"
+ "proportional:\t%s\n"
"derivative:\t%s\n"
- "target:\t\t%s\n",
- dc->writeback_rate.rate,
- dc->writeback_rate_change,
- dirty, derivative, target);
+ "change:\t\t%s/sec\n"
+ "next io:\t%llims\n",
+ rate, dirty, target, proportional,
+ derivative, change, next_io);
}
sysfs_hprint(dirty_data,
@@ -189,6 +198,7 @@ STORE(__cached_dev)
struct kobj_uevent_env *env;
#define d_strtoul(var) sysfs_strtoul(var, dc->var)
+#define d_strtoul_nonzero(var) sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var) sysfs_hatoi(var, dc->var)
sysfs_strtoul(data_csum, dc->disk.data_csum);
@@ -197,16 +207,15 @@ STORE(__cached_dev)
d_strtoul(writeback_metadata);
d_strtoul(writeback_running);
d_strtoul(writeback_delay);
- sysfs_strtoul_clamp(writeback_rate,
- dc->writeback_rate.rate, 1, 1000000);
+
sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent, 0, 40);
- d_strtoul(writeback_rate_update_seconds);
+ sysfs_strtoul_clamp(writeback_rate,
+ dc->writeback_rate.rate, 1, INT_MAX);
+
+ d_strtoul_nonzero(writeback_rate_update_seconds);
d_strtoul(writeback_rate_d_term);
- d_strtoul(writeback_rate_p_term_inverse);
- sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
- dc->writeback_rate_p_term_inverse, 1, INT_MAX);
- d_strtoul(writeback_rate_d_smooth);
+ d_strtoul_nonzero(writeback_rate_p_term_inverse);
d_strtoi_h(sequential_cutoff);
d_strtoi_h(readahead);
@@ -313,7 +322,6 @@ static struct attribute *bch_cached_dev_files[] = {
&sysfs_writeback_rate_update_seconds,
&sysfs_writeback_rate_d_term,
&sysfs_writeback_rate_p_term_inverse,
- &sysfs_writeback_rate_d_smooth,
&sysfs_writeback_rate_debug,
&sysfs_dirty_data,
&sysfs_stripe_size,
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214e..bb37618 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -209,7 +209,13 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
{
uint64_t now = local_clock();
- d->next += div_u64(done, d->rate);
+ d->next += div_u64(done * NSEC_PER_SEC, d->rate);
+
+ if (time_before64(now + NSEC_PER_SEC, d->next))
+ d->next = now + NSEC_PER_SEC;
+
+ if (time_after64(now - NSEC_PER_SEC * 2, d->next))
+ d->next = now - NSEC_PER_SEC * 2;
return time_after64(d->next, now)
? div_u64(d->next - now, NSEC_PER_SEC / HZ)
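
With this change the ratelimit's rate is interpreted as units per second, and the computed deadline is clamped to roughly [now - 2s, now + 1s] so idle periods cannot bank unlimited credit and a burst cannot push the deadline far into the future. A userspace sketch of the arithmetic, returning nanoseconds instead of jiffies (names are stand-ins, not the bcache structures):

#include <inttypes.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratelimit {
        uint64_t next;  /* absolute deadline, ns */
        uint64_t rate;  /* units per second */
};

/* Returns how long to wait (ns) before issuing more work. */
static uint64_t next_delay(struct ratelimit *d, uint64_t done, uint64_t now)
{
        d->next += done * NSEC_PER_SEC / d->rate;

        if (d->next > now + NSEC_PER_SEC)               /* don't schedule too far ahead */
                d->next = now + NSEC_PER_SEC;
        if (now >= 2 * NSEC_PER_SEC && d->next < now - 2 * NSEC_PER_SEC)
                d->next = now - 2 * NSEC_PER_SEC;       /* don't bank unbounded credit */

        return d->next > now ? d->next - now : 0;
}

int main(void)
{
        struct ratelimit d = { .next = 10 * NSEC_PER_SEC, .rate = 1024 };

        /* 512 units at 1024 units/s should cost about half a second. */
        printf("delay: %" PRIu64 " ns\n",
               next_delay(&d, 512, 10 * NSEC_PER_SEC));
        return 0;
}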
diff --git a/drivers/md/bcache/util.h b/drivers/md/bcache/util.h
index 362c4b3..1030c60 100644
--- a/drivers/md/bcache/util.h
+++ b/drivers/md/bcache/util.h
@@ -110,7 +110,7 @@ do { \
_r; \
})
-#define heap_peek(h) ((h)->size ? (h)->data[0] : NULL)
+#define heap_peek(h) ((h)->used ? (h)->data[0] : NULL)
#define heap_full(h) ((h)->used == (h)->size)
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1..6c44fe0 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -30,38 +30,40 @@ static void __update_writeback_rate(struct cached_dev *dc)
/* PD controller */
- int change = 0;
- int64_t error;
int64_t dirty = bcache_dev_sectors_dirty(&dc->disk);
int64_t derivative = dirty - dc->disk.sectors_dirty_last;
+ int64_t proportional = dirty - target;
+ int64_t change;
dc->disk.sectors_dirty_last = dirty;
- derivative *= dc->writeback_rate_d_term;
- derivative = clamp(derivative, -dirty, dirty);
+ /* Scale to sectors per second */
- derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
- dc->writeback_rate_d_smooth, 0);
+ proportional *= dc->writeback_rate_update_seconds;
+ proportional = div_s64(proportional, dc->writeback_rate_p_term_inverse);
- /* Avoid divide by zero */
- if (!target)
- goto out;
+ derivative = div_s64(derivative, dc->writeback_rate_update_seconds);
- error = div64_s64((dirty + derivative - target) << 8, target);
+ derivative = ewma_add(dc->disk.sectors_dirty_derivative, derivative,
+ (dc->writeback_rate_d_term /
+ dc->writeback_rate_update_seconds) ?: 1, 0);
+
+ derivative *= dc->writeback_rate_d_term;
+ derivative = div_s64(derivative, dc->writeback_rate_p_term_inverse);
- change = div_s64((dc->writeback_rate.rate * error) >> 8,
- dc->writeback_rate_p_term_inverse);
+ change = proportional + derivative;
/* Don't increase writeback rate if the device isn't keeping up */
if (change > 0 &&
time_after64(local_clock(),
- dc->writeback_rate.next + 10 * NSEC_PER_MSEC))
+ dc->writeback_rate.next + NSEC_PER_MSEC))
change = 0;
dc->writeback_rate.rate =
- clamp_t(int64_t, dc->writeback_rate.rate + change,
+ clamp_t(int64_t, (int64_t) dc->writeback_rate.rate + change,
1, NSEC_PER_MSEC);
-out:
+
+ dc->writeback_rate_proportional = proportional;
dc->writeback_rate_derivative = derivative;
dc->writeback_rate_change = change;
dc->writeback_rate_target = target;
@@ -87,15 +89,11 @@ static void update_writeback_rate(struct work_struct *work)
static unsigned writeback_delay(struct cached_dev *dc, unsigned sectors)
{
- uint64_t ret;
-
if (test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags) ||
!dc->writeback_percent)
return 0;
- ret = bch_next_delay(&dc->writeback_rate, sectors * 10000000ULL);
-
- return min_t(uint64_t, ret, HZ);
+ return bch_next_delay(&dc->writeback_rate, sectors);
}
struct dirty_io {
@@ -241,7 +239,7 @@ static void read_dirty(struct cached_dev *dc)
if (KEY_START(&w->key) != dc->last_read ||
jiffies_to_msecs(delay) > 50)
while (!kthread_should_stop() && delay)
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
dc->last_read = KEY_OFFSET(&w->key);
@@ -438,7 +436,7 @@ static int bch_writeback_thread(void *arg)
while (delay &&
!kthread_should_stop() &&
!test_bit(BCACHE_DEV_DETACHING, &dc->disk.flags))
- delay = schedule_timeout_interruptible(delay);
+ delay = schedule_timeout_uninterruptible(delay);
}
}
@@ -476,6 +474,8 @@ void bch_sectors_dirty_init(struct cached_dev *dc)
bch_btree_map_keys(&op.op, dc->disk.c, &KEY(op.inode, 0, 0),
sectors_dirty_init_fn, 0);
+
+ dc->disk.sectors_dirty_last = bcache_dev_sectors_dirty(&dc->disk);
}
int bch_cached_dev_writeback_init(struct cached_dev *dc)
@@ -490,18 +490,15 @@ int bch_cached_dev_writeback_init(struct cached_dev *dc)
dc->writeback_delay = 30;
dc->writeback_rate.rate = 1024;
- dc->writeback_rate_update_seconds = 30;
- dc->writeback_rate_d_term = 16;
- dc->writeback_rate_p_term_inverse = 64;
- dc->writeback_rate_d_smooth = 8;
+ dc->writeback_rate_update_seconds = 5;
+ dc->writeback_rate_d_term = 30;
+ dc->writeback_rate_p_term_inverse = 6000;
dc->writeback_thread = kthread_create(bch_writeback_thread, dc,
"bcache_writeback");
if (IS_ERR(dc->writeback_thread))
return PTR_ERR(dc->writeback_thread);
- set_task_state(dc->writeback_thread, TASK_INTERRUPTIBLE);
-
INIT_DELAYED_WORK(&dc->writeback_rate_update, update_writeback_rate);
schedule_delayed_work(&dc->writeback_rate_update,
dc->writeback_rate_update_seconds * HZ);
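
The controller rework above replaces the error-based update with an explicit proportional term, (dirty - target), plus a smoothed derivative term, both scaled into a rate change in sectors per second. A simplified userspace sketch under the new default tunables (5 s updates, d_term 30, p_term_inverse 6000); the "device not keeping up" throttle and the upper clamp are omitted, and ewma() is a plain moving average standing in for the kernel's ewma_add():

#include <stdint.h>
#include <stdio.h>

struct wb {
        int64_t rate;                   /* writeback rate, sectors per second */
        int64_t sectors_dirty_last;
        int64_t dirty_derivative;       /* smoothed d(dirty)/dt, sectors/sec */
        int64_t update_seconds;         /* new default: 5 */
        int64_t d_term;                 /* new default: 30 */
        int64_t p_term_inverse;         /* new default: 6000 */
};

/* Plain exponential moving average, approximating the kernel helper. */
static int64_t ewma(int64_t *avg, int64_t val, int64_t weight)
{
        *avg = (*avg * (weight - 1) + val) / weight;
        return *avg;
}

static void update_rate(struct wb *dc, int64_t dirty, int64_t target)
{
        int64_t proportional = dirty - target;
        int64_t derivative = dirty - dc->sectors_dirty_last;
        int64_t weight = dc->d_term / dc->update_seconds;
        int64_t change;

        dc->sectors_dirty_last = dirty;

        /* Scale both terms to a rate change in sectors per second. */
        proportional = proportional * dc->update_seconds / dc->p_term_inverse;

        derivative /= dc->update_seconds;
        derivative = ewma(&dc->dirty_derivative, derivative, weight ? weight : 1);
        derivative = derivative * dc->d_term / dc->p_term_inverse;

        change = proportional + derivative;

        dc->rate += change;
        if (dc->rate < 1)               /* never let writeback stall completely */
                dc->rate = 1;
}

int main(void)
{
        struct wb dc = { .rate = 1024, .update_seconds = 5,
                         .d_term = 30, .p_term_inverse = 6000 };
        int i;

        /* 1 GiB dirty (512-byte sectors) against a 512 MiB target. */
        for (i = 0; i < 5; i++) {
                update_rate(&dc, 2 * 1024 * 1024, 1024 * 1024);
                printf("tick %d: rate = %lld sectors/s\n", i, (long long)dc.rate);
        }
        return 0;
}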
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb2..54bdd923 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1717,6 +1717,11 @@ static int __init dm_bufio_init(void)
{
__u64 mem;
+ dm_bufio_allocated_kmem_cache = 0;
+ dm_bufio_allocated_get_free_pages = 0;
+ dm_bufio_allocated_vmalloc = 0;
+ dm_bufio_current_allocated = 0;
+
memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b7..64780ad 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -730,15 +730,18 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
int r = 0;
bool updated = updated_this_tick(mq, e);
- requeue_and_update_tick(mq, e);
-
if ((!discarded_oblock && updated) ||
- !should_promote(mq, e, discarded_oblock, data_dir))
+ !should_promote(mq, e, discarded_oblock, data_dir)) {
+ requeue_and_update_tick(mq, e);
result->op = POLICY_MISS;
- else if (!can_migrate)
+
+ } else if (!can_migrate)
r = -EWOULDBLOCK;
- else
+
+ else {
+ requeue_and_update_tick(mq, e);
r = pre_cache_to_cache(mq, e, result);
+ }
return r;
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf10..1b1469e 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2755,7 +2755,7 @@ static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
{
int r;
- r = dm_cache_resize(cache->cmd, cache->cache_size);
+ r = dm_cache_resize(cache->cmd, new_size);
if (r) {
DMERR("could not resize cache metadata");
return r;
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3..2f91d6d 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -20,6 +20,7 @@
struct delay_c {
struct timer_list delay_timer;
struct mutex timer_lock;
+ struct workqueue_struct *kdelayd_wq;
struct work_struct flush_expired_bios;
struct list_head delayed_bios;
atomic_t may_delay;
@@ -45,14 +46,13 @@ struct dm_delay_info {
static DEFINE_MUTEX(delayed_bios_lock);
-static struct workqueue_struct *kdelayd_wq;
static struct kmem_cache *delayed_cache;
static void handle_delayed_timer(unsigned long data)
{
struct delay_c *dc = (struct delay_c *)data;
- queue_work(kdelayd_wq, &dc->flush_expired_bios);
+ queue_work(dc->kdelayd_wq, &dc->flush_expired_bios);
}
static void queue_timeout(struct delay_c *dc, unsigned long expires)
@@ -191,6 +191,12 @@ out:
goto bad_dev_write;
}
+ dc->kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
+ if (!dc->kdelayd_wq) {
+ DMERR("Couldn't start kdelayd");
+ goto bad_queue;
+ }
+
setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc);
INIT_WORK(&dc->flush_expired_bios, flush_expired_bios);
@@ -203,6 +209,8 @@ out:
ti->private = dc;
return 0;
+bad_queue:
+ mempool_destroy(dc->delayed_pool);
bad_dev_write:
if (dc->dev_write)
dm_put_device(ti, dc->dev_write);
@@ -217,7 +225,7 @@ static void delay_dtr(struct dm_target *ti)
{
struct delay_c *dc = ti->private;
- flush_workqueue(kdelayd_wq);
+ destroy_workqueue(dc->kdelayd_wq);
dm_put_device(ti, dc->dev_read);
@@ -350,12 +358,6 @@ static int __init dm_delay_init(void)
{
int r = -ENOMEM;
- kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0);
- if (!kdelayd_wq) {
- DMERR("Couldn't start kdelayd");
- goto bad_queue;
- }
-
delayed_cache = KMEM_CACHE(dm_delay_info, 0);
if (!delayed_cache) {
DMERR("Couldn't create delayed bio cache.");
@@ -373,8 +375,6 @@ static int __init dm_delay_init(void)
bad_register:
kmem_cache_destroy(delayed_cache);
bad_memcache:
- destroy_workqueue(kdelayd_wq);
-bad_queue:
return r;
}
@@ -382,7 +382,6 @@ static void __exit dm_delay_exit(void)
{
dm_unregister_target(&delay_target);
kmem_cache_destroy(delayed_cache);
- destroy_workqueue(kdelayd_wq);
}
/* Module hooks */
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d7..944690b 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -66,6 +66,18 @@ struct dm_snapshot {
atomic_t pending_exceptions_count;
+ /* Protected by "lock" */
+ sector_t exception_start_sequence;
+
+ /* Protected by kcopyd single-threaded callback */
+ sector_t exception_complete_sequence;
+
+ /*
+ * A list of pending exceptions that completed out of order.
+ * Protected by kcopyd single-threaded callback.
+ */
+ struct list_head out_of_order_list;
+
mempool_t *pending_pool;
struct dm_exception_table pending;
@@ -173,6 +185,14 @@ struct dm_snap_pending_exception {
*/
int started;
+ /* There was copying error. */
+ int copy_error;
+
+ /* A sequence number, it is used for in-order completion. */
+ sector_t exception_sequence;
+
+ struct list_head out_of_order_entry;
+
/*
* For writing a complete chunk, bypassing the copy.
*/
@@ -1094,6 +1114,9 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
s->valid = 1;
s->active = 0;
atomic_set(&s->pending_exceptions_count, 0);
+ s->exception_start_sequence = 0;
+ s->exception_complete_sequence = 0;
+ INIT_LIST_HEAD(&s->out_of_order_list);
init_rwsem(&s->lock);
INIT_LIST_HEAD(&s->list);
spin_lock_init(&s->pe_lock);
@@ -1443,6 +1466,19 @@ static void commit_callback(void *context, int success)
pending_complete(pe, success);
}
+static void complete_exception(struct dm_snap_pending_exception *pe)
+{
+ struct dm_snapshot *s = pe->snap;
+
+ if (unlikely(pe->copy_error))
+ pending_complete(pe, 0);
+
+ else
+ /* Update the metadata if we are persistent */
+ s->store->type->commit_exception(s->store, &pe->e,
+ commit_callback, pe);
+}
+
/*
* Called when the copy I/O has finished. kcopyd actually runs
* this code so don't block.
@@ -1452,13 +1488,32 @@ static void copy_callback(int read_err, unsigned long write_err, void *context)
struct dm_snap_pending_exception *pe = context;
struct dm_snapshot *s = pe->snap;
- if (read_err || write_err)
- pending_complete(pe, 0);
+ pe->copy_error = read_err || write_err;
- else
- /* Update the metadata if we are persistent */
- s->store->type->commit_exception(s->store, &pe->e,
- commit_callback, pe);
+ if (pe->exception_sequence == s->exception_complete_sequence) {
+ s->exception_complete_sequence++;
+ complete_exception(pe);
+
+ while (!list_empty(&s->out_of_order_list)) {
+ pe = list_entry(s->out_of_order_list.next,
+ struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe->exception_sequence != s->exception_complete_sequence)
+ break;
+ s->exception_complete_sequence++;
+ list_del(&pe->out_of_order_entry);
+ complete_exception(pe);
+ }
+ } else {
+ struct list_head *lh;
+ struct dm_snap_pending_exception *pe2;
+
+ list_for_each_prev(lh, &s->out_of_order_list) {
+ pe2 = list_entry(lh, struct dm_snap_pending_exception, out_of_order_entry);
+ if (pe2->exception_sequence < pe->exception_sequence)
+ break;
+ }
+ list_add(&pe->out_of_order_entry, lh);
+ }
}
/*
@@ -1553,6 +1608,8 @@ __find_pending_exception(struct dm_snapshot *s,
return NULL;
}
+ pe->exception_sequence = s->exception_start_sequence++;
+
dm_insert_exception(&s->pending, &pe->e);
return pe;
@@ -2192,7 +2249,7 @@ static struct target_type origin_target = {
static struct target_type snapshot_target = {
.name = "snapshot",
- .version = {1, 11, 1},
+ .version = {1, 12, 0},
.module = THIS_MODULE,
.ctr = snapshot_ctr,
.dtr = snapshot_dtr,
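
The snapshot change above gives every pending exception a sequence number at submission and completes them strictly in that order, parking early completions on a sorted out-of-order list. A userspace sketch of that scheme, using a simple linked list in place of the kernel's list_head machinery:

#include <stdio.h>
#include <stdlib.h>

struct pending {
        unsigned long seq;
        struct pending *next;   /* singly linked, sorted by seq */
};

static unsigned long complete_seq;      /* next sequence we may complete */
static struct pending *out_of_order;

static void do_complete(unsigned long seq)
{
        printf("completing exception %lu\n", seq);
}

static void completion_arrived(unsigned long seq)
{
        if (seq == complete_seq) {
                do_complete(complete_seq++);

                /* Drain any parked completions that are now in order. */
                while (out_of_order && out_of_order->seq == complete_seq) {
                        struct pending *p = out_of_order;

                        out_of_order = p->next;
                        do_complete(complete_seq++);
                        free(p);
                }
        } else {
                /* Park it, keeping the list sorted by sequence number. */
                struct pending **pp = &out_of_order;
                struct pending *p = malloc(sizeof(*p));

                if (!p)
                        return;
                p->seq = seq;
                while (*pp && (*pp)->seq < seq)
                        pp = &(*pp)->next;
                p->next = *pp;
                *pp = p;
        }
}

int main(void)
{
        unsigned long order[] = { 1, 3, 2, 0, 5, 4 };
        size_t i;

        for (i = 0; i < sizeof(order) / sizeof(order[0]); i++)
                completion_arrived(order[i]);
        return 0;
}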
diff --git a/drivers/md/dm-stats.c b/drivers/md/dm-stats.c
index 3d404c1..28a9012 100644
--- a/drivers/md/dm-stats.c
+++ b/drivers/md/dm-stats.c
@@ -964,6 +964,7 @@ int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
int __init dm_statistics_init(void)
{
+ shared_memory_amount = 0;
dm_stat_need_rcu_barrier = 0;
return 0;
}
diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
index 465f08c..3ba6a38 100644
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -200,6 +200,11 @@ int dm_table_create(struct dm_table **result, fmode_t mode,
num_targets = dm_round_up(num_targets, KEYS_PER_NODE);
+ if (!num_targets) {
+ kfree(t);
+ return -ENOMEM;
+ }
+
if (alloc_targets(t, num_targets)) {
kfree(t);
return -ENOMEM;
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 60bce43..8a30ad5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1697,6 +1697,14 @@ void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd)
up_write(&pmd->root_lock);
}
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd)
+{
+ down_write(&pmd->root_lock);
+ pmd->read_only = false;
+ dm_bm_set_read_write(pmd->bm);
+ up_write(&pmd->root_lock);
+}
+
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
dm_sm_threshold_fn fn,
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index 845ebbe..7bcc0e1 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -193,6 +193,7 @@ int dm_pool_resize_metadata_dev(struct dm_pool_metadata *pmd, dm_block_t new_siz
* that nothing is changing.
*/
void dm_pool_metadata_read_only(struct dm_pool_metadata *pmd);
+void dm_pool_metadata_read_write(struct dm_pool_metadata *pmd);
int dm_pool_register_metadata_threshold(struct dm_pool_metadata *pmd,
dm_block_t threshold,
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf51..ee29037 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -640,7 +640,9 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
*/
r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
if (r) {
- DMERR_LIMIT("dm_thin_insert_block() failed");
+ DMERR_LIMIT("%s: dm_thin_insert_block() failed: error = %d",
+ dm_device_name(pool->pool_md), r);
+ set_pool_mode(pool, PM_READ_ONLY);
cell_error(pool, m->cell);
goto out;
}
@@ -881,32 +883,23 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
}
}
-static int commit(struct pool *pool)
-{
- int r;
-
- r = dm_pool_commit_metadata(pool->pmd);
- if (r)
- DMERR_LIMIT("%s: commit failed: error = %d",
- dm_device_name(pool->pool_md), r);
-
- return r;
-}
-
/*
* A non-zero return indicates read_only or fail_io mode.
* Many callers don't care about the return value.
*/
-static int commit_or_fallback(struct pool *pool)
+static int commit(struct pool *pool)
{
int r;
if (get_pool_mode(pool) != PM_WRITE)
return -EINVAL;
- r = commit(pool);
- if (r)
+ r = dm_pool_commit_metadata(pool->pmd);
+ if (r) {
+ DMERR_LIMIT("%s: dm_pool_commit_metadata failed: error = %d",
+ dm_device_name(pool->pool_md), r);
set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
}
@@ -943,7 +936,9 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* Try to commit to see if that will free up some
* more space.
*/
- (void) commit_or_fallback(pool);
+ r = commit(pool);
+ if (r)
+ return r;
r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
if (r)
@@ -957,7 +952,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
* table reload).
*/
if (!free_blocks) {
- DMWARN("%s: no free space available.",
+ DMWARN("%s: no free data space available.",
dm_device_name(pool->pool_md));
spin_lock_irqsave(&pool->lock, flags);
pool->no_free_space = 1;
@@ -967,8 +962,16 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
}
r = dm_pool_alloc_data_block(pool->pmd, result);
- if (r)
+ if (r) {
+ if (r == -ENOSPC &&
+ !dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks) &&
+ !free_blocks) {
+ DMWARN("%s: no free metadata space available.",
+ dm_device_name(pool->pool_md));
+ set_pool_mode(pool, PM_READ_ONLY);
+ }
return r;
+ }
return 0;
}
@@ -1349,7 +1352,7 @@ static void process_deferred_bios(struct pool *pool)
if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
return;
- if (commit_or_fallback(pool)) {
+ if (commit(pool)) {
while ((bio = bio_list_pop(&bios)))
bio_io_error(bio);
return;
@@ -1397,6 +1400,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
case PM_FAIL:
DMERR("%s: switching pool to failure mode",
dm_device_name(pool->pool_md));
+ dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_fail;
pool->process_discard = process_bio_fail;
pool->process_prepared_mapping = process_prepared_mapping_fail;
@@ -1421,6 +1425,7 @@ static void set_pool_mode(struct pool *pool, enum pool_mode mode)
break;
case PM_WRITE:
+ dm_pool_metadata_read_write(pool->pmd);
pool->process_bio = process_bio;
pool->process_discard = process_discard;
pool->process_prepared_mapping = process_prepared_mapping;
@@ -1637,12 +1642,19 @@ static int bind_control_target(struct pool *pool, struct dm_target *ti)
struct pool_c *pt = ti->private;
/*
- * We want to make sure that degraded pools are never upgraded.
+ * We want to make sure that a pool in PM_FAIL mode is never upgraded.
*/
enum pool_mode old_mode = pool->pf.mode;
enum pool_mode new_mode = pt->adjusted_pf.mode;
- if (old_mode > new_mode)
+ /*
+ * If we were in PM_FAIL mode, rollback of metadata failed. We're
+ * not going to recover without a thin_repair. So we never let the
+ * pool move out of the old mode. On the other hand a PM_READ_ONLY
+ * may have been due to a lack of metadata or data space, and may
+ * now work (ie. if the underlying devices have been resized).
+ */
+ if (old_mode == PM_FAIL)
new_mode = old_mode;
pool->ti = ti;
@@ -2266,7 +2278,7 @@ static int pool_preresume(struct dm_target *ti)
return r;
if (need_commit1 || need_commit2)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return 0;
}
@@ -2293,7 +2305,7 @@ static void pool_postsuspend(struct dm_target *ti)
cancel_delayed_work(&pool->waker);
flush_workqueue(pool->wq);
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
}
static int check_arg_count(unsigned argc, unsigned args_required)
@@ -2427,7 +2439,7 @@ static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct
if (r)
return r;
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_reserve_metadata_snap(pool->pmd);
if (r)
@@ -2489,7 +2501,7 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
if (!r)
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
return r;
}
@@ -2544,7 +2556,7 @@ static void pool_status(struct dm_target *ti, status_type_t type,
/* Commit to ensure statistics aren't out-of-date */
if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
- (void) commit_or_fallback(pool);
+ (void) commit(pool);
r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
if (r) {
diff --git a/drivers/md/persistent-data/dm-array.c b/drivers/md/persistent-data/dm-array.c
index af96e24..1d75b1d 100644
--- a/drivers/md/persistent-data/dm-array.c
+++ b/drivers/md/persistent-data/dm-array.c
@@ -317,8 +317,16 @@ static int shadow_ablock(struct dm_array_info *info, dm_block_t *root,
* The shadow op will often be a noop. Only insert if it really
* copied data.
*/
- if (dm_block_location(*block) != b)
+ if (dm_block_location(*block) != b) {
+ /*
+ * dm_tm_shadow_block will have already decremented the old
+ * block, but it is still referenced by the btree. We
+ * increment to stop the insert decrementing it below zero
+ * when overwriting the old value.
+ */
+ dm_tm_inc(info->btree_info.tm, b);
r = insert_ablock(info, index, *block, root);
+ }
return r;
}
diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
index a7e8bf2..064a3c2 100644
--- a/drivers/md/persistent-data/dm-block-manager.c
+++ b/drivers/md/persistent-data/dm-block-manager.c
@@ -626,6 +626,12 @@ void dm_bm_set_read_only(struct dm_block_manager *bm)
}
EXPORT_SYMBOL_GPL(dm_bm_set_read_only);
+void dm_bm_set_read_write(struct dm_block_manager *bm)
+{
+ bm->read_only = false;
+}
+EXPORT_SYMBOL_GPL(dm_bm_set_read_write);
+
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor)
{
return crc32c(~(u32) 0, data, len) ^ init_xor;
diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
index 9a82083..13cd58e1 100644
--- a/drivers/md/persistent-data/dm-block-manager.h
+++ b/drivers/md/persistent-data/dm-block-manager.h
@@ -108,9 +108,9 @@ int dm_bm_unlock(struct dm_block *b);
int dm_bm_flush_and_unlock(struct dm_block_manager *bm,
struct dm_block *superblock);
- /*
- * Request data be prefetched into the cache.
- */
+/*
+ * Request data is prefetched into the cache.
+ */
void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
/*
@@ -125,6 +125,7 @@ void dm_bm_prefetch(struct dm_block_manager *bm, dm_block_t b);
* be returned if you do.
*/
void dm_bm_set_read_only(struct dm_block_manager *bm);
+void dm_bm_set_read_write(struct dm_block_manager *bm);
u32 dm_bm_checksum(const void *data, size_t len, u32 init_xor);
diff --git a/drivers/md/persistent-data/dm-space-map-common.c b/drivers/md/persistent-data/dm-space-map-common.c
index 6058569..466a60b 100644
--- a/drivers/md/persistent-data/dm-space-map-common.c
+++ b/drivers/md/persistent-data/dm-space-map-common.c
@@ -381,7 +381,7 @@ int sm_ll_find_free_block(struct ll_disk *ll, dm_block_t begin,
}
static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
- uint32_t (*mutator)(void *context, uint32_t old),
+ int (*mutator)(void *context, uint32_t old, uint32_t *new),
void *context, enum allocation_event *ev)
{
int r;
@@ -410,11 +410,17 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
if (old > 2) {
r = sm_ll_lookup_big_ref_count(ll, b, &old);
- if (r < 0)
+ if (r < 0) {
+ dm_tm_unlock(ll->tm, nb);
return r;
+ }
}
- ref_count = mutator(context, old);
+ r = mutator(context, old, &ref_count);
+ if (r) {
+ dm_tm_unlock(ll->tm, nb);
+ return r;
+ }
if (ref_count <= 2) {
sm_set_bitmap(bm_le, bit, ref_count);
@@ -465,9 +471,10 @@ static int sm_ll_mutate(struct ll_disk *ll, dm_block_t b,
return ll->save_ie(ll, index, &ie_disk);
}
-static uint32_t set_ref_count(void *context, uint32_t old)
+static int set_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return *((uint32_t *) context);
+ *new = *((uint32_t *) context);
+ return 0;
}
int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
@@ -476,9 +483,10 @@ int sm_ll_insert(struct ll_disk *ll, dm_block_t b,
return sm_ll_mutate(ll, b, set_ref_count, &ref_count, ev);
}
-static uint32_t inc_ref_count(void *context, uint32_t old)
+static int inc_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old + 1;
+ *new = old + 1;
+ return 0;
}
int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
@@ -486,9 +494,15 @@ int sm_ll_inc(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
return sm_ll_mutate(ll, b, inc_ref_count, NULL, ev);
}
-static uint32_t dec_ref_count(void *context, uint32_t old)
+static int dec_ref_count(void *context, uint32_t old, uint32_t *new)
{
- return old - 1;
+ if (!old) {
+ DMERR_LIMIT("unable to decrement a reference count below 0");
+ return -EINVAL;
+ }
+
+ *new = old - 1;
+ return 0;
}
int sm_ll_dec(struct ll_disk *ll, dm_block_t b, enum allocation_event *ev)
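
The space-map hunks change the mutator callback from returning the new count to returning an errno and writing the count through an out-parameter, which is what lets dec_ref_count() refuse to wrap below zero. A minimal sketch of that callback shape in standard C (the callback name mirrors the driver, but the harness around it is invented):

/* mutator_cb.c - sketch of the error-returning mutator callback pattern
 * used by sm_ll_mutate() above (illustrative harness, not the dm code).
 * Build: cc -o mutator_cb mutator_cb.c
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

typedef int (*mutator_fn)(void *context, uint32_t old, uint32_t *new_val);

static int dec_ref_count(void *context, uint32_t old, uint32_t *new_val)
{
        (void)context;
        if (!old)
                return -EINVAL;          /* refuse to decrement below zero */
        *new_val = old - 1;
        return 0;
}

static int mutate(uint32_t *count, mutator_fn fn, void *context)
{
        uint32_t next;
        int r = fn(context, *count, &next);

        if (r)
                return r;                /* propagate instead of storing junk */
        *count = next;
        return 0;
}

int main(void)
{
        uint32_t count = 1;
        int r;

        r = mutate(&count, dec_ref_count, NULL);
        printf("first dec:  r=%d count=%u\n", r, count);   /* r=0   count=0 */
        r = mutate(&count, dec_ref_count, NULL);
        printf("second dec: r=%d count=%u\n", r, count);   /* r=-22 count=0 */
        return 0;
}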
diff --git a/drivers/md/persistent-data/dm-space-map-metadata.c b/drivers/md/persistent-data/dm-space-map-metadata.c
index 1c95968..58fc1ee 100644
--- a/drivers/md/persistent-data/dm-space-map-metadata.c
+++ b/drivers/md/persistent-data/dm-space-map-metadata.c
@@ -384,12 +384,16 @@ static int sm_metadata_new_block(struct dm_space_map *sm, dm_block_t *b)
struct sm_metadata *smm = container_of(sm, struct sm_metadata, sm);
int r = sm_metadata_new_block_(sm, b);
- if (r)
+ if (r) {
DMERR("unable to allocate new metadata block");
+ return r;
+ }
r = sm_metadata_get_nr_free(sm, &count);
- if (r)
+ if (r) {
DMERR("couldn't get free block count");
+ return r;
+ }
check_threshold(&smm->threshold, count);
diff --git a/drivers/media/common/siano/smscoreapi.h b/drivers/media/common/siano/smscoreapi.h
index d0799e3..9c9063c 100644
--- a/drivers/media/common/siano/smscoreapi.h
+++ b/drivers/media/common/siano/smscoreapi.h
@@ -955,7 +955,7 @@ struct sms_rx_stats {
u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
s32 SNR; /* dB */
u32 ber; /* Post Viterbi ber [1E-5] */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
u32 ber_bit_count; /* Total number of SYNC bits. */
u32 ts_per; /* Transport stream PER,
0xFFFFFFFF indicate N/A */
@@ -981,7 +981,7 @@ struct sms_rx_stats_ex {
u32 modem_state; /* from SMSHOSTLIB_DVB_MODEM_STATE_ET */
s32 SNR; /* dB */
u32 ber; /* Post Viterbi ber [1E-5] */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
u32 ber_bit_count; /* Total number of SYNC bits. */
u32 ts_per; /* Transport stream PER,
0xFFFFFFFF indicate N/A */
diff --git a/drivers/media/common/siano/smsdvb.h b/drivers/media/common/siano/smsdvb.h
index 92c413b..ae36d0a 100644
--- a/drivers/media/common/siano/smsdvb.h
+++ b/drivers/media/common/siano/smsdvb.h
@@ -95,7 +95,7 @@ struct RECEPTION_STATISTICS_PER_SLICES_S {
u32 is_demod_locked; /* 0 - not locked, 1 - locked */
u32 ber_bit_count; /* Total number of SYNC bits. */
- u32 ber_error_count; /* Number of erronous SYNC bits. */
+ u32 ber_error_count; /* Number of erroneous SYNC bits. */
s32 MRC_SNR; /* dB */
s32 mrc_in_band_pwr; /* In band power in dBM */
diff --git a/drivers/media/dvb-core/dvb_demux.c b/drivers/media/dvb-core/dvb_demux.c
index 58de441..6c7ff0c 100644
--- a/drivers/media/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb-core/dvb_demux.c
@@ -435,7 +435,7 @@ static void dvb_dmx_swfilter_packet(struct dvb_demux *demux, const u8 *buf)
dprintk_tscheck("TEI detected. "
"PID=0x%x data1=0x%x\n",
pid, buf[1]);
- /* data in this packet cant be trusted - drop it unless
+ /* data in this packet can't be trusted - drop it unless
* module option dvb_demux_feed_err_pkts is set */
if (!dvb_demux_feed_err_pkts)
return;
@@ -1032,8 +1032,13 @@ static int dmx_section_feed_release_filter(struct dmx_section_feed *feed,
return -EINVAL;
}
- if (feed->is_filtering)
+ if (feed->is_filtering) {
+ /* release dvbdmx->mutex as far as it is
+ acquired by stop_filtering() itself */
+ mutex_unlock(&dvbdmx->mutex);
feed->stop_filtering(feed);
+ mutex_lock(&dvbdmx->mutex);
+ }
spin_lock_irq(&dvbdmx->lock);
f = dvbdmxfeed->filter;
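
The new comment states that stop_filtering() takes dvbdmx->mutex itself, so the caller has to drop the lock around that call or it would self-deadlock on a non-recursive mutex. A small pthreads model of the same pattern (function and lock names here are illustrative, not the dvb-core API):

/* relock_callback.c - dropping a lock around a callback that re-acquires
 * the same lock (models the dvb_demux hunk above; illustrative only).
 * Build: cc -pthread -o relock relock_callback.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demux_lock = PTHREAD_MUTEX_INITIALIZER;

/* Like stop_filtering() in the driver, this helper locks internally. */
static void stop_filtering(void)
{
        pthread_mutex_lock(&demux_lock);
        puts("filter stopped");
        pthread_mutex_unlock(&demux_lock);
}

static void release_filter(void)
{
        pthread_mutex_lock(&demux_lock);

        /* A default (non-recursive) mutex would deadlock if we called
         * stop_filtering() while still holding it, so drop it first. */
        pthread_mutex_unlock(&demux_lock);
        stop_filtering();
        pthread_mutex_lock(&demux_lock);

        /* ...remaining teardown under the lock... */
        pthread_mutex_unlock(&demux_lock);
}

int main(void)
{
        release_filter();
        return 0;
}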
diff --git a/drivers/media/dvb-frontends/af9033.c b/drivers/media/dvb-frontends/af9033.c
index 30ee590..65728c2 100644
--- a/drivers/media/dvb-frontends/af9033.c
+++ b/drivers/media/dvb-frontends/af9033.c
@@ -170,18 +170,18 @@ static int af9033_rd_reg_mask(struct af9033_state *state, u32 reg, u8 *val,
static int af9033_wr_reg_val_tab(struct af9033_state *state,
const struct reg_val *tab, int tab_len)
{
+#define MAX_TAB_LEN 212
int ret, i, j;
- u8 buf[MAX_XFER_SIZE];
+ u8 buf[1 + MAX_TAB_LEN];
+
+ dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
if (tab_len > sizeof(buf)) {
- dev_warn(&state->i2c->dev,
- "%s: i2c wr len=%d is too big!\n",
- KBUILD_MODNAME, tab_len);
+ dev_warn(&state->i2c->dev, "%s: tab len %d is too big\n",
+ KBUILD_MODNAME, tab_len);
return -EINVAL;
}
- dev_dbg(&state->i2c->dev, "%s: tab_len=%d\n", __func__, tab_len);
-
for (i = 0, j = 0; i < tab_len; i++) {
buf[j] = tab[i].val;
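
The af9033 change sizes the on-stack write buffer from a per-driver maximum and rejects oversized tables before anything is copied. The same bounds-check idiom in plain C (buffer layout and the limit value are illustrative, not the exact driver check):

/* bounds_check.c - validate a caller-supplied length against a fixed
 * on-stack buffer before copying into it (sketch of the af9033 idiom).
 * Build: cc -o bounds_check bounds_check.c
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_TAB_LEN 212                  /* largest table we accept */

static int write_reg_table(const uint8_t *tab, size_t tab_len)
{
        uint8_t buf[1 + MAX_TAB_LEN];    /* 1 leading byte + the values */

        if (tab_len > MAX_TAB_LEN) {
                fprintf(stderr, "tab len %zu is too big\n", tab_len);
                return -EINVAL;          /* never overrun the stack buffer */
        }

        buf[0] = 0x00;                   /* e.g. a start register/opcode */
        memcpy(&buf[1], tab, tab_len);
        /* ...hand (buf, 1 + tab_len) to the bus write here... */
        return 0;
}

int main(void)
{
        uint8_t tab[300] = { 0 };

        printf("%d\n", write_reg_table(tab, 200));   /* 0   */
        printf("%d\n", write_reg_table(tab, 300));   /* -22 */
        return 0;
}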
diff --git a/drivers/media/dvb-frontends/cxd2820r_c.c b/drivers/media/dvb-frontends/cxd2820r_c.c
index 125a440..5c6ab49 100644
--- a/drivers/media/dvb-frontends/cxd2820r_c.c
+++ b/drivers/media/dvb-frontends/cxd2820r_c.c
@@ -78,7 +78,7 @@ int cxd2820r_set_frontend_c(struct dvb_frontend *fe)
num = if_freq / 1000; /* Hz => kHz */
num *= 0x4000;
- if_ctl = cxd2820r_div_u64_round_closest(num, 41000);
+ if_ctl = 0x4000 - cxd2820r_div_u64_round_closest(num, 41000);
buf[0] = (if_ctl >> 8) & 0x3f;
buf[1] = (if_ctl >> 0) & 0xff;
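
The cxd2820r fix inverts the IF control word: the value written is now 0x4000 minus the scaled IF frequency rather than the scaled value itself. A worked example with an assumed 4.5 MHz IF (the number is illustrative, not a claim about any particular tuner):

/* if_ctl_calc.c - worked arithmetic for the inverted IF control word.
 * Build: cc -o if_ctl if_ctl_calc.c
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t if_freq = 4500000;                  /* 4.5 MHz, example value */
        uint64_t num = (if_freq / 1000) * 0x4000;    /* Hz -> kHz, then scale  */
        uint32_t if_ctl = 0x4000 - (num + 41000 / 2) / 41000;  /* round nearest */

        /* 4500 * 0x4000 = 73728000; /41000 ~= 1798; 0x4000 - 1798 = 14586 */
        printf("if_ctl = %u (0x%04x), bytes %02x %02x\n",
               if_ctl, if_ctl, (if_ctl >> 8) & 0x3f, if_ctl & 0xff);
        return 0;
}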
diff --git a/drivers/media/dvb-frontends/dib8000.c b/drivers/media/dvb-frontends/dib8000.c
index 9053614..6dbbee4 100644
--- a/drivers/media/dvb-frontends/dib8000.c
+++ b/drivers/media/dvb-frontends/dib8000.c
@@ -3048,7 +3048,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
dib8000_set_diversity_in(state->fe[0], state->diversity_onoff);
locks = (dib8000_read_word(state, 180) >> 6) & 0x3f; /* P_coff_winlen ? */
- /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this lenght to lock */
+ /* coff should lock over P_coff_winlen ofdm symbols : give 3 times this length to lock */
*timeout = dib8000_get_timeout(state, 2 * locks, SYMBOL_DEPENDENT_ON);
*tune_state = CT_DEMOD_STEP_5;
break;
@@ -3115,7 +3115,7 @@ static int dib8000_tune(struct dvb_frontend *fe)
case CT_DEMOD_STEP_9: /* 39 */
if ((state->revision == 0x8090) || ((dib8000_read_word(state, 1291) >> 9) & 0x1)) { /* fe capable of deinterleaving : esram */
- /* defines timeout for mpeg lock depending on interleaver lenght of longest layer */
+ /* defines timeout for mpeg lock depending on interleaver length of longest layer */
for (i = 0; i < 3; i++) {
if (c->layer[i].interleaving >= deeper_interleaver) {
dprintk("layer%i: time interleaver = %d ", i, c->layer[i].interleaving);
diff --git a/drivers/media/dvb-frontends/drxk_hard.c b/drivers/media/dvb-frontends/drxk_hard.c
index d416c15..bf29a3f 100644
--- a/drivers/media/dvb-frontends/drxk_hard.c
+++ b/drivers/media/dvb-frontends/drxk_hard.c
@@ -1191,7 +1191,7 @@ static int mpegts_configure_pins(struct drxk_state *state, bool mpeg_enable)
goto error;
if (state->m_enable_parallel == true) {
- /* paralel -> enable MD1 to MD7 */
+ /* parallel -> enable MD1 to MD7 */
status = write16(state, SIO_PDR_MD1_CFG__A,
sio_pdr_mdx_cfg);
if (status < 0)
@@ -1428,7 +1428,7 @@ static int mpegts_stop(struct drxk_state *state)
dprintk(1, "\n");
- /* Gracefull shutdown (byte boundaries) */
+ /* Graceful shutdown (byte boundaries) */
status = read16(state, FEC_OC_SNC_MODE__A, &fec_oc_snc_mode);
if (status < 0)
goto error;
@@ -2021,7 +2021,7 @@ static int mpegts_dto_setup(struct drxk_state *state,
fec_oc_dto_burst_len = 204;
}
- /* Check serial or parrallel output */
+ /* Check serial or parallel output */
fec_oc_reg_ipr_mode &= (~(FEC_OC_IPR_MODE_SERIAL__M));
if (state->m_enable_parallel == false) {
/* MPEG data output is serial -> set ipr_mode[0] */
@@ -2908,7 +2908,7 @@ static int adc_synchronization(struct drxk_state *state)
goto error;
if (count == 1) {
- /* Try sampling on a diffrent edge */
+ /* Try sampling on a different edge */
u16 clk_neg = 0;
status = read16(state, IQM_AF_CLKNEG__A, &clk_neg);
@@ -3306,7 +3306,7 @@ static int dvbt_sc_command(struct drxk_state *state,
if (status < 0)
goto error;
- /* Retreive results parameters from SC */
+ /* Retrieve results parameters from SC */
switch (cmd) {
/* All commands yielding 5 results */
/* All commands yielding 4 results */
@@ -3849,7 +3849,7 @@ static int set_dvbt(struct drxk_state *state, u16 intermediate_freqk_hz,
break;
}
#if 0
- /* No hierachical channels support in BDA */
+ /* No hierarchical channels support in BDA */
/* Priority (only for hierarchical channels) */
switch (channel->priority) {
case DRX_PRIORITY_LOW:
@@ -4081,7 +4081,7 @@ error:
/*============================================================================*/
/**
-* \brief Retreive lock status .
+* \brief Retrieve lock status .
* \param demod Pointer to demodulator instance.
* \param lockStat Pointer to lock status structure.
* \return DRXStatus_t.
@@ -6174,7 +6174,7 @@ static int init_drxk(struct drxk_state *state)
goto error;
/* Stamp driver version number in SCU data RAM in BCD code
- Done to enable field application engineers to retreive drxdriver version
+ Done to enable field application engineers to retrieve drxdriver version
via I2C from SCU RAM.
Not using SCU command interface for SCU register access since no
microcode may be present.
@@ -6399,7 +6399,7 @@ static int drxk_set_parameters(struct dvb_frontend *fe)
fe->ops.tuner_ops.get_if_frequency(fe, &IF);
start(state, 0, IF);
- /* After set_frontend, stats aren't avaliable */
+ /* After set_frontend, stats aren't available */
p->strength.stat[0].scale = FE_SCALE_RELATIVE;
p->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
p->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
diff --git a/drivers/media/dvb-frontends/rtl2830.c b/drivers/media/dvb-frontends/rtl2830.c
index 7efb796..50e8b63 100644
--- a/drivers/media/dvb-frontends/rtl2830.c
+++ b/drivers/media/dvb-frontends/rtl2830.c
@@ -710,6 +710,7 @@ struct dvb_frontend *rtl2830_attach(const struct rtl2830_config *cfg,
sizeof(priv->tuner_i2c_adapter.name));
priv->tuner_i2c_adapter.algo = &rtl2830_tuner_i2c_algo;
priv->tuner_i2c_adapter.algo_data = NULL;
+ priv->tuner_i2c_adapter.dev.parent = &i2c->dev;
i2c_set_adapdata(&priv->tuner_i2c_adapter, priv);
if (i2c_add_adapter(&priv->tuner_i2c_adapter) < 0) {
dev_err(&i2c->dev,
diff --git a/drivers/media/i2c/adv7183_regs.h b/drivers/media/i2c/adv7183_regs.h
index 4a5b7d2..b253d40 100644
--- a/drivers/media/i2c/adv7183_regs.h
+++ b/drivers/media/i2c/adv7183_regs.h
@@ -52,9 +52,9 @@
#define ADV7183_VS_FIELD_CTRL_1 0x31 /* Vsync field control 1 */
#define ADV7183_VS_FIELD_CTRL_2 0x32 /* Vsync field control 2 */
#define ADV7183_VS_FIELD_CTRL_3 0x33 /* Vsync field control 3 */
-#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync positon control 1 */
-#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync positon control 2 */
-#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync positon control 3 */
+#define ADV7183_HS_POS_CTRL_1 0x34 /* Hsync position control 1 */
+#define ADV7183_HS_POS_CTRL_2 0x35 /* Hsync position control 2 */
+#define ADV7183_HS_POS_CTRL_3 0x36 /* Hsync position control 3 */
#define ADV7183_POLARITY 0x37 /* Polarity */
#define ADV7183_NTSC_COMB_CTRL 0x38 /* NTSC comb control */
#define ADV7183_PAL_COMB_CTRL 0x39 /* PAL comb control */
diff --git a/drivers/media/i2c/adv7604.c b/drivers/media/i2c/adv7604.c
index fbfdd2f..a324106b 100644
--- a/drivers/media/i2c/adv7604.c
+++ b/drivers/media/i2c/adv7604.c
@@ -877,7 +877,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
break;
case ADV7604_MODE_HDMI:
/* set default prim_mode/vid_std for HDMI
- accoring to [REF_03, c. 4.2] */
+ according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
break;
diff --git a/drivers/media/i2c/adv7842.c b/drivers/media/i2c/adv7842.c
index 22f729d..b154f36 100644
--- a/drivers/media/i2c/adv7842.c
+++ b/drivers/media/i2c/adv7842.c
@@ -1013,7 +1013,7 @@ static void configure_custom_video_timings(struct v4l2_subdev *sd,
break;
case ADV7842_MODE_HDMI:
/* set default prim_mode/vid_std for HDMI
- accoring to [REF_03, c. 4.2] */
+ according to [REF_03, c. 4.2] */
io_write(sd, 0x00, 0x02); /* video std */
io_write(sd, 0x01, 0x06); /* prim mode */
break;
diff --git a/drivers/media/i2c/ir-kbd-i2c.c b/drivers/media/i2c/ir-kbd-i2c.c
index 82bf567..99ee456 100644
--- a/drivers/media/i2c/ir-kbd-i2c.c
+++ b/drivers/media/i2c/ir-kbd-i2c.c
@@ -394,7 +394,7 @@ static int ir_probe(struct i2c_client *client, const struct i2c_device_id *id)
if (!rc) {
/*
- * If platform_data doesn't specify rc_dev, initilize it
+ * If platform_data doesn't specify rc_dev, initialize it
* internally
*/
rc = rc_allocate_device();
diff --git a/drivers/media/i2c/m5mols/m5mols_controls.c b/drivers/media/i2c/m5mols/m5mols_controls.c
index f34429e..a60931e 100644
--- a/drivers/media/i2c/m5mols/m5mols_controls.c
+++ b/drivers/media/i2c/m5mols/m5mols_controls.c
@@ -544,7 +544,7 @@ int m5mols_init_controls(struct v4l2_subdev *sd)
u16 zoom_step;
int ret;
- /* Determine the firmware dependant control range and step values */
+ /* Determine the firmware dependent control range and step values */
ret = m5mols_read_u16(sd, AE_MAX_GAIN_MON, &exposure_max);
if (ret < 0)
return ret;
diff --git a/drivers/media/i2c/mt9p031.c b/drivers/media/i2c/mt9p031.c
index 4734836..1c2303d 100644
--- a/drivers/media/i2c/mt9p031.c
+++ b/drivers/media/i2c/mt9p031.c
@@ -19,6 +19,7 @@
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/of_gpio.h>
#include <linux/pm.h>
#include <linux/regulator/consumer.h>
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3-core.c b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
index 6fec938..e7f555c 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3-core.c
+++ b/drivers/media/i2c/s5c73m3/s5c73m3-core.c
@@ -1460,7 +1460,7 @@ static int s5c73m3_oif_registered(struct v4l2_subdev *sd)
mutex_unlock(&state->lock);
v4l2_dbg(1, s5c73m3_dbg, sd, "%s: Booting %s (%d)\n",
- __func__, ret ? "failed" : "succeded", ret);
+ __func__, ret ? "failed" : "succeeded", ret);
return ret;
}
diff --git a/drivers/media/i2c/s5c73m3/s5c73m3.h b/drivers/media/i2c/s5c73m3/s5c73m3.h
index 9d2c086..9dfa516 100644
--- a/drivers/media/i2c/s5c73m3/s5c73m3.h
+++ b/drivers/media/i2c/s5c73m3/s5c73m3.h
@@ -393,7 +393,7 @@ struct s5c73m3 {
/* External master clock frequency */
u32 mclk_frequency;
- /* Video bus type - MIPI-CSI2/paralell */
+ /* Video bus type - MIPI-CSI2/parallel */
enum v4l2_mbus_type bus_type;
const struct s5c73m3_frame_size *sensor_pix_size[2];
diff --git a/drivers/media/i2c/saa7115.c b/drivers/media/i2c/saa7115.c
index 637d026..afdbcb0 100644
--- a/drivers/media/i2c/saa7115.c
+++ b/drivers/media/i2c/saa7115.c
@@ -1699,7 +1699,7 @@ static void saa711x_write_platform_data(struct saa711x_state *state,
* the analog demod.
* If the tuner is not found, it returns -ENODEV.
* If auto-detection is disabled and the tuner doesn't match what it was
- * requred, it returns -EINVAL and fills 'name'.
+ * required, it returns -EINVAL and fills 'name'.
* If the chip is found, it returns the chip ID and fills 'name'.
*/
static int saa711x_detect_chip(struct i2c_client *client,
diff --git a/drivers/media/i2c/soc_camera/ov5642.c b/drivers/media/i2c/soc_camera/ov5642.c
index 0a5c5d4..d2daa6a 100644
--- a/drivers/media/i2c/soc_camera/ov5642.c
+++ b/drivers/media/i2c/soc_camera/ov5642.c
@@ -642,7 +642,7 @@ static const struct ov5642_datafmt
static int reg_read(struct i2c_client *client, u16 reg, u8 *val)
{
int ret;
- /* We have 16-bit i2c addresses - care for endianess */
+ /* We have 16-bit i2c addresses - care for endianness */
unsigned char data[2] = { reg >> 8, reg & 0xff };
ret = i2c_master_send(client, data, 2);
diff --git a/drivers/media/i2c/ths7303.c b/drivers/media/i2c/ths7303.c
index 42276d9..ed9ae88 100644
--- a/drivers/media/i2c/ths7303.c
+++ b/drivers/media/i2c/ths7303.c
@@ -83,7 +83,8 @@ static int ths7303_write(struct v4l2_subdev *sd, u8 reg, u8 val)
}
/* following function is used to set ths7303 */
-int ths7303_setval(struct v4l2_subdev *sd, enum ths7303_filter_mode mode)
+static int ths7303_setval(struct v4l2_subdev *sd,
+ enum ths7303_filter_mode mode)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct ths7303_state *state = to_state(sd);
diff --git a/drivers/media/i2c/wm8775.c b/drivers/media/i2c/wm8775.c
index 3f584a7..bee7946 100644
--- a/drivers/media/i2c/wm8775.c
+++ b/drivers/media/i2c/wm8775.c
@@ -130,12 +130,10 @@ static int wm8775_s_routing(struct v4l2_subdev *sd,
return -EINVAL;
}
state->input = input;
- if (!v4l2_ctrl_g_ctrl(state->mute))
+ if (v4l2_ctrl_g_ctrl(state->mute))
return 0;
if (!v4l2_ctrl_g_ctrl(state->vol))
return 0;
- if (!v4l2_ctrl_g_ctrl(state->bal))
- return 0;
wm8775_set_audio(sd, 1);
return 0;
}
diff --git a/drivers/media/pci/bt8xx/bttv-driver.c b/drivers/media/pci/bt8xx/bttv-driver.c
index a3b1ee9..92a06fd 100644
--- a/drivers/media/pci/bt8xx/bttv-driver.c
+++ b/drivers/media/pci/bt8xx/bttv-driver.c
@@ -4182,7 +4182,8 @@ static int bttv_probe(struct pci_dev *dev, const struct pci_device_id *pci_id)
}
btv->std = V4L2_STD_PAL;
init_irqreg(btv);
- v4l2_ctrl_handler_setup(hdl);
+ if (!bttv_tvcards[btv->c.type].no_video)
+ v4l2_ctrl_handler_setup(hdl);
if (hdl->error) {
result = hdl->error;
goto fail2;
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index 2767c64..57f4688 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -262,7 +262,7 @@ struct cx18_options {
};
/* per-mdl bit flags */
-#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianess swapped */
+#define CX18_F_M_NEED_SWAP 0 /* mdl buffer data must be endianness swapped */
/* per-stream, s_flags */
#define CX18_F_S_CLAIMED 3 /* this stream is claimed */
diff --git a/drivers/media/pci/cx23885/cx23885-417.c b/drivers/media/pci/cx23885/cx23885-417.c
index e3fc2c7..95666ee 100644
--- a/drivers/media/pci/cx23885/cx23885-417.c
+++ b/drivers/media/pci/cx23885/cx23885-417.c
@@ -427,7 +427,7 @@ int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
cx_write(MC417_RWD, regval);
/* Transition RD to effect read transaction across bus.
- * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
+ * Transition 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
* Should it be 0x9000 -> 0xF000 (also why is RDY being set, its
* input only...)
*/
diff --git a/drivers/media/pci/pluto2/pluto2.c b/drivers/media/pci/pluto2/pluto2.c
index 8164d74..655d6854 100644
--- a/drivers/media/pci/pluto2/pluto2.c
+++ b/drivers/media/pci/pluto2/pluto2.c
@@ -401,7 +401,7 @@ static int pluto_hw_init(struct pluto *pluto)
/* set automatic LED control by FPGA */
pluto_rw(pluto, REG_MISC, MISC_ALED, MISC_ALED);
- /* set data endianess */
+ /* set data endianness */
#ifdef __LITTLE_ENDIAN
pluto_rw(pluto, REG_PIDn(0), PID0_END, PID0_END);
#else
diff --git a/drivers/media/pci/saa7164/saa7164-core.c b/drivers/media/pci/saa7164/saa7164-core.c
index 57ef545..1bf0697 100644
--- a/drivers/media/pci/saa7164/saa7164-core.c
+++ b/drivers/media/pci/saa7164/saa7164-core.c
@@ -1354,9 +1354,11 @@ static int saa7164_initdev(struct pci_dev *pci_dev,
if (fw_debug) {
dev->kthread = kthread_run(saa7164_thread_function, dev,
"saa7164 debug");
- if (!dev->kthread)
+ if (IS_ERR(dev->kthread)) {
+ dev->kthread = NULL;
printk(KERN_ERR "%s() Failed to create "
"debug kernel thread\n", __func__);
+ }
}
} /* != BOARD_UNKNOWN */
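
The saa7164 hunk works because kthread_run() reports failure with an ERR_PTR-encoded errno, never NULL, so the old NULL test could not fire. A userspace model of that convention (the ERR_PTR/IS_ERR helpers are re-implemented here for illustration, not taken from kernel headers):

/* err_ptr.c - toy model of the ERR_PTR/IS_ERR error-pointer convention.
 * Build: cc -o err_ptr err_ptr.c
 */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
        return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Stand-in for kthread_run(): fails with an encoded errno, never NULL. */
static void *fake_thread_create(int fail)
{
        static int thread_object;
        return fail ? ERR_PTR(-ENOMEM) : (void *)&thread_object;
}

int main(void)
{
        void *t = fake_thread_create(1);

        if (IS_ERR(t)) {                 /* the check the driver now uses */
                printf("thread creation failed: %ld\n", PTR_ERR(t));
                t = NULL;                /* don't keep the poisoned pointer */
        }
        if (!t)
                puts("later NULL tests are safe after the fix-up");
        return 0;
}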
diff --git a/drivers/media/platform/coda.c b/drivers/media/platform/coda.c
index bd72fb9..61f3dbc 100644
--- a/drivers/media/platform/coda.c
+++ b/drivers/media/platform/coda.c
@@ -1434,7 +1434,7 @@ static void coda_buf_queue(struct vb2_buffer *vb)
if (q_data->fourcc == V4L2_PIX_FMT_H264 &&
vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
/*
- * For backwards compatiblity, queuing an empty buffer marks
+ * For backwards compatibility, queuing an empty buffer marks
* the stream end
*/
if (vb2_get_plane_payload(vb, 0) == 0)
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index 3d66d88..f791569 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -1039,7 +1039,7 @@ static int fimc_runtime_resume(struct device *dev)
dbg("fimc%d: state: 0x%lx", fimc->id, fimc->state);
- /* Enable clocks and perform basic initalization */
+ /* Enable clocks and perform basic initialization */
clk_enable(fimc->clock[CLK_GATE]);
fimc_hw_reset(fimc);
diff --git a/drivers/media/platform/exynos4-is/media-dev.c b/drivers/media/platform/exynos4-is/media-dev.c
index 7a4ee4c..c1bce17 100644
--- a/drivers/media/platform/exynos4-is/media-dev.c
+++ b/drivers/media/platform/exynos4-is/media-dev.c
@@ -759,7 +759,7 @@ static int fimc_md_register_platform_entity(struct fimc_md *fmd,
goto dev_unlock;
drvdata = dev_get_drvdata(dev);
- /* Some subdev didn't probe succesfully id drvdata is NULL */
+ /* Some subdev didn't probe successfully id drvdata is NULL */
if (drvdata) {
switch (plat_entity) {
case IDX_FIMC:
diff --git a/drivers/media/platform/marvell-ccic/mmp-driver.c b/drivers/media/platform/marvell-ccic/mmp-driver.c
index 3458fa0..054507f 100644
--- a/drivers/media/platform/marvell-ccic/mmp-driver.c
+++ b/drivers/media/platform/marvell-ccic/mmp-driver.c
@@ -142,12 +142,6 @@ static int mmpcam_power_up(struct mcam_camera *mcam)
struct mmp_camera *cam = mcam_to_cam(mcam);
struct mmp_camera_platform_data *pdata;
- if (mcam->bus_type == V4L2_MBUS_CSI2) {
- cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
- if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
- return PTR_ERR(cam->mipi_clk);
- }
-
/*
* Turn on power and clocks to the controller.
*/
@@ -186,12 +180,6 @@ static void mmpcam_power_down(struct mcam_camera *mcam)
gpio_set_value(pdata->sensor_power_gpio, 0);
gpio_set_value(pdata->sensor_reset_gpio, 0);
- if (mcam->bus_type == V4L2_MBUS_CSI2 && !IS_ERR(cam->mipi_clk)) {
- if (cam->mipi_clk)
- devm_clk_put(mcam->dev, cam->mipi_clk);
- cam->mipi_clk = NULL;
- }
-
mcam_clk_disable(mcam);
}
@@ -292,8 +280,9 @@ void mmpcam_calc_dphy(struct mcam_camera *mcam)
return;
/* get the escape clk, this is hard coded */
+ clk_prepare_enable(cam->mipi_clk);
tx_clk_esc = (clk_get_rate(cam->mipi_clk) / 1000000) / 12;
-
+ clk_disable_unprepare(cam->mipi_clk);
/*
* dphy[2] - CSI2_DPHY6:
* bit 0 ~ bit 7: CK Term Enable
@@ -325,19 +314,6 @@ static irqreturn_t mmpcam_irq(int irq, void *data)
return IRQ_RETVAL(handled);
}
-static void mcam_deinit_clk(struct mcam_camera *mcam)
-{
- unsigned int i;
-
- for (i = 0; i < NR_MCAM_CLK; i++) {
- if (!IS_ERR(mcam->clk[i])) {
- if (mcam->clk[i])
- devm_clk_put(mcam->dev, mcam->clk[i]);
- }
- mcam->clk[i] = NULL;
- }
-}
-
static void mcam_init_clk(struct mcam_camera *mcam)
{
unsigned int i;
@@ -371,7 +347,6 @@ static int mmpcam_probe(struct platform_device *pdev)
if (cam == NULL)
return -ENOMEM;
cam->pdev = pdev;
- cam->mipi_clk = NULL;
INIT_LIST_HEAD(&cam->devlist);
mcam = &cam->mcam;
@@ -387,6 +362,11 @@ static int mmpcam_probe(struct platform_device *pdev)
mcam->mclk_div = pdata->mclk_div;
mcam->bus_type = pdata->bus_type;
mcam->dphy = pdata->dphy;
+ if (mcam->bus_type == V4L2_MBUS_CSI2) {
+ cam->mipi_clk = devm_clk_get(mcam->dev, "mipi");
+ if ((IS_ERR(cam->mipi_clk) && mcam->dphy[2] == 0))
+ return PTR_ERR(cam->mipi_clk);
+ }
mcam->mipi_enabled = false;
mcam->lane = pdata->lane;
mcam->chip_id = MCAM_ARMADA610;
@@ -444,7 +424,7 @@ static int mmpcam_probe(struct platform_device *pdev)
*/
ret = mmpcam_power_up(mcam);
if (ret)
- goto out_deinit_clk;
+ return ret;
ret = mccic_register(mcam);
if (ret)
goto out_power_down;
@@ -469,8 +449,6 @@ out_unregister:
mccic_shutdown(mcam);
out_power_down:
mmpcam_power_down(mcam);
-out_deinit_clk:
- mcam_deinit_clk(mcam);
return ret;
}
@@ -478,18 +456,10 @@ out_deinit_clk:
static int mmpcam_remove(struct mmp_camera *cam)
{
struct mcam_camera *mcam = &cam->mcam;
- struct mmp_camera_platform_data *pdata;
mmpcam_remove_device(cam);
mccic_shutdown(mcam);
mmpcam_power_down(mcam);
- pdata = cam->pdev->dev.platform_data;
- gpio_free(pdata->sensor_reset_gpio);
- gpio_free(pdata->sensor_power_gpio);
- mcam_deinit_clk(mcam);
- iounmap(cam->power_regs);
- iounmap(mcam->regs);
- kfree(cam);
return 0;
}
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 1c36080..561bce8 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -1673,7 +1673,7 @@ void omap3isp_print_status(struct isp_device *isp)
* ISP clocks get disabled in suspend(). Similarly, the clocks are reenabled in
* resume(), and the the pipelines are restarted in complete().
*
- * TODO: PM dependencies between the ISP and sensors are not modeled explicitly
+ * TODO: PM dependencies between the ISP and sensors are not modelled explicitly
* yet.
*/
static int isp_pm_prepare(struct device *dev)
diff --git a/drivers/media/platform/omap3isp/ispvideo.c b/drivers/media/platform/omap3isp/ispvideo.c
index a908d00..f6304bb 100644
--- a/drivers/media/platform/omap3isp/ispvideo.c
+++ b/drivers/media/platform/omap3isp/ispvideo.c
@@ -339,14 +339,11 @@ __isp_video_get_format(struct isp_video *video, struct v4l2_format *format)
if (subdev == NULL)
return -EINVAL;
- mutex_lock(&video->mutex);
-
fmt.pad = pad;
fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE;
- ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
- if (ret == -ENOIOCTLCMD)
- ret = -EINVAL;
+ mutex_lock(&video->mutex);
+ ret = v4l2_subdev_call(subdev, pad, get_fmt, NULL, &fmt);
mutex_unlock(&video->mutex);
if (ret)
diff --git a/drivers/media/platform/s5p-mfc/regs-mfc.h b/drivers/media/platform/s5p-mfc/regs-mfc.h
index 9319e93..6ccc3f8 100644
--- a/drivers/media/platform/s5p-mfc/regs-mfc.h
+++ b/drivers/media/platform/s5p-mfc/regs-mfc.h
@@ -382,7 +382,7 @@
#define S5P_FIMV_R2H_CMD_EDFU_INIT_RET 16
#define S5P_FIMV_R2H_CMD_ERR_RET 32
-/* Dummy definition for MFCv6 compatibilty */
+/* Dummy definition for MFCv6 compatibility */
#define S5P_FIMV_CODEC_H264_MVC_DEC -1
#define S5P_FIMV_R2H_CMD_FIELD_DONE_RET -1
#define S5P_FIMV_MFC_RESET -1
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc.c b/drivers/media/platform/s5p-mfc/s5p_mfc.c
index 5f2c4ad..e46067a 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc.c
@@ -239,7 +239,7 @@ static void s5p_mfc_handle_frame_copy_time(struct s5p_mfc_ctx *ctx)
frame_type = s5p_mfc_hw_call(dev->mfc_ops, get_dec_frame_type, dev);
/* Copy timestamp / timecode from decoded src to dst and set
- appropraite flags */
+ appropriate flags */
src_buf = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
list_for_each_entry(dst_buf, &ctx->dst_queue, list) {
if (vb2_dma_contig_plane_dma_addr(dst_buf->b, 0) == dec_y_addr) {
@@ -428,7 +428,7 @@ static void s5p_mfc_handle_error(struct s5p_mfc_dev *dev,
case MFCINST_FINISHING:
case MFCINST_FINISHED:
case MFCINST_RUNNING:
- /* It is higly probable that an error occured
+ /* It is highly probable that an error occurred
* while decoding a frame */
clear_work_bit(ctx);
ctx->state = MFCINST_ERROR;
@@ -611,7 +611,7 @@ static irqreturn_t s5p_mfc_irq(int irq, void *priv)
mfc_debug(1, "Int reason: %d (err: %08x)\n", reason, err);
switch (reason) {
case S5P_MFC_R2H_CMD_ERR_RET:
- /* An error has occured */
+ /* An error has occurred */
if (ctx->state == MFCINST_RUNNING &&
s5p_mfc_hw_call(dev->mfc_ops, err_dec, err) >=
dev->warn_start)
@@ -840,7 +840,7 @@ static int s5p_mfc_open(struct file *file)
mutex_unlock(&dev->mfc_mutex);
mfc_debug_leave();
return ret;
- /* Deinit when failure occured */
+ /* Deinit when failure occurred */
err_queue_init:
if (dev->num_inst == 1)
s5p_mfc_deinit_hw(dev);
@@ -881,14 +881,14 @@ static int s5p_mfc_release(struct file *file)
/* Mark context as idle */
clear_work_bit_irqsave(ctx);
/* If instance was initialised then
- * return instance and free reosurces */
+ * return instance and free resources */
if (ctx->inst_no != MFC_NO_INSTANCE_SET) {
mfc_debug(2, "Has to free instance\n");
ctx->state = MFCINST_RETURN_INST;
set_work_bit_irqsave(ctx);
s5p_mfc_clean_ctx_int_flags(ctx);
s5p_mfc_hw_call(dev->mfc_ops, try_run, dev);
- /* Wait until instance is returned or timeout occured */
+ /* Wait until instance is returned or timeout occurred */
if (s5p_mfc_wait_for_done_ctx
(ctx, S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET, 0)) {
s5p_mfc_clock_off();
diff --git a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
index 7cab684..2475a3c 100644
--- a/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
+++ b/drivers/media/platform/s5p-mfc/s5p_mfc_ctrl.c
@@ -69,7 +69,7 @@ int s5p_mfc_alloc_firmware(struct s5p_mfc_dev *dev)
} else {
/* In this case bank2 can point to the same address as bank1.
- * Firmware will always occupy the beggining of this area so it is
+ * Firmware will always occupy the beginning of this area so it is
* impossible having a video frame buffer with zero address. */
dev->bank2 = dev->bank1;
}
diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
index 04e6490..fb2acc5 100644
--- a/drivers/media/platform/s5p-tv/mixer.h
+++ b/drivers/media/platform/s5p-tv/mixer.h
@@ -65,7 +65,7 @@ struct mxr_format {
int num_subframes;
/** specifies to which subframe belong given plane */
int plane2subframe[MXR_MAX_PLANES];
- /** internal code, driver dependant */
+ /** internal code, driver dependent */
unsigned long cookie;
};
diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
index 641b1f0..81b97db 100644
--- a/drivers/media/platform/s5p-tv/mixer_video.c
+++ b/drivers/media/platform/s5p-tv/mixer_video.c
@@ -528,7 +528,7 @@ static int mxr_s_dv_timings(struct file *file, void *fh,
mutex_lock(&mdev->mutex);
/* timings change cannot be done while there is an entity
- * dependant on output configuration
+ * dependent on output configuration
*/
if (mdev->n_output > 0) {
mutex_unlock(&mdev->mutex);
@@ -585,7 +585,7 @@ static int mxr_s_std(struct file *file, void *fh, v4l2_std_id norm)
mutex_lock(&mdev->mutex);
/* standard change cannot be done while there is an entity
- * dependant on output configuration
+ * dependent on output configuration
*/
if (mdev->n_output > 0) {
mutex_unlock(&mdev->mutex);
diff --git a/drivers/media/platform/soc_camera/omap1_camera.c b/drivers/media/platform/soc_camera/omap1_camera.c
index 6769193..74ce8b6 100644
--- a/drivers/media/platform/soc_camera/omap1_camera.c
+++ b/drivers/media/platform/soc_camera/omap1_camera.c
@@ -1495,7 +1495,7 @@ static int omap1_cam_set_bus_param(struct soc_camera_device *icd)
if (ctrlclock & LCLK_EN)
CAM_WRITE(pcdev, CTRLCLOCK, ctrlclock);
- /* select bus endianess */
+ /* select bus endianness */
xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
fmt = xlate->host_fmt;
diff --git a/drivers/media/platform/vivi.c b/drivers/media/platform/vivi.c
index 1d3f119..2d4e73b 100644
--- a/drivers/media/platform/vivi.c
+++ b/drivers/media/platform/vivi.c
@@ -1108,7 +1108,7 @@ static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
return 0;
}
-/* timeperframe is arbitrary and continous */
+/* timeperframe is arbitrary and continuous */
static int vidioc_enum_frameintervals(struct file *file, void *priv,
struct v4l2_frmivalenum *fival)
{
@@ -1125,7 +1125,7 @@ static int vidioc_enum_frameintervals(struct file *file, void *priv,
fival->type = V4L2_FRMIVAL_TYPE_CONTINUOUS;
- /* fill in stepwise (step=1.0 is requred by V4L2 spec) */
+ /* fill in stepwise (step=1.0 is required by V4L2 spec) */
fival->stepwise.min = tpf_min;
fival->stepwise.max = tpf_max;
fival->stepwise.step = (struct v4l2_fract) {1, 1};
diff --git a/drivers/media/platform/vsp1/vsp1_drv.c b/drivers/media/platform/vsp1/vsp1_drv.c
index 1c9e771..d16bf0f 100644
--- a/drivers/media/platform/vsp1/vsp1_drv.c
+++ b/drivers/media/platform/vsp1/vsp1_drv.c
@@ -323,7 +323,7 @@ static void vsp1_clocks_disable(struct vsp1_device *vsp1)
* Increment the VSP1 reference count and initialize the device if the first
* reference is taken.
*
- * Return a pointer to the VSP1 device or NULL if an error occured.
+ * Return a pointer to the VSP1 device or NULL if an error occurred.
*/
struct vsp1_device *vsp1_device_get(struct vsp1_device *vsp1)
{
diff --git a/drivers/media/platform/vsp1/vsp1_video.c b/drivers/media/platform/vsp1/vsp1_video.c
index 714c53e..4b0ac07 100644
--- a/drivers/media/platform/vsp1/vsp1_video.c
+++ b/drivers/media/platform/vsp1/vsp1_video.c
@@ -1026,8 +1026,10 @@ int vsp1_video_init(struct vsp1_video *video, struct vsp1_entity *rwpf)
/* ... and the buffers queue... */
video->alloc_ctx = vb2_dma_contig_init_ctx(video->vsp1->dev);
- if (IS_ERR(video->alloc_ctx))
+ if (IS_ERR(video->alloc_ctx)) {
+ ret = PTR_ERR(video->alloc_ctx);
goto error;
+ }
video->queue.type = video->type;
video->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
diff --git a/drivers/media/radio/radio-shark.c b/drivers/media/radio/radio-shark.c
index 3db8a8c..050b3bb 100644
--- a/drivers/media/radio/radio-shark.c
+++ b/drivers/media/radio/radio-shark.c
@@ -271,8 +271,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
-#ifdef CONFIG_PM
-static void shark_resume_leds(struct shark_device *shark)
+static inline void shark_resume_leds(struct shark_device *shark)
{
if (test_bit(BLUE_IS_PULSE, &shark->brightness_new))
set_bit(BLUE_PULSE_LED, &shark->brightness_new);
@@ -281,7 +280,6 @@ static void shark_resume_leds(struct shark_device *shark)
set_bit(RED_LED, &shark->brightness_new);
schedule_work(&shark->led_work);
}
-#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
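
Both radio-shark drivers drop the #ifdef CONFIG_PM around shark_resume_leds() and mark it static inline instead; presumably the point is that an unused static inline is silently discarded by the compiler, so no unused-function warning appears when the PM callbacks are compiled out. A tiny illustration (build with and without -DCONFIG_PM; names are illustrative):

/* inline_unused.c - an unused "static inline" helper compiles away
 * without a -Wunused-function warning (illustrative only).
 * Build: cc -Wall -o inline_unused inline_unused.c
 */
#include <stdio.h>

static inline void resume_leds(void)   /* no warning even when unused */
{
        puts("LED state restored");
}

int main(void)
{
#ifdef CONFIG_PM
        resume_leds();
#endif
        return 0;
}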
diff --git a/drivers/media/radio/radio-shark2.c b/drivers/media/radio/radio-shark2.c
index d86d90d..8654e0d 100644
--- a/drivers/media/radio/radio-shark2.c
+++ b/drivers/media/radio/radio-shark2.c
@@ -237,8 +237,7 @@ static void shark_unregister_leds(struct shark_device *shark)
cancel_work_sync(&shark->led_work);
}
-#ifdef CONFIG_PM
-static void shark_resume_leds(struct shark_device *shark)
+static inline void shark_resume_leds(struct shark_device *shark)
{
int i;
@@ -247,7 +246,6 @@ static void shark_resume_leds(struct shark_device *shark)
schedule_work(&shark->led_work);
}
-#endif
#else
static int shark_register_leds(struct shark_device *shark, struct device *dev)
{
diff --git a/drivers/media/radio/radio-si476x.c b/drivers/media/radio/radio-si476x.c
index 9c9084c..2fd9009 100644
--- a/drivers/media/radio/radio-si476x.c
+++ b/drivers/media/radio/radio-si476x.c
@@ -268,8 +268,8 @@ struct si476x_radio;
*
* @tune_freq: Tune chip to a specific frequency
* @seek_start: Star station seeking
- * @rsq_status: Get Recieved Signal Quality(RSQ) status
- * @rds_blckcnt: Get recived RDS blocks count
+ * @rsq_status: Get Received Signal Quality(RSQ) status
+ * @rds_blckcnt: Get received RDS blocks count
* @phase_diversity: Change phase diversity mode of the tuner
* @phase_div_status: Get phase diversity mode status
* @acf_status: Get the status of Automatically Controlled
diff --git a/drivers/media/radio/radio-tea5764.c b/drivers/media/radio/radio-tea5764.c
index 036e2f5..3ed1f56 100644
--- a/drivers/media/radio/radio-tea5764.c
+++ b/drivers/media/radio/radio-tea5764.c
@@ -356,7 +356,7 @@ static int vidioc_s_frequency(struct file *file, void *priv,
So we keep it as-is. */
return -EINVAL;
}
- clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
+ freq = clamp(freq, FREQ_MIN * FREQ_MUL, FREQ_MAX * FREQ_MUL);
tea5764_power_up(radio);
tea5764_tune(radio, (freq * 125) / 2);
return 0;
diff --git a/drivers/media/radio/tef6862.c b/drivers/media/radio/tef6862.c
index 69e3245..a9319a2 100644
--- a/drivers/media/radio/tef6862.c
+++ b/drivers/media/radio/tef6862.c
@@ -112,7 +112,7 @@ static int tef6862_s_frequency(struct v4l2_subdev *sd, const struct v4l2_frequen
if (f->tuner != 0)
return -EINVAL;
- clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
+ freq = clamp(freq, TEF6862_LO_FREQ, TEF6862_HI_FREQ);
pll = 1964 + ((freq - TEF6862_LO_FREQ) * 20) / FREQ_MUL;
i2cmsg[0] = (MSA_MODE_PRESET << MSA_MODE_SHIFT) | WM_SUB_PLLM;
i2cmsg[1] = (pll >> 8) & 0xff;
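
Both radio tuner fixes above are the same pitfall: clamp() evaluates to the clamped value and does not modify its argument, so the result has to be assigned back. A minimal demonstration with a simplified clamp macro (the kernel's version adds type checking, omitted here):

/* clamp_assign.c - clamp() yields a value; calling it for side effects
 * does nothing (models the tea5764/tef6862 fixes; illustrative macro).
 * Build: cc -o clamp_assign clamp_assign.c
 */
#include <stdio.h>

#define clamp(val, lo, hi) \
        ((val) < (lo) ? (lo) : (val) > (hi) ? (hi) : (val))

int main(void)
{
        unsigned int freq = 999999;
        const unsigned int lo = 76000, hi = 108000;

        clamp(freq, lo, hi);             /* wrong: result is discarded      */
        printf("discarded: %u\n", freq); /* still 999999                    */

        freq = clamp(freq, lo, hi);      /* right: assign the clamped value */
        printf("assigned:  %u\n", freq); /* 108000                          */
        return 0;
}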
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index 72e3fa6..f329485 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1370,7 +1370,7 @@ static void imon_pad_to_keys(struct imon_context *ictx, unsigned char *buf)
* 0x68nnnnB7 to 0x6AnnnnB7, the left mouse button generates
* 0x688301b7 and the right one 0x688481b7. All other keys generate
* 0x2nnnnnnn. Position coordinate is encoded in buf[1] and buf[2] with
- * reversed endianess. Extract direction from buffer, rotate endianess,
+ * reversed endianness. Extract direction from buffer, rotate endianness,
* adjust sign and feed the values into stabilize(). The resulting codes
* will be 0x01008000, 0x01007F00, which match the newer devices.
*/
diff --git a/drivers/media/rc/redrat3.c b/drivers/media/rc/redrat3.c
index 094484f..a5d4f88 100644
--- a/drivers/media/rc/redrat3.c
+++ b/drivers/media/rc/redrat3.c
@@ -118,7 +118,7 @@ static int debug;
#define RR3_IR_IO_LENGTH_FUZZ 0x04
/* Timeout for end of signal detection */
#define RR3_IR_IO_SIG_TIMEOUT 0x05
-/* Minumum value for pause recognition. */
+/* Minimum value for pause recognition. */
#define RR3_IR_IO_MIN_PAUSE 0x06
/* Clock freq. of EZ-USB chip */
diff --git a/drivers/media/tuners/mt2063.c b/drivers/media/tuners/mt2063.c
index 2e1a02e..20cca40 100644
--- a/drivers/media/tuners/mt2063.c
+++ b/drivers/media/tuners/mt2063.c
@@ -1195,7 +1195,7 @@ static u32 mt2063_set_dnc_output_enable(struct mt2063_state *state,
* DNC Output is selected, the other is always off)
*
* @state: ptr to mt2063_state structure
- * @Mode: desired reciever delivery system
+ * @Mode: desired receiver delivery system
*
* Note: Register cache must be valid for it to work
*/
@@ -2119,7 +2119,7 @@ static int mt2063_set_analog_params(struct dvb_frontend *fe,
/*
* As defined on EN 300 429, the DVB-C roll-off factor is 0.15.
- * So, the amount of the needed bandwith is given by:
+ * So, the amount of the needed bandwidth is given by:
* Bw = Symbol_rate * (1 + 0.15)
* As such, the maximum symbol rate supported by 6 MHz is given by:
* max_symbol_rate = 6 MHz / 1.15 = 5217391 Bauds
diff --git a/drivers/media/tuners/tuner-xc2028-types.h b/drivers/media/tuners/tuner-xc2028-types.h
index 74dc46a..7e47987 100644
--- a/drivers/media/tuners/tuner-xc2028-types.h
+++ b/drivers/media/tuners/tuner-xc2028-types.h
@@ -119,7 +119,7 @@
#define V4L2_STD_A2 (V4L2_STD_A2_A | V4L2_STD_A2_B)
#define V4L2_STD_NICAM (V4L2_STD_NICAM_A | V4L2_STD_NICAM_B)
-/* To preserve backward compatibilty,
+/* To preserve backward compatibility,
(std & V4L2_STD_AUDIO) = 0 means that ALL audio stds are supported
*/
diff --git a/drivers/media/usb/cx231xx/cx231xx-cards.c b/drivers/media/usb/cx231xx/cx231xx-cards.c
index e9d017b..528cce9 100644
--- a/drivers/media/usb/cx231xx/cx231xx-cards.c
+++ b/drivers/media/usb/cx231xx/cx231xx-cards.c
@@ -1412,8 +1412,8 @@ err_v4l2:
usb_set_intfdata(interface, NULL);
err_if:
usb_put_dev(udev);
- kfree(dev);
clear_bit(dev->devno, &cx231xx_devused);
+ kfree(dev);
return retval;
}
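
The cx231xx error path is reordered because clear_bit() still reads dev->devno; freeing dev first made that a use-after-free. The general rule, sketched below: finish every access that reads an object, then free the object last (the structure and bit helper are simplified stand-ins):

/* free_last.c - release bookkeeping that reads an object before freeing
 * the object itself (models the cx231xx reordering; illustrative only).
 * Build: cc -o free_last free_last.c
 */
#include <stdlib.h>

struct dev { int devno; };

static unsigned long devused;

static void clear_bit_ul(int nr, unsigned long *addr)
{
        *addr &= ~(1UL << nr);
}

int main(void)
{
        struct dev *dev = calloc(1, sizeof(*dev));

        if (!dev)
                return 1;

        dev->devno = 3;
        devused |= 1UL << dev->devno;

        clear_bit_ul(dev->devno, &devused);  /* last read of *dev ...   */
        free(dev);                           /* ... only then free it   */
        return 0;
}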
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index c8fcd78..8f9b2cea 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -131,7 +131,7 @@ static int af9035_wr_regs(struct dvb_usb_device *d, u32 reg, u8 *val, int len)
{
u8 wbuf[MAX_XFER_SIZE];
u8 mbox = (reg >> 16) & 0xff;
- struct usb_req req = { CMD_MEM_WR, mbox, sizeof(wbuf), wbuf, 0, NULL };
+ struct usb_req req = { CMD_MEM_WR, mbox, 6 + len, wbuf, 0, NULL };
if (6 + len > sizeof(wbuf)) {
dev_warn(&d->udev->dev, "%s: i2c wr: len=%d is too big!\n",
@@ -238,14 +238,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else {
/* I2C */
u8 buf[MAX_XFER_SIZE];
- struct usb_req req = { CMD_I2C_RD, 0, sizeof(buf),
+ struct usb_req req = { CMD_I2C_RD, 0, 5 + msg[0].len,
buf, msg[1].len, msg[1].buf };
if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[1].len;
@@ -274,14 +275,15 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
} else {
/* I2C */
u8 buf[MAX_XFER_SIZE];
- struct usb_req req = { CMD_I2C_WR, 0, sizeof(buf), buf,
- 0, NULL };
+ struct usb_req req = { CMD_I2C_WR, 0, 5 + msg[0].len,
+ buf, 0, NULL };
if (5 + msg[0].len > sizeof(buf)) {
dev_warn(&d->udev->dev,
"%s: i2c xfer: len=%d is too big!\n",
KBUILD_MODNAME, msg[0].len);
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto unlock;
}
req.mbox |= ((msg[0].addr & 0x80) >> 3);
buf[0] = msg[0].len;
@@ -319,6 +321,7 @@ static int af9035_i2c_master_xfer(struct i2c_adapter *adap,
ret = -EOPNOTSUPP;
}
+unlock:
mutex_unlock(&d->i2c_mutex);
if (ret < 0)
@@ -1534,6 +1537,8 @@ static const struct usb_device_id af9035_id_table[] = {
/* XXX: that same ID [0ccd:0099] is used by af9015 driver too */
{ DVB_USB_DEVICE(USB_VID_TERRATEC, 0x0099,
&af9035_props, "TerraTec Cinergy T Stick Dual RC (rev. 2)", NULL) },
+ { DVB_USB_DEVICE(USB_VID_LEADTEK, 0x6a05,
+ &af9035_props, "Leadtek WinFast DTV Dongle Dual", NULL) },
{ }
};
MODULE_DEVICE_TABLE(usb, af9035_id_table);
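
In af9035_i2c_master_xfer() the i2c mutex is already held when the length checks run, so the oversized-message paths can no longer return directly; they set ret and jump to the new unlock label. The single-exit unlock idiom in isolation (lock name and limits are illustrative):

/* goto_unlock.c - error paths taken while a lock is held funnel through
 * one unlock label (models the af9035 change; illustrative only).
 * Build: cc -pthread -o goto_unlock goto_unlock.c
 */
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t i2c_mutex = PTHREAD_MUTEX_INITIALIZER;

static int do_transfer(size_t len, size_t bufsize)
{
        int ret = 0;

        pthread_mutex_lock(&i2c_mutex);

        if (len > bufsize) {
                ret = -EOPNOTSUPP;       /* do not "return" here: lock held */
                goto unlock;
        }

        /* ...perform the transfer while holding the lock... */

unlock:
        pthread_mutex_unlock(&i2c_mutex);
        return ret;
}

int main(void)
{
        printf("%d\n", do_transfer(10, 64));   /* 0                          */
        printf("%d\n", do_transfer(90, 64));   /* -EOPNOTSUPP (-95 on Linux) */
        return 0;
}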
diff --git a/drivers/media/usb/dvb-usb-v2/mxl111sf.c b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
index 2627553..08240e4 100644
--- a/drivers/media/usb/dvb-usb-v2/mxl111sf.c
+++ b/drivers/media/usb/dvb-usb-v2/mxl111sf.c
@@ -266,7 +266,7 @@ static int mxl111sf_adap_fe_init(struct dvb_frontend *fe)
struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
int err;
- /* exit if we didnt initialize the driver yet */
+ /* exit if we didn't initialize the driver yet */
if (!state->chip_id) {
mxl_debug("driver not yet initialized, exit.");
goto fail;
@@ -322,7 +322,7 @@ static int mxl111sf_adap_fe_sleep(struct dvb_frontend *fe)
struct mxl111sf_adap_state *adap_state = &state->adap_state[fe->id];
int err;
- /* exit if we didnt initialize the driver yet */
+ /* exit if we didn't initialize the driver yet */
if (!state->chip_id) {
mxl_debug("driver not yet initialized, exit.");
goto fail;
diff --git a/drivers/media/usb/dvb-usb/technisat-usb2.c b/drivers/media/usb/dvb-usb/technisat-usb2.c
index 40832a1..98d24ae 100644
--- a/drivers/media/usb/dvb-usb/technisat-usb2.c
+++ b/drivers/media/usb/dvb-usb/technisat-usb2.c
@@ -102,7 +102,7 @@ static int technisat_usb2_i2c_access(struct usb_device *udev,
if (rxlen > 62) {
err("i2c RX buffer can't exceed 62 bytes (dev 0x%02x)",
device_addr);
- txlen = 62;
+ rxlen = 62;
}
b[0] = I2C_SPEED_100KHZ_BIT;
diff --git a/drivers/media/usb/em28xx/em28xx-video.c b/drivers/media/usb/em28xx/em28xx-video.c
index fc5d60e..dd19c9f 100644
--- a/drivers/media/usb/em28xx/em28xx-video.c
+++ b/drivers/media/usb/em28xx/em28xx-video.c
@@ -1664,8 +1664,8 @@ static int em28xx_v4l2_close(struct file *filp)
em28xx_videodbg("users=%d\n", dev->users);
- mutex_lock(&dev->lock);
vb2_fop_release(filp);
+ mutex_lock(&dev->lock);
if (dev->users == 1) {
/* the device is already disconnect,
diff --git a/drivers/media/usb/gspca/gl860/gl860.c b/drivers/media/usb/gspca/gl860/gl860.c
index cb1e64c..cea8d7f 100644
--- a/drivers/media/usb/gspca/gl860/gl860.c
+++ b/drivers/media/usb/gspca/gl860/gl860.c
@@ -438,7 +438,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
s32 nToSkip =
sd->swapRB * (gspca_dev->cam.cam_mode[mode].bytesperline + 1);
- /* Test only against 0202h, so endianess does not matter */
+ /* Test only against 0202h, so endianness does not matter */
switch (*(s16 *) data) {
case 0x0202: /* End of frame, start a new one */
gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
diff --git a/drivers/media/usb/gspca/pac207.c b/drivers/media/usb/gspca/pac207.c
index cd79c18..07529e5 100644
--- a/drivers/media/usb/gspca/pac207.c
+++ b/drivers/media/usb/gspca/pac207.c
@@ -416,7 +416,7 @@ static void sd_pkt_scan(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
int ret = -EINVAL;
diff --git a/drivers/media/usb/gspca/pac7302.c b/drivers/media/usb/gspca/pac7302.c
index a915096..2fd1c5e 100644
--- a/drivers/media/usb/gspca/pac7302.c
+++ b/drivers/media/usb/gspca/pac7302.c
@@ -874,7 +874,7 @@ static int sd_dbg_s_register(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
int ret = -EINVAL;
u8 data0, data1;
diff --git a/drivers/media/usb/gspca/stk1135.c b/drivers/media/usb/gspca/stk1135.c
index 1fc80af..48234c9 100644
--- a/drivers/media/usb/gspca/stk1135.c
+++ b/drivers/media/usb/gspca/stk1135.c
@@ -361,6 +361,9 @@ static void stk1135_configure_clock(struct gspca_dev *gspca_dev)
/* set serial interface clock divider (30MHz/0x1f*16+2) = 60240 kHz) */
reg_w(gspca_dev, STK1135_REG_SICTL + 2, 0x1f);
+
+ /* wait a while for sensor to catch up */
+ udelay(1000);
}
static void stk1135_camera_disable(struct gspca_dev *gspca_dev)
diff --git a/drivers/media/usb/gspca/stv0680.c b/drivers/media/usb/gspca/stv0680.c
index 9c08276..7f94ec7 100644
--- a/drivers/media/usb/gspca/stv0680.c
+++ b/drivers/media/usb/gspca/stv0680.c
@@ -139,7 +139,7 @@ static int sd_config(struct gspca_dev *gspca_dev,
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam = &gspca_dev->cam;
- /* Give the camera some time to settle, otherwise initalization will
+ /* Give the camera some time to settle, otherwise initialization will
fail on hotplug, and yes it really needs a full second. */
msleep(1000);
diff --git a/drivers/media/usb/gspca/sunplus.c b/drivers/media/usb/gspca/sunplus.c
index a517d18..46c9f22 100644
--- a/drivers/media/usb/gspca/sunplus.c
+++ b/drivers/media/usb/gspca/sunplus.c
@@ -1027,6 +1027,7 @@ static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x055f, 0xc650), BS(SPCA533, 0)},
{USB_DEVICE(0x05da, 0x1018), BS(SPCA504B, 0)},
{USB_DEVICE(0x06d6, 0x0031), BS(SPCA533, 0)},
+ {USB_DEVICE(0x06d6, 0x0041), BS(SPCA504B, 0)},
{USB_DEVICE(0x0733, 0x1311), BS(SPCA533, 0)},
{USB_DEVICE(0x0733, 0x1314), BS(SPCA533, 0)},
{USB_DEVICE(0x0733, 0x2211), BS(SPCA533, 0)},
diff --git a/drivers/media/usb/gspca/zc3xx.c b/drivers/media/usb/gspca/zc3xx.c
index 7b95d8e..d3e1b6d 100644
--- a/drivers/media/usb/gspca/zc3xx.c
+++ b/drivers/media/usb/gspca/zc3xx.c
@@ -6905,7 +6905,7 @@ static int sd_get_jcomp(struct gspca_dev *gspca_dev,
#if IS_ENABLED(CONFIG_INPUT)
static int sd_int_pkt_scan(struct gspca_dev *gspca_dev,
u8 *data, /* interrupt packet data */
- int len) /* interrput packet length */
+ int len) /* interrupt packet length */
{
if (len == 8 && data[4] == 1) {
input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1);
diff --git a/drivers/media/usb/pwc/pwc-if.c b/drivers/media/usb/pwc/pwc-if.c
index 77bbf78..78c9bc8 100644
--- a/drivers/media/usb/pwc/pwc-if.c
+++ b/drivers/media/usb/pwc/pwc-if.c
@@ -1039,7 +1039,7 @@ static int usb_pwc_probe(struct usb_interface *intf, const struct usb_device_id
/* Set the leds off */
pwc_set_leds(pdev, 0, 0);
- /* Setup intial videomode */
+ /* Setup initial videomode */
rc = pwc_set_video_mode(pdev, MAX_WIDTH, MAX_HEIGHT,
V4L2_PIX_FMT_YUV420, 30, &compression, 1);
if (rc)
diff --git a/drivers/media/usb/usbtv/usbtv.c b/drivers/media/usb/usbtv/usbtv.c
index 8a505a9..6222a4a 100644
--- a/drivers/media/usb/usbtv/usbtv.c
+++ b/drivers/media/usb/usbtv/usbtv.c
@@ -50,13 +50,8 @@
#define USBTV_ISOC_TRANSFERS 16
#define USBTV_ISOC_PACKETS 8
-#define USBTV_WIDTH 720
-#define USBTV_HEIGHT 480
-
#define USBTV_CHUNK_SIZE 256
#define USBTV_CHUNK 240
-#define USBTV_CHUNKS (USBTV_WIDTH * USBTV_HEIGHT \
- / 4 / USBTV_CHUNK)
/* Chunk header. */
#define USBTV_MAGIC_OK(chunk) ((be32_to_cpu(chunk[0]) & 0xff000000) \
@@ -65,6 +60,27 @@
#define USBTV_ODD(chunk) ((be32_to_cpu(chunk[0]) & 0x0000f000) >> 15)
#define USBTV_CHUNK_NO(chunk) (be32_to_cpu(chunk[0]) & 0x00000fff)
+#define USBTV_TV_STD (V4L2_STD_525_60 | V4L2_STD_PAL)
+
+/* parameters for supported TV norms */
+struct usbtv_norm_params {
+ v4l2_std_id norm;
+ int cap_width, cap_height;
+};
+
+static struct usbtv_norm_params norm_params[] = {
+ {
+ .norm = V4L2_STD_525_60,
+ .cap_width = 720,
+ .cap_height = 480,
+ },
+ {
+ .norm = V4L2_STD_PAL,
+ .cap_width = 720,
+ .cap_height = 576,
+ }
+};
+
/* A single videobuf2 frame buffer. */
struct usbtv_buf {
struct vb2_buffer vb;
@@ -94,11 +110,38 @@ struct usbtv {
USBTV_COMPOSITE_INPUT,
USBTV_SVIDEO_INPUT,
} input;
+ v4l2_std_id norm;
+ int width, height;
+ int n_chunks;
int iso_size;
unsigned int sequence;
struct urb *isoc_urbs[USBTV_ISOC_TRANSFERS];
};
+static int usbtv_configure_for_norm(struct usbtv *usbtv, v4l2_std_id norm)
+{
+ int i, ret = 0;
+ struct usbtv_norm_params *params = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(norm_params); i++) {
+ if (norm_params[i].norm & norm) {
+ params = &norm_params[i];
+ break;
+ }
+ }
+
+ if (params) {
+ usbtv->width = params->cap_width;
+ usbtv->height = params->cap_height;
+ usbtv->n_chunks = usbtv->width * usbtv->height
+ / 4 / USBTV_CHUNK;
+ usbtv->norm = params->norm;
+ } else
+ ret = -EINVAL;
+
+ return ret;
+}
+
static int usbtv_set_regs(struct usbtv *usbtv, const u16 regs[][2], int size)
{
int ret;
@@ -158,6 +201,57 @@ static int usbtv_select_input(struct usbtv *usbtv, int input)
return ret;
}
+static int usbtv_select_norm(struct usbtv *usbtv, v4l2_std_id norm)
+{
+ int ret;
+ static const u16 pal[][2] = {
+ { USBTV_BASE + 0x001a, 0x0068 },
+ { USBTV_BASE + 0x010e, 0x0072 },
+ { USBTV_BASE + 0x010f, 0x00a2 },
+ { USBTV_BASE + 0x0112, 0x00b0 },
+ { USBTV_BASE + 0x0117, 0x0001 },
+ { USBTV_BASE + 0x0118, 0x002c },
+ { USBTV_BASE + 0x012d, 0x0010 },
+ { USBTV_BASE + 0x012f, 0x0020 },
+ { USBTV_BASE + 0x024f, 0x0002 },
+ { USBTV_BASE + 0x0254, 0x0059 },
+ { USBTV_BASE + 0x025a, 0x0016 },
+ { USBTV_BASE + 0x025b, 0x0035 },
+ { USBTV_BASE + 0x0263, 0x0017 },
+ { USBTV_BASE + 0x0266, 0x0016 },
+ { USBTV_BASE + 0x0267, 0x0036 }
+ };
+
+ static const u16 ntsc[][2] = {
+ { USBTV_BASE + 0x001a, 0x0079 },
+ { USBTV_BASE + 0x010e, 0x0068 },
+ { USBTV_BASE + 0x010f, 0x009c },
+ { USBTV_BASE + 0x0112, 0x00f0 },
+ { USBTV_BASE + 0x0117, 0x0000 },
+ { USBTV_BASE + 0x0118, 0x00fc },
+ { USBTV_BASE + 0x012d, 0x0004 },
+ { USBTV_BASE + 0x012f, 0x0008 },
+ { USBTV_BASE + 0x024f, 0x0001 },
+ { USBTV_BASE + 0x0254, 0x005f },
+ { USBTV_BASE + 0x025a, 0x0012 },
+ { USBTV_BASE + 0x025b, 0x0001 },
+ { USBTV_BASE + 0x0263, 0x001c },
+ { USBTV_BASE + 0x0266, 0x0011 },
+ { USBTV_BASE + 0x0267, 0x0005 }
+ };
+
+ ret = usbtv_configure_for_norm(usbtv, norm);
+
+ if (!ret) {
+ if (norm & V4L2_STD_525_60)
+ ret = usbtv_set_regs(usbtv, ntsc, ARRAY_SIZE(ntsc));
+ else if (norm & V4L2_STD_PAL)
+ ret = usbtv_set_regs(usbtv, pal, ARRAY_SIZE(pal));
+ }
+
+ return ret;
+}
+
static int usbtv_setup_capture(struct usbtv *usbtv)
{
int ret;
@@ -225,26 +319,11 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
{ USBTV_BASE + 0x0284, 0x0088 },
{ USBTV_BASE + 0x0003, 0x0004 },
- { USBTV_BASE + 0x001a, 0x0079 },
{ USBTV_BASE + 0x0100, 0x00d3 },
- { USBTV_BASE + 0x010e, 0x0068 },
- { USBTV_BASE + 0x010f, 0x009c },
- { USBTV_BASE + 0x0112, 0x00f0 },
{ USBTV_BASE + 0x0115, 0x0015 },
- { USBTV_BASE + 0x0117, 0x0000 },
- { USBTV_BASE + 0x0118, 0x00fc },
- { USBTV_BASE + 0x012d, 0x0004 },
- { USBTV_BASE + 0x012f, 0x0008 },
{ USBTV_BASE + 0x0220, 0x002e },
{ USBTV_BASE + 0x0225, 0x0008 },
{ USBTV_BASE + 0x024e, 0x0002 },
- { USBTV_BASE + 0x024f, 0x0001 },
- { USBTV_BASE + 0x0254, 0x005f },
- { USBTV_BASE + 0x025a, 0x0012 },
- { USBTV_BASE + 0x025b, 0x0001 },
- { USBTV_BASE + 0x0263, 0x001c },
- { USBTV_BASE + 0x0266, 0x0011 },
- { USBTV_BASE + 0x0267, 0x0005 },
{ USBTV_BASE + 0x024e, 0x0002 },
{ USBTV_BASE + 0x024f, 0x0002 },
};
@@ -253,6 +332,10 @@ static int usbtv_setup_capture(struct usbtv *usbtv)
if (ret)
return ret;
+ ret = usbtv_select_norm(usbtv, usbtv->norm);
+ if (ret)
+ return ret;
+
ret = usbtv_select_input(usbtv, usbtv->input);
if (ret)
return ret;
@@ -296,7 +379,7 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
frame_id = USBTV_FRAME_ID(chunk);
odd = USBTV_ODD(chunk);
chunk_no = USBTV_CHUNK_NO(chunk);
- if (chunk_no >= USBTV_CHUNKS)
+ if (chunk_no >= usbtv->n_chunks)
return;
/* Beginning of a frame. */
@@ -324,10 +407,10 @@ static void usbtv_image_chunk(struct usbtv *usbtv, u32 *chunk)
usbtv->chunks_done++;
/* Last chunk in a frame, signalling an end */
- if (odd && chunk_no == USBTV_CHUNKS-1) {
+ if (odd && chunk_no == usbtv->n_chunks-1) {
int size = vb2_plane_size(&buf->vb, 0);
enum vb2_buffer_state state = usbtv->chunks_done ==
- USBTV_CHUNKS ?
+ usbtv->n_chunks ?
VB2_BUF_STATE_DONE :
VB2_BUF_STATE_ERROR;
@@ -500,6 +583,8 @@ static int usbtv_querycap(struct file *file, void *priv,
static int usbtv_enum_input(struct file *file, void *priv,
struct v4l2_input *i)
{
+ struct usbtv *dev = video_drvdata(file);
+
switch (i->index) {
case USBTV_COMPOSITE_INPUT:
strlcpy(i->name, "Composite", sizeof(i->name));
@@ -512,7 +597,7 @@ static int usbtv_enum_input(struct file *file, void *priv,
}
i->type = V4L2_INPUT_TYPE_CAMERA;
- i->std = V4L2_STD_525_60;
+ i->std = dev->vdev.tvnorms;
return 0;
}
@@ -531,23 +616,37 @@ static int usbtv_enum_fmt_vid_cap(struct file *file, void *priv,
static int usbtv_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
- f->fmt.pix.width = USBTV_WIDTH;
- f->fmt.pix.height = USBTV_HEIGHT;
+ struct usbtv *usbtv = video_drvdata(file);
+
+ f->fmt.pix.width = usbtv->width;
+ f->fmt.pix.height = usbtv->height;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV;
f->fmt.pix.field = V4L2_FIELD_INTERLACED;
- f->fmt.pix.bytesperline = USBTV_WIDTH * 2;
+ f->fmt.pix.bytesperline = usbtv->width * 2;
f->fmt.pix.sizeimage = (f->fmt.pix.bytesperline * f->fmt.pix.height);
f->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M;
- f->fmt.pix.priv = 0;
+
return 0;
}
static int usbtv_g_std(struct file *file, void *priv, v4l2_std_id *norm)
{
- *norm = V4L2_STD_525_60;
+ struct usbtv *usbtv = video_drvdata(file);
+ *norm = usbtv->norm;
return 0;
}
+static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
+{
+ int ret = -EINVAL;
+ struct usbtv *usbtv = video_drvdata(file);
+
+ if ((norm & V4L2_STD_525_60) || (norm & V4L2_STD_PAL))
+ ret = usbtv_select_norm(usbtv, norm);
+
+ return ret;
+}
+
static int usbtv_g_input(struct file *file, void *priv, unsigned int *i)
{
struct usbtv *usbtv = video_drvdata(file);
@@ -561,13 +660,6 @@ static int usbtv_s_input(struct file *file, void *priv, unsigned int i)
return usbtv_select_input(usbtv, i);
}
-static int usbtv_s_std(struct file *file, void *priv, v4l2_std_id norm)
-{
- if (norm & V4L2_STD_525_60)
- return 0;
- return -EINVAL;
-}
-
struct v4l2_ioctl_ops usbtv_ioctl_ops = {
.vidioc_querycap = usbtv_querycap,
.vidioc_enum_input = usbtv_enum_input,
@@ -604,10 +696,12 @@ static int usbtv_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *v4l_fmt, unsigned int *nbuffers,
unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[])
{
+ struct usbtv *usbtv = vb2_get_drv_priv(vq);
+
if (*nbuffers < 2)
*nbuffers = 2;
*nplanes = 1;
- sizes[0] = USBTV_WIDTH * USBTV_HEIGHT / 2 * sizeof(u32);
+ sizes[0] = USBTV_CHUNK * usbtv->n_chunks * 2 * sizeof(u32);
return 0;
}
@@ -690,7 +784,11 @@ static int usbtv_probe(struct usb_interface *intf,
return -ENOMEM;
usbtv->dev = dev;
usbtv->udev = usb_get_dev(interface_to_usbdev(intf));
+
usbtv->iso_size = size;
+
+ (void)usbtv_configure_for_norm(usbtv, V4L2_STD_525_60);
+
spin_lock_init(&usbtv->buflock);
mutex_init(&usbtv->v4l2_lock);
mutex_init(&usbtv->vb2q_lock);
@@ -727,7 +825,7 @@ static int usbtv_probe(struct usb_interface *intf,
usbtv->vdev.release = video_device_release_empty;
usbtv->vdev.fops = &usbtv_fops;
usbtv->vdev.ioctl_ops = &usbtv_ioctl_ops;
- usbtv->vdev.tvnorms = V4L2_STD_525_60;
+ usbtv->vdev.tvnorms = USBTV_TV_STD;
usbtv->vdev.queue = &usbtv->vb2q;
usbtv->vdev.lock = &usbtv->v4l2_lock;
set_bit(V4L2_FL_USE_FH_PRIO, &usbtv->vdev.flags);
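
The usbtv hunks above replace the hard-coded NTSC geometry with per-norm state: usbtv_s_std() now accepts both V4L2_STD_525_60 and V4L2_STD_PAL, and the chunk bookkeeping (usbtv->n_chunks, width, height) follows the selected norm. A minimal userspace sketch of exercising the new path, assuming the stick enumerates as /dev/video0 (a hypothetical path):

/* Switch a usbtv node to PAL and read the active standard back. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	int fd = open("/dev/video0", O_RDWR);
	v4l2_std_id std = V4L2_STD_PAL;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Hits usbtv_s_std(); anything other than NTSC/PAL returns -EINVAL. */
	if (ioctl(fd, VIDIOC_S_STD, &std) < 0)
		perror("VIDIOC_S_STD");
	if (ioctl(fd, VIDIOC_G_STD, &std) == 0)
		printf("active standard: 0x%llx\n", (unsigned long long)std);
	close(fd);
	return 0;
}
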
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 899cb6d..898c208 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -556,7 +556,7 @@ static u16 uvc_video_clock_host_sof(const struct uvc_clock_sample *sample)
*
* SOF = ((SOF2 - SOF1) * PTS + SOF1 * STC2 - SOF2 * STC1) / (STC2 - STC1) (1)
*
- * to avoid loosing precision in the division. Similarly, the host timestamp is
+ * to avoid losing precision in the division. Similarly, the host timestamp is
* computed with
*
* TS = ((TS2 - TS1) * PTS + TS1 * SOF2 - TS2 * SOF1) / (SOF2 - SOF1) (2)
diff --git a/drivers/media/v4l2-core/v4l2-ctrls.c b/drivers/media/v4l2-core/v4l2-ctrls.c
index 60dcc0f..fb46790 100644
--- a/drivers/media/v4l2-core/v4l2-ctrls.c
+++ b/drivers/media/v4l2-core/v4l2-ctrls.c
@@ -420,7 +420,7 @@ const char * const *v4l2_ctrl_get_menu(u32 id)
"Advanced Simple",
"Core",
"Simple Scalable",
- "Advanced Coding Efficency",
+ "Advanced Coding Efficiency",
NULL,
};
diff --git a/drivers/media/v4l2-core/videobuf2-core.c b/drivers/media/v4l2-core/videobuf2-core.c
index b19b306..0edc165 100644
--- a/drivers/media/v4l2-core/videobuf2-core.c
+++ b/drivers/media/v4l2-core/videobuf2-core.c
@@ -145,6 +145,25 @@ static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
}
/**
+ * __setup_lengths() - setup initial lengths for every plane in
+ * every buffer on the queue
+ */
+static void __setup_lengths(struct vb2_queue *q, unsigned int n)
+{
+ unsigned int buffer, plane;
+ struct vb2_buffer *vb;
+
+ for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
+ vb = q->bufs[buffer];
+ if (!vb)
+ continue;
+
+ for (plane = 0; plane < vb->num_planes; ++plane)
+ vb->v4l2_planes[plane].length = q->plane_sizes[plane];
+ }
+}
+
+/**
* __setup_offsets() - setup unique offsets ("cookies") for every plane in
* every buffer on the queue
*/
@@ -169,7 +188,6 @@ static void __setup_offsets(struct vb2_queue *q, unsigned int n)
continue;
for (plane = 0; plane < vb->num_planes; ++plane) {
- vb->v4l2_planes[plane].length = q->plane_sizes[plane];
vb->v4l2_planes[plane].m.mem_offset = off;
dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
@@ -241,6 +259,7 @@ static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
q->bufs[q->num_buffers + buffer] = vb;
}
+ __setup_lengths(q, buffer);
if (memory == V4L2_MEMORY_MMAP)
__setup_offsets(q, buffer);
@@ -1824,8 +1843,8 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
return -EINVAL;
}
- if (eb->flags & ~O_CLOEXEC) {
- dprintk(1, "Queue does support only O_CLOEXEC flag\n");
+ if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
+ dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
return -EINVAL;
}
@@ -1848,14 +1867,14 @@ int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
vb_plane = &vb->planes[eb->plane];
- dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv);
+ dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
if (IS_ERR_OR_NULL(dbuf)) {
dprintk(1, "Failed to export buffer %d, plane %d\n",
eb->index, eb->plane);
return -EINVAL;
}
- ret = dma_buf_fd(dbuf, eb->flags);
+ ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
if (ret < 0) {
dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
eb->index, eb->plane, ret);
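
With the vb2_expbuf() change above, VIDIOC_EXPBUF accepts the access-mode bits in addition to O_CLOEXEC, and the access mode is forwarded to dma_buf_export() rather than to dma_buf_fd(). A sketch of requesting a read/write dmabuf from userspace; the node path and the assumption that MMAP buffers were already requested with VIDIOC_REQBUFS are illustrative:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_exportbuffer expbuf;
	int vfd = open("/dev/video0", O_RDWR);

	if (vfd < 0) {
		perror("open");
		return 1;
	}
	memset(&expbuf, 0, sizeof(expbuf));
	expbuf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	expbuf.index = 0;
	expbuf.plane = 0;
	/* Before this change only O_CLOEXEC was accepted in flags. */
	expbuf.flags = O_CLOEXEC | O_RDWR;
	if (ioctl(vfd, VIDIOC_EXPBUF, &expbuf) < 0)
		perror("VIDIOC_EXPBUF");
	else
		printf("exported dmabuf fd %d (read/write)\n", expbuf.fd);
	close(vfd);
	return 0;
}
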
diff --git a/drivers/media/v4l2-core/videobuf2-dma-contig.c b/drivers/media/v4l2-core/videobuf2-dma-contig.c
index 646f08f..33d3871d 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-contig.c
@@ -393,7 +393,7 @@ static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
return sgt;
}
-static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
+static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv, unsigned long flags)
{
struct vb2_dc_buf *buf = buf_priv;
struct dma_buf *dbuf;
@@ -404,7 +404,7 @@ static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
if (WARN_ON(!buf->sgt_base))
return NULL;
- dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
+ dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, flags);
if (IS_ERR(dbuf))
return NULL;
diff --git a/drivers/media/v4l2-core/videobuf2-dma-sg.c b/drivers/media/v4l2-core/videobuf2-dma-sg.c
index 2f86054..0d3a8ff 100644
--- a/drivers/media/v4l2-core/videobuf2-dma-sg.c
+++ b/drivers/media/v4l2-core/videobuf2-dma-sg.c
@@ -178,7 +178,7 @@ static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
GFP_KERNEL);
if (!buf->pages)
- return NULL;
+ goto userptr_fail_alloc_pages;
num_pages_from_user = get_user_pages(current, current->mm,
vaddr & PAGE_MASK,
@@ -204,6 +204,7 @@ userptr_fail_get_user_pages:
while (--num_pages_from_user >= 0)
put_page(buf->pages[num_pages_from_user]);
kfree(buf->pages);
+userptr_fail_alloc_pages:
kfree(buf);
return NULL;
}
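
The videobuf2-dma-sg hunk above plugs a leak of the just-allocated buf when the pages array cannot be allocated, by unwinding through a dedicated label. The same staged-cleanup shape, restated as a standalone program with illustrative names:

#include <stdio.h>
#include <stdlib.h>

struct demo_buf {
	void **pages;
	size_t num_pages;
};

static struct demo_buf *demo_alloc(size_t num_pages)
{
	struct demo_buf *buf = calloc(1, sizeof(*buf));

	if (!buf)
		return NULL;
	buf->num_pages = num_pages;
	buf->pages = calloc(num_pages, sizeof(*buf->pages));
	if (!buf->pages)
		goto fail_alloc_pages;	/* analogous to userptr_fail_alloc_pages */
	return buf;

fail_alloc_pages:
	free(buf);			/* this is the release the patch restores */
	return NULL;
}

int main(void)
{
	struct demo_buf *b = demo_alloc(16);

	printf("allocation %s\n", b ? "succeeded" : "failed");
	if (b) {
		free(b->pages);
		free(b);
	}
	return 0;
}
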
diff --git a/drivers/mfd/sec-core.c b/drivers/mfd/sec-core.c
index 34c18fb..54cc255 100644
--- a/drivers/mfd/sec-core.c
+++ b/drivers/mfd/sec-core.c
@@ -81,31 +81,31 @@ static struct of_device_id sec_dt_match[] = {
int sec_reg_read(struct sec_pmic_dev *sec_pmic, u8 reg, void *dest)
{
- return regmap_read(sec_pmic->regmap, reg, dest);
+ return regmap_read(sec_pmic->regmap_pmic, reg, dest);
}
EXPORT_SYMBOL_GPL(sec_reg_read);
int sec_bulk_read(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
{
- return regmap_bulk_read(sec_pmic->regmap, reg, buf, count);
+ return regmap_bulk_read(sec_pmic->regmap_pmic, reg, buf, count);
}
EXPORT_SYMBOL_GPL(sec_bulk_read);
int sec_reg_write(struct sec_pmic_dev *sec_pmic, u8 reg, u8 value)
{
- return regmap_write(sec_pmic->regmap, reg, value);
+ return regmap_write(sec_pmic->regmap_pmic, reg, value);
}
EXPORT_SYMBOL_GPL(sec_reg_write);
int sec_bulk_write(struct sec_pmic_dev *sec_pmic, u8 reg, int count, u8 *buf)
{
- return regmap_raw_write(sec_pmic->regmap, reg, buf, count);
+ return regmap_raw_write(sec_pmic->regmap_pmic, reg, buf, count);
}
EXPORT_SYMBOL_GPL(sec_bulk_write);
int sec_reg_update(struct sec_pmic_dev *sec_pmic, u8 reg, u8 val, u8 mask)
{
- return regmap_update_bits(sec_pmic->regmap, reg, mask, val);
+ return regmap_update_bits(sec_pmic->regmap_pmic, reg, mask, val);
}
EXPORT_SYMBOL_GPL(sec_reg_update);
@@ -166,6 +166,11 @@ static struct regmap_config s5m8767_regmap_config = {
.cache_type = REGCACHE_FLAT,
};
+static const struct regmap_config sec_rtc_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+
#ifdef CONFIG_OF
/*
* Only the common platform data elements for s5m8767 are parsed here from the
@@ -266,9 +271,9 @@ static int sec_pmic_probe(struct i2c_client *i2c,
break;
}
- sec_pmic->regmap = devm_regmap_init_i2c(i2c, regmap);
- if (IS_ERR(sec_pmic->regmap)) {
- ret = PTR_ERR(sec_pmic->regmap);
+ sec_pmic->regmap_pmic = devm_regmap_init_i2c(i2c, regmap);
+ if (IS_ERR(sec_pmic->regmap_pmic)) {
+ ret = PTR_ERR(sec_pmic->regmap_pmic);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
return ret;
@@ -277,6 +282,15 @@ static int sec_pmic_probe(struct i2c_client *i2c,
sec_pmic->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
i2c_set_clientdata(sec_pmic->rtc, sec_pmic);
+ sec_pmic->regmap_rtc = devm_regmap_init_i2c(sec_pmic->rtc,
+ &sec_rtc_regmap_config);
+ if (IS_ERR(sec_pmic->regmap_rtc)) {
+ ret = PTR_ERR(sec_pmic->regmap_rtc);
+ dev_err(&i2c->dev, "Failed to allocate RTC register map: %d\n",
+ ret);
+ return ret;
+ }
+
if (pdata && pdata->cfg_pmic_irq)
pdata->cfg_pmic_irq();
diff --git a/drivers/mfd/sec-irq.c b/drivers/mfd/sec-irq.c
index 0dd84e9..b441b1b 100644
--- a/drivers/mfd/sec-irq.c
+++ b/drivers/mfd/sec-irq.c
@@ -280,19 +280,19 @@ int sec_irq_init(struct sec_pmic_dev *sec_pmic)
switch (type) {
case S5M8763X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s5m8763_irq_chip,
&sec_pmic->irq_data);
break;
case S5M8767X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s5m8767_irq_chip,
&sec_pmic->irq_data);
break;
case S2MPS11X:
- ret = regmap_add_irq_chip(sec_pmic->regmap, sec_pmic->irq,
+ ret = regmap_add_irq_chip(sec_pmic->regmap_pmic, sec_pmic->irq,
IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
sec_pmic->irq_base, &s2mps11_irq_chip,
&sec_pmic->irq_data);
diff --git a/drivers/mtd/maps/pxa2xx-flash.c b/drivers/mtd/maps/pxa2xx-flash.c
index d210d13..0f55589 100644
--- a/drivers/mtd/maps/pxa2xx-flash.c
+++ b/drivers/mtd/maps/pxa2xx-flash.c
@@ -73,7 +73,7 @@ static int pxa2xx_flash_probe(struct platform_device *pdev)
return -ENOMEM;
}
info->map.cached =
- ioremap_cached(info->map.phys, info->map.size);
+ ioremap_cache(info->map.phys, info->map.size);
if (!info->map.cached)
printk(KERN_WARNING "Failed to ioremap cached %s\n",
info->map.name);
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index 4cabdc9..4b3aaa8 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -962,7 +962,7 @@ static int pxa3xx_nand_init_buff(struct pxa3xx_nand_info *info)
static void pxa3xx_nand_free_buff(struct pxa3xx_nand_info *info)
{
struct platform_device *pdev = info->pdev;
- if (use_dma) {
+ if (info->use_dma) {
pxa_free_dma(info->data_dma_ch);
dma_free_coherent(&pdev->dev, info->buf_size,
info->data_buff, info->data_buff_phys);
@@ -1259,10 +1259,6 @@ static struct of_device_id pxa3xx_nand_dt_ids[] = {
.compatible = "marvell,pxa3xx-nand",
.data = (void *)PXA3XX_NAND_VARIANT_PXA,
},
- {
- .compatible = "marvell,armada370-nand",
- .data = (void *)PXA3XX_NAND_VARIANT_ARMADA370,
- },
{}
};
MODULE_DEVICE_TABLE(of, pxa3xx_nand_dt_ids);
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 187b1b7..4ced594 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2201,20 +2201,25 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: speed changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
(__get_link_speed(port) << 1);
pr_debug("Port %d changed speed\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2229,20 +2234,25 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("%s: Warning: duplex changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
+ __get_state_machine_lock(port);
+
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
port->actor_oper_port_key = port->actor_admin_port_key |=
__get_duplex(port);
pr_debug("Port %d changed duplex\n", port->actor_port_number);
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/**
@@ -2258,15 +2268,21 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port = &(SLAVE_AD_INFO(slave).port);
- // if slave is null, the whole port is not initialized
+ /* if slave is null, the whole port is not initialized */
if (!port->slave) {
pr_warning("Warning: %s: link status changed for uninitialized port on %s\n",
slave->bond->dev->name, slave->dev->name);
return;
}
- // on link down we are zeroing duplex and speed since some of the adaptors(ce1000.lan) report full duplex/speed instead of N/A(duplex) / 0(speed)
- // on link up we are forcing recheck on the duplex and speed since some of he adaptors(ce1000.lan) report
+ __get_state_machine_lock(port);
+ /* on link down we are zeroing duplex and speed since
+ * some of the adaptors(ce1000.lan) report full duplex/speed
+ * instead of N/A(duplex) / 0(speed).
+ *
+ * on link up we are forcing recheck on the duplex and speed since
+ * some of the adaptors(ce1000.lan) report.
+ */
if (link == BOND_LINK_UP) {
port->is_enabled = true;
port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
@@ -2282,10 +2298,15 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
port->actor_oper_port_key = (port->actor_admin_port_key &=
~AD_SPEED_KEY_BITS);
}
- //BOND_PRINT_DBG(("Port %d changed link status to %s", port->actor_port_number, ((link == BOND_LINK_UP)?"UP":"DOWN")));
- // there is no need to reselect a new aggregator, just signal the
- // state machines to reinitialize
+ pr_debug("Port %d changed link status to %s",
+ port->actor_port_number,
+ (link == BOND_LINK_UP) ? "UP" : "DOWN");
+ /* there is no need to reselect a new aggregator, just signal the
+ * state machines to reinitialize
+ */
port->sm_vars |= AD_PORT_BEGIN;
+
+ __release_state_machine_lock(port);
}
/*
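
The three bond_3ad hunks above take the per-port state machine lock around the actor key update and the AD_PORT_BEGIN signal, so the periodic state machine never observes a half-updated key. A pthread sketch of the same discipline; the structure, constants and function names are illustrative stand-ins, not the bonding code:

#include <stdio.h>
#include <pthread.h>

#define AD_PORT_BEGIN		0x1
#define AD_SPEED_KEY_BITS	0x3e

struct demo_port {
	pthread_mutex_t machine_lock;	/* __get_state_machine_lock() analogue */
	unsigned int actor_admin_port_key;
	unsigned int sm_vars;
};

static void demo_speed_changed(struct demo_port *port, unsigned int speed_key)
{
	pthread_mutex_lock(&port->machine_lock);
	port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
	port->actor_admin_port_key |= speed_key << 1;
	port->sm_vars |= AD_PORT_BEGIN;	/* ask the machine to reinitialize */
	pthread_mutex_unlock(&port->machine_lock);
}

int main(void)
{
	struct demo_port port = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

	demo_speed_changed(&port, 0x4);
	printf("key=0x%x sm_vars=0x%x\n", port.actor_admin_port_key, port.sm_vars);
	return 0;
}
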
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 36eab0c..398e299 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4199,9 +4199,9 @@ static int bond_check_params(struct bond_params *params)
(arp_ip_count < BOND_MAX_ARP_TARGETS) && arp_ip_target[i]; i++) {
/* not complete check, but should be good enough to
catch mistakes */
- __be32 ip = in_aton(arp_ip_target[i]);
- if (!isdigit(arp_ip_target[i][0]) || ip == 0 ||
- ip == htonl(INADDR_BROADCAST)) {
+ __be32 ip;
+ if (!in4_pton(arp_ip_target[i], -1, (u8 *)&ip, -1, NULL) ||
+ IS_IP_TARGET_UNUSABLE_ADDRESS(ip)) {
pr_warning("Warning: bad arp_ip_target module parameter (%s), ARP monitoring will not be performed\n",
arp_ip_target[i]);
arp_interval = 0;
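
The bond_main.c hunk above switches arp_ip_target parsing from in_aton() plus an isdigit() check to in4_pton(), so partially numeric strings no longer parse as addresses, and the usability test is centralized in IS_IP_TARGET_UNUSABLE_ADDRESS(). A userspace analogue using inet_pton(); the sample strings and the unusable-address set used here (0.0.0.0 and the broadcast address) follow the spirit of the check rather than the macro itself:

#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

static int arp_target_usable(const char *s)
{
	struct in_addr a;

	if (inet_pton(AF_INET, s, &a) != 1)
		return 0;		/* not a complete dotted quad */
	if (a.s_addr == htonl(INADDR_ANY) ||
	    a.s_addr == htonl(INADDR_BROADCAST))
		return 0;		/* unusable as an ARP target */
	return 1;
}

int main(void)
{
	const char *samples[] = { "10.0.0.1", "1.2.3", "255.255.255.255", "example" };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("%-17s -> %s\n", samples[i],
		       arp_target_usable(samples[i]) ? "accepted" : "rejected");
	return 0;
}
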
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index abf5e10..0ae580b 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1635,12 +1635,12 @@ static ssize_t bonding_show_packets_per_slave(struct device *d,
char *buf)
{
struct bonding *bond = to_bond(d);
- int packets_per_slave = bond->params.packets_per_slave;
+ unsigned int packets_per_slave = bond->params.packets_per_slave;
if (packets_per_slave > 1)
packets_per_slave = reciprocal_value(packets_per_slave);
- return sprintf(buf, "%d\n", packets_per_slave);
+ return sprintf(buf, "%u\n", packets_per_slave);
}
static ssize_t bonding_store_packets_per_slave(struct device *d,
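
The bond_sysfs.c hunk above prints packets_per_slave as unsigned because, once converted with reciprocal_value(), the stored number usually has its top bit set and %d would show it as negative. A standalone illustration; reciprocal_value() here is a simplified stand-in for the kernel helper of that era, not the exact implementation:

#include <stdio.h>
#include <stdint.h>

static uint32_t reciprocal_value(uint32_t k)
{
	return (uint32_t)(((1ULL << 32) + k - 1) / k);
}

int main(void)
{
	uint32_t packets_per_slave = 2;
	uint32_t r = reciprocal_value(packets_per_slave);

	printf("as %%d: %d\n", (int)r);	/* misleading: prints a negative value */
	printf("as %%u: %u\n", r);	/* what the fixed sysfs handler prints */
	return 0;
}
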
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 5f9a7ad..8aeec0b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -625,6 +625,7 @@ static int ems_usb_start(struct ems_usb *dev)
usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf,
urb->transfer_dma);
+ usb_free_urb(urb);
break;
}
@@ -798,8 +799,8 @@ static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *ne
* allowed (MAX_TX_URBS).
*/
if (!context) {
- usb_unanchor_urb(urb);
usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
+ usb_free_urb(urb);
netdev_warn(netdev, "couldn't find free context\n");
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index 8ee9d15..263dd92 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -927,6 +927,9 @@ static int pcan_usb_pro_init(struct peak_usb_device *dev)
/* set LED in default state (end of init phase) */
pcan_usb_pro_set_led(dev, 0, 1);
+ kfree(bi);
+ kfree(fi);
+
return 0;
err_out:
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 50b853a..46dfb13 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -717,8 +717,7 @@ static int emac_open(struct net_device *dev)
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
- if (devm_request_irq(db->dev, dev->irq, &emac_interrupt,
- 0, dev->name, dev))
+ if (request_irq(dev->irq, &emac_interrupt, 0, dev->name, dev))
return -EAGAIN;
/* Initialize EMAC board */
@@ -774,6 +773,8 @@ static int emac_stop(struct net_device *ndev)
emac_shutdown(ndev);
+ free_irq(ndev->irq, ndev);
+
return 0;
}
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index b2ffad1..248baf6 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -565,6 +565,8 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
/* Make sure pointer to data buffer is set */
wmb();
+ skb_tx_timestamp(skb);
+
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
/* Increment index to point to the next BD */
@@ -579,8 +581,6 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
arc_reg_set(priv, R_STATUS, TXPL_MASK);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index a36a760..2980175 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -145,9 +145,11 @@ static void atl1c_reset_pcie(struct atl1c_hw *hw, u32 flag)
* Mask some pcie error bits
*/
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
- pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
- data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
- pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ if (pos) {
+ pci_read_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, &data);
+ data &= ~(PCI_ERR_UNC_DLP | PCI_ERR_UNC_FCP);
+ pci_write_config_dword(pdev, pos + PCI_ERR_UNCOR_SEVER, data);
+ }
/* clear error status */
pcie_capability_write_word(pdev, PCI_EXP_DEVSTA,
PCI_EXP_DEVSTA_NFED |
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index a1f66e2..2d5fce4 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -1250,7 +1250,10 @@ struct bnx2x_slowpath {
* Therefore, if they would have been defined in the same union,
* data can get corrupted.
*/
- struct afex_vif_list_ramrod_data func_afex_rdata;
+ union {
+ struct afex_vif_list_ramrod_data viflist_data;
+ struct function_update_data func_update;
+ } func_afex_rdata;
/* used by dmae command executer */
struct dmae_command dmae[MAX_DMAE_C];
@@ -2499,4 +2502,6 @@ void bnx2x_set_local_cmng(struct bnx2x *bp);
#define MCPR_SCRATCH_BASE(bp) \
(CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
+#define E1H_MAX_MF_SB_COUNT (HC_SB_MAX_SB_E1X/(E1HVN_MAX * PORT_MAX))
+
#endif /* bnx2x.h */
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index 20dcc02..11fc795 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -3865,6 +3865,19 @@ static void bnx2x_warpcore_enable_AN_KR(struct bnx2x_phy *phy,
bnx2x_warpcore_enable_AN_KR2(phy, params, vars);
} else {
+ /* Enable Auto-Detect to support 1G over CL37 as well */
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_SERDESDIGITAL_CONTROL1000X1, 0x10);
+
+ /* Force cl48 sync_status LOW to avoid getting stuck in CL73
+ * parallel-detect loop when CL73 and CL37 are enabled.
+ */
+ CL22_WR_OVER_CL45(bp, phy, MDIO_REG_BANK_AER_BLOCK,
+ MDIO_AER_BLOCK_AER_REG, 0);
+ bnx2x_cl45_write(bp, phy, MDIO_WC_DEVAD,
+ MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI, 0x0800);
+ bnx2x_set_aer_mmd(params, phy);
+
bnx2x_disable_kr2(params, vars, phy);
}
@@ -8120,17 +8133,20 @@ static int bnx2x_get_edc_mode(struct bnx2x_phy *phy,
*edc_mode = EDC_MODE_ACTIVE_DAC;
else
check_limiting_mode = 1;
- } else if (copper_module_type &
- SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
+ } else {
+ *edc_mode = EDC_MODE_PASSIVE_DAC;
+ /* Even in case PASSIVE_DAC indication is not set,
+ * treat it as a passive DAC cable, since some cables
+ * don't have this indication.
+ */
+ if (copper_module_type &
+ SFP_EEPROM_FC_TX_TECH_BITMASK_COPPER_PASSIVE) {
DP(NETIF_MSG_LINK,
"Passive Copper cable detected\n");
- *edc_mode =
- EDC_MODE_PASSIVE_DAC;
- } else {
- DP(NETIF_MSG_LINK,
- "Unknown copper-cable-type 0x%x !!!\n",
- copper_module_type);
- return -EINVAL;
+ } else {
+ DP(NETIF_MSG_LINK,
+ "Unknown copper-cable-type\n");
+ }
}
break;
}
@@ -10825,9 +10841,9 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
(1<<11));
if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
- (phy->req_line_speed == SPEED_1000)) {
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)) ||
+ (phy->req_line_speed == SPEED_1000)) {
an_1000_val |= (1<<8);
autoneg_val |= (1<<9 | 1<<12);
if (phy->req_duplex == DUPLEX_FULL)
@@ -10843,30 +10859,32 @@ static int bnx2x_54618se_config_init(struct bnx2x_phy *phy,
0x09,
&an_1000_val);
- /* Set 100 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF)))) {
- an_10_100_val |= (1<<7);
- /* Enable autoneg and restart autoneg for legacy speeds */
- autoneg_val |= (1<<9 | 1<<12);
-
- if (phy->req_duplex == DUPLEX_FULL)
- an_10_100_val |= (1<<8);
- DP(NETIF_MSG_LINK, "Advertising 100M\n");
- }
-
- /* Set 10 speed advertisement */
- if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
- (phy->speed_cap_mask &
- (PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL |
- PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF)))) {
- an_10_100_val |= (1<<5);
- autoneg_val |= (1<<9 | 1<<12);
- if (phy->req_duplex == DUPLEX_FULL)
+ /* Advertise 10/100 link speed */
+ if (phy->req_line_speed == SPEED_AUTO_NEG) {
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF) {
+ an_10_100_val |= (1<<5);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL) {
an_10_100_val |= (1<<6);
- DP(NETIF_MSG_LINK, "Advertising 10M\n");
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 10M-FD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF) {
+ an_10_100_val |= (1<<7);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-HD\n");
+ }
+ if (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL) {
+ an_10_100_val |= (1<<8);
+ autoneg_val |= (1<<9 | 1<<12);
+ DP(NETIF_MSG_LINK, "Advertising 100M-FD\n");
+ }
}
/* Only 10/100 are allowed to work in FORCE mode */
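
The 54618se hunk above flattens the 10/100 advertisement logic: every capability bit sets its own advertisement bit, and half duplex is no longer advertised only as a side effect of the full-duplex branch. A standalone restatement; the CAP_* names and values are illustrative, only the advertisement bit positions (5-8) and the autoneg enable/restart bits (9 and 12) are taken from the hunk:

#include <stdio.h>
#include <stdint.h>

#define CAP_10M_HALF	(1u << 0)
#define CAP_10M_FULL	(1u << 1)
#define CAP_100M_HALF	(1u << 2)
#define CAP_100M_FULL	(1u << 3)

static uint16_t build_an_10_100(uint32_t cap_mask, uint16_t *autoneg_val)
{
	uint16_t an = 0;

	if (cap_mask & CAP_10M_HALF)
		an |= 1 << 5;
	if (cap_mask & CAP_10M_FULL)
		an |= 1 << 6;
	if (cap_mask & CAP_100M_HALF)
		an |= 1 << 7;
	if (cap_mask & CAP_100M_FULL)
		an |= 1 << 8;
	if (an)
		*autoneg_val |= (1 << 9) | (1 << 12);	/* enable + restart AN */
	return an;
}

int main(void)
{
	uint16_t autoneg = 0;
	uint16_t an = build_an_10_100(CAP_10M_FULL | CAP_100M_FULL, &autoneg);

	printf("an_10_100_val=0x%04x autoneg_val=0x%04x\n", an, autoneg);
	return 0;
}
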
@@ -13342,6 +13360,10 @@ static u8 bnx2x_analyze_link_error(struct link_params *params,
DP(NETIF_MSG_LINK, "Link changed:[%x %x]->%x\n", vars->link_up,
old_status, status);
+ /* Do not touch the link in case the physical link is down */
+ if ((vars->phy_flags & PHY_PHYSICAL_LINK_FLAG) == 0)
+ return 1;
+
/* a. Update shmem->link_status accordingly
* b. Update link_vars->link_up
*/
@@ -13550,7 +13572,7 @@ static void bnx2x_check_kr2_wa(struct link_params *params,
*/
not_kr2_device = (((base_page & 0x8000) == 0) ||
(((base_page & 0x8000) &&
- ((next_page & 0xe0) == 0x2))));
+ ((next_page & 0xe0) == 0x20))));
/* In case KR2 is already disabled, check if we need to re-enable it */
if (!(vars->link_attr_sync & LINK_ATTR_SYNC_KR2_ENABLE)) {
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 814d0ec..8b3107b 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -11447,9 +11447,9 @@ static int bnx2x_get_hwinfo(struct bnx2x *bp)
}
}
- /* adjust igu_sb_cnt to MF for E1x */
- if (CHIP_IS_E1x(bp) && IS_MF(bp))
- bp->igu_sb_cnt /= E1HVN_MAX;
+ /* adjust igu_sb_cnt to MF for E1H */
+ if (CHIP_IS_E1H(bp) && IS_MF(bp))
+ bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, E1H_MAX_MF_SB_COUNT);
/* port info */
bnx2x_get_port_hwinfo(bp);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 3efbb35..14ffb6e 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7179,6 +7179,7 @@ Theotherbitsarereservedandshouldbezero*/
#define MDIO_WC_REG_RX1_PCI_CTRL 0x80ca
#define MDIO_WC_REG_RX2_PCI_CTRL 0x80da
#define MDIO_WC_REG_RX3_PCI_CTRL 0x80ea
+#define MDIO_WC_REG_RXB_ANA_RX_CONTROL_PCI 0x80fa
#define MDIO_WC_REG_XGXSBLK2_UNICORE_MODE_10G 0x8104
#define MDIO_WC_REG_XGXS_STATUS3 0x8129
#define MDIO_WC_REG_PAR_DET_10G_STATUS 0x8130
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
index 32c92ab..18438a5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
@@ -2038,6 +2038,7 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
struct bnx2x_vlan_mac_ramrod_params p;
struct bnx2x_exe_queue_obj *exeq = &o->exe_queue;
struct bnx2x_exeq_elem *exeq_pos, *exeq_pos_n;
+ unsigned long flags;
int read_lock;
int rc = 0;
@@ -2046,8 +2047,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
spin_lock_bh(&exeq->lock);
list_for_each_entry_safe(exeq_pos, exeq_pos_n, &exeq->exe_queue, link) {
- if (exeq_pos->cmd_data.vlan_mac.vlan_mac_flags ==
- *vlan_mac_flags) {
+ flags = exeq_pos->cmd_data.vlan_mac.vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
rc = exeq->remove(bp, exeq->owner, exeq_pos);
if (rc) {
BNX2X_ERR("Failed to remove command\n");
@@ -2080,7 +2082,9 @@ static int bnx2x_vlan_mac_del_all(struct bnx2x *bp,
return read_lock;
list_for_each_entry(pos, &o->head, link) {
- if (pos->vlan_mac_flags == *vlan_mac_flags) {
+ flags = pos->vlan_mac_flags;
+ if (BNX2X_VLAN_MAC_CMP_FLAGS(flags) ==
+ BNX2X_VLAN_MAC_CMP_FLAGS(*vlan_mac_flags)) {
p.user_req.vlan_mac_flags = pos->vlan_mac_flags;
memcpy(&p.user_req.u, &pos->u, sizeof(pos->u));
rc = bnx2x_config_vlan_mac(bp, &p);
@@ -4382,8 +4386,11 @@ int bnx2x_config_rss(struct bnx2x *bp,
struct bnx2x_raw_obj *r = &o->raw;
/* Do nothing if only driver cleanup was requested */
- if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags))
+ if (test_bit(RAMROD_DRV_CLR_ONLY, &p->ramrod_flags)) {
+ DP(BNX2X_MSG_SP, "Not configuring RSS ramrod_flags=%lx\n",
+ p->ramrod_flags);
return 0;
+ }
r->set_pending(r);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
index 658f4e3..6a53c15 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
@@ -266,6 +266,13 @@ enum {
BNX2X_DONT_CONSUME_CAM_CREDIT,
BNX2X_DONT_CONSUME_CAM_CREDIT_DEST,
};
+/* When looking for matching filters, some flags are not interesting */
+#define BNX2X_VLAN_MAC_CMP_MASK (1 << BNX2X_UC_LIST_MAC | \
+ 1 << BNX2X_ETH_MAC | \
+ 1 << BNX2X_ISCSI_ETH_MAC | \
+ 1 << BNX2X_NETQ_ETH_MAC)
+#define BNX2X_VLAN_MAC_CMP_FLAGS(flags) \
+ ((flags) & BNX2X_VLAN_MAC_CMP_MASK)
struct bnx2x_vlan_mac_ramrod_params {
/* Object to run the command from */
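
BNX2X_VLAN_MAC_CMP_FLAGS() above makes the delete-all path compare only the classification bits of vlan_mac_flags, so entries added with extra control bits (for example BNX2X_DONT_CONSUME_CAM_CREDIT) are still matched and removed. A small demonstration of the masked comparison; the bit assignments are illustrative placeholders:

#include <stdio.h>

#define FLAG_ETH_MAC			(1u << 0)
#define FLAG_ISCSI_ETH_MAC		(1u << 1)
#define FLAG_DONT_CONSUME_CREDIT	(1u << 4)	/* ignored when matching */

#define CMP_MASK	(FLAG_ETH_MAC | FLAG_ISCSI_ETH_MAC)
#define CMP_FLAGS(f)	((f) & CMP_MASK)

int main(void)
{
	unsigned long stored = FLAG_ETH_MAC | FLAG_DONT_CONSUME_CREDIT;
	unsigned long wanted = FLAG_ETH_MAC;

	printf("exact compare:  %s\n", stored == wanted ? "match" : "no match");
	printf("masked compare: %s\n",
	       CMP_FLAGS(stored) == CMP_FLAGS(wanted) ? "match" : "no match");
	return 0;
}
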
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
index 0216d59..e7845e5 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
@@ -1209,6 +1209,11 @@ static void bnx2x_vfop_rxmode(struct bnx2x *bp, struct bnx2x_virtf *vf)
/* next state */
vfop->state = BNX2X_VFOP_RXMODE_DONE;
+ /* record the accept flags in vfdb so hypervisor can modify them
+ * if necessary
+ */
+ bnx2x_vfq(vf, ramrod->cl_id - vf->igu_base_id, accept_flags) =
+ ramrod->rx_accept_flags;
vfop->rc = bnx2x_config_rx_mode(bp, ramrod);
bnx2x_vfop_finalize(vf, vfop->rc, VFOP_DONE);
op_err:
@@ -1224,39 +1229,43 @@ op_pending:
return;
}
+static void bnx2x_vf_prep_rx_mode(struct bnx2x *bp, u8 qid,
+ struct bnx2x_rx_mode_ramrod_params *ramrod,
+ struct bnx2x_virtf *vf,
+ unsigned long accept_flags)
+{
+ struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
+
+ memset(ramrod, 0, sizeof(*ramrod));
+ ramrod->cid = vfq->cid;
+ ramrod->cl_id = vfq_cl_id(vf, vfq);
+ ramrod->rx_mode_obj = &bp->rx_mode_obj;
+ ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
+ ramrod->rx_accept_flags = accept_flags;
+ ramrod->tx_accept_flags = accept_flags;
+ ramrod->pstate = &vf->filter_state;
+ ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
+
+ set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
+ set_bit(RAMROD_RX, &ramrod->ramrod_flags);
+ set_bit(RAMROD_TX, &ramrod->ramrod_flags);
+
+ ramrod->rdata = bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
+ ramrod->rdata_mapping = bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+}
+
int bnx2x_vfop_rxmode_cmd(struct bnx2x *bp,
struct bnx2x_virtf *vf,
struct bnx2x_vfop_cmd *cmd,
int qid, unsigned long accept_flags)
{
- struct bnx2x_vf_queue *vfq = vfq_get(vf, qid);
struct bnx2x_vfop *vfop = bnx2x_vfop_add(bp, vf);
if (vfop) {
struct bnx2x_rx_mode_ramrod_params *ramrod =
&vf->op_params.rx_mode;
- memset(ramrod, 0, sizeof(*ramrod));
-
- /* Prepare ramrod parameters */
- ramrod->cid = vfq->cid;
- ramrod->cl_id = vfq_cl_id(vf, vfq);
- ramrod->rx_mode_obj = &bp->rx_mode_obj;
- ramrod->func_id = FW_VF_HANDLE(vf->abs_vfid);
-
- ramrod->rx_accept_flags = accept_flags;
- ramrod->tx_accept_flags = accept_flags;
- ramrod->pstate = &vf->filter_state;
- ramrod->state = BNX2X_FILTER_RX_MODE_PENDING;
-
- set_bit(BNX2X_FILTER_RX_MODE_PENDING, &vf->filter_state);
- set_bit(RAMROD_RX, &ramrod->ramrod_flags);
- set_bit(RAMROD_TX, &ramrod->ramrod_flags);
-
- ramrod->rdata =
- bnx2x_vf_sp(bp, vf, rx_mode_rdata.e2);
- ramrod->rdata_mapping =
- bnx2x_vf_sp_map(bp, vf, rx_mode_rdata.e2);
+ bnx2x_vf_prep_rx_mode(bp, qid, ramrod, vf, accept_flags);
bnx2x_vfop_opset(BNX2X_VFOP_RXMODE_CONFIG,
bnx2x_vfop_rxmode, cmd->done);
@@ -3114,6 +3123,11 @@ int bnx2x_sriov_configure(struct pci_dev *dev, int num_vfs_param)
{
struct bnx2x *bp = netdev_priv(pci_get_drvdata(dev));
+ if (!IS_SRIOV(bp)) {
+ BNX2X_ERR("failed to configure SR-IOV since vfdb was not allocated. Check dmesg for errors in probe stage\n");
+ return -EINVAL;
+ }
+
DP(BNX2X_MSG_IOV, "bnx2x_sriov_configure called with %d, BNX2X_NR_VIRTFN(bp) was %d\n",
num_vfs_param, BNX2X_NR_VIRTFN(bp));
@@ -3197,13 +3211,16 @@ int bnx2x_enable_sriov(struct bnx2x *bp)
bnx2x_iov_static_resc(bp, vf);
}
- /* prepare msix vectors in VF configuration space */
+ /* prepare msix vectors in VF configuration space - the value in the
+ * PCI configuration space should be the index of the last entry,
+ * namely one less than the actual size of the table
+ */
for (vf_idx = first_vf; vf_idx < first_vf + req_vfs; vf_idx++) {
bnx2x_pretend_func(bp, HW_VF_HANDLE(bp, vf_idx));
REG_WR(bp, PCICFG_OFFSET + GRC_CONFIG_REG_VF_MSIX_CONTROL,
- num_vf_queues);
+ num_vf_queues - 1);
DP(BNX2X_MSG_IOV, "set msix vec num in VF %d cfg space to %d\n",
- vf_idx, num_vf_queues);
+ vf_idx, num_vf_queues - 1);
}
bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
@@ -3431,10 +3448,18 @@ out:
int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
{
+ struct bnx2x_queue_state_params q_params = {NULL};
+ struct bnx2x_vlan_mac_ramrod_params ramrod_param;
+ struct bnx2x_queue_update_params *update_params;
+ struct pf_vf_bulletin_content *bulletin = NULL;
+ struct bnx2x_rx_mode_ramrod_params rx_ramrod;
struct bnx2x *bp = netdev_priv(dev);
- int rc, q_logical_state;
+ struct bnx2x_vlan_mac_obj *vlan_obj;
+ unsigned long vlan_mac_flags = 0;
+ unsigned long ramrod_flags = 0;
struct bnx2x_virtf *vf = NULL;
- struct pf_vf_bulletin_content *bulletin = NULL;
+ unsigned long accept_flags;
+ int rc;
/* sanity and init */
rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
@@ -3452,104 +3477,118 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
/* update PF's copy of the VF's bulletin. No point in posting the vlan
* to the VF since it doesn't have anything to do with it. But it is useful
* to store it here in case the VF is not up yet and we can only
- * configure the vlan later when it does.
+ * configure the vlan later when it does. Treat vlan id 0 as removing
+ * the Host tag.
*/
- bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ if (vlan > 0)
+ bulletin->valid_bitmap |= 1 << VLAN_VALID;
+ else
+ bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
bulletin->vlan = vlan;
/* is vf initialized and queue set up? */
- q_logical_state =
- bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj));
- if (vf->state == VF_ENABLED &&
- q_logical_state == BNX2X_Q_LOGICAL_STATE_ACTIVE) {
- /* configure the vlan in device on this vf's queue */
- unsigned long ramrod_flags = 0;
- unsigned long vlan_mac_flags = 0;
- struct bnx2x_vlan_mac_obj *vlan_obj =
- &bnx2x_leading_vfq(vf, vlan_obj);
- struct bnx2x_vlan_mac_ramrod_params ramrod_param;
- struct bnx2x_queue_state_params q_params = {NULL};
- struct bnx2x_queue_update_params *update_params;
+ if (vf->state != VF_ENABLED ||
+ bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
+ BNX2X_Q_LOGICAL_STATE_ACTIVE)
+ return rc;
- rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
- if (rc)
- return rc;
- memset(&ramrod_param, 0, sizeof(ramrod_param));
+ /* configure the vlan in device on this vf's queue */
+ vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
+ rc = validate_vlan_mac(bp, &bnx2x_leading_vfq(vf, mac_obj));
+ if (rc)
+ return rc;
- /* must lock vfpf channel to protect against vf flows */
- bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+ /* must lock vfpf channel to protect against vf flows */
+ bnx2x_lock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- /* remove existing vlans */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
- &ramrod_flags);
- if (rc) {
- BNX2X_ERR("failed to delete vlans\n");
- rc = -EINVAL;
- goto out;
- }
+ /* remove existing vlans */
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ rc = vlan_obj->delete_all(bp, vlan_obj, &vlan_mac_flags,
+ &ramrod_flags);
+ if (rc) {
+ BNX2X_ERR("failed to delete vlans\n");
+ rc = -EINVAL;
+ goto out;
+ }
+
+ /* need to remove/add the VF's accept_any_vlan bit */
+ accept_flags = bnx2x_leading_vfq(vf, accept_flags);
+ if (vlan)
+ clear_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+ else
+ set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept_flags);
+
+ bnx2x_vf_prep_rx_mode(bp, LEADING_IDX, &rx_ramrod, vf,
+ accept_flags);
+ bnx2x_leading_vfq(vf, accept_flags) = accept_flags;
+ bnx2x_config_rx_mode(bp, &rx_ramrod);
+
+ /* configure the new vlan to device */
+ memset(&ramrod_param, 0, sizeof(ramrod_param));
+ __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
+ ramrod_param.vlan_mac_obj = vlan_obj;
+ ramrod_param.ramrod_flags = ramrod_flags;
+ set_bit(BNX2X_DONT_CONSUME_CAM_CREDIT,
+ &ramrod_param.user_req.vlan_mac_flags);
+ ramrod_param.user_req.u.vlan.vlan = vlan;
+ ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
+ rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
+ if (rc) {
+ BNX2X_ERR("failed to configure vlan\n");
+ rc = -EINVAL;
+ goto out;
+ }
- /* send queue update ramrod to configure default vlan and silent
- * vlan removal
+ /* send queue update ramrod to configure default vlan and silent
+ * vlan removal
+ */
+ __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
+ q_params.cmd = BNX2X_Q_CMD_UPDATE;
+ q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
+ update_params = &q_params.params.update;
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ &update_params->update_flags);
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ &update_params->update_flags);
+ if (vlan == 0) {
+ /* if vlan is 0 then we want to leave the VF traffic
+ * untagged, and leave the incoming traffic untouched
+ * (i.e. do not remove any vlan tags).
+ */
+ __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
+ &update_params->update_flags);
+ __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
+ &update_params->update_flags);
+ } else {
+ /* configure default vlan to vf queue and set silent
+ * vlan removal (the vf remains unaware of this vlan).
*/
- __set_bit(RAMROD_COMP_WAIT, &q_params.ramrod_flags);
- q_params.cmd = BNX2X_Q_CMD_UPDATE;
- q_params.q_obj = &bnx2x_leading_vfq(vf, sp_obj);
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
&update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM_CHNG,
+ __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
&update_params->update_flags);
+ update_params->def_vlan = vlan;
+ update_params->silent_removal_value =
+ vlan & VLAN_VID_MASK;
+ update_params->silent_removal_mask = VLAN_VID_MASK;
+ }
- if (vlan == 0) {
- /* if vlan is 0 then we want to leave the VF traffic
- * untagged, and leave the incoming traffic untouched
- * (i.e. do not remove any vlan tags).
- */
- __clear_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __clear_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- } else {
- /* configure the new vlan to device */
- __set_bit(RAMROD_COMP_WAIT, &ramrod_flags);
- ramrod_param.vlan_mac_obj = vlan_obj;
- ramrod_param.ramrod_flags = ramrod_flags;
- ramrod_param.user_req.u.vlan.vlan = vlan;
- ramrod_param.user_req.cmd = BNX2X_VLAN_MAC_ADD;
- rc = bnx2x_config_vlan_mac(bp, &ramrod_param);
- if (rc) {
- BNX2X_ERR("failed to configure vlan\n");
- rc = -EINVAL;
- goto out;
- }
-
- /* configure default vlan to vf queue and set silent
- * vlan removal (the vf remains unaware of this vlan).
- */
- update_params = &q_params.params.update;
- __set_bit(BNX2X_Q_UPDATE_DEF_VLAN_EN,
- &update_params->update_flags);
- __set_bit(BNX2X_Q_UPDATE_SILENT_VLAN_REM,
- &update_params->update_flags);
- update_params->def_vlan = vlan;
- }
+ /* Update the Queue state */
+ rc = bnx2x_queue_state_change(bp, &q_params);
+ if (rc) {
+ BNX2X_ERR("Failed to configure default VLAN\n");
+ goto out;
+ }
- /* Update the Queue state */
- rc = bnx2x_queue_state_change(bp, &q_params);
- if (rc) {
- BNX2X_ERR("Failed to configure default VLAN\n");
- goto out;
- }
- /* clear the flag indicating that this VF needs its vlan
- * (will only be set if the HV configured the Vlan before vf was
- * up and we were called because the VF came up later
- */
+ /* clear the flag indicating that this VF needs its vlan
+ * (will only be set if the HV configured the Vlan before vf was
+ * up and we were called because the VF came up later)
+ */
out:
- vf->cfg_flags &= ~VF_CFG_VLAN;
- bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
- }
+ vf->cfg_flags &= ~VF_CFG_VLAN;
+ bnx2x_unlock_vf_pf_channel(bp, vf, CHANNEL_TLV_PF_SET_VLAN);
+
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
index 1ff6a936..8c213fa52 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
@@ -74,6 +74,7 @@ struct bnx2x_vf_queue {
/* VLANs object */
struct bnx2x_vlan_mac_obj vlan_obj;
atomic_t vlan_count; /* 0 means vlan-0 is set ~ untagged */
+ unsigned long accept_flags; /* last accept flags configured */
/* Queue Slow-path State object */
struct bnx2x_queue_sp_obj sp_obj;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index efa8a15..0756d7d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -208,7 +208,7 @@ static int bnx2x_get_vf_id(struct bnx2x *bp, u32 *vf_id)
return -EINVAL;
}
- BNX2X_ERR("valid ME register value: 0x%08x\n", me_reg);
+ DP(BNX2X_MSG_IOV, "valid ME register value: 0x%08x\n", me_reg);
*vf_id = (me_reg & ME_REG_VF_NUM_MASK) >> ME_REG_VF_NUM_SHIFT;
@@ -1598,6 +1598,8 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
if (msg->flags & VFPF_SET_Q_FILTERS_RX_MASK_CHANGED) {
unsigned long accept = 0;
+ struct pf_vf_bulletin_content *bulletin =
+ BP_VF_BULLETIN(bp, vf->index);
/* convert VF-PF if mask to bnx2x accept flags */
if (msg->rx_mask & VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST)
@@ -1617,9 +1619,11 @@ static void bnx2x_vfop_mbx_qfilters(struct bnx2x *bp, struct bnx2x_virtf *vf)
__set_bit(BNX2X_ACCEPT_BROADCAST, &accept);
/* A packet arriving the vf's mac should be accepted
- * with any vlan
+ * with any vlan, unless a vlan has already been
+ * configured.
*/
- __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
+ if (!(bulletin->valid_bitmap & (1 << VLAN_VALID)))
+ __set_bit(BNX2X_ACCEPT_ANY_VLAN, &accept);
/* set rx-mode */
rc = bnx2x_vfop_rxmode_cmd(bp, vf, &cmd,
@@ -1710,6 +1714,21 @@ static void bnx2x_vf_mbx_set_q_filters(struct bnx2x *bp,
goto response;
}
}
+ /* if vlan was set by hypervisor we don't allow guest to config vlan */
+ if (bulletin->valid_bitmap & 1 << VLAN_VALID) {
+ int i;
+
+ /* search for vlan filters */
+ for (i = 0; i < filters->n_mac_vlan_filters; i++) {
+ if (filters->filters[i].flags &
+ VFPF_Q_FILTER_VLAN_TAG_VALID) {
+ BNX2X_ERR("VF[%d] attempted to configure vlan but one was already set by Hypervisor. Aborting request\n",
+ vf->abs_vfid);
+ vf->op_rc = -EPERM;
+ goto response;
+ }
+ }
+ }
/* verify vf_qid */
if (filters->vf_qid > vf_rxq_count(vf))
@@ -1805,6 +1824,9 @@ static void bnx2x_vf_mbx_update_rss(struct bnx2x *bp, struct bnx2x_virtf *vf,
vf_op_params->rss_result_mask = rss_tlv->rss_result_mask;
/* flags handled individually for backward/forward compatibility */
+ vf_op_params->rss_flags = 0;
+ vf_op_params->ramrod_flags = 0;
+
if (rss_tlv->rss_flags & VFPF_RSS_MODE_DISABLED)
__set_bit(BNX2X_RSS_MODE_DISABLED, &vf_op_params->rss_flags);
if (rss_tlv->rss_flags & VFPF_RSS_MODE_REGULAR)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 369b736..15a66e4 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7622,7 +7622,7 @@ static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
{
u32 base = (u32) mapping & 0xffffffff;
- return (base > 0xffffdcc0) && (base + len + 8 < base);
+ return base + len + 8 < base;
}
/* Test for TSO DMA buffers that cross into regions which are within MSS bytes
@@ -8932,6 +8932,9 @@ static int tg3_chip_reset(struct tg3 *tp)
void (*write_op)(struct tg3 *, u32, u32);
int i, err;
+ if (!pci_device_is_present(tp->pdev))
+ return -ENODEV;
+
tg3_nvram_lock(tp);
tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
@@ -11581,10 +11584,11 @@ static int tg3_close(struct net_device *dev)
memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
- tg3_power_down_prepare(tp);
-
- tg3_carrier_off(tp);
+ if (pci_device_is_present(tp->pdev)) {
+ tg3_power_down_prepare(tp);
+ tg3_carrier_off(tp);
+ }
return 0;
}
@@ -16499,6 +16503,9 @@ static int tg3_get_invariants(struct tg3 *tp, const struct pci_device_id *ent)
/* Clear this out for sanity. */
tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
+ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */
+ tw32(TG3PCI_REG_BASE_ADDR, 0);
+
pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
&pci_state_reg);
if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
@@ -17726,10 +17733,12 @@ static int tg3_suspend(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- int err;
+ int err = 0;
+
+ rtnl_lock();
if (!netif_running(dev))
- return 0;
+ goto unlock;
tg3_reset_task_cancel(tp);
tg3_phy_stop(tp);
@@ -17771,6 +17780,8 @@ out:
tg3_phy_start(tp);
}
+unlock:
+ rtnl_unlock();
return err;
}
@@ -17779,10 +17790,12 @@ static int tg3_resume(struct device *device)
struct pci_dev *pdev = to_pci_dev(device);
struct net_device *dev = pci_get_drvdata(pdev);
struct tg3 *tp = netdev_priv(dev);
- int err;
+ int err = 0;
+
+ rtnl_lock();
if (!netif_running(dev))
- return 0;
+ goto unlock;
netif_device_attach(dev);
@@ -17806,6 +17819,8 @@ out:
if (!err)
tg3_phy_start(tp);
+unlock:
+ rtnl_unlock();
return err;
}
#endif /* CONFIG_PM_SLEEP */
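
The tg3_4g_overflow_test() hunk earlier in this file drops the magic 0xffffdcc0 threshold and keeps only the wrap condition: a mapping is a problem exactly when base + len + 8 wraps the 32-bit address space. Restated standalone:

#include <stdio.h>
#include <stdint.h>

static int tg3_4g_overflow(uint64_t mapping, uint32_t len)
{
	uint32_t base = (uint32_t)mapping;	/* low 32 bits, as in the driver */

	return (uint32_t)(base + len + 8) < base;	/* wrapped past 4 GiB */
}

int main(void)
{
	printf("%d\n", tg3_4g_overflow(0xfffffff0ull, 64));	/* 1: wraps   */
	printf("%d\n", tg3_4g_overflow(0x10000000ull, 64));	/* 0: no wrap */
	return 0;
}
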
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ecd2fb3..56e0415 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -49,13 +49,15 @@
#include <asm/io.h>
#include "cxgb4_uld.h"
-#define FW_VERSION_MAJOR 1
-#define FW_VERSION_MINOR 4
-#define FW_VERSION_MICRO 0
+#define T4FW_VERSION_MAJOR 0x01
+#define T4FW_VERSION_MINOR 0x06
+#define T4FW_VERSION_MICRO 0x18
+#define T4FW_VERSION_BUILD 0x00
-#define FW_VERSION_MAJOR_T5 0
-#define FW_VERSION_MINOR_T5 0
-#define FW_VERSION_MICRO_T5 0
+#define T5FW_VERSION_MAJOR 0x01
+#define T5FW_VERSION_MINOR 0x08
+#define T5FW_VERSION_MICRO 0x1C
+#define T5FW_VERSION_BUILD 0x00
#define CH_WARN(adap, fmt, ...) dev_warn(adap->pdev_dev, fmt, ## __VA_ARGS__)
@@ -226,6 +228,25 @@ struct tp_params {
uint32_t dack_re; /* DACK timer resolution */
unsigned short tx_modq[NCHAN]; /* channel to modulation queue map */
+
+ u32 vlan_pri_map; /* cached TP_VLAN_PRI_MAP */
+ u32 ingress_config; /* cached TP_INGRESS_CONFIG */
+
+ /* TP_VLAN_PRI_MAP Compressed Filter Tuple field offsets. This is a
+ * subset of the set of fields which may be present in the Compressed
+ * Filter Tuple portion of filters and TCP TCB connections. The
+ * fields which are present are controlled by the TP_VLAN_PRI_MAP.
+ * Since a variable number of fields may or may not be present, their
+ * shifted field positions within the Compressed Filter Tuple may
+ * vary, or not even be present if the field isn't selected in
+ * TP_VLAN_PRI_MAP. Since some of these fields are needed in various
+ * places we store their offsets here, or a -1 if the field isn't
+ * present.
+ */
+ int vlan_shift;
+ int vnic_shift;
+ int port_shift;
+ int protocol_shift;
};
struct vpd_params {
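
The tp_params comment above explains that a field's position inside the Compressed Filter Tuple depends on which fields TP_VLAN_PRI_MAP selects, which is why the driver caches per-field shifts (or -1 when a field is absent). A sketch of that computation; the field order and widths below are illustrative assumptions, not the T4 tuple layout:

#include <stdio.h>

enum { FLD_PROTOCOL, FLD_PORT, FLD_VNIC, FLD_VLAN, FLD_MAX };

static const int field_width[FLD_MAX] = { 8, 3, 17, 17 };

static int filter_field_shift(unsigned int vlan_pri_map, int field)
{
	int shift = 0, i;

	if (!(vlan_pri_map & (1u << field)))
		return -1;		/* field not compressed into the tuple */
	for (i = 0; i < field; i++)
		if (vlan_pri_map & (1u << i))
			shift += field_width[i];
	return shift;
}

int main(void)
{
	unsigned int map = (1u << FLD_PROTOCOL) | (1u << FLD_VLAN);

	printf("protocol shift: %d\n", filter_field_shift(map, FLD_PROTOCOL));
	printf("vlan shift:     %d\n", filter_field_shift(map, FLD_VLAN));
	printf("port shift:     %d\n", filter_field_shift(map, FLD_PORT));
	return 0;
}
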
@@ -240,6 +261,26 @@ struct pci_params {
unsigned char width;
};
+#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
+#define CHELSIO_CHIP_FPGA 0x100
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
+#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+
+#define CHELSIO_T4 0x4
+#define CHELSIO_T5 0x5
+
+enum chip_type {
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_FIRST_REV = T4_A1,
+ T4_LAST_REV = T4_A2,
+
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
+ T5_LAST_REV = T5_A1,
+};
+
struct adapter_params {
struct tp_params tp;
struct vpd_params vpd;
@@ -259,7 +300,7 @@ struct adapter_params {
unsigned char nports; /* # of ethernet ports */
unsigned char portvec;
- unsigned char rev; /* chip revision */
+ enum chip_type chip; /* chip code */
unsigned char offload;
unsigned char bypass;
@@ -267,6 +308,23 @@ struct adapter_params {
unsigned int ofldq_wr_cred;
};
+#include "t4fw_api.h"
+
+#define FW_VERSION(chip) ( \
+ FW_HDR_FW_VER_MAJOR_GET(chip##FW_VERSION_MAJOR) | \
+ FW_HDR_FW_VER_MINOR_GET(chip##FW_VERSION_MINOR) | \
+ FW_HDR_FW_VER_MICRO_GET(chip##FW_VERSION_MICRO) | \
+ FW_HDR_FW_VER_BUILD_GET(chip##FW_VERSION_BUILD))
+#define FW_INTFVER(chip, intf) (FW_HDR_INTFVER_##intf)
+
+struct fw_info {
+ u8 chip;
+ char *fs_name;
+ char *fw_mod_name;
+ struct fw_hdr fw_hdr;
+};
+
+
struct trace_params {
u32 data[TRACE_LEN / 4];
u32 mask[TRACE_LEN / 4];
@@ -512,25 +570,6 @@ struct sge {
struct l2t_data;
-#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
-#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
-#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
-
-#define CHELSIO_T4 0x4
-#define CHELSIO_T5 0x5
-
-enum chip_type {
- T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
- T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
- T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
- T4_FIRST_REV = T4_A1,
- T4_LAST_REV = T4_A3,
-
- T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
- T5_FIRST_REV = T5_A1,
- T5_LAST_REV = T5_A1,
-};
-
#ifdef CONFIG_PCI_IOV
/* T4 supports SRIOV on PF0-3 and T5 on PF0-7. However, the Serial
@@ -715,12 +754,12 @@ enum {
static inline int is_t5(enum chip_type chip)
{
- return (chip >= T5_FIRST_REV && chip <= T5_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T5;
}
static inline int is_t4(enum chip_type chip)
{
- return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
@@ -900,8 +939,14 @@ int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
unsigned int t4_flash_cfg_addr(struct adapter *adapter);
int t4_load_cfg(struct adapter *adapter, const u8 *cfg_data, unsigned int size);
-int t4_check_fw_version(struct adapter *adapter);
+int t4_get_fw_version(struct adapter *adapter, u32 *vers);
+int t4_get_tp_version(struct adapter *adapter, u32 *vers);
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_init_tp_params(struct adapter *adap);
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel);
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
void t4_fatal_err(struct adapter *adapter);
int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
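
Two of the cxgb4.h helpers above, restated standalone: CHELSIO_CHIP_CODE() packs (version, revision) into one byte so is_t4()/is_t5() can test the version nibble alone, and FW_VERSION(chip) relies on token pasting so FW_VERSION(T4) expands against the T4FW_VERSION_* constants. The VER_PACK() shift macro is a simplified assumption standing in for the FW_HDR_FW_VER_* helpers from t4fw_api.h:

#include <stdio.h>

#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)

#define T4FW_VERSION_MAJOR 0x01
#define T4FW_VERSION_MINOR 0x06
#define T4FW_VERSION_MICRO 0x18
#define T4FW_VERSION_BUILD 0x00

#define VER_PACK(maj, min, mic, bld) \
	(((maj) << 24) | ((min) << 16) | ((mic) << 8) | (bld))
#define FW_VERSION(chip) VER_PACK(chip##FW_VERSION_MAJOR,	\
				  chip##FW_VERSION_MINOR,	\
				  chip##FW_VERSION_MICRO,	\
				  chip##FW_VERSION_BUILD)

int main(void)
{
	int t4_a2 = CHELSIO_CHIP_CODE(0x4, 2);

	printf("chip code 0x%02x -> version %d rev %d\n", t4_a2,
	       CHELSIO_CHIP_VERSION(t4_a2), CHELSIO_CHIP_RELEASE(t4_a2));
	printf("FW_VERSION(T4) = 0x%08x\n", FW_VERSION(T4));
	return 0;
}
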
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 8b929ee..fff02ed 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -276,9 +276,9 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
{ 0, }
};
-#define FW_FNAME "cxgb4/t4fw.bin"
+#define FW4_FNAME "cxgb4/t4fw.bin"
#define FW5_FNAME "cxgb4/t5fw.bin"
-#define FW_CFNAME "cxgb4/t4-config.txt"
+#define FW4_CFNAME "cxgb4/t4-config.txt"
#define FW5_CFNAME "cxgb4/t5-config.txt"
MODULE_DESCRIPTION(DRV_DESC);
@@ -286,7 +286,7 @@ MODULE_AUTHOR("Chelsio Communications");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
-MODULE_FIRMWARE(FW_FNAME);
+MODULE_FIRMWARE(FW4_FNAME);
MODULE_FIRMWARE(FW5_FNAME);
/*
@@ -1071,72 +1071,6 @@ freeout: t4_free_sge_resources(adap);
}
/*
- * Returns 0 if new FW was successfully loaded, a positive errno if a load was
- * started but failed, and a negative errno if flash load couldn't start.
- */
-static int upgrade_fw(struct adapter *adap)
-{
- int ret;
- u32 vers, exp_major;
- const struct fw_hdr *hdr;
- const struct firmware *fw;
- struct device *dev = adap->pdev_dev;
- char *fw_file_name;
-
- switch (CHELSIO_CHIP_VERSION(adap->chip)) {
- case CHELSIO_T4:
- fw_file_name = FW_FNAME;
- exp_major = FW_VERSION_MAJOR;
- break;
- case CHELSIO_T5:
- fw_file_name = FW5_FNAME;
- exp_major = FW_VERSION_MAJOR_T5;
- break;
- default:
- dev_err(dev, "Unsupported chip type, %x\n", adap->chip);
- return -EINVAL;
- }
-
- ret = request_firmware(&fw, fw_file_name, dev);
- if (ret < 0) {
- dev_err(dev, "unable to load firmware image %s, error %d\n",
- fw_file_name, ret);
- return ret;
- }
-
- hdr = (const struct fw_hdr *)fw->data;
- vers = ntohl(hdr->fw_ver);
- if (FW_HDR_FW_VER_MAJOR_GET(vers) != exp_major) {
- ret = -EINVAL; /* wrong major version, won't do */
- goto out;
- }
-
- /*
- * If the flash FW is unusable or we found something newer, load it.
- */
- if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != exp_major ||
- vers > adap->params.fw_vers) {
- dev_info(dev, "upgrading firmware ...\n");
- ret = t4_fw_upgrade(adap, adap->mbox, fw->data, fw->size,
- /*force=*/false);
- if (!ret)
- dev_info(dev,
- "firmware upgraded to version %pI4 from %s\n",
- &hdr->fw_ver, fw_file_name);
- else
- dev_err(dev, "firmware upgrade failed! err=%d\n", -ret);
- } else {
- /*
- * Tell our caller that we didn't upgrade the firmware.
- */
- ret = -EINVAL;
- }
-
-out: release_firmware(fw);
- return ret;
-}
-
-/*
* Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
* The allocated memory is cleared.
*/
@@ -1415,7 +1349,7 @@ static int get_sset_count(struct net_device *dev, int sset)
static int get_regs_len(struct net_device *dev)
{
struct adapter *adap = netdev2adap(dev);
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
return T4_REGMAP_SIZE;
else
return T5_REGMAP_SIZE;
@@ -1499,7 +1433,7 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
data += sizeof(struct port_stats) / sizeof(u64);
collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
data += sizeof(struct queue_port_stats) / sizeof(u64);
- if (!is_t4(adapter->chip)) {
+ if (!is_t4(adapter->params.chip)) {
t4_write_reg(adapter, SGE_STAT_CFG, STATSOURCE_T5(7));
val1 = t4_read_reg(adapter, SGE_STAT_TOTAL);
val2 = t4_read_reg(adapter, SGE_STAT_MATCH);
@@ -1521,8 +1455,8 @@ static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
*/
static inline unsigned int mk_adap_vers(const struct adapter *ap)
{
- return CHELSIO_CHIP_VERSION(ap->chip) |
- (CHELSIO_CHIP_RELEASE(ap->chip) << 10) | (1 << 16);
+ return CHELSIO_CHIP_VERSION(ap->params.chip) |
+ (CHELSIO_CHIP_RELEASE(ap->params.chip) << 10) | (1 << 16);
}
static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
@@ -2189,7 +2123,7 @@ static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
static const unsigned int *reg_ranges;
int arr_size = 0, buf_size = 0;
- if (is_t4(ap->chip)) {
+ if (is_t4(ap->params.chip)) {
reg_ranges = &t4_reg_ranges[0];
arr_size = ARRAY_SIZE(t4_reg_ranges);
buf_size = T4_REGMAP_SIZE;
@@ -2967,7 +2901,7 @@ static int setup_debugfs(struct adapter *adap)
size = t4_read_reg(adap, MA_EDRAM1_BAR);
add_debugfs_mem(adap, "edc1", MEM_EDC1, EDRAM_SIZE_GET(size));
}
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
size = t4_read_reg(adap, MA_EXT_MEMORY_BAR);
if (i & EXT_MEM_ENABLE)
add_debugfs_mem(adap, "mc", MEM_MC,
@@ -3052,7 +2986,14 @@ int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
if (stid >= 0) {
t->stid_tab[stid].data = data;
stid += t->stid_base;
- t->stids_in_use++;
+ /* IPv6 requires max of 520 bits or 16 cells in TCAM
+ * This is equivalent to 4 TIDs. With CLIP enabled it
+ * needs 2 TIDs.
+ */
+ if (family == PF_INET)
+ t->stids_in_use++;
+ else
+ t->stids_in_use += 4;
}
spin_unlock_bh(&t->stid_lock);
return stid;
@@ -3078,7 +3019,8 @@ int cxgb4_alloc_sftid(struct tid_info *t, int family, void *data)
}
if (stid >= 0) {
t->stid_tab[stid].data = data;
- stid += t->stid_base;
+ stid -= t->nstids;
+ stid += t->sftid_base;
t->stids_in_use++;
}
spin_unlock_bh(&t->stid_lock);
@@ -3090,14 +3032,24 @@ EXPORT_SYMBOL(cxgb4_alloc_sftid);
*/
void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
spin_lock_bh(&t->stid_lock);
if (family == PF_INET)
__clear_bit(stid, t->stid_bmap);
else
bitmap_release_region(t->stid_bmap, stid, 2);
t->stid_tab[stid].data = NULL;
- t->stids_in_use--;
+ if (family == PF_INET)
+ t->stids_in_use--;
+ else
+ t->stids_in_use -= 4;
spin_unlock_bh(&t->stid_lock);
}
EXPORT_SYMBOL(cxgb4_free_stid);
@@ -3200,6 +3152,7 @@ static int tid_init(struct tid_info *t)
size_t size;
unsigned int stid_bmap_size;
unsigned int natids = t->natids;
+ struct adapter *adap = container_of(t, struct adapter, tids);
stid_bmap_size = BITS_TO_LONGS(t->nstids + t->nsftids);
size = t->ntids * sizeof(*t->tid_tab) +
@@ -3233,6 +3186,11 @@ static int tid_init(struct tid_info *t)
t->afree = t->atid_tab;
}
bitmap_zero(t->stid_bmap, t->nstids + t->nsftids);
+ /* Reserve stid 0 for T4/T5 adapters */
+ if (!t->stid_base &&
+ (is_t4(adap->params.chip) || is_t5(adap->params.chip)))
+ __set_bit(0, t->stid_bmap);
+
return 0;
}
@@ -3419,7 +3377,7 @@ unsigned int cxgb4_dbfifo_count(const struct net_device *dev, int lpfifo)
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
@@ -3588,7 +3546,7 @@ static void drain_db_fifo(struct adapter *adap, int usecs)
do {
v1 = t4_read_reg(adap, A_SGE_DBFIFO_STATUS);
v2 = t4_read_reg(adap, SGE_DBFIFO_STATUS2);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
lp_count = G_LP_COUNT(v1);
hp_count = G_HP_COUNT(v1);
} else {
@@ -3708,7 +3666,7 @@ static void process_db_drop(struct work_struct *work)
adap = container_of(work, struct adapter, db_drop_task);
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
disable_dbs(adap);
notify_rdma_uld(adap, CXGB4_CONTROL_DB_DROP);
drain_db_fifo(adap, 1);
@@ -3753,7 +3711,7 @@ static void process_db_drop(struct work_struct *work)
void t4_db_full(struct adapter *adap)
{
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, SGE_INT_ENABLE3,
DBFIFO_HP_INT | DBFIFO_LP_INT, 0);
queue_work(workq, &adap->db_full_task);
@@ -3762,7 +3720,7 @@ void t4_db_full(struct adapter *adap)
void t4_db_dropped(struct adapter *adap)
{
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
queue_work(workq, &adap->db_drop_task);
}
@@ -3789,7 +3747,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.nchan = adap->params.nports;
lli.nports = adap->params.nports;
lli.wr_cred = adap->params.ofldq_wr_cred;
- lli.adapter_type = adap->params.rev;
+ lli.adapter_type = adap->params.chip;
lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
@@ -3797,7 +3755,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
(adap->fn * 4));
- lli.filt_mode = adap->filter_mode;
+ lli.filt_mode = adap->params.tp.vlan_pri_map;
/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
for (i = 0; i < NCHAN; i++)
lli.tx_modq[i] = i;
@@ -4245,7 +4203,7 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
/* Check to make sure the filter requested is writable ...
@@ -4271,12 +4229,17 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
f->fs.val.lip[i] = val[i];
f->fs.mask.lip[i] = ~0;
}
- if (adap->filter_mode & F_PORT) {
+ if (adap->params.tp.vlan_pri_map & F_PORT) {
f->fs.val.iport = port;
f->fs.mask.iport = mask;
}
}
+ if (adap->params.tp.vlan_pri_map & F_PROTOCOL) {
+ f->fs.val.proto = IPPROTO_TCP;
+ f->fs.mask.proto = ~0;
+ }
+
f->fs.dirsteer = 1;
f->fs.iq = queue;
/* Mark filter as locked */
@@ -4303,7 +4266,7 @@ int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
adap = netdev2adap(dev);
/* Adjust stid to correct filter index */
- stid -= adap->tids.nstids;
+ stid -= adap->tids.sftid_base;
stid += adap->tids.nftids;
f = &adap->tids.ftid_tab[stid];
@@ -4483,7 +4446,7 @@ static void setup_memwin(struct adapter *adap)
u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
bar0 = pci_resource_start(adap->pdev, 0); /* truncation intentional */
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mem_win0_base = bar0 + MEMWIN0_BASE;
mem_win1_base = bar0 + MEMWIN1_BASE;
mem_win2_base = bar0 + MEMWIN2_BASE;
@@ -4668,8 +4631,10 @@ static int adap_init0_config(struct adapter *adapter, int reset)
const struct firmware *cf;
unsigned long mtype = 0, maddr = 0;
u32 finiver, finicsum, cfcsum;
- int ret, using_flash;
+ int ret;
+ int config_issued = 0;
char *fw_config_file, fw_config_file_path[256];
+ char *config_name = NULL;
/*
* Reset device if necessary.
@@ -4686,9 +4651,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* then use that. Otherwise, use the configuration file stored
* in the adapter flash ...
*/
- switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
+ switch (CHELSIO_CHIP_VERSION(adapter->params.chip)) {
case CHELSIO_T4:
- fw_config_file = FW_CFNAME;
+ fw_config_file = FW4_CFNAME;
break;
case CHELSIO_T5:
fw_config_file = FW5_CFNAME;
@@ -4702,13 +4667,16 @@ static int adap_init0_config(struct adapter *adapter, int reset)
ret = request_firmware(&cf, fw_config_file, adapter->pdev_dev);
if (ret < 0) {
- using_flash = 1;
+ config_name = "On FLASH";
mtype = FW_MEMTYPE_CF_FLASH;
maddr = t4_flash_cfg_addr(adapter);
} else {
u32 params[7], val[7];
- using_flash = 0;
+ sprintf(fw_config_file_path,
+ "/lib/firmware/%s", fw_config_file);
+ config_name = fw_config_file_path;
+
if (cf->size >= FLASH_CFG_MAX_SIZE)
ret = -ENOMEM;
else {
@@ -4776,6 +4744,26 @@ static int adap_init0_config(struct adapter *adapter, int reset)
FW_LEN16(caps_cmd));
ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd, sizeof(caps_cmd),
&caps_cmd);
+
+ /* If the CAPS_CONFIG failed with an ENOENT (for a Firmware
+ * Configuration File in FLASH), our last gasp effort is to use the
+ * Firmware Configuration File which is embedded in the firmware. A
+ * very few early versions of the firmware didn't have one embedded
+ * but we can ignore those.
+ */
+ if (ret == -ENOENT) {
+ memset(&caps_cmd, 0, sizeof(caps_cmd));
+ caps_cmd.op_to_write =
+ htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+ FW_CMD_REQUEST |
+ FW_CMD_READ);
+ caps_cmd.cfvalid_to_len16 = htonl(FW_LEN16(caps_cmd));
+ ret = t4_wr_mbox(adapter, adapter->mbox, &caps_cmd,
+ sizeof(caps_cmd), &caps_cmd);
+ config_name = "Firmware Default";
+ }
+
+ config_issued = 1;
if (ret < 0)
goto bye;
@@ -4816,7 +4804,6 @@ static int adap_init0_config(struct adapter *adapter, int reset)
if (ret < 0)
goto bye;
- sprintf(fw_config_file_path, "/lib/firmware/%s", fw_config_file);
/*
* Return successfully and note that we're operating with parameters
* not supplied by the driver, rather than from hard-wired
@@ -4824,11 +4811,8 @@ static int adap_init0_config(struct adapter *adapter, int reset)
*/
adapter->flags |= USING_SOFT_PARAMS;
dev_info(adapter->pdev_dev, "Successfully configured using Firmware "\
- "Configuration File %s, version %#x, computed checksum %#x\n",
- (using_flash
- ? "in device FLASH"
- : fw_config_file_path),
- finiver, cfcsum);
+ "Configuration File \"%s\", version %#x, computed checksum %#x\n",
+ config_name, finiver, cfcsum);
return 0;
/*
@@ -4837,9 +4821,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
* want to issue a warning since this is fairly common.)
*/
bye:
- if (ret != -ENOENT)
- dev_warn(adapter->pdev_dev, "Configuration file error %d\n",
- -ret);
+ if (config_issued && ret != -ENOENT)
+ dev_warn(adapter->pdev_dev, "\"%s\" configuration file error %d\n",
+ config_name, -ret);
return ret;
}
@@ -5086,6 +5070,47 @@ bye:
return ret;
}
+static struct fw_info fw_info_array[] = {
+ {
+ .chip = CHELSIO_T4,
+ .fs_name = FW4_CFNAME,
+ .fw_mod_name = FW4_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T4,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T4)),
+ .intfver_nic = FW_INTFVER(T4, NIC),
+ .intfver_vnic = FW_INTFVER(T4, VNIC),
+ .intfver_ri = FW_INTFVER(T4, RI),
+ .intfver_iscsi = FW_INTFVER(T4, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T4, FCOE),
+ },
+ }, {
+ .chip = CHELSIO_T5,
+ .fs_name = FW5_CFNAME,
+ .fw_mod_name = FW5_FNAME,
+ .fw_hdr = {
+ .chip = FW_HDR_CHIP_T5,
+ .fw_ver = __cpu_to_be32(FW_VERSION(T5)),
+ .intfver_nic = FW_INTFVER(T5, NIC),
+ .intfver_vnic = FW_INTFVER(T5, VNIC),
+ .intfver_ri = FW_INTFVER(T5, RI),
+ .intfver_iscsi = FW_INTFVER(T5, ISCSI),
+ .intfver_fcoe = FW_INTFVER(T5, FCOE),
+ },
+ }
+};
+
+static struct fw_info *find_fw_info(int chip)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(fw_info_array); i++) {
+ if (fw_info_array[i].chip == chip)
+ return &fw_info_array[i];
+ }
+ return NULL;
+}
+
/*
* Phase 0 of initialization: contact FW, obtain config, perform basic init.
*/
@@ -5096,7 +5121,7 @@ static int adap_init0(struct adapter *adap)
enum dev_state state;
u32 params[7], val[7];
struct fw_caps_config_cmd caps_cmd;
- int reset = 1, j;
+ int reset = 1;
/*
* Contact FW, advertising Master capability (and potentially forcing
@@ -5123,44 +5148,54 @@ static int adap_init0(struct adapter *adap)
* later reporting and B. to warn if the currently loaded firmware
* is excessively mismatched relative to the driver.)
*/
- ret = t4_check_fw_version(adap);
-
- /* The error code -EFAULT is returned by t4_check_fw_version() if
- * firmware on adapter < supported firmware. If firmware on adapter
- * is too old (not supported by driver) and we're the MASTER_PF set
- * adapter state to DEV_STATE_UNINIT to force firmware upgrade
- * and reinitialization.
- */
- if ((adap->flags & MASTER_PF) && ret == -EFAULT)
- state = DEV_STATE_UNINIT;
+ t4_get_fw_version(adap, &adap->params.fw_vers);
+ t4_get_tp_version(adap, &adap->params.tp_vers);
if ((adap->flags & MASTER_PF) && state != DEV_STATE_INIT) {
- if (ret == -EINVAL || ret == -EFAULT || ret > 0) {
- if (upgrade_fw(adap) >= 0) {
- /*
- * Note that the chip was reset as part of the
- * firmware upgrade so we don't reset it again
- * below and grab the new firmware version.
- */
- reset = 0;
- ret = t4_check_fw_version(adap);
- } else
- if (ret == -EFAULT) {
- /*
- * Firmware is old but still might
- * work if we force reinitialization
- * of the adapter. Ignoring FW upgrade
- * failure.
- */
- dev_warn(adap->pdev_dev,
- "Ignoring firmware upgrade "
- "failure, and forcing driver "
- "to reinitialize the "
- "adapter.\n");
- ret = 0;
- }
+ struct fw_info *fw_info;
+ struct fw_hdr *card_fw;
+ const struct firmware *fw;
+ const u8 *fw_data = NULL;
+ unsigned int fw_size = 0;
+
+ /* This is the firmware whose headers the driver was compiled
+ * against
+ */
+ fw_info = find_fw_info(CHELSIO_CHIP_VERSION(adap->params.chip));
+ if (fw_info == NULL) {
+ dev_err(adap->pdev_dev,
+ "unable to get firmware info for chip %d.\n",
+ CHELSIO_CHIP_VERSION(adap->params.chip));
+ return -EINVAL;
}
+
+ /* allocate memory to read the header of the firmware on the
+ * card
+ */
+ card_fw = t4_alloc_mem(sizeof(*card_fw));
+
+		/* Get FW from /lib/firmware/ */
+ ret = request_firmware(&fw, fw_info->fw_mod_name,
+ adap->pdev_dev);
+ if (ret < 0) {
+ dev_err(adap->pdev_dev,
+ "unable to load firmware image %s, error %d\n",
+ fw_info->fw_mod_name, ret);
+ } else {
+ fw_data = fw->data;
+ fw_size = fw->size;
+ }
+
+ /* upgrade FW logic */
+ ret = t4_prep_fw(adap, fw_info, fw_data, fw_size, card_fw,
+ state, &reset);
+
+ /* Cleaning up */
+ if (fw != NULL)
+ release_firmware(fw);
+ t4_free_mem(card_fw);
+
if (ret < 0)
- return ret;
+ goto bye;
}
/*
@@ -5245,7 +5280,7 @@ static int adap_init0(struct adapter *adap)
if (ret == -ENOENT) {
dev_info(adap->pdev_dev,
"No Configuration File present "
- "on adapter. Using hard-wired "
+ "on adapter. Using hard-wired "
"configuration parameters.\n");
ret = adap_init0_no_config(adap, reset);
}
@@ -5428,21 +5463,11 @@ static int adap_init0(struct adapter *adap)
/*
* These are finalized by FW initialization, load their values now.
*/
- v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
- adap->params.tp.tre = TIMERRESOLUTION_GET(v);
- adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
adap->params.b_wnd);
- /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
- for (j = 0; j < NCHAN; j++)
- adap->params.tp.tx_modq[j] = j;
-
- t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
- &adap->filter_mode, 1,
- TP_VLAN_PRI_MAP);
-
+ t4_init_tp_params(adap);
adap->flags |= FW_OK;
return 0;
@@ -5787,7 +5812,7 @@ static void print_port_info(const struct net_device *dev)
netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
adap->params.vpd.id,
- CHELSIO_CHIP_RELEASE(adap->params.rev), buf,
+ CHELSIO_CHIP_RELEASE(adap->params.chip), buf,
is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
(adap->flags & USING_MSIX) ? " MSI-X" :
(adap->flags & USING_MSI) ? " MSI" : "");
@@ -5910,7 +5935,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (err)
goto out_unmap_bar0;
- if (!is_t4(adapter->chip)) {
+ if (!is_t4(adapter->params.chip)) {
s_qpp = QUEUESPERPAGEPF1 * adapter->fn;
qpp = 1 << QUEUESPERPAGEPF0_GET(t4_read_reg(adapter,
SGE_EGRESS_QUEUES_PER_PAGE_PF) >> s_qpp);
@@ -6064,7 +6089,7 @@ sriov:
out_free_dev:
free_some_resources(adapter);
out_unmap_bar:
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
out_unmap_bar0:
iounmap(adapter->regs);
@@ -6116,7 +6141,7 @@ static void remove_one(struct pci_dev *pdev)
free_some_resources(adapter);
iounmap(adapter->regs);
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
iounmap(adapter->bar2);
kfree(adapter);
pci_disable_pcie_error_reporting(pdev);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
index 6f21f24..4dd0a82 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
@@ -131,7 +131,14 @@ static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
{
- stid -= t->stid_base;
+ /* Is it a server filter TID? */
+ if (t->nsftids && (stid >= t->sftid_base)) {
+ stid -= t->sftid_base;
+ stid += t->nstids;
+ } else {
+ stid -= t->stid_base;
+ }
+
return stid < (t->nstids + t->nsftids) ? t->stid_tab[stid].data : NULL;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.c b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
index 2987809..cb05be9 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.c
@@ -45,6 +45,7 @@
#include "l2t.h"
#include "t4_msg.h"
#include "t4fw_api.h"
+#include "t4_regs.h"
#define VLAN_NONE 0xfff
@@ -411,6 +412,40 @@ done:
}
EXPORT_SYMBOL(cxgb4_l2t_get);
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t)
+{
+ struct adapter *adap = netdev2adap(dev);
+ struct tp_params *tp = &adap->params.tp;
+ u64 ntuple = 0;
+
+	/* Initialize each of the fields we care about that are present
+ * in the Compressed Filter Tuple.
+ */
+ if (tp->vlan_shift >= 0 && l2t->vlan != VLAN_NONE)
+ ntuple |= (F_FT_VLAN_VLD | l2t->vlan) << tp->vlan_shift;
+
+ if (tp->port_shift >= 0)
+ ntuple |= (u64)l2t->lport << tp->port_shift;
+
+ if (tp->protocol_shift >= 0)
+ ntuple |= (u64)IPPROTO_TCP << tp->protocol_shift;
+
+ if (tp->vnic_shift >= 0) {
+ u32 viid = cxgb4_port_viid(dev);
+ u32 vf = FW_VIID_VIN_GET(viid);
+ u32 pf = FW_VIID_PFN_GET(viid);
+ u32 vld = FW_VIID_VIVLD_GET(viid);
+
+ ntuple |= (u64)(V_FT_VNID_ID_VF(vf) |
+ V_FT_VNID_ID_PF(pf) |
+ V_FT_VNID_ID_VLD(vld)) << tp->vnic_shift;
+ }
+
+ return ntuple;
+}
+EXPORT_SYMBOL(cxgb4_select_ntuple);
+
/*
* Called when address resolution fails for an L2T entry to handle packets
* on the arpq head. If a packet specifies a failure handler it is invoked,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/l2t.h b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
index 108c0f1..85eb5c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/l2t.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/l2t.h
@@ -98,7 +98,8 @@ int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
const struct net_device *physdev,
unsigned int priority);
-
+u64 cxgb4_select_ntuple(struct net_device *dev,
+ const struct l2t_entry *l2t);
void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index ac311f5..cc3511a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -509,7 +509,7 @@ static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
u32 val;
if (q->pend_cred >= 8) {
val = PIDX(q->pend_cred / 8);
- if (!is_t4(adap->chip))
+ if (!is_t4(adap->params.chip))
val |= DBTYPE(1);
wmb();
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO(1) |
@@ -847,7 +847,7 @@ static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
wmb(); /* write descriptors before telling HW */
spin_lock(&q->db_lock);
if (!q->db_disabled) {
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
QID(q->cntxt_id) | PIDX(n));
} else {
@@ -1596,7 +1596,7 @@ static noinline int handle_trace_pkt(struct adapter *adap,
return 0;
}
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
__skb_pull(skb, sizeof(struct cpl_trace_pkt));
else
__skb_pull(skb, sizeof(struct cpl_t5_trace_pkt));
@@ -1661,7 +1661,7 @@ int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
const struct cpl_rx_pkt *pkt;
struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
struct sge *s = &q->adap->sge;
- int cpl_trace_pkt = is_t4(q->adap->chip) ?
+ int cpl_trace_pkt = is_t4(q->adap->params.chip) ?
CPL_TRACE_PKT : CPL_TRACE_PKT_T5;
if (unlikely(*(u8 *)rsp == cpl_trace_pkt))
@@ -2182,7 +2182,7 @@ err:
static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
{
q->cntxt_id = id;
- if (!is_t4(adap->chip)) {
+ if (!is_t4(adap->params.chip)) {
unsigned int s_qpp;
unsigned short udb_density;
unsigned long qpshift;
@@ -2581,7 +2581,7 @@ static int t4_sge_init_soft(struct adapter *adap)
#undef READ_FL_BUF
if (fl_small_pg != PAGE_SIZE ||
- (fl_large_pg != 0 && (fl_large_pg <= fl_small_pg ||
+ (fl_large_pg != 0 && (fl_large_pg < fl_small_pg ||
(fl_large_pg & (fl_large_pg-1)) != 0))) {
dev_err(adap->pdev_dev, "bad SGE FL page buffer sizes [%d, %d]\n",
fl_small_pg, fl_large_pg);
@@ -2641,7 +2641,7 @@ static int t4_sge_init_hard(struct adapter *adap)
* Set up to drop DOORBELL writes when the DOORBELL FIFO overflows
* and generate an interrupt when this occurs so we can recover.
*/
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
t4_set_reg_field(adap, A_SGE_DBFIFO_STATUS,
V_HP_INT_THRESH(M_HP_INT_THRESH) |
V_LP_INT_THRESH(M_LP_INT_THRESH),
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 4cbb2f9..e1413ea 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -296,7 +296,7 @@ int t4_mc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 mc_bist_cmd, mc_bist_cmd_addr, mc_bist_cmd_len;
u32 mc_bist_status_rdata, mc_bist_data_pattern;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mc_bist_cmd = MC_BIST_CMD;
mc_bist_cmd_addr = MC_BIST_CMD_ADDR;
mc_bist_cmd_len = MC_BIST_CMD_LEN;
@@ -349,7 +349,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
u32 edc_bist_cmd, edc_bist_cmd_addr, edc_bist_cmd_len;
u32 edc_bist_cmd_data_pattern, edc_bist_status_rdata;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
edc_bist_cmd = EDC_REG(EDC_BIST_CMD, idx);
edc_bist_cmd_addr = EDC_REG(EDC_BIST_CMD_ADDR, idx);
edc_bist_cmd_len = EDC_REG(EDC_BIST_CMD_LEN, idx);
@@ -402,7 +402,7 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
{
int i;
- u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
+ u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
/*
* Setup offset into PCIE memory window. Address must be a
@@ -863,104 +863,169 @@ unlock:
}
/**
- * get_fw_version - read the firmware version
+ * t4_get_fw_version - read the firmware version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the FW version from flash.
*/
-static int get_fw_version(struct adapter *adapter, u32 *vers)
+int t4_get_fw_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, adapter->params.sf_fw_start +
- offsetof(struct fw_hdr, fw_ver), 1, vers, 0);
+ return t4_read_flash(adapter, FLASH_FW_START +
+ offsetof(struct fw_hdr, fw_ver), 1,
+ vers, 0);
}
/**
- * get_tp_version - read the TP microcode version
+ * t4_get_tp_version - read the TP microcode version
* @adapter: the adapter
* @vers: where to place the version
*
* Reads the TP microcode version from flash.
*/
-static int get_tp_version(struct adapter *adapter, u32 *vers)
+int t4_get_tp_version(struct adapter *adapter, u32 *vers)
{
- return t4_read_flash(adapter, adapter->params.sf_fw_start +
+ return t4_read_flash(adapter, FLASH_FW_START +
offsetof(struct fw_hdr, tp_microcode_ver),
1, vers, 0);
}
-/**
- * t4_check_fw_version - check if the FW is compatible with this driver
- * @adapter: the adapter
- *
- * Checks if an adapter's FW is compatible with the driver. Returns 0
- * if there's exact match, a negative error if the version could not be
- * read or there's a major version mismatch, and a positive value if the
- * expected major version is found but there's a minor version mismatch.
+/* Is the given firmware API compatible with the one the driver was compiled
+ * with?
*/
-int t4_check_fw_version(struct adapter *adapter)
+static int fw_compatible(const struct fw_hdr *hdr1, const struct fw_hdr *hdr2)
{
- u32 api_vers[2];
- int ret, major, minor, micro;
- int exp_major, exp_minor, exp_micro;
- ret = get_fw_version(adapter, &adapter->params.fw_vers);
- if (!ret)
- ret = get_tp_version(adapter, &adapter->params.tp_vers);
- if (!ret)
- ret = t4_read_flash(adapter, adapter->params.sf_fw_start +
- offsetof(struct fw_hdr, intfver_nic),
- 2, api_vers, 1);
- if (ret)
- return ret;
+ /* short circuit if it's the exact same firmware version */
+ if (hdr1->chip == hdr2->chip && hdr1->fw_ver == hdr2->fw_ver)
+ return 1;
- major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
- minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
- micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+#define SAME_INTF(x) (hdr1->intfver_##x == hdr2->intfver_##x)
+ if (hdr1->chip == hdr2->chip && SAME_INTF(nic) && SAME_INTF(vnic) &&
+ SAME_INTF(ri) && SAME_INTF(iscsi) && SAME_INTF(fcoe))
+ return 1;
+#undef SAME_INTF
- switch (CHELSIO_CHIP_VERSION(adapter->chip)) {
- case CHELSIO_T4:
- exp_major = FW_VERSION_MAJOR;
- exp_minor = FW_VERSION_MINOR;
- exp_micro = FW_VERSION_MICRO;
- break;
- case CHELSIO_T5:
- exp_major = FW_VERSION_MAJOR_T5;
- exp_minor = FW_VERSION_MINOR_T5;
- exp_micro = FW_VERSION_MICRO_T5;
- break;
- default:
- dev_err(adapter->pdev_dev, "Unsupported chip type, %x\n",
- adapter->chip);
- return -EINVAL;
- }
+ return 0;
+}
- memcpy(adapter->params.api_vers, api_vers,
- sizeof(adapter->params.api_vers));
+/* The firmware in the filesystem is usable, but should it be installed?
+ * This routine explains itself in detail if it indicates the filesystem
+ * firmware should be installed.
+ */
+static int should_install_fs_fw(struct adapter *adap, int card_fw_usable,
+ int k, int c)
+{
+ const char *reason;
- if (major < exp_major || (major == exp_major && minor < exp_minor) ||
- (major == exp_major && minor == exp_minor && micro < exp_micro)) {
- dev_err(adapter->pdev_dev,
- "Card has firmware version %u.%u.%u, minimum "
- "supported firmware is %u.%u.%u.\n", major, minor,
- micro, exp_major, exp_minor, exp_micro);
- return -EFAULT;
+ if (!card_fw_usable) {
+ reason = "incompatible or unusable";
+ goto install;
}
- if (major != exp_major) { /* major mismatch - fail */
- dev_err(adapter->pdev_dev,
- "card FW has major version %u, driver wants %u\n",
- major, exp_major);
- return -EINVAL;
+ if (k > c) {
+ reason = "older than the version supported with this driver";
+ goto install;
}
- if (minor == exp_minor && micro == exp_micro)
- return 0; /* perfect match */
+ return 0;
+
+install:
+ dev_err(adap->pdev_dev, "firmware on card (%u.%u.%u.%u) is %s, "
+ "installing firmware %u.%u.%u.%u on card.\n",
+ FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
+ FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c), reason,
+ FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
+ FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
- /* Minor/micro version mismatch. Report it but often it's OK. */
return 1;
}
+int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
+ const u8 *fw_data, unsigned int fw_size,
+ struct fw_hdr *card_fw, enum dev_state state,
+ int *reset)
+{
+ int ret, card_fw_usable, fs_fw_usable;
+ const struct fw_hdr *fs_fw;
+ const struct fw_hdr *drv_fw;
+
+ drv_fw = &fw_info->fw_hdr;
+
+ /* Read the header of the firmware on the card */
+ ret = -t4_read_flash(adap, FLASH_FW_START,
+ sizeof(*card_fw) / sizeof(uint32_t),
+ (uint32_t *)card_fw, 1);
+ if (ret == 0) {
+ card_fw_usable = fw_compatible(drv_fw, (const void *)card_fw);
+ } else {
+ dev_err(adap->pdev_dev,
+ "Unable to read card's firmware header: %d\n", ret);
+ card_fw_usable = 0;
+ }
+
+ if (fw_data != NULL) {
+ fs_fw = (const void *)fw_data;
+ fs_fw_usable = fw_compatible(drv_fw, fs_fw);
+ } else {
+ fs_fw = NULL;
+ fs_fw_usable = 0;
+ }
+
+ if (card_fw_usable && card_fw->fw_ver == drv_fw->fw_ver &&
+ (!fs_fw_usable || fs_fw->fw_ver == drv_fw->fw_ver)) {
+ /* Common case: the firmware on the card is an exact match and
+ * the filesystem one is an exact match too, or the filesystem
+ * one is absent/incompatible.
+ */
+ } else if (fs_fw_usable && state == DEV_STATE_UNINIT &&
+ should_install_fs_fw(adap, card_fw_usable,
+ be32_to_cpu(fs_fw->fw_ver),
+ be32_to_cpu(card_fw->fw_ver))) {
+ ret = -t4_fw_upgrade(adap, adap->mbox, fw_data,
+ fw_size, 0);
+ if (ret != 0) {
+ dev_err(adap->pdev_dev,
+ "failed to install firmware: %d\n", ret);
+ goto bye;
+ }
+
+ /* Installed successfully, update the cached header too. */
+ memcpy(card_fw, fs_fw, sizeof(*card_fw));
+ card_fw_usable = 1;
+ *reset = 0; /* already reset as part of load_fw */
+ }
+
+ if (!card_fw_usable) {
+ uint32_t d, c, k;
+
+ d = be32_to_cpu(drv_fw->fw_ver);
+ c = be32_to_cpu(card_fw->fw_ver);
+ k = fs_fw ? be32_to_cpu(fs_fw->fw_ver) : 0;
+
+ dev_err(adap->pdev_dev, "Cannot find a usable firmware: "
+ "chip state %d, "
+ "driver compiled with %d.%d.%d.%d, "
+ "card has %d.%d.%d.%d, filesystem has %d.%d.%d.%d\n",
+ state,
+ FW_HDR_FW_VER_MAJOR_GET(d), FW_HDR_FW_VER_MINOR_GET(d),
+ FW_HDR_FW_VER_MICRO_GET(d), FW_HDR_FW_VER_BUILD_GET(d),
+ FW_HDR_FW_VER_MAJOR_GET(c), FW_HDR_FW_VER_MINOR_GET(c),
+ FW_HDR_FW_VER_MICRO_GET(c), FW_HDR_FW_VER_BUILD_GET(c),
+ FW_HDR_FW_VER_MAJOR_GET(k), FW_HDR_FW_VER_MINOR_GET(k),
+ FW_HDR_FW_VER_MICRO_GET(k), FW_HDR_FW_VER_BUILD_GET(k));
+ ret = EINVAL;
+ goto bye;
+ }
+
+ /* We're using whatever's on the card and it's known to be good. */
+ adap->params.fw_vers = be32_to_cpu(card_fw->fw_ver);
+ adap->params.tp_vers = be32_to_cpu(card_fw->tp_microcode_ver);
+
+bye:
+ return ret;
+}
+
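
t4_prep_fw() above picks between the firmware already on the card, the image loaded from /lib/firmware, and outright failure. An illustrative condensation of that decision (simplified, not driver code; versions are compared as the 32-bit values should_install_fs_fw() receives):

enum fw_choice { FW_USE_CARD, FW_FLASH_FS, FW_FAIL };

static enum fw_choice pick_fw(int card_ok, int fs_ok,
			      unsigned int card_ver, unsigned int fs_ver,
			      unsigned int drv_ver, int dev_uninit)
{
	/* Common case: card firmware usable and an exact driver match,
	 * and the filesystem image (if any) is the same version.
	 */
	if (card_ok && card_ver == drv_ver && (!fs_ok || fs_ver == drv_ver))
		return FW_USE_CARD;

	/* Upgrade from /lib/firmware only while the device is still
	 * uninitialised, and only if the card image is unusable or older.
	 */
	if (fs_ok && dev_uninit && (!card_ok || fs_ver > card_ver))
		return FW_FLASH_FS;

	return card_ok ? FW_USE_CARD : FW_FAIL;
}
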
/**
* t4_flash_erase_sectors - erase a range of flash sectors
* @adapter: the adapter
@@ -1368,7 +1433,7 @@ static void pcie_intr_handler(struct adapter *adapter)
PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
pcie_port_intr_info) +
t4_handle_intr_status(adapter, PCIE_INT_CAUSE,
- is_t4(adapter->chip) ?
+ is_t4(adapter->params.chip) ?
pcie_intr_info : t5_pcie_intr_info);
if (fat)
@@ -1782,7 +1847,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
{
u32 v, int_cause_reg;
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
int_cause_reg = PORT_REG(port, XGMAC_PORT_INT_CAUSE);
else
int_cause_reg = T5_PORT_REG(port, MAC_PORT_INT_CAUSE);
@@ -2250,7 +2315,7 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
#define GET_STAT(name) \
t4_read_reg64(adap, \
- (is_t4(adap->chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
+ (is_t4(adap->params.chip) ? PORT_REG(idx, MPS_PORT_STAT_##name##_L) : \
T5_PORT_REG(idx, MPS_PORT_STAT_##name##_L)))
#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
@@ -2332,7 +2397,7 @@ void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
{
u32 mag_id_reg_l, mag_id_reg_h, port_cfg_reg;
- if (is_t4(adap->chip)) {
+ if (is_t4(adap->params.chip)) {
mag_id_reg_l = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO);
mag_id_reg_h = PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI);
port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
@@ -2374,7 +2439,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
int i;
u32 port_cfg_reg;
- if (is_t4(adap->chip))
+ if (is_t4(adap->params.chip))
port_cfg_reg = PORT_REG(port, XGMAC_PORT_CFG2);
else
port_cfg_reg = T5_PORT_REG(port, MAC_PORT_CFG2);
@@ -2387,7 +2452,7 @@ int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
return -EINVAL;
#define EPIO_REG(name) \
- (is_t4(adap->chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
+ (is_t4(adap->params.chip) ? PORT_REG(port, XGMAC_PORT_EPIO_##name) : \
T5_PORT_REG(port, MAC_PORT_EPIO_##name))
t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
@@ -2474,7 +2539,7 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
{
int i, off;
- u32 win_pf = is_t4(adap->chip) ? 0 : V_PFNUM(adap->fn);
+ u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
/* Align on a 2KB boundary.
*/
@@ -3306,7 +3371,7 @@ int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
int i, ret;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p;
- unsigned int max_naddr = is_t4(adap->chip) ?
+ unsigned int max_naddr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -3368,7 +3433,7 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
int ret, mode;
struct fw_vi_mac_cmd c;
struct fw_vi_mac_exact *p = c.u.exact;
- unsigned int max_mac_addr = is_t4(adap->chip) ?
+ unsigned int max_mac_addr = is_t4(adap->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -3699,13 +3764,14 @@ int t4_prep_adapter(struct adapter *adapter)
{
int ret, ver;
uint16_t device_id;
+ u32 pl_rev;
ret = t4_wait_dev_ready(adapter);
if (ret < 0)
return ret;
get_pci_mode(adapter, &adapter->params.pci);
- adapter->params.rev = t4_read_reg(adapter, PL_REV);
+ pl_rev = G_REV(t4_read_reg(adapter, PL_REV));
ret = get_flash_params(adapter);
if (ret < 0) {
@@ -3717,14 +3783,13 @@ int t4_prep_adapter(struct adapter *adapter)
*/
pci_read_config_word(adapter->pdev, PCI_DEVICE_ID, &device_id);
ver = device_id >> 12;
+ adapter->params.chip = 0;
switch (ver) {
case CHELSIO_T4:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4,
- adapter->params.rev);
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T4, pl_rev);
break;
case CHELSIO_T5:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5,
- adapter->params.rev);
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, pl_rev);
break;
default:
dev_err(adapter->pdev_dev, "Device %d is not supported\n",
@@ -3732,9 +3797,6 @@ int t4_prep_adapter(struct adapter *adapter)
return -EINVAL;
}
- /* Reassign the updated revision field */
- adapter->params.rev = adapter->chip;
-
init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
/*
@@ -3746,6 +3808,109 @@ int t4_prep_adapter(struct adapter *adapter)
return 0;
}
+/**
+ * t4_init_tp_params - initialize adap->params.tp
+ * @adap: the adapter
+ *
+ * Initialize various fields of the adapter's TP Parameters structure.
+ */
+int t4_init_tp_params(struct adapter *adap)
+{
+ int chan;
+ u32 v;
+
+ v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+ adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+ adap->params.tp.dack_re = DELAYEDACKRESOLUTION_GET(v);
+
+ /* MODQ_REQ_MAP defaults to setting queues 0-3 to chan 0-3 */
+ for (chan = 0; chan < NCHAN; chan++)
+ adap->params.tp.tx_modq[chan] = chan;
+
+	/* Cache the adapter's Compressed Filter Mode and global Ingress
+ * Configuration.
+ */
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.vlan_pri_map, 1,
+ TP_VLAN_PRI_MAP);
+ t4_read_indirect(adap, TP_PIO_ADDR, TP_PIO_DATA,
+ &adap->params.tp.ingress_config, 1,
+ TP_INGRESS_CONFIG);
+
+ /* Now that we have TP_VLAN_PRI_MAP cached, we can calculate the field
+ * shift positions of several elements of the Compressed Filter Tuple
+ * for this adapter which we need frequently ...
+ */
+ adap->params.tp.vlan_shift = t4_filter_field_shift(adap, F_VLAN);
+ adap->params.tp.vnic_shift = t4_filter_field_shift(adap, F_VNIC_ID);
+ adap->params.tp.port_shift = t4_filter_field_shift(adap, F_PORT);
+ adap->params.tp.protocol_shift = t4_filter_field_shift(adap,
+ F_PROTOCOL);
+
+ /* If TP_INGRESS_CONFIG.VNID == 0, then TP_VLAN_PRI_MAP.VNIC_ID
+	 * represents the presence of an Outer VLAN instead of a VNIC ID.
+ */
+ if ((adap->params.tp.ingress_config & F_VNIC) == 0)
+ adap->params.tp.vnic_shift = -1;
+
+ return 0;
+}
+
+/**
+ * t4_filter_field_shift - calculate filter field shift
+ * @adap: the adapter
+ * @filter_sel: the desired field (from TP_VLAN_PRI_MAP bits)
+ *
+ * Return the shift position of a filter field within the Compressed
+ * Filter Tuple. The filter field is specified via its selection bit
+ *	within TP_VLAN_PRI_MAP (filter mode), e.g. F_VLAN.
+ */
+int t4_filter_field_shift(const struct adapter *adap, int filter_sel)
+{
+ unsigned int filter_mode = adap->params.tp.vlan_pri_map;
+ unsigned int sel;
+ int field_shift;
+
+ if ((filter_mode & filter_sel) == 0)
+ return -1;
+
+ for (sel = 1, field_shift = 0; sel < filter_sel; sel <<= 1) {
+ switch (filter_mode & sel) {
+ case F_FCOE:
+ field_shift += W_FT_FCOE;
+ break;
+ case F_PORT:
+ field_shift += W_FT_PORT;
+ break;
+ case F_VNIC_ID:
+ field_shift += W_FT_VNIC_ID;
+ break;
+ case F_VLAN:
+ field_shift += W_FT_VLAN;
+ break;
+ case F_TOS:
+ field_shift += W_FT_TOS;
+ break;
+ case F_PROTOCOL:
+ field_shift += W_FT_PROTOCOL;
+ break;
+ case F_ETHERTYPE:
+ field_shift += W_FT_ETHERTYPE;
+ break;
+ case F_MACMATCH:
+ field_shift += W_FT_MACMATCH;
+ break;
+ case F_MPSHITTYPE:
+ field_shift += W_FT_MPSHITTYPE;
+ break;
+ case F_FRAGMENTATION:
+ field_shift += W_FT_FRAGMENTATION;
+ break;
+ }
+ }
+ return field_shift;
+}
+
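
t4_filter_field_shift() above sums the W_FT_* widths (added later in this patch to t4_regs.h) of every enabled TP_VLAN_PRI_MAP field below the one requested. A stand-alone restatement with a worked example, illustration only:

#include <assert.h>

/* Widths per selection bit, matching the W_FT_* values in t4_regs.h:
 * 0=FCOE 1=PORT 2=VNIC_ID 3=VLAN 4=TOS 5=PROTOCOL 6=ETHERTYPE
 * 7=MACMATCH 8=MPSHITTYPE 9=FRAGMENTATION
 */
static const int width[10] = { 1, 3, 17, 17, 8, 8, 16, 9, 3, 1 };

static int field_shift(unsigned int mode, int bit)
{
	int b, shift = 0;

	if (!(mode & (1u << bit)))
		return -1;                 /* field not part of the tuple */
	for (b = 0; b < bit; b++)
		if (mode & (1u << b))
			shift += width[b]; /* skip lower enabled fields */
	return shift;
}

int main(void)
{
	unsigned int mode = (1u << 1) | (1u << 3) | (1u << 5); /* PORT|VLAN|PROTOCOL */

	assert(field_shift(mode, 1) == 0);   /* PORT at bit 0, 3 bits wide  */
	assert(field_shift(mode, 3) == 3);   /* VLAN at bit 3, 17 bits wide */
	assert(field_shift(mode, 5) == 20);  /* PROTOCOL at bit 20          */
	return 0;
}
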
int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
{
u8 addr[6];
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index ef146c0b..4082522 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -1092,6 +1092,11 @@
#define PL_REV 0x1943c
+#define S_REV 0
+#define M_REV 0xfU
+#define V_REV(x) ((x) << S_REV)
+#define G_REV(x) (((x) >> S_REV) & M_REV)
+
#define LE_DB_CONFIG 0x19c04
#define HASHEN 0x00100000U
@@ -1166,10 +1171,50 @@
#define A_TP_TX_SCHED_PCMD 0x25
+#define S_VNIC 11
+#define V_VNIC(x) ((x) << S_VNIC)
+#define F_VNIC V_VNIC(1U)
+
+#define S_FRAGMENTATION 9
+#define V_FRAGMENTATION(x) ((x) << S_FRAGMENTATION)
+#define F_FRAGMENTATION V_FRAGMENTATION(1U)
+
+#define S_MPSHITTYPE 8
+#define V_MPSHITTYPE(x) ((x) << S_MPSHITTYPE)
+#define F_MPSHITTYPE V_MPSHITTYPE(1U)
+
+#define S_MACMATCH 7
+#define V_MACMATCH(x) ((x) << S_MACMATCH)
+#define F_MACMATCH V_MACMATCH(1U)
+
+#define S_ETHERTYPE 6
+#define V_ETHERTYPE(x) ((x) << S_ETHERTYPE)
+#define F_ETHERTYPE V_ETHERTYPE(1U)
+
+#define S_PROTOCOL 5
+#define V_PROTOCOL(x) ((x) << S_PROTOCOL)
+#define F_PROTOCOL V_PROTOCOL(1U)
+
+#define S_TOS 4
+#define V_TOS(x) ((x) << S_TOS)
+#define F_TOS V_TOS(1U)
+
+#define S_VLAN 3
+#define V_VLAN(x) ((x) << S_VLAN)
+#define F_VLAN V_VLAN(1U)
+
+#define S_VNIC_ID 2
+#define V_VNIC_ID(x) ((x) << S_VNIC_ID)
+#define F_VNIC_ID V_VNIC_ID(1U)
+
#define S_PORT 1
#define V_PORT(x) ((x) << S_PORT)
#define F_PORT V_PORT(1U)
+#define S_FCOE 0
+#define V_FCOE(x) ((x) << S_FCOE)
+#define F_FCOE V_FCOE(1U)
+
#define NUM_MPS_CLS_SRAM_L_INSTANCES 336
#define NUM_MPS_T5_CLS_SRAM_L_INSTANCES 512
@@ -1199,4 +1244,46 @@
#define EDC_STRIDE_T5 (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR)
#define EDC_REG_T5(reg, idx) (reg + EDC_STRIDE_T5 * idx)
+#define A_PL_VF_REV 0x4
+#define A_PL_VF_WHOAMI 0x0
+#define A_PL_VF_REVISION 0x8
+
+#define S_CHIPID 4
+#define M_CHIPID 0xfU
+#define V_CHIPID(x) ((x) << S_CHIPID)
+#define G_CHIPID(x) (((x) >> S_CHIPID) & M_CHIPID)
+
+/* TP_VLAN_PRI_MAP controls which subset of fields will be present in the
+ * Compressed Filter Tuple for LE filters. Each bit set in TP_VLAN_PRI_MAP
+ * selects for a particular field being present. These fields, when present
+ * in the Compressed Filter Tuple, have the following widths in bits.
+ */
+#define W_FT_FCOE 1
+#define W_FT_PORT 3
+#define W_FT_VNIC_ID 17
+#define W_FT_VLAN 17
+#define W_FT_TOS 8
+#define W_FT_PROTOCOL 8
+#define W_FT_ETHERTYPE 16
+#define W_FT_MACMATCH 9
+#define W_FT_MPSHITTYPE 3
+#define W_FT_FRAGMENTATION 1
+
+/* Some of the Compressed Filter Tuple fields have internal structure. These
+ * bit shifts/masks describe those structures. All shifts are relative to the
+ * base position of the fields within the Compressed Filter Tuple.
+ */
+#define S_FT_VLAN_VLD 16
+#define V_FT_VLAN_VLD(x) ((x) << S_FT_VLAN_VLD)
+#define F_FT_VLAN_VLD V_FT_VLAN_VLD(1U)
+
+#define S_FT_VNID_ID_VF 0
+#define V_FT_VNID_ID_VF(x) ((x) << S_FT_VNID_ID_VF)
+
+#define S_FT_VNID_ID_PF 7
+#define V_FT_VNID_ID_PF(x) ((x) << S_FT_VNID_ID_PF)
+
+#define S_FT_VNID_ID_VLD 16
+#define V_FT_VNID_ID_VLD(x) ((x) << S_FT_VNID_ID_VLD)
+
#endif /* __T4_REGS_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
index 6f77ac4..74fea74 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
@@ -2157,7 +2157,7 @@ struct fw_debug_cmd {
struct fw_hdr {
u8 ver;
- u8 reserved1;
+ u8 chip; /* terminator chip type */
__be16 len512; /* bin length in units of 512-bytes */
__be32 fw_ver; /* firmware version */
__be32 tp_microcode_ver;
@@ -2176,6 +2176,11 @@ struct fw_hdr {
__be32 reserved6[23];
};
+enum fw_hdr_chip {
+ FW_HDR_CHIP_T4,
+ FW_HDR_CHIP_T5
+};
+
#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index be5c7ef..68eaa9c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -344,7 +344,6 @@ struct adapter {
unsigned long registered_device_map;
unsigned long open_device_map;
unsigned long flags;
- enum chip_type chip;
struct adapter_params params;
/* queue and interrupt resources */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index 5f90ec5..0899c09 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -1064,7 +1064,7 @@ static inline unsigned int mk_adap_vers(const struct adapter *adapter)
/*
* Chip version 4, revision 0x3f (cxgb4vf).
*/
- return CHELSIO_CHIP_VERSION(adapter->chip) | (0x3f << 10);
+ return CHELSIO_CHIP_VERSION(adapter->params.chip) | (0x3f << 10);
}
/*
@@ -1551,9 +1551,13 @@ static void cxgb4vf_get_regs(struct net_device *dev,
reg_block_dump(adapter, regbuf,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_FIRST,
T4VF_MPS_BASE_ADDR + T4VF_MOD_MAP_MPS_LAST);
+
+ /* T5 adds new registers in the PL Register map.
+ */
reg_block_dump(adapter, regbuf,
T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_FIRST,
- T4VF_PL_BASE_ADDR + T4VF_MOD_MAP_PL_LAST);
+ T4VF_PL_BASE_ADDR + (is_t4(adapter->params.chip)
+ ? A_PL_VF_WHOAMI : A_PL_VF_REVISION));
reg_block_dump(adapter, regbuf,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_FIRST,
T4VF_CIM_BASE_ADDR + T4VF_MOD_MAP_CIM_LAST);
@@ -2087,6 +2091,7 @@ static int adap_init0(struct adapter *adapter)
unsigned int ethqsets;
int err;
u32 param, val = 0;
+ unsigned int chipid;
/*
* Wait for the device to become ready before proceeding ...
@@ -2114,12 +2119,14 @@ static int adap_init0(struct adapter *adapter)
return err;
}
+ adapter->params.chip = 0;
switch (adapter->pdev->device >> 12) {
case CHELSIO_T4:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
+ adapter->params.chip = CHELSIO_CHIP_CODE(CHELSIO_T4, 0);
break;
case CHELSIO_T5:
- adapter->chip = CHELSIO_CHIP_CODE(CHELSIO_T5, 0);
+ chipid = G_REV(t4_read_reg(adapter, A_PL_VF_REV));
+ adapter->params.chip |= CHELSIO_CHIP_CODE(CHELSIO_T5, chipid);
break;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 8475c4c..0a89963 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -537,7 +537,7 @@ static inline void ring_fl_db(struct adapter *adapter, struct sge_fl *fl)
*/
if (fl->pend_cred >= FL_PER_EQ_UNIT) {
val = PIDX(fl->pend_cred / FL_PER_EQ_UNIT);
- if (!is_t4(adapter->chip))
+ if (!is_t4(adapter->params.chip))
val |= DBTYPE(1);
wmb();
t4_write_reg(adapter, T4VF_SGE_BASE_ADDR + SGE_VF_KDOORBELL,
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 53cbfed..6136245 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -39,21 +39,28 @@
#include "../cxgb4/t4fw_api.h"
#define CHELSIO_CHIP_CODE(version, revision) (((version) << 4) | (revision))
-#define CHELSIO_CHIP_VERSION(code) ((code) >> 4)
+#define CHELSIO_CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHELSIO_CHIP_RELEASE(code) ((code) & 0xf)
+/* All T4 and later chips have their PCI-E Device IDs encoded as 0xVFPP where:
+ *
+ * V = "4" for T4; "5" for T5, etc. or
+ *    = "a" for T4 FPGA; "b" for T5 FPGA, etc.
+ * F = "0" for PF 0..3; "4".."7" for PF4..7; and "8" for VFs
+ * PP = adapter product designation
+ */
#define CHELSIO_T4 0x4
#define CHELSIO_T5 0x5
enum chip_type {
- T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 0),
- T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
- T4_A3 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
+ T4_A1 = CHELSIO_CHIP_CODE(CHELSIO_T4, 1),
+ T4_A2 = CHELSIO_CHIP_CODE(CHELSIO_T4, 2),
T4_FIRST_REV = T4_A1,
- T4_LAST_REV = T4_A3,
+ T4_LAST_REV = T4_A2,
- T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
- T5_FIRST_REV = T5_A1,
+ T5_A0 = CHELSIO_CHIP_CODE(CHELSIO_T5, 0),
+ T5_A1 = CHELSIO_CHIP_CODE(CHELSIO_T5, 1),
+ T5_FIRST_REV = T5_A0,
T5_LAST_REV = T5_A1,
};
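
The CHELSIO_CHIP_CODE/VERSION/RELEASE macros above pack the chip family into the high nibble and the silicon revision into the low nibble, which is what lets is_t4() become a simple version compare. A quick stand-alone check (macros renamed so the snippet is self-contained; illustration only):

#include <assert.h>

#define CHIP_CODE(version, revision) (((version) << 4) | (revision))
#define CHIP_VERSION(code) (((code) >> 4) & 0xf)
#define CHIP_RELEASE(code) ((code) & 0xf)

int main(void)
{
	int t5_a1 = CHIP_CODE(0x5, 1);   /* T5, revision 1 -> 0x51 */

	assert(t5_a1 == 0x51);
	assert(CHIP_VERSION(t5_a1) == 0x5);
	assert(CHIP_RELEASE(t5_a1) == 0x1);
	assert(CHIP_VERSION(CHIP_CODE(0x4, 0)) == 0x4);  /* what is_t4() tests */
	return 0;
}
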
@@ -203,6 +210,7 @@ struct adapter_params {
struct vpd_params vpd; /* Vital Product Data */
struct rss_params rss; /* Receive Side Scaling */
struct vf_resources vfres; /* Virtual Function Resource limits */
+ enum chip_type chip; /* chip code */
u8 nports; /* # of Ethernet "ports" */
};
@@ -253,7 +261,7 @@ static inline int t4vf_wr_mbox_ns(struct adapter *adapter, const void *cmd,
static inline int is_t4(enum chip_type chip)
{
- return (chip >= T4_FIRST_REV && chip <= T4_LAST_REV);
+ return CHELSIO_CHIP_VERSION(chip) == CHELSIO_T4;
}
int t4vf_wait_dev_ready(struct adapter *);
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index 9f96dc3..d958c44 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -1027,7 +1027,7 @@ int t4vf_alloc_mac_filt(struct adapter *adapter, unsigned int viid, bool free,
unsigned nfilters = 0;
unsigned int rem = naddr;
struct fw_vi_mac_cmd cmd, rpl;
- unsigned int max_naddr = is_t4(adapter->chip) ?
+ unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
@@ -1121,7 +1121,7 @@ int t4vf_change_mac(struct adapter *adapter, unsigned int viid,
struct fw_vi_mac_exact *p = &cmd.u.exact[0];
size_t len16 = DIV_ROUND_UP(offsetof(struct fw_vi_mac_cmd,
u.exact[1]), 16);
- unsigned int max_naddr = is_t4(adapter->chip) ?
+ unsigned int max_naddr = is_t4(adapter->params.chip) ?
NUM_MPS_CLS_SRAM_L_INSTANCES :
NUM_MPS_T5_CLS_SRAM_L_INSTANCES;
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 5878df6..4ccaf9a 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -104,6 +104,7 @@ static inline char *nic_name(struct pci_dev *pdev)
#define BE3_MAX_RSS_QS 16
#define BE3_MAX_TX_QS 16
#define BE3_MAX_EVT_QS 16
+#define BE3_SRIOV_MAX_EVT_QS 8
#define MAX_RX_QS 32
#define MAX_EVT_QS 32
@@ -480,7 +481,7 @@ struct be_adapter {
struct list_head entry;
u32 flash_status;
- struct completion flash_compl;
+ struct completion et_cmd_compl;
struct be_resources res; /* resources available for the func */
u16 num_vfs; /* Number of VFs provisioned by PF */
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index e0e8bc1..94c35c8 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -141,11 +141,17 @@ static int be_mcc_compl_process(struct be_adapter *adapter,
subsystem = resp_hdr->subsystem;
}
+ if (opcode == OPCODE_LOWLEVEL_LOOPBACK_TEST &&
+ subsystem == CMD_SUBSYSTEM_LOWLEVEL) {
+ complete(&adapter->et_cmd_compl);
+ return 0;
+ }
+
if (((opcode == OPCODE_COMMON_WRITE_FLASHROM) ||
(opcode == OPCODE_COMMON_WRITE_OBJECT)) &&
(subsystem == CMD_SUBSYSTEM_COMMON)) {
adapter->flash_status = compl_status;
- complete(&adapter->flash_compl);
+ complete(&adapter->et_cmd_compl);
}
if (compl_status == MCC_STATUS_SUCCESS) {
@@ -2017,6 +2023,9 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
0x3ea83c02, 0x4a110304};
int status;
+ if (!(be_if_cap_flags(adapter) & BE_IF_FLAGS_RSS))
+ return 0;
+
if (mutex_lock_interruptible(&adapter->mbox_lock))
return -1;
@@ -2160,7 +2169,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
msecs_to_jiffies(60000)))
status = -1;
else
@@ -2255,8 +2264,8 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
be_mcc_notify(adapter);
spin_unlock_bh(&adapter->mcc_lock);
- if (!wait_for_completion_timeout(&adapter->flash_compl,
- msecs_to_jiffies(40000)))
+ if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
+ msecs_to_jiffies(40000)))
status = -1;
else
status = adapter->flash_status;
@@ -2367,6 +2376,7 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
{
struct be_mcc_wrb *wrb;
struct be_cmd_req_loopback_test *req;
+ struct be_cmd_resp_loopback_test *resp;
int status;
spin_lock_bh(&adapter->mcc_lock);
@@ -2381,8 +2391,8 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL);
- req->hdr.timeout = cpu_to_le32(4);
+ req->hdr.timeout = cpu_to_le32(15);
req->pattern = cpu_to_le64(pattern);
req->src_port = cpu_to_le32(port_num);
req->dest_port = cpu_to_le32(port_num);
@@ -2390,12 +2400,15 @@ int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
req->num_pkts = cpu_to_le32(num_pkts);
req->loopback_type = cpu_to_le32(loopback_type);
- status = be_mcc_notify_wait(adapter);
- if (!status) {
- struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
- status = le32_to_cpu(resp->status);
- }
+ be_mcc_notify(adapter);
+
+ spin_unlock_bh(&adapter->mcc_lock);
+ wait_for_completion(&adapter->et_cmd_compl);
+ resp = embedded_payload(wrb);
+ status = le32_to_cpu(resp->status);
+
+ return status;
err:
spin_unlock_bh(&adapter->mcc_lock);
return status;
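
The be_cmd_loopback_test() change above stops polling the mailbox and instead sleeps on et_cmd_compl, which be_mcc_compl_process() now completes for OPCODE_LOWLEVEL_LOOPBACK_TEST. A generic sketch of that pattern with the kernel completion API (names are placeholders, not benet code):

#include <linux/completion.h>

struct demo_cmd_ctx {
	struct completion done;   /* signalled from the completion handler */
	int status;               /* filled in before complete()           */
};

/* Issuer side: post the command, release locks, sleep until signalled. */
static int demo_issue_and_wait(struct demo_cmd_ctx *ctx)
{
	init_completion(&ctx->done);
	/* ... post the command to the mailbox / MCC queue here ... */
	wait_for_completion(&ctx->done);
	return ctx->status;
}

/* Completion-processing side (e.g. the MCC event path). */
static void demo_on_cmd_complete(struct demo_cmd_ctx *ctx, int status)
{
	ctx->status = status;
	complete(&ctx->done);
}
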
diff --git a/drivers/net/ethernet/emulex/benet/be_hw.h b/drivers/net/ethernet/emulex/benet/be_hw.h
index 3e21621..dc88782 100644
--- a/drivers/net/ethernet/emulex/benet/be_hw.h
+++ b/drivers/net/ethernet/emulex/benet/be_hw.h
@@ -64,6 +64,9 @@
#define SLIPORT_ERROR_NO_RESOURCE1 0x2
#define SLIPORT_ERROR_NO_RESOURCE2 0x9
+#define SLIPORT_ERROR_FW_RESET1 0x2
+#define SLIPORT_ERROR_FW_RESET2 0x0
+
/********* Memory BAR register ************/
#define PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET 0xfc
/* Host Interrupt Enable, if set interrupts are enabled although "PCI Interrupt
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index fee64bf..bf40fda 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2464,8 +2464,16 @@ void be_detect_error(struct be_adapter *adapter)
*/
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
adapter->hw_error = true;
- dev_err(&adapter->pdev->dev,
- "Error detected in the card\n");
+		/* Do not log error messages if it's a FW reset */
+ if (sliport_err1 == SLIPORT_ERROR_FW_RESET1 &&
+ sliport_err2 == SLIPORT_ERROR_FW_RESET2) {
+ dev_info(&adapter->pdev->dev,
+ "Firmware update in progress\n");
+ return;
+ } else {
+ dev_err(&adapter->pdev->dev,
+ "Error detected in the card\n");
+ }
}
if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
@@ -2736,13 +2744,16 @@ static int be_rx_qs_create(struct be_adapter *adapter)
if (!BEx_chip(adapter))
adapter->rss_flags |= RSS_ENABLE_UDP_IPV4 |
RSS_ENABLE_UDP_IPV6;
+ } else {
+ /* Disable RSS, if only default RX Q is created */
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ }
- rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
- 128);
- if (rc) {
- adapter->rss_flags = 0;
- return rc;
- }
+ rc = be_cmd_rss_config(adapter, rsstable, adapter->rss_flags,
+ 128);
+ if (rc) {
+ adapter->rss_flags = RSS_ENABLE_NONE;
+ return rc;
}
/* First time posting */
@@ -2932,28 +2943,35 @@ static void be_cancel_worker(struct be_adapter *adapter)
}
}
-static int be_clear(struct be_adapter *adapter)
+static void be_mac_clear(struct be_adapter *adapter)
{
int i;
+ if (adapter->pmac_id) {
+ for (i = 0; i < (adapter->uc_macs + 1); i++)
+ be_cmd_pmac_del(adapter, adapter->if_handle,
+ adapter->pmac_id[i], 0);
+ adapter->uc_macs = 0;
+
+ kfree(adapter->pmac_id);
+ adapter->pmac_id = NULL;
+ }
+}
+
+static int be_clear(struct be_adapter *adapter)
+{
be_cancel_worker(adapter);
if (sriov_enabled(adapter))
be_vf_clear(adapter);
/* delete the primary mac along with the uc-mac list */
- for (i = 0; i < (adapter->uc_macs + 1); i++)
- be_cmd_pmac_del(adapter, adapter->if_handle,
- adapter->pmac_id[i], 0);
- adapter->uc_macs = 0;
+ be_mac_clear(adapter);
be_cmd_if_destroy(adapter, adapter->if_handle, 0);
be_clear_queues(adapter);
- kfree(adapter->pmac_id);
- adapter->pmac_id = NULL;
-
be_msix_disable(adapter);
return 0;
}
@@ -3109,11 +3127,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
{
struct pci_dev *pdev = adapter->pdev;
bool use_sriov = false;
+ int max_vfs;
- if (BE3_chip(adapter) && sriov_want(adapter)) {
- int max_vfs;
+ max_vfs = pci_sriov_get_totalvfs(pdev);
- max_vfs = pci_sriov_get_totalvfs(pdev);
+ if (BE3_chip(adapter) && sriov_want(adapter)) {
res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
use_sriov = res->max_vfs;
}
@@ -3144,7 +3162,11 @@ static void BEx_get_resources(struct be_adapter *adapter,
BE3_MAX_RSS_QS : BE2_MAX_RSS_QS;
res->max_rx_qs = res->max_rss_qs + 1;
- res->max_evt_qs = be_physfn(adapter) ? BE3_MAX_EVT_QS : 1;
+ if (be_physfn(adapter))
+ res->max_evt_qs = (max_vfs > 0) ?
+ BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
+ else
+ res->max_evt_qs = 1;
res->if_cap_flags = BE_IF_CAP_FLAGS_WANT;
if (!(adapter->function_caps & BE_FUNCTION_CAPS_RSS))
@@ -3812,6 +3834,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
}
if (change_status == LANCER_FW_RESET_NEEDED) {
+ dev_info(&adapter->pdev->dev,
+ "Resetting adapter to activate new FW\n");
status = lancer_physdev_ctrl(adapter,
PHYSDEV_CONTROL_FW_RESET_MASK);
if (status) {
@@ -4188,7 +4212,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
spin_lock_init(&adapter->mcc_lock);
spin_lock_init(&adapter->mcc_cq_lock);
- init_completion(&adapter->flash_compl);
+ init_completion(&adapter->et_cmd_compl);
pci_save_state(adapter->pdev);
return 0;
@@ -4363,13 +4387,13 @@ static int lancer_recover_func(struct be_adapter *adapter)
goto err;
}
- dev_err(dev, "Error recovery successful\n");
+ dev_err(dev, "Adapter recovery successful\n");
return 0;
err:
if (status == -EAGAIN)
dev_err(dev, "Waiting for resource provisioning\n");
else
- dev_err(dev, "Error recovery failed\n");
+ dev_err(dev, "Adapter recovery failed\n");
return status;
}
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 4cbebf3..50bb71c 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -98,10 +98,6 @@ static void set_multicast_list(struct net_device *ndev);
* detected as not set during a prior frame transmission, then the
* ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
* were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
- * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
- * detected as not set during a prior frame transmission, then the
- * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
- * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
* frames not being transmitted until there is a 0-to-1 transition on
* ENET_TDAR[TDAR].
*/
@@ -385,7 +381,7 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
* data.
*/
bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
- FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
+ skb->len, DMA_TO_DEVICE);
if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
bdp->cbd_bufaddr = 0;
fep->tx_skbuff[index] = NULL;
@@ -432,6 +428,8 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* If this was the last BD in the ring, start at the beginning again. */
bdp = fec_enet_get_nextdesc(bdp, fep);
+ skb_tx_timestamp(skb);
+
fep->cur_tx = bdp;
if (fep->cur_tx == fep->dirty_tx)
@@ -440,8 +438,6 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
/* Trigger transmission start */
writel(0, fep->hwp + FEC_X_DES_ACTIVE);
- skb_tx_timestamp(skb);
-
return NETDEV_TX_OK;
}
@@ -779,11 +775,10 @@ fec_enet_tx(struct net_device *ndev)
else
index = bdp - fep->tx_bd_base;
- dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
- FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
- bdp->cbd_bufaddr = 0;
-
skb = fep->tx_skbuff[index];
+ dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr, skb->len,
+ DMA_TO_DEVICE);
+ bdp->cbd_bufaddr = 0;
/* Check for errors. */
if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 2d1c6bd..7628e0f 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3033,7 +3033,7 @@ static struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
dev->hw_features = NETIF_F_SG | NETIF_F_TSO |
NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_CTAG_TX;
- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO |
+ dev->features = NETIF_F_SG | NETIF_F_TSO |
NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM;
diff --git a/drivers/net/ethernet/intel/e1000e/80003es2lan.c b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
index 895450e..ff2d806 100644
--- a/drivers/net/ethernet/intel/e1000e/80003es2lan.c
+++ b/drivers/net/ethernet/intel/e1000e/80003es2lan.c
@@ -718,8 +718,11 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
e1000_release_phy_80003es2lan(hw);
/* Disable IBIST slave mode (far-end loopback) */
- e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
- &kum_reg_data);
+ ret_val =
+ e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+ &kum_reg_data);
+ if (ret_val)
+ return ret_val;
kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
kum_reg_data);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 8d3945a..c30d41d 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6174,7 +6174,7 @@ static int __e1000_resume(struct pci_dev *pdev)
return 0;
}
-#ifdef CONFIG_PM_SLEEP
+#ifdef CONFIG_PM
static int e1000_suspend(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
@@ -6193,7 +6193,7 @@ static int e1000_resume(struct device *dev)
return __e1000_resume(pdev);
}
-#endif /* CONFIG_PM_SLEEP */
+#endif /* CONFIG_PM */
#ifdef CONFIG_PM_RUNTIME
static int e1000_runtime_suspend(struct device *dev)
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index da2be59..20e71f4 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -1757,19 +1757,23 @@ s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
* it across the board.
*/
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
- if (ret_val)
+ if (ret_val) {
/* If the first read fails, another entity may have
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ msleep(usec_interval / 1000);
+ else
+ udelay(usec_interval);
+ }
ret_val = e1e_rphy(hw, MII_BMSR, &phy_status);
if (ret_val)
break;
if (phy_status & BMSR_LSTATUS)
break;
if (usec_interval >= 1000)
- mdelay(usec_interval / 1000);
+ msleep(usec_interval / 1000);
else
udelay(usec_interval);
}
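
The hunk above swaps busy-wait delays for msleep() whenever the polling interval is at least a millisecond, since spinning that long wastes CPU and the caller is in sleepable context. A minimal sketch of that interval-dependent choice, wrapped in a hypothetical helper (the function name is not part of the patch):

#include <linux/delay.h>
#include <linux/types.h>

/* Busy-wait only for sub-millisecond intervals; for anything longer,
 * sleep and let the scheduler run.  Only valid in process context when
 * usec_interval >= 1000, because msleep() may sleep.
 */
static void phy_poll_delay(u32 usec_interval)
{
	if (usec_interval >= 1000)
		msleep(usec_interval / 1000);
	else
		udelay(usec_interval);
}

The igb hunk further down makes the same >= 1000 split but keeps a busy-wait (mdelay()), which is the safe choice when the caller may not be able to sleep.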
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index be15938..12b0932 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -354,6 +354,9 @@ static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
int i;
+ if (!vsi->tx_rings)
+ return stats;
+
rcu_read_lock();
for (i = 0; i < vsi->num_queue_pairs; i++) {
struct i40e_ring *tx_ring, *rx_ring;
diff --git a/drivers/net/ethernet/intel/igb/e1000_phy.c b/drivers/net/ethernet/intel/igb/e1000_phy.c
index c4c4fe3..ad2b74d 100644
--- a/drivers/net/ethernet/intel/igb/e1000_phy.c
+++ b/drivers/net/ethernet/intel/igb/e1000_phy.c
@@ -1728,7 +1728,10 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
* ownership of the resources, wait and try again to
* see if they have relinquished the resources yet.
*/
- udelay(usec_interval);
+ if (usec_interval >= 1000)
+ mdelay(usec_interval/1000);
+ else
+ udelay(usec_interval);
}
ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
if (ret_val)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
index d6f0c0d..72084f7 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
@@ -291,7 +291,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
{
struct ixgbe_adapter *adapter = pci_get_drvdata(dev);
int err;
+#ifdef CONFIG_PCI_IOV
u32 current_flags = adapter->flags;
+#endif
err = ixgbe_disable_sriov(adapter);
diff --git a/drivers/net/ethernet/marvell/mvmdio.c b/drivers/net/ethernet/marvell/mvmdio.c
index 7354960..c4eeb69 100644
--- a/drivers/net/ethernet/marvell/mvmdio.c
+++ b/drivers/net/ethernet/marvell/mvmdio.c
@@ -92,6 +92,12 @@ static int orion_mdio_wait_ready(struct mii_bus *bus)
if (time_is_before_jiffies(end))
++timedout;
} else {
+ /* wait_event_timeout does not guarantee a delay of at
+ * least one whole jiffie, so timeout must be no less
+ * than two.
+ */
+ if (timeout < 2)
+ timeout = 2;
wait_event_timeout(dev->smi_busy_wait,
orion_mdio_smi_is_done(dev),
timeout);
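
The added comment is the whole story: wait_event_timeout() counts in jiffies, and a timeout of 1 can expire almost immediately because the current jiffy is already partly over. A small illustration of the clamp, as a hypothetical wrapper (names are invented for the sketch):

#include <linux/jiffies.h>
#include <linux/types.h>
#include <linux/wait.h>

/* Wait for *done with a timeout given in microseconds, never asking
 * wait_event_timeout() for fewer than two jiffies.
 */
static long wait_done_at_least_one_jiffy(wait_queue_head_t *wq, bool *done,
					 unsigned long usecs)
{
	unsigned long timeout = usecs_to_jiffies(usecs);

	/* A timeout of 1 may return before a whole jiffy has elapsed;
	 * 2 is the smallest value that guarantees one full jiffy.
	 */
	if (timeout < 2)
		timeout = 2;

	return wait_event_timeout(*wq, *done, timeout);
}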
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index b8e232b..d5f0d72 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1378,7 +1378,7 @@ static void mvneta_rxq_drop_pkts(struct mvneta_port *pp,
dev_kfree_skb_any(skb);
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- rx_desc->data_size, DMA_FROM_DEVICE);
+ MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
}
if (rx_done)
@@ -1424,7 +1424,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
}
dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr,
- rx_desc->data_size, DMA_FROM_DEVICE);
+ MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE);
rx_bytes = rx_desc->data_size -
(ETH_FCS_LEN + MVNETA_MH_SIZE);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5789ea2..01fc651 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2635,6 +2635,8 @@ static int __init mlx4_init(void)
return -ENOMEM;
ret = pci_register_driver(&mlx4_driver);
+ if (ret < 0)
+ destroy_workqueue(mlx4_wq);
return ret < 0 ? ret : 0;
}
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index 2d045be..1e8b951 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -5150,8 +5150,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
{
struct fe_priv *np = netdev_priv(dev);
u8 __iomem *base = get_hwbase(dev);
- int result;
- memset(buffer, 0, nv_get_sset_count(dev, ETH_SS_TEST)*sizeof(u64));
+ int result, count;
+
+ count = nv_get_sset_count(dev, ETH_SS_TEST);
+ memset(buffer, 0, count * sizeof(u64));
if (!nv_link_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
@@ -5195,7 +5197,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
return;
}
- if (!nv_loopback_test(dev)) {
+ if (count > NV_TEST_COUNT_BASE && !nv_loopback_test(dev)) {
test->flags |= ETH_TEST_FL_FAILED;
buffer[3] = 1;
}
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 7692dfd..cc68657 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -1604,13 +1604,13 @@ netxen_process_lro(struct netxen_adapter *adapter,
u32 seq_number;
u8 vhdr_len = 0;
- if (unlikely(ring > adapter->max_rds_rings))
+ if (unlikely(ring >= adapter->max_rds_rings))
return NULL;
rds_ring = &recv_ctx->rds_rings[ring];
index = netxen_get_lro_sts_refhandle(sts_data0);
- if (unlikely(index > rds_ring->num_desc))
+ if (unlikely(index >= rds_ring->num_desc))
return NULL;
buffer = &rds_ring->rx_buf_arr[index];
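
Both checks above fix the same off-by-one: with a zero-based array of num_desc (or max_rds_rings) entries, an index equal to the count is already past the end, so the guard has to use >= rather than >. In isolation:

#include <stdbool.h>
#include <stddef.h>

/* Valid positions in an array of 'count' elements are 0 .. count - 1.
 * Rejecting only index > count still lets index == count through and
 * reads one element past the end; index >= count is the right test.
 */
static bool index_in_bounds(size_t index, size_t count)
{
	return index < count;
}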
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 631ea0a..ff80cd8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -487,6 +487,7 @@ struct qlcnic_hardware_context {
struct qlcnic_mailbox *mailbox;
u8 extend_lb_time;
u8 phys_port_id[ETH_ALEN];
+ u8 lb_mode;
};
struct qlcnic_adapter_stats {
@@ -578,6 +579,8 @@ struct qlcnic_host_tx_ring {
dma_addr_t phys_addr;
dma_addr_t hw_cons_phys_addr;
struct netdev_queue *txq;
+ /* Lock to protect Tx descriptors cleanup */
+ spinlock_t tx_clean_lock;
} ____cacheline_internodealigned_in_smp;
/*
@@ -808,6 +811,7 @@ struct qlcnic_mac_list_s {
#define QLCNIC_ILB_MODE 0x1
#define QLCNIC_ELB_MODE 0x2
+#define QLCNIC_LB_MODE_MASK 0x3
#define QLCNIC_LINKEVENT 0x1
#define QLCNIC_LB_RESPONSE 0x2
@@ -1093,7 +1097,6 @@ struct qlcnic_adapter {
struct qlcnic_filter_hash rx_fhash;
struct list_head vf_mc_list;
- spinlock_t tx_clean_lock;
spinlock_t mac_learn_lock;
/* spinlock for catching rcv filters for eswitch traffic */
spinlock_t rx_mac_learn_lock;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index b1cb0ff..f776f99 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -447,8 +447,9 @@ irqreturn_t qlcnic_83xx_intr(int irq, void *data)
qlcnic_83xx_poll_process_aen(adapter);
- if (ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
- ahw->diag_cnt++;
+ if (ahw->diag_test) {
+ if (ahw->diag_test == QLCNIC_INTERRUPT_TEST)
+ ahw->diag_cnt++;
qlcnic_83xx_enable_legacy_msix_mbx_intr(adapter);
return IRQ_HANDLED;
}
@@ -1345,11 +1346,6 @@ static int qlcnic_83xx_diag_alloc_res(struct net_device *netdev, int test,
}
if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- /* disable and free mailbox interrupt */
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- qlcnic_83xx_enable_mbx_poll(adapter);
- qlcnic_83xx_free_mbx_intr(adapter);
- }
adapter->ahw->loopback_state = 0;
adapter->ahw->hw_ops->setup_link_event(adapter, 1);
}
@@ -1363,33 +1359,20 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
{
struct qlcnic_adapter *adapter = netdev_priv(netdev);
struct qlcnic_host_sds_ring *sds_ring;
- int ring, err;
+ int ring;
clear_bit(__QLCNIC_DEV_UP, &adapter->state);
if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST) {
for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
sds_ring = &adapter->recv_ctx->sds_rings[ring];
- qlcnic_83xx_disable_intr(adapter, sds_ring);
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_enable_mbx_poll(adapter);
+ if (adapter->flags & QLCNIC_MSIX_ENABLED)
+ qlcnic_83xx_disable_intr(adapter, sds_ring);
}
}
qlcnic_fw_destroy_ctx(adapter);
qlcnic_detach(adapter);
- if (adapter->ahw->diag_test == QLCNIC_LOOPBACK_TEST) {
- if (!(adapter->flags & QLCNIC_MSIX_ENABLED)) {
- err = qlcnic_83xx_setup_mbx_intr(adapter);
- qlcnic_83xx_disable_mbx_poll(adapter);
- if (err) {
- dev_err(&adapter->pdev->dev,
- "%s: failed to setup mbx interrupt\n",
- __func__);
- goto out;
- }
- }
- }
adapter->ahw->diag_test = 0;
adapter->drv_sds_rings = drv_sds_rings;
@@ -1399,9 +1382,6 @@ static void qlcnic_83xx_diag_free_res(struct net_device *netdev,
if (netif_running(netdev))
__qlcnic_up(adapter, netdev);
- if (adapter->ahw->diag_test == QLCNIC_INTERRUPT_TEST &&
- !(adapter->flags & QLCNIC_MSIX_ENABLED))
- qlcnic_83xx_disable_mbx_poll(adapter);
out:
netif_device_attach(netdev);
}
@@ -1704,12 +1684,6 @@ int qlcnic_83xx_loopback_test(struct net_device *netdev, u8 mode)
}
} while ((adapter->ahw->linkup && ahw->has_link_events) != 1);
- /* Make sure carrier is off and queue is stopped during loopback */
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
-
ret = qlcnic_do_lb_test(adapter, mode);
qlcnic_83xx_clear_lb_mode(adapter, mode);
@@ -2141,6 +2115,7 @@ static void qlcnic_83xx_handle_link_aen(struct qlcnic_adapter *adapter,
ahw->link_autoneg = MSB(MSW(data[3]));
ahw->module_type = MSB(LSW(data[3]));
ahw->has_link_events = 1;
+ ahw->lb_mode = data[4] & QLCNIC_LB_MODE_MASK;
qlcnic_advert_link_change(adapter, link_status);
}
@@ -3754,6 +3729,19 @@ static void qlcnic_83xx_decode_mbx_rsp(struct qlcnic_adapter *adapter,
return;
}
+static inline void qlcnic_dump_mailbox_registers(struct qlcnic_adapter *adapter)
+{
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 offset;
+
+ offset = QLCRDX(ahw, QLCNIC_DEF_INT_MASK);
+ dev_info(&adapter->pdev->dev, "Mbx interrupt mask=0x%x, Mbx interrupt enable=0x%x, Host mbx control=0x%x, Fw mbx control=0x%x",
+ readl(ahw->pci_base0 + offset),
+ QLCRDX(ahw, QLCNIC_MBX_INTR_ENBL),
+ QLCRDX(ahw, QLCNIC_HOST_MBX_CTRL),
+ QLCRDX(ahw, QLCNIC_FW_MBX_CTRL));
+}
+
static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
{
struct qlcnic_mailbox *mbx = container_of(work, struct qlcnic_mailbox,
@@ -3798,6 +3786,8 @@ static void qlcnic_83xx_mailbox_worker(struct work_struct *work)
__func__, cmd->cmd_op, cmd->type, ahw->pci_func,
ahw->op_mode);
clear_bit(QLC_83XX_MBX_READY, &mbx->status);
+ qlcnic_dump_mailbox_registers(adapter);
+ qlcnic_83xx_get_mbx_data(adapter, cmd);
qlcnic_dump_mbx(adapter, cmd);
qlcnic_83xx_idc_request_reset(adapter,
QLCNIC_FORCE_FW_DUMP_KEY);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 4cae6ca..a6a3350 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -662,4 +662,5 @@ pci_ers_result_t qlcnic_83xx_io_error_detected(struct pci_dev *,
pci_channel_state_t);
pci_ers_result_t qlcnic_83xx_io_slot_reset(struct pci_dev *);
void qlcnic_83xx_io_resume(struct pci_dev *);
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *);
#endif
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 89208e5..918e18d 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -740,6 +740,7 @@ static int qlcnic_83xx_idc_unknown_state(struct qlcnic_adapter *adapter)
adapter->ahw->idc.err_code = -EIO;
dev_err(&adapter->pdev->dev,
"%s: Device in unknown state\n", __func__);
+ clear_bit(__QLCNIC_RESETTING, &adapter->state);
return 0;
}
@@ -818,7 +819,6 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_mailbox *mbx = ahw->mailbox;
int ret = 0;
- u32 owner;
u32 val;
/* Perform NIC configuration based ready state entry actions */
@@ -848,9 +848,9 @@ static int qlcnic_83xx_idc_ready_state(struct qlcnic_adapter *adapter)
set_bit(__QLCNIC_RESETTING, &adapter->state);
qlcnic_83xx_idc_enter_need_reset_state(adapter, 1);
} else {
- owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
- if (ahw->pci_func == owner)
- qlcnic_dump_fw(adapter);
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
}
return -EIO;
}
@@ -948,13 +948,26 @@ static int qlcnic_83xx_idc_need_quiesce_state(struct qlcnic_adapter *adapter)
return 0;
}
-static int qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
+static void qlcnic_83xx_idc_failed_state(struct qlcnic_adapter *adapter)
{
- dev_err(&adapter->pdev->dev, "%s: please restart!!\n", __func__);
+ struct qlcnic_hardware_context *ahw = adapter->ahw;
+ u32 val, owner;
+
+ val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ owner = qlcnic_83xx_idc_find_reset_owner_id(adapter);
+ if (ahw->pci_func == owner) {
+ qlcnic_83xx_stop_hw(adapter);
+ qlcnic_dump_fw(adapter);
+ }
+ }
+
+ netdev_warn(adapter->netdev, "%s: Reboot will be required to recover the adapter!!\n",
+ __func__);
clear_bit(__QLCNIC_RESETTING, &adapter->state);
- adapter->ahw->idc.err_code = -EIO;
+ ahw->idc.err_code = -EIO;
- return 0;
+ return;
}
static int qlcnic_83xx_idc_quiesce_state(struct qlcnic_adapter *adapter)
@@ -1063,12 +1076,6 @@ void qlcnic_83xx_idc_poll_dev_state(struct work_struct *work)
adapter->ahw->idc.prev_state = adapter->ahw->idc.curr_state;
qlcnic_83xx_periodic_tasks(adapter);
- /* Do not reschedule if firmaware is in hanged state and auto
- * recovery is disabled
- */
- if ((adapter->flags & QLCNIC_FW_HANG) && !qlcnic_auto_fw_reset)
- return;
-
/* Re-schedule the function */
if (test_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status))
qlcnic_schedule_work(adapter, qlcnic_83xx_idc_poll_dev_state,
@@ -1219,10 +1226,10 @@ void qlcnic_83xx_idc_request_reset(struct qlcnic_adapter *adapter, u32 key)
}
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
- if ((val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) ||
- !qlcnic_auto_fw_reset) {
- dev_err(&adapter->pdev->dev,
- "%s:failed, device in non reset mode\n", __func__);
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 0);
qlcnic_83xx_unlock_driver(adapter);
return;
}
@@ -1254,24 +1261,24 @@ static int qlcnic_83xx_copy_bootloader(struct qlcnic_adapter *adapter)
if (size & 0xF)
size = (size + 16) & ~0xF;
- p_cache = kzalloc(size, GFP_KERNEL);
+ p_cache = vzalloc(size);
if (p_cache == NULL)
return -ENOMEM;
ret = qlcnic_83xx_lockless_flash_read32(adapter, src, p_cache,
size / sizeof(u32));
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
/* 16 byte write to MS memory */
ret = qlcnic_83xx_ms_mem_write128(adapter, dest, (u32 *)p_cache,
size / 16);
if (ret) {
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
- kfree(p_cache);
+ vfree(p_cache);
return ret;
}
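
The kzalloc()-to-vzalloc() switch above matters because the bootloader image can be large: kzalloc() needs physically contiguous pages and may fail under fragmentation, while vzalloc() only needs virtually contiguous ones. A reduced sketch of the pattern, with the flash read abstracted into a callback (the helper is illustrative, not the driver's code):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

/* Allocate a large scratch buffer with vzalloc(), pass it to a copy
 * routine, and release it with the matching vfree() on every path.
 */
static int copy_through_vmalloc_buffer(size_t size,
				       int (*copy)(void *buf, size_t size))
{
	void *buf = vzalloc(size);
	int ret;

	if (!buf)
		return -ENOMEM;

	ret = copy(buf, size);
	vfree(buf);		/* never kfree() vzalloc()ed memory */
	return ret;
}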
@@ -1939,7 +1946,7 @@ static void qlcnic_83xx_exec_template_cmd(struct qlcnic_adapter *p_dev,
p_dev->ahw->reset.seq_index = index;
}
-static void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
+void qlcnic_83xx_stop_hw(struct qlcnic_adapter *p_dev)
{
p_dev->ahw->reset.seq_index = 0;
@@ -1994,6 +2001,14 @@ static int qlcnic_83xx_restart_hw(struct qlcnic_adapter *adapter)
val = QLCRDX(adapter->ahw, QLC_83XX_IDC_CTRL);
if (!(val & QLC_83XX_IDC_GRACEFULL_RESET))
qlcnic_dump_fw(adapter);
+
+ if (val & QLC_83XX_IDC_DISABLE_FW_RESET_RECOVERY) {
+ netdev_info(adapter->netdev, "%s: Auto firmware recovery is disabled\n",
+ __func__);
+ qlcnic_83xx_idc_enter_failed_state(adapter, 1);
+ return err;
+ }
+
qlcnic_83xx_init_hw(adapter);
if (qlcnic_83xx_copy_bootloader(adapter))
@@ -2073,8 +2088,8 @@ int qlcnic_83xx_configure_opmode(struct qlcnic_adapter *adapter)
ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
- adapter->max_sds_rings = ahw->max_rx_ques;
- adapter->max_tx_rings = ahw->max_tx_ques;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
+ adapter->max_tx_rings = QLCNIC_MAX_TX_RINGS;
} else {
return -EIO;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
index b36c02f..e3be276 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_ethtool.c
@@ -667,30 +667,25 @@ qlcnic_set_ringparam(struct net_device *dev,
static int qlcnic_validate_ring_count(struct qlcnic_adapter *adapter,
u8 rx_ring, u8 tx_ring)
{
+ if (rx_ring == 0 || tx_ring == 0)
+ return -EINVAL;
+
if (rx_ring != 0) {
if (rx_ring > adapter->max_sds_rings) {
- netdev_err(adapter->netdev, "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
+ netdev_err(adapter->netdev,
+ "Invalid ring count, SDS ring count %d should not be greater than max %d driver sds rings.\n",
rx_ring, adapter->max_sds_rings);
return -EINVAL;
}
}
if (tx_ring != 0) {
- if (qlcnic_82xx_check(adapter) &&
- (tx_ring > adapter->max_tx_rings)) {
+ if (tx_ring > adapter->max_tx_rings) {
netdev_err(adapter->netdev,
"Invalid ring count, Tx ring count %d should not be greater than max %d driver Tx rings.\n",
tx_ring, adapter->max_tx_rings);
return -EINVAL;
}
-
- if (qlcnic_83xx_check(adapter) &&
- (tx_ring > QLCNIC_SINGLE_RING)) {
- netdev_err(adapter->netdev,
- "Invalid ring count, Tx ring count %d should not be greater than %d driver Tx rings.\n",
- tx_ring, QLCNIC_SINGLE_RING);
- return -EINVAL;
- }
}
return 0;
@@ -948,6 +943,7 @@ static int qlcnic_irq_test(struct net_device *netdev)
struct qlcnic_hardware_context *ahw = adapter->ahw;
struct qlcnic_cmd_args cmd;
int ret, drv_sds_rings = adapter->drv_sds_rings;
+ int drv_tx_rings = adapter->drv_tx_rings;
if (qlcnic_83xx_check(adapter))
return qlcnic_83xx_interrupt_test(netdev);
@@ -980,6 +976,7 @@ free_diag_res:
clear_diag_irq:
adapter->drv_sds_rings = drv_sds_rings;
+ adapter->drv_tx_rings = drv_tx_rings;
clear_bit(__QLCNIC_RESETTING, &adapter->state);
return ret;
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
index e9c21e5..c4262c2 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_init.c
@@ -134,6 +134,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
struct qlcnic_skb_frag *buffrag;
int i, j;
+ spin_lock(&tx_ring->tx_clean_lock);
+
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -157,6 +159,8 @@ void qlcnic_release_tx_buffers(struct qlcnic_adapter *adapter,
}
cmd_buf++;
}
+
+ spin_unlock(&tx_ring->tx_clean_lock);
}
void qlcnic_free_sw_resources(struct qlcnic_adapter *adapter)
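
Taken together with the qlcnic.h and qlcnic_main.c hunks, this change moves the Tx-cleanup spinlock from the adapter into each Tx ring, so cleaning one ring no longer serializes the others, and buffer release now runs under that lock. A stripped-down sketch of the per-ring pattern (the structure and function names are simplified stand-ins):

#include <linux/spinlock.h>

/* Simplified per-ring state: one cleanup lock per Tx ring instead of a
 * single adapter-wide lock.
 */
struct tx_ring_sketch {
	spinlock_t tx_clean_lock;	/* protects descriptor cleanup */
	/* descriptors, producer/consumer indices, ... */
};

static void tx_ring_sketch_init(struct tx_ring_sketch *ring)
{
	spin_lock_init(&ring->tx_clean_lock);
}

static void tx_ring_sketch_release_buffers(struct tx_ring_sketch *ring)
{
	spin_lock(&ring->tx_clean_lock);
	/* unmap and free this ring's pending Tx buffers */
	spin_unlock(&ring->tx_clean_lock);
}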
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index 0149c94..ad1531a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -687,17 +687,15 @@ void qlcnic_advert_link_change(struct qlcnic_adapter *adapter, int linkup)
if (adapter->ahw->linkup && !linkup) {
netdev_info(netdev, "NIC Link is down\n");
adapter->ahw->linkup = 0;
- if (netif_running(netdev)) {
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
- }
+ netif_carrier_off(netdev);
} else if (!adapter->ahw->linkup && linkup) {
+ /* Do not advertise Link up if the port is in loopback mode */
+ if (qlcnic_83xx_check(adapter) && adapter->ahw->lb_mode)
+ return;
+
netdev_info(netdev, "NIC Link is up\n");
adapter->ahw->linkup = 1;
- if (netif_running(netdev)) {
- netif_carrier_on(netdev);
- netif_wake_queue(netdev);
- }
+ netif_carrier_on(netdev);
}
}
@@ -784,7 +782,7 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
struct net_device *netdev = adapter->netdev;
struct qlcnic_skb_frag *frag;
- if (!spin_trylock(&adapter->tx_clean_lock))
+ if (!spin_trylock(&tx_ring->tx_clean_lock))
return 1;
sw_consumer = tx_ring->sw_consumer;
@@ -813,8 +811,9 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
break;
}
+ tx_ring->sw_consumer = sw_consumer;
+
if (count && netif_running(netdev)) {
- tx_ring->sw_consumer = sw_consumer;
smp_mb();
if (netif_tx_queue_stopped(tx_ring->txq) &&
netif_carrier_ok(netdev)) {
@@ -840,7 +839,8 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter,
*/
hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
done = (sw_consumer == hw_consumer);
- spin_unlock(&adapter->tx_clean_lock);
+
+ spin_unlock(&tx_ring->tx_clean_lock);
return done;
}
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index 05c1eef..b8a245a 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -1178,6 +1178,7 @@ qlcnic_initialize_nic(struct qlcnic_adapter *adapter)
} else {
adapter->ahw->nic_mode = QLCNIC_DEFAULT_MODE;
adapter->max_tx_rings = QLCNIC_MAX_HW_TX_RINGS;
+ adapter->max_sds_rings = QLCNIC_MAX_SDS_RINGS;
adapter->flags &= ~QLCNIC_ESWITCH_ENABLED;
}
@@ -1755,7 +1756,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
if (qlcnic_sriov_vf_check(adapter))
qlcnic_sriov_cleanup_async_list(&adapter->ahw->sriov->bc);
smp_mb();
- spin_lock(&adapter->tx_clean_lock);
netif_carrier_off(netdev);
adapter->ahw->linkup = 0;
netif_tx_disable(netdev);
@@ -1776,7 +1776,6 @@ void __qlcnic_down(struct qlcnic_adapter *adapter, struct net_device *netdev)
for (ring = 0; ring < adapter->drv_tx_rings; ring++)
qlcnic_release_tx_buffers(adapter, &adapter->tx_ring[ring]);
- spin_unlock(&adapter->tx_clean_lock);
}
/* Usage: During suspend and firmware recovery module */
@@ -1940,7 +1939,6 @@ int qlcnic_diag_alloc_res(struct net_device *netdev, int test)
qlcnic_detach(adapter);
adapter->drv_sds_rings = QLCNIC_SINGLE_RING;
- adapter->drv_tx_rings = QLCNIC_SINGLE_RING;
adapter->ahw->diag_test = test;
adapter->ahw->linkup = 0;
@@ -2172,6 +2170,7 @@ int qlcnic_alloc_tx_rings(struct qlcnic_adapter *adapter,
}
memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
tx_ring->cmd_buf_arr = cmd_buf_arr;
+ spin_lock_init(&tx_ring->tx_clean_lock);
}
if (qlcnic_83xx_check(adapter) ||
@@ -2299,7 +2298,6 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
rwlock_init(&adapter->ahw->crb_lock);
mutex_init(&adapter->ahw->mem_lock);
- spin_lock_init(&adapter->tx_clean_lock);
INIT_LIST_HEAD(&adapter->mac_list);
qlcnic_register_dcb(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
index 686f460..024f816 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_pf.c
@@ -75,7 +75,6 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
num_vfs = sriov->num_vfs;
max = num_vfs + 1;
info->bit_offsets = 0xffff;
- info->max_tx_ques = res->num_tx_queues / max;
info->max_rx_mcast_mac_filters = res->num_rx_mcast_mac_filters;
num_vf_macs = QLCNIC_SRIOV_VF_MAX_MAC;
@@ -86,6 +85,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_mac_filters = temp;
info->min_tx_bw = 0;
info->max_tx_bw = MAX_BW;
+ info->max_tx_ques = res->num_tx_queues - sriov->num_vfs;
} else {
id = qlcnic_sriov_func_to_index(adapter, func);
if (id < 0)
@@ -95,6 +95,7 @@ static int qlcnic_sriov_pf_cal_res_limit(struct qlcnic_adapter *adapter,
info->max_tx_bw = vp->max_tx_bw;
info->max_rx_ucast_mac_filters = num_vf_macs;
info->max_tx_mac_filters = num_vf_macs;
+ info->max_tx_ques = QLCNIC_SINGLE_RING;
}
info->max_rx_ip_addr = res->num_destip / max;
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge.h b/drivers/net/ethernet/qlogic/qlge/qlge.h
index 0c9c4e8..0351747 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge.h
+++ b/drivers/net/ethernet/qlogic/qlge/qlge.h
@@ -18,7 +18,7 @@
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
-#define DRV_VERSION "1.00.00.33"
+#define DRV_VERSION "1.00.00.34"
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
index 0780e03..8dee1be 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_ethtool.c
@@ -181,6 +181,7 @@ static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
};
#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
#define QLGE_STATS_LEN ARRAY_SIZE(ql_gstrings_stats)
+#define QLGE_RCV_MAC_ERR_STATS 7
static int ql_update_ring_coalescing(struct ql_adapter *qdev)
{
@@ -280,6 +281,9 @@ static void ql_update_stats(struct ql_adapter *qdev)
iter++;
}
+ /* Update receive mac error statistics */
+ iter += QLGE_RCV_MAC_ERR_STATS;
+
/*
* Get Per-priority TX pause frame counter statistics.
*/
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index a245dc1..449f506 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2376,14 +2376,6 @@ static netdev_features_t qlge_fix_features(struct net_device *ndev,
netdev_features_t features)
{
int err;
- /*
- * Since there is no support for separate rx/tx vlan accel
- * enable/disable make sure tx flag is always in same state as rx.
- */
- if (features & NETIF_F_HW_VLAN_CTAG_RX)
- features |= NETIF_F_HW_VLAN_CTAG_TX;
- else
- features &= ~NETIF_F_HW_VLAN_CTAG_TX;
/* Update the behavior of vlan accel in the adapter */
err = qlge_update_hw_vlan_features(ndev, features);
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 2e27837..fd844b5 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -585,7 +585,7 @@ static void efx_start_datapath(struct efx_nic *efx)
EFX_MAX_FRAME_LEN(efx->net_dev->mtu) +
efx->type->rx_buffer_padding);
rx_buf_len = (sizeof(struct efx_rx_page_state) +
- NET_IP_ALIGN + efx->rx_dma_len);
+ efx->rx_ip_align + efx->rx_dma_len);
if (rx_buf_len <= PAGE_SIZE) {
efx->rx_scatter = efx->type->always_rx_scatter;
efx->rx_buffer_order = 0;
@@ -645,6 +645,8 @@ static void efx_start_datapath(struct efx_nic *efx)
WARN_ON(channel->rx_pkt_n_frags);
}
+ efx_ptp_start_datapath(efx);
+
if (netif_device_present(efx->net_dev))
netif_tx_wake_all_queues(efx->net_dev);
}
@@ -659,6 +661,8 @@ static void efx_stop_datapath(struct efx_nic *efx)
EFX_ASSERT_RESET_SERIALISED(efx);
BUG_ON(efx->port_enabled);
+ efx_ptp_stop_datapath(efx);
+
/* Stop RX refill */
efx_for_each_channel(channel, efx) {
efx_for_each_channel_rx_queue(rx_queue, channel)
@@ -2540,6 +2544,8 @@ static int efx_init_struct(struct efx_nic *efx,
efx->net_dev = net_dev;
efx->rx_prefix_size = efx->type->rx_prefix_size;
+ efx->rx_ip_align =
+ NET_IP_ALIGN ? (efx->rx_prefix_size + NET_IP_ALIGN) % 4 : 0;
efx->rx_packet_hash_offset =
efx->type->rx_hash_offset - efx->type->rx_prefix_size;
spin_lock_init(&efx->stats_lock);
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 366c8e3..4b0bd8a 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -50,6 +50,7 @@ struct efx_mcdi_async_param {
static void efx_mcdi_timeout_async(unsigned long context);
static int efx_mcdi_drv_attach(struct efx_nic *efx, bool driver_operating,
bool *was_attached_out);
+static bool efx_mcdi_poll_once(struct efx_nic *efx);
static inline struct efx_mcdi_iface *efx_mcdi(struct efx_nic *efx)
{
@@ -237,6 +238,21 @@ static void efx_mcdi_read_response_header(struct efx_nic *efx)
}
}
+static bool efx_mcdi_poll_once(struct efx_nic *efx)
+{
+ struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
+
+ rmb();
+ if (!efx->type->mcdi_poll_response(efx))
+ return false;
+
+ spin_lock_bh(&mcdi->iface_lock);
+ efx_mcdi_read_response_header(efx);
+ spin_unlock_bh(&mcdi->iface_lock);
+
+ return true;
+}
+
static int efx_mcdi_poll(struct efx_nic *efx)
{
struct efx_mcdi_iface *mcdi = efx_mcdi(efx);
@@ -272,18 +288,13 @@ static int efx_mcdi_poll(struct efx_nic *efx)
time = jiffies;
- rmb();
- if (efx->type->mcdi_poll_response(efx))
+ if (efx_mcdi_poll_once(efx))
break;
if (time_after(time, finish))
return -ETIMEDOUT;
}
- spin_lock_bh(&mcdi->iface_lock);
- efx_mcdi_read_response_header(efx);
- spin_unlock_bh(&mcdi->iface_lock);
-
/* Return rc=0 like wait_event_timeout() */
return 0;
}
@@ -619,6 +630,16 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
rc = efx_mcdi_await_completion(efx);
if (rc != 0) {
+ netif_err(efx, hw, efx->net_dev,
+ "MC command 0x%x inlen %d mode %d timed out\n",
+ cmd, (int)inlen, mcdi->mode);
+
+ if (mcdi->mode == MCDI_MODE_EVENTS && efx_mcdi_poll_once(efx)) {
+ netif_err(efx, hw, efx->net_dev,
+ "MCDI request was completed without an event\n");
+ rc = 0;
+ }
+
/* Close the race with efx_mcdi_ev_cpl() executing just too late
* and completing a request we've just cancelled, by ensuring
* that the seqno check therein fails.
@@ -627,11 +648,9 @@ int efx_mcdi_rpc_finish(struct efx_nic *efx, unsigned cmd, size_t inlen,
++mcdi->seqno;
++mcdi->credits;
spin_unlock_bh(&mcdi->iface_lock);
+ }
- netif_err(efx, hw, efx->net_dev,
- "MC command 0x%x inlen %d mode %d timed out\n",
- cmd, (int)inlen, mcdi->mode);
- } else {
+ if (rc == 0) {
size_t hdr_len, data_len;
/* At the very least we need a memory barrier here to ensure
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index b14a717..542a0d2 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -683,6 +683,8 @@ struct vfdi_status;
* @n_channels: Number of channels in use
* @n_rx_channels: Number of channels used for RX (= number of RX queues)
* @n_tx_channels: Number of channels used for TX
+ * @rx_ip_align: RX DMA address offset to have IP header aligned
+ * in accordance with NET_IP_ALIGN
* @rx_dma_len: Current maximum RX DMA length
* @rx_buffer_order: Order (log2) of number of pages for each RX buffer
* @rx_buffer_truesize: Amortised allocation size of an RX buffer,
@@ -816,6 +818,7 @@ struct efx_nic {
unsigned rss_spread;
unsigned tx_channel_offset;
unsigned n_tx_channels;
+ unsigned int rx_ip_align;
unsigned int rx_dma_len;
unsigned int rx_buffer_order;
unsigned int rx_buffer_truesize;
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 11b6112..91c63ec7 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -560,6 +560,8 @@ void efx_ptp_get_ts_info(struct efx_nic *efx, struct ethtool_ts_info *ts_info);
bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
int efx_ptp_tx(struct efx_nic *efx, struct sk_buff *skb);
void efx_ptp_event(struct efx_nic *efx, efx_qword_t *ev);
+void efx_ptp_start_datapath(struct efx_nic *efx);
+void efx_ptp_stop_datapath(struct efx_nic *efx);
extern const struct efx_nic_type falcon_a1_nic_type;
extern const struct efx_nic_type falcon_b0_nic_type;
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index 03acf57..3dd39dc 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -220,6 +220,7 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
+ * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -270,6 +271,7 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
+ bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -635,6 +637,11 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
+ /* If the event overflow flag is set and the event list is now empty
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -676,6 +683,11 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
+ /* If the event overflow flag is set and the event list is now empty
+ * clear the flag to re-enable the overflow warning message.
+ */
+ if (ptp->evt_overflow && list_empty(&ptp->evt_list))
+ ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -705,8 +717,9 @@ static bool efx_ptp_process_events(struct efx_nic *efx, struct sk_buff_head *q)
__skb_queue_tail(q, skb);
} else if (time_after(jiffies, match->expiry)) {
match->state = PTP_PACKET_STATE_TIMED_OUT;
- netif_warn(efx, rx_err, efx->net_dev,
- "PTP packet - no timestamp seen\n");
+ if (net_ratelimit())
+ netif_warn(efx, rx_err, efx->net_dev,
+ "PTP packet - no timestamp seen\n");
__skb_queue_tail(q, skb);
} else {
/* Replace unprocessed entry and stop */
@@ -788,9 +801,14 @@ fail:
static int efx_ptp_stop(struct efx_nic *efx)
{
struct efx_ptp_data *ptp = efx->ptp_data;
- int rc = efx_ptp_disable(efx);
struct list_head *cursor;
struct list_head *next;
+ int rc;
+
+ if (ptp == NULL)
+ return 0;
+
+ rc = efx_ptp_disable(efx);
if (ptp->rxfilter_installed) {
efx_filter_remove_id_safe(efx, EFX_FILTER_PRI_REQUIRED,
@@ -809,11 +827,19 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
+ ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
}
+static int efx_ptp_restart(struct efx_nic *efx)
+{
+ if (efx->ptp_data && efx->ptp_data->enabled)
+ return efx_ptp_start(efx);
+ return 0;
+}
+
static void efx_ptp_pps_worker(struct work_struct *work)
{
struct efx_ptp_data *ptp =
@@ -901,6 +927,7 @@ static int efx_ptp_probe_channel(struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
+ ptp->evt_overflow = false;
ptp->phc_clock_info.owner = THIS_MODULE;
snprintf(ptp->phc_clock_info.name,
@@ -989,7 +1016,11 @@ bool efx_ptp_is_ptp_tx(struct efx_nic *efx, struct sk_buff *skb)
skb->len >= PTP_MIN_LENGTH &&
skb->len <= MC_CMD_PTP_IN_TRANSMIT_PACKET_MAXNUM &&
likely(skb->protocol == htons(ETH_P_IP)) &&
+ skb_transport_header_was_set(skb) &&
+ skb_network_header_len(skb) >= sizeof(struct iphdr) &&
ip_hdr(skb)->protocol == IPPROTO_UDP &&
+ skb_headlen(skb) >=
+ skb_transport_offset(skb) + sizeof(struct udphdr) &&
udp_hdr(skb)->dest == htons(PTP_EVENT_PORT);
}
@@ -1106,7 +1137,7 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
{
if ((enable_wanted != efx->ptp_data->enabled) ||
(enable_wanted && (efx->ptp_data->mode != new_mode))) {
- int rc;
+ int rc = 0;
if (enable_wanted) {
/* Change of mode requires disable */
@@ -1123,7 +1154,8 @@ static int efx_ptp_change_mode(struct efx_nic *efx, bool enable_wanted,
* succeed.
*/
efx->ptp_data->mode = new_mode;
- rc = efx_ptp_start(efx);
+ if (netif_running(efx->net_dev))
+ rc = efx_ptp_start(efx);
if (rc == 0) {
rc = efx_ptp_synchronize(efx,
PTP_SYNC_ATTEMPTS * 2);
@@ -1295,8 +1327,13 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
- } else {
- netif_err(efx, rx_err, efx->net_dev, "No free PTP event");
+ } else if (!ptp->evt_overflow) {
+ /* Log a warning message and set the event overflow flag.
+ * The message won't be logged again until the event queue
+ * becomes empty.
+ */
+ netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
+ ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
@@ -1389,7 +1426,7 @@ static int efx_phc_adjfreq(struct ptp_clock_info *ptp, s32 delta)
if (rc != 0)
return rc;
- ptp_data->current_adjfreq = delta;
+ ptp_data->current_adjfreq = adjustment_ns;
return 0;
}
@@ -1404,7 +1441,7 @@ static int efx_phc_adjtime(struct ptp_clock_info *ptp, s64 delta)
MCDI_SET_DWORD(inbuf, PTP_IN_OP, MC_CMD_PTP_OP_ADJUST);
MCDI_SET_DWORD(inbuf, PTP_IN_PERIPH_ID, 0);
- MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, 0);
+ MCDI_SET_QWORD(inbuf, PTP_IN_ADJUST_FREQ, ptp_data->current_adjfreq);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_SECONDS, (u32)delta_ts.tv_sec);
MCDI_SET_DWORD(inbuf, PTP_IN_ADJUST_NANOSECONDS, (u32)delta_ts.tv_nsec);
return efx_mcdi_rpc(efx, MC_CMD_PTP, inbuf, sizeof(inbuf),
@@ -1491,3 +1528,14 @@ void efx_ptp_probe(struct efx_nic *efx)
efx->extra_channel_type[EFX_EXTRA_CHANNEL_PTP] =
&efx_ptp_channel_type;
}
+
+void efx_ptp_start_datapath(struct efx_nic *efx)
+{
+ if (efx_ptp_restart(efx))
+ netif_err(efx, drv, efx->net_dev, "Failed to restart PTP.\n");
+}
+
+void efx_ptp_stop_datapath(struct efx_nic *efx)
+{
+ efx_ptp_stop(efx);
+}
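
The evt_overflow handling above replaces a per-event error message with a latch: warn once when the event list fills up, stay quiet while it stays full, and re-arm the warning only after the list has drained. The same pattern in miniature (names are made up for the illustration):

#include <stdbool.h>
#include <stdio.h>

struct evt_queue_sketch {
	int count;
	int capacity;
	bool overflowed;	/* latched on the first dropped event */
};

static void evt_enqueue(struct evt_queue_sketch *q)
{
	if (q->count < q->capacity) {
		q->count++;
	} else if (!q->overflowed) {
		fprintf(stderr, "event queue overflow\n");
		q->overflowed = true;	/* warn once until the queue drains */
	}
}

static void evt_drain(struct evt_queue_sketch *q)
{
	q->count = 0;
	q->overflowed = false;	/* empty again: re-arm the warning */
}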
diff --git a/drivers/net/ethernet/sfc/rx.c b/drivers/net/ethernet/sfc/rx.c
index 8f09e68..42488df 100644
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -94,7 +94,7 @@ static inline void efx_sync_rx_buffer(struct efx_nic *efx,
void efx_rx_config_page_split(struct efx_nic *efx)
{
- efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + NET_IP_ALIGN,
+ efx->rx_page_buf_step = ALIGN(efx->rx_dma_len + efx->rx_ip_align,
EFX_RX_BUF_ALIGNMENT);
efx->rx_bufs_per_page = efx->rx_buffer_order ? 1 :
((PAGE_SIZE - sizeof(struct efx_rx_page_state)) /
@@ -189,9 +189,9 @@ static int efx_init_rx_buffers(struct efx_rx_queue *rx_queue)
do {
index = rx_queue->added_count & rx_queue->ptr_mask;
rx_buf = efx_rx_buffer(rx_queue, index);
- rx_buf->dma_addr = dma_addr + NET_IP_ALIGN;
+ rx_buf->dma_addr = dma_addr + efx->rx_ip_align;
rx_buf->page = page;
- rx_buf->page_offset = page_offset + NET_IP_ALIGN;
+ rx_buf->page_offset = page_offset + efx->rx_ip_align;
rx_buf->len = efx->rx_dma_len;
rx_buf->flags = 0;
++rx_queue->added_count;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0c9b5d9..8bf29eb 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -82,6 +82,7 @@ static const char version[] =
#include <linux/mii.h>
#include <linux/workqueue.h>
#include <linux/of.h>
+#include <linux/of_device.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
@@ -2184,6 +2185,15 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
}
}
+#if IS_BUILTIN(CONFIG_OF)
+static const struct of_device_id smc91x_match[] = {
+ { .compatible = "smsc,lan91c94", },
+ { .compatible = "smsc,lan91c111", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, smc91x_match);
+#endif
+
/*
* smc_init(void)
* Input parameters:
@@ -2198,6 +2208,7 @@ static void smc_release_datacs(struct platform_device *pdev, struct net_device *
static int smc_drv_probe(struct platform_device *pdev)
{
struct smc91x_platdata *pd = dev_get_platdata(&pdev->dev);
+ const struct of_device_id *match = NULL;
struct smc_local *lp;
struct net_device *ndev;
struct resource *res, *ires;
@@ -2217,11 +2228,34 @@ static int smc_drv_probe(struct platform_device *pdev)
*/
lp = netdev_priv(ndev);
+ lp->cfg.flags = 0;
if (pd) {
memcpy(&lp->cfg, pd, sizeof(lp->cfg));
lp->io_shift = SMC91X_IO_SHIFT(lp->cfg.flags);
- } else {
+ }
+
+#if IS_BUILTIN(CONFIG_OF)
+ match = of_match_device(of_match_ptr(smc91x_match), &pdev->dev);
+ if (match) {
+ struct device_node *np = pdev->dev.of_node;
+ u32 val;
+
+ /* Combination of IO widths supported, default to 16-bit */
+ if (!of_property_read_u32(np, "reg-io-width", &val)) {
+ if (val & 1)
+ lp->cfg.flags |= SMC91X_USE_8BIT;
+ if ((val == 0) || (val & 2))
+ lp->cfg.flags |= SMC91X_USE_16BIT;
+ if (val & 4)
+ lp->cfg.flags |= SMC91X_USE_32BIT;
+ } else {
+ lp->cfg.flags |= SMC91X_USE_16BIT;
+ }
+ }
+#endif
+
+ if (!pd && !match) {
lp->cfg.flags |= (SMC_CAN_USE_8BIT) ? SMC91X_USE_8BIT : 0;
lp->cfg.flags |= (SMC_CAN_USE_16BIT) ? SMC91X_USE_16BIT : 0;
lp->cfg.flags |= (SMC_CAN_USE_32BIT) ? SMC91X_USE_32BIT : 0;
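
The probe code above treats the optional reg-io-width property as a bitmask of supported access widths (bit 0 = 8-bit, bit 1 = 16-bit, bit 2 = 32-bit) and falls back to 16-bit when the property is missing or zero. The decode on its own, assuming the same SMC91X_USE_* flags (the helper name is invented):

#include <linux/of.h>
#include <linux/smc91x.h>
#include <linux/types.h>

/* Map an optional "reg-io-width" bitmask onto SMC91X_USE_* flags,
 * defaulting to 16-bit access when the property is absent or zero.
 */
static unsigned long smc91x_io_width_flags(struct device_node *np)
{
	unsigned long flags = 0;
	u32 val;

	if (of_property_read_u32(np, "reg-io-width", &val))
		return SMC91X_USE_16BIT;

	if (val & 1)
		flags |= SMC91X_USE_8BIT;
	if ((val == 0) || (val & 2))
		flags |= SMC91X_USE_16BIT;
	if (val & 4)
		flags |= SMC91X_USE_32BIT;

	return flags;
}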
@@ -2370,15 +2404,6 @@ static int smc_drv_resume(struct device *dev)
return 0;
}
-#ifdef CONFIG_OF
-static const struct of_device_id smc91x_match[] = {
- { .compatible = "smsc,lan91c94", },
- { .compatible = "smsc,lan91c111", },
- {},
-};
-MODULE_DEVICE_TABLE(of, smc91x_match);
-#endif
-
static struct dev_pm_ops smc_drv_pm_ops = {
.suspend = smc_drv_suspend,
.resume = smc_drv_resume,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 8a7a23a..797b56a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -622,17 +622,15 @@ static int stmmac_init_ptp(struct stmmac_priv *priv)
if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
return -EOPNOTSUPP;
- if (netif_msg_hw(priv)) {
- if (priv->dma_cap.time_stamp) {
- pr_debug("IEEE 1588-2002 Time Stamp supported\n");
- priv->adv_ts = 0;
- }
- if (priv->dma_cap.atime_stamp && priv->extend_desc) {
- pr_debug
- ("IEEE 1588-2008 Advanced Time Stamp supported\n");
- priv->adv_ts = 1;
- }
- }
+ priv->adv_ts = 0;
+ if (priv->dma_cap.atime_stamp && priv->extend_desc)
+ priv->adv_ts = 1;
+
+ if (netif_msg_hw(priv) && priv->dma_cap.time_stamp)
+ pr_debug("IEEE 1588-2002 Time Stamp supported\n");
+
+ if (netif_msg_hw(priv) && priv->adv_ts)
+ pr_debug("IEEE 1588-2008 Advanced Time Stamp supported\n");
priv->hw->ptp = &stmmac_ptp;
priv->hwts_tx_en = 0;
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
index b8b0eee..7680581 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ptp.c
@@ -56,7 +56,7 @@ static int stmmac_adjust_freq(struct ptp_clock_info *ptp, s32 ppb)
priv->hw->ptp->config_addend(priv->ioaddr, addend);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
@@ -91,7 +91,7 @@ static int stmmac_adjust_time(struct ptp_clock_info *ptp, s64 delta)
priv->hw->ptp->adjust_systime(priv->ioaddr, sec, nsec, neg_adj);
- spin_unlock_irqrestore(&priv->lock, flags);
+ spin_unlock_irqrestore(&priv->ptp_lock, flags);
return 0;
}
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index dd0dd627..4f1d254 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -2019,7 +2019,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO
| NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
NETIF_F_HW_VLAN_CTAG_FILTER | NETIF_F_RXCSUM
- /*| NETIF_F_FRAGLIST */
;
ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
NETIF_F_TSO | NETIF_F_HW_VLAN_CTAG_TX;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 7536a4c0..5330fd2 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -740,6 +740,8 @@ static void _cpsw_adjust_link(struct cpsw_slave *slave,
/* set speed_in input in case RMII mode is used in 100Mbps */
if (phy->speed == 100)
mac_control |= BIT(15);
+ else if (phy->speed == 10)
+ mac_control |= BIT(18); /* In Band mode */
*link = true;
} else {
@@ -1151,6 +1153,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
* receive descs
*/
cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);
+
+ if (cpts_register(&priv->pdev->dev, priv->cpts,
+ priv->data.cpts_clock_mult,
+ priv->data.cpts_clock_shift))
+ dev_err(priv->dev, "error registering cpts device\n");
+
}
/* Enable Interrupt pacing if configured */
@@ -1197,6 +1205,7 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_carrier_off(priv->ndev);
if (cpsw_common_res_usage_state(priv) <= 1) {
+ cpts_unregister(priv->cpts);
cpsw_intr_disable(priv);
cpdma_ctlr_int_ctrl(priv->dma, false);
cpdma_ctlr_stop(priv->dma);
@@ -1816,6 +1825,8 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
}
i++;
+ if (i == data->slaves)
+ break;
}
return 0;
@@ -1983,9 +1994,15 @@ static int cpsw_probe(struct platform_device *pdev)
goto clean_runtime_disable_ret;
}
priv->regs = ss_regs;
- priv->version = __raw_readl(&priv->regs->id_ver);
priv->host_port = HOST_PORT_NUM;
+ /* Need to enable clocks with runtime PM api to access module
+ * registers
+ */
+ pm_runtime_get_sync(&pdev->dev);
+ priv->version = readl(&priv->regs->id_ver);
+ pm_runtime_put_sync(&pdev->dev);
+
res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
priv->wr_regs = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(priv->wr_regs)) {
@@ -2091,7 +2108,7 @@ static int cpsw_probe(struct platform_device *pdev)
while ((res = platform_get_resource(priv->pdev, IORESOURCE_IRQ, k))) {
for (i = res->start; i <= res->end; i++) {
if (devm_request_irq(&pdev->dev, i, cpsw_interrupt, 0,
- dev_name(priv->dev), priv)) {
+ dev_name(&pdev->dev), priv)) {
dev_err(priv->dev, "error attaching irq\n");
goto clean_ale_ret;
}
@@ -2155,8 +2172,6 @@ static int cpsw_remove(struct platform_device *pdev)
unregister_netdev(cpsw_get_slave_ndev(priv, 1));
unregister_netdev(ndev);
- cpts_unregister(priv->cpts);
-
cpsw_ale_destroy(priv->ale);
cpdma_chan_destroy(priv->txch);
cpdma_chan_destroy(priv->rxch);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 41ba974..cd9b164 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -61,6 +61,7 @@
#include <linux/davinci_emac.h>
#include <linux/of.h>
#include <linux/of_address.h>
+#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_net.h>
@@ -1752,10 +1753,14 @@ static const struct net_device_ops emac_netdev_ops = {
#endif
};
+static const struct of_device_id davinci_emac_of_match[];
+
static struct emac_platform_data *
davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
{
struct device_node *np;
+ const struct of_device_id *match;
+ const struct emac_platform_data *auxdata;
struct emac_platform_data *pdata = NULL;
const u8 *mac_addr;
@@ -1793,7 +1798,20 @@ davinci_emac_of_get_pdata(struct platform_device *pdev, struct emac_priv *priv)
priv->phy_node = of_parse_phandle(np, "phy-handle", 0);
if (!priv->phy_node)
- pdata->phy_id = "";
+ pdata->phy_id = NULL;
+
+ auxdata = pdev->dev.platform_data;
+ if (auxdata) {
+ pdata->interrupt_enable = auxdata->interrupt_enable;
+ pdata->interrupt_disable = auxdata->interrupt_disable;
+ }
+
+ match = of_match_device(davinci_emac_of_match, &pdev->dev);
+ if (match && match->data) {
+ auxdata = match->data;
+ pdata->version = auxdata->version;
+ pdata->hw_ram_addr = auxdata->hw_ram_addr;
+ }
pdev->dev.platform_data = pdata;
@@ -2020,8 +2038,14 @@ static const struct dev_pm_ops davinci_emac_pm_ops = {
};
#if IS_ENABLED(CONFIG_OF)
+static const struct emac_platform_data am3517_emac_data = {
+ .version = EMAC_VERSION_2,
+ .hw_ram_addr = 0x01e20000,
+};
+
static const struct of_device_id davinci_emac_of_match[] = {
{.compatible = "ti,davinci-dm6467-emac", },
+ {.compatible = "ti,am3517-emac", .data = &am3517_emac_data, },
{},
};
MODULE_DEVICE_TABLE(of, davinci_emac_of_match);
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 1f23641..2166e87 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -1017,7 +1017,7 @@ static int temac_of_probe(struct platform_device *op)
platform_set_drvdata(op, ndev);
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &temac_netdev_ops;
ndev->ethtool_ops = &temac_ethtool_ops;
#if 0
diff --git a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
index b2ff038..f9293da 100644
--- a/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
+++ b/drivers/net/ethernet/xilinx/xilinx_axienet_main.c
@@ -1486,7 +1486,7 @@ static int axienet_of_probe(struct platform_device *op)
SET_NETDEV_DEV(ndev, &op->dev);
ndev->flags &= ~IFF_MULTICAST; /* clear multicast */
- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST;
+ ndev->features = NETIF_F_SG;
ndev->netdev_ops = &axienet_netdev_ops;
ndev->ethtool_ops = &axienet_ethtool_ops;
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 74234a5..fefb8cd 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -163,26 +163,9 @@ static void xemaclite_enable_interrupts(struct net_local *drvdata)
__raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
drvdata->base_addr + XEL_TSR_OFFSET);
- /* Enable the Tx interrupts for the second Buffer if
- * configured in HW */
- if (drvdata->tx_ping_pong != 0) {
- reg_data = __raw_readl(drvdata->base_addr +
- XEL_BUFFER_OFFSET + XEL_TSR_OFFSET);
- __raw_writel(reg_data | XEL_TSR_XMIT_IE_MASK,
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- }
-
/* Enable the Rx interrupts for the first buffer */
__raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr + XEL_RSR_OFFSET);
- /* Enable the Rx interrupts for the second Buffer if
- * configured in HW */
- if (drvdata->rx_ping_pong != 0) {
- __raw_writel(XEL_RSR_RECV_IE_MASK, drvdata->base_addr +
- XEL_BUFFER_OFFSET + XEL_RSR_OFFSET);
- }
-
/* Enable the Global Interrupt Enable */
__raw_writel(XEL_GIER_GIE_MASK, drvdata->base_addr + XEL_GIER_OFFSET);
}
@@ -206,31 +189,10 @@ static void xemaclite_disable_interrupts(struct net_local *drvdata)
__raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
drvdata->base_addr + XEL_TSR_OFFSET);
- /* Disable the Tx interrupts for the second Buffer
- * if configured in HW */
- if (drvdata->tx_ping_pong != 0) {
- reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- __raw_writel(reg_data & (~XEL_TSR_XMIT_IE_MASK),
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_TSR_OFFSET);
- }
-
/* Disable the Rx interrupts for the first buffer */
reg_data = __raw_readl(drvdata->base_addr + XEL_RSR_OFFSET);
__raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
drvdata->base_addr + XEL_RSR_OFFSET);
-
- /* Disable the Rx interrupts for the second buffer
- * if configured in HW */
- if (drvdata->rx_ping_pong != 0) {
-
- reg_data = __raw_readl(drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
- __raw_writel(reg_data & (~XEL_RSR_RECV_IE_MASK),
- drvdata->base_addr + XEL_BUFFER_OFFSET +
- XEL_RSR_OFFSET);
- }
}
/**
@@ -258,6 +220,13 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
*to_u16_ptr++ = *from_u16_ptr++;
*to_u16_ptr++ = *from_u16_ptr++;
+ /* This barrier resolves occasional issues seen around
+ * cases where the data is not properly flushed out
+ * from the processor store buffers to the destination
+ * memory locations.
+ */
+ wmb();
+
/* Output a word */
*to_u32_ptr++ = align_buffer;
}
@@ -273,6 +242,12 @@ static void xemaclite_aligned_write(void *src_ptr, u32 *dest_ptr,
for (; length > 0; length--)
*to_u8_ptr++ = *from_u8_ptr++;
+ /* This barrier resolves occasional issues seen around
+ * cases where the data is not properly flushed out
+ * from the processor store buffers to the destination
+ * memory locations.
+ */
+ wmb();
*to_u32_ptr = align_buffer;
}
}
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c
index 3169252..5d78c1d 100644
--- a/drivers/net/hamradio/hdlcdrv.c
+++ b/drivers/net/hamradio/hdlcdrv.c
@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
case HDLCDRVCTL_CALIBRATE:
if(!capable(CAP_SYS_RAWIO))
return -EPERM;
+ if (bi.data.calibrate > INT_MAX / s->par.bitrate)
+ return -EINVAL;
s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16;
return 0;
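
The added check rejects any calibrate value whose product with the bitrate would overflow a signed int, by comparing against INT_MAX / bitrate instead of performing the multiplication first. Standalone, the guard looks like this (the helper name is illustrative):

#include <limits.h>
#include <stdbool.h>

/* True only when a * b is guaranteed to fit in an int.  Dividing
 * INT_MAX by b first means the check itself can never overflow.
 */
static bool mul_fits_int(int a, int b)
{
	return b > 0 && a >= 0 && a <= INT_MAX / b;
}

In the ioctl this corresponds to guarding calibrate * bitrate before the division by 16.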
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 1971411..61dd244 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -1057,6 +1057,7 @@ static int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
break;
case SIOCYAMGCFG:
+ memset(&yi, 0, sizeof(yi));
yi.cfg.mask = 0xffffffff;
yi.cfg.iobase = yp->iobase;
yi.cfg.irq = yp->irq;
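
The memset() above is an information-leak fix: the reply structure lives on the kernel stack, and any padding or field the handler does not explicitly set would otherwise be copied back to user space with stale stack contents. In miniature (struct and field names are invented):

#include <string.h>

struct cfg_reply_sketch {
	unsigned int mask;
	unsigned int iobase;
	unsigned int irq;
	/* further fields and implicit padding may follow */
};

/* Zero the whole reply before filling it, so padding and any unset
 * field never carries stale memory back to the caller.
 */
static void fill_cfg_reply(struct cfg_reply_sketch *r,
			   unsigned int iobase, unsigned int irq)
{
	memset(r, 0, sizeof(*r));
	r->mask = 0xffffffff;
	r->iobase = iobase;
	r->irq = irq;
}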
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 524f713..71baeb3 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -261,9 +261,7 @@ int netvsc_recv_callback(struct hv_device *device_obj,
struct sk_buff *skb;
net = ((struct netvsc_device *)hv_get_drvdata(device_obj))->ndev;
- if (!net) {
- netdev_err(net, "got receive callback but net device"
- " not initialized yet\n");
+ if (!net || net->reg_state != NETREG_REGISTERED) {
packet->status = NVSP_STAT_FAIL;
return 0;
}
@@ -327,7 +325,6 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
return -EINVAL;
nvdev->start_remove = true;
- cancel_delayed_work_sync(&ndevctx->dwork);
cancel_work_sync(&ndevctx->work);
netif_tx_disable(ndev);
rndis_filter_device_remove(hdev);
@@ -436,19 +433,11 @@ static int netvsc_probe(struct hv_device *dev,
SET_ETHTOOL_OPS(net, &ethtool_ops);
SET_NETDEV_DEV(net, &dev->device);
- ret = register_netdev(net);
- if (ret != 0) {
- pr_err("Unable to register netdev.\n");
- free_netdev(net);
- goto out;
- }
-
/* Notify the netvsc driver of the new device */
device_info.ring_size = ring_size;
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- unregister_netdev(net);
free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
@@ -457,7 +446,13 @@ static int netvsc_probe(struct hv_device *dev,
netif_carrier_on(net);
-out:
+ ret = register_netdev(net);
+ if (ret != 0) {
+ pr_err("Unable to register netdev.\n");
+ rndis_filter_device_remove(dev);
+ free_netdev(net);
+ }
+
return ret;
}
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index acf9379..60406b0 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -690,8 +690,19 @@ static netdev_features_t macvlan_fix_features(struct net_device *dev,
netdev_features_t features)
{
struct macvlan_dev *vlan = netdev_priv(dev);
+ netdev_features_t mask;
- return features & (vlan->set_features | ~MACVLAN_FEATURES);
+ features |= NETIF_F_ALL_FOR_ALL;
+ features &= (vlan->set_features | ~MACVLAN_FEATURES);
+ mask = features;
+
+ features = netdev_increment_features(vlan->lowerdev->features,
+ features,
+ mask);
+ if (!vlan->fwd_priv)
+ features |= NETIF_F_LLTX;
+
+ return features;
}
static const struct ethtool_ops macvlan_ethtool_ops = {
@@ -1019,9 +1030,8 @@ static int macvlan_device_event(struct notifier_block *unused,
break;
case NETDEV_FEAT_CHANGE:
list_for_each_entry(vlan, &port->vlans, list) {
- vlan->dev->features = dev->features & MACVLAN_FEATURES;
vlan->dev->gso_max_size = dev->gso_max_size;
- netdev_features_change(vlan->dev);
+ netdev_update_features(vlan->dev);
}
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 9093004..2a89da0 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -770,7 +770,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
int ret;
int vnet_hdr_len = 0;
int vlan_offset = 0;
- int copied;
+ int copied, total;
if (q->flags & IFF_VNET_HDR) {
struct virtio_net_hdr vnet_hdr;
@@ -785,7 +785,8 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
if (memcpy_toiovecend(iv, (void *)&vnet_hdr, 0, sizeof(vnet_hdr)))
return -EFAULT;
}
- copied = vnet_hdr_len;
+ total = copied = vnet_hdr_len;
+ total += skb->len;
if (!vlan_tx_tag_present(skb))
len = min_t(int, skb->len, len);
@@ -800,6 +801,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -817,10 +819,9 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
}
ret = skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
- copied += len;
done:
- return ret ? ret : copied;
+ return ret ? ret : total;
}
static ssize_t macvtap_do_read(struct macvtap_queue *q, struct kiocb *iocb,
@@ -875,7 +876,9 @@ static ssize_t macvtap_aio_read(struct kiocb *iocb, const struct iovec *iv,
}
ret = macvtap_do_read(q, iocb, iv, len, file->f_flags & O_NONBLOCK);
- ret = min_t(ssize_t, ret, len); /* XXX copied from tun.c. Why? */
+ ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
return ret;
}
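
The accounting change in macvtap_put_user() above makes the read return the packet's full on-wire size (vnet header plus skb length, plus the VLAN tag when one is reinserted) rather than the number of bytes that happened to fit in the iovec. A minimal standalone model of that arithmetic, for illustration only; VLAN_HLEN mirrors the kernel constant, the other names are hypothetical:

/* Not part of the patch: models the value reported to the reader
 * versus the bytes actually copied when the user buffer is short.
 */
#include <stdio.h>

#define VLAN_HLEN 4

static unsigned int reported_len(unsigned int vnet_hdr_len,
                                 unsigned int skb_len, int tagged)
{
        unsigned int total = vnet_hdr_len + skb_len;

        if (tagged)
                total += VLAN_HLEN;     /* tag is reinserted on the way out */

        return total;                   /* returned even if less was copied */
}

int main(void)
{
        printf("%u\n", reported_len(10, 1500, 1));      /* prints 1514 */
        return 0;
}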
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3ae28f4..26fa05a 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -336,6 +336,21 @@ static struct phy_driver ksphy_driver[] = {
.resume = genphy_resume,
.driver = { .owner = THIS_MODULE,},
}, {
+ .phy_id = PHY_ID_KSZ8041RNLI,
+ .phy_id_mask = 0x00fffff0,
+ .name = "Micrel KSZ8041RNLI",
+ .features = PHY_BASIC_FEATURES |
+ SUPPORTED_Pause | SUPPORTED_Asym_Pause,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = kszphy_ack_interrupt,
+ .config_intr = kszphy_config_intr,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
+ .driver = { .owner = THIS_MODULE,},
+}, {
.phy_id = PHY_ID_KSZ8051,
.phy_id_mask = 0x00fffff0,
.name = "Micrel KSZ8051",
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 36c6994..98434b8 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -565,10 +565,8 @@ int phy_start_interrupts(struct phy_device *phydev)
int err = 0;
atomic_set(&phydev->irq_disable, 0);
- if (request_irq(phydev->irq, phy_interrupt,
- IRQF_SHARED,
- "phy_interrupt",
- phydev) < 0) {
+ if (request_irq(phydev->irq, phy_interrupt, 0, "phy_interrupt",
+ phydev) < 0) {
pr_warn("%s: Can't get IRQ %d (PHY)\n",
phydev->bus->name, phydev->irq);
phydev->irq = PHY_POLL;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 782e38b..7c8343a 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1184,7 +1184,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
{
struct tun_pi pi = { 0, skb->protocol };
ssize_t total = 0;
- int vlan_offset = 0;
+ int vlan_offset = 0, copied;
if (!(tun->flags & TUN_NO_PI)) {
if ((len -= sizeof(pi)) < 0)
@@ -1248,6 +1248,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
total += tun->vnet_hdr_sz;
}
+ copied = total;
+ total += skb->len;
if (!vlan_tx_tag_present(skb)) {
len = min_t(int, skb->len, len);
} else {
@@ -1262,24 +1264,24 @@ static ssize_t tun_put_user(struct tun_struct *tun,
vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
len = min_t(int, skb->len + VLAN_HLEN, len);
+ total += VLAN_HLEN;
copy = min_t(int, vlan_offset, len);
- ret = skb_copy_datagram_const_iovec(skb, 0, iv, total, copy);
+ ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
len -= copy;
- total += copy;
+ copied += copy;
if (ret || !len)
goto done;
copy = min_t(int, sizeof(veth), len);
- ret = memcpy_toiovecend(iv, (void *)&veth, total, copy);
+ ret = memcpy_toiovecend(iv, (void *)&veth, copied, copy);
len -= copy;
- total += copy;
+ copied += copy;
if (ret || !len)
goto done;
}
- skb_copy_datagram_const_iovec(skb, vlan_offset, iv, total, len);
- total += len;
+ skb_copy_datagram_const_iovec(skb, vlan_offset, iv, copied, len);
done:
tun->dev->stats.tx_packets++;
@@ -1356,6 +1358,8 @@ static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
ret = tun_do_read(tun, tfile, iocb, iv, len,
file->f_flags & O_NONBLOCK);
ret = min_t(ssize_t, ret, len);
+ if (ret > 0)
+ iocb->ki_pos = ret;
out:
tun_put(tun);
return ret;
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 85e4a016..47b0f73 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -276,12 +276,12 @@ config USB_NET_CDC_MBIM
module will be called cdc_mbim.
config USB_NET_DM9601
- tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
+ tristate "Davicom DM96xx based USB 10/100 ethernet devices"
depends on USB_USBNET
select CRC32
help
- This option adds support for Davicom DM9601 based USB 1.1
- 10/100 Ethernet adapters.
+ This option adds support for Davicom DM9601/DM9620/DM9621A
+ based USB 10/100 Ethernet adapters.
config USB_NET_SR9700
tristate "CoreChip-sz SR9700 based USB 1.1 10/100 ethernet devices"
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index c6867f9..14aa48f 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -1,5 +1,5 @@
/*
- * Davicom DM9601 USB 1.1 10/100Mbps ethernet devices
+ * Davicom DM96xx USB 10/100Mbps ethernet devices
*
* Peter Korsgaard <jacmet@sunsite.dk>
*
@@ -364,7 +364,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf)
dev->net->ethtool_ops = &dm9601_ethtool_ops;
dev->net->hard_header_len += DM_TX_OVERHEAD;
dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len;
- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD;
+
+ /* dm9620/21a require room for 4 byte padding, even in dm9601
+ * mode, so we need +1 to be able to receive full size
+ * ethernet frames.
+ */
+ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1;
dev->mii.dev = dev->net;
dev->mii.mdio_read = dm9601_mdio_read;
@@ -468,7 +473,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
gfp_t flags)
{
- int len;
+ int len, pad;
/* format:
b1: packet length low
@@ -476,12 +481,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
b3..n: packet data
*/
- len = skb->len;
+ len = skb->len + DM_TX_OVERHEAD;
+
+ /* workaround for dm962x errata with tx fifo getting out of
+ * sync if a USB bulk transfer retry happens right after a
+ * packet with odd / maxpacket length by adding up to 3 bytes
+ * padding.
+ */
+ while ((len & 1) || !(len % dev->maxpacket))
+ len++;
- if (skb_headroom(skb) < DM_TX_OVERHEAD) {
+ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */
+ pad = len - skb->len;
+
+ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) {
struct sk_buff *skb2;
- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags);
+ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
@@ -490,10 +506,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
__skb_push(skb, DM_TX_OVERHEAD);
- /* usbnet adds padding if length is a multiple of packet size
- if so, adjust length value in header */
- if ((skb->len % dev->maxpacket) == 0)
- len++;
+ if (pad) {
+ memset(skb->data + skb->len, 0, pad);
+ __skb_put(skb, pad);
+ }
skb->data[0] = len;
skb->data[1] = len >> 8;
@@ -543,7 +559,7 @@ static int dm9601_link_reset(struct usbnet *dev)
}
static const struct driver_info dm9601_info = {
- .description = "Davicom DM9601 USB Ethernet",
+ .description = "Davicom DM96xx USB 10/100 Ethernet",
.flags = FLAG_ETHER | FLAG_LINK_INTR,
.bind = dm9601_bind,
.rx_fixup = dm9601_rx_fixup,
@@ -594,6 +610,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE(0x0a46, 0x9620), /* DM9620 USB to Fast Ethernet Adapter */
.driver_info = (unsigned long)&dm9601_info,
},
+ {
+ USB_DEVICE(0x0a46, 0x9621), /* DM9621A USB to Fast Ethernet Adapter */
+ .driver_info = (unsigned long)&dm9601_info,
+ },
{}, // END
};
@@ -612,5 +632,5 @@ static struct usb_driver dm9601_driver = {
module_usb_driver(dm9601_driver);
MODULE_AUTHOR("Peter Korsgaard <jacmet@sunsite.dk>");
-MODULE_DESCRIPTION("Davicom DM9601 USB 1.1 ethernet devices");
+MODULE_DESCRIPTION("Davicom DM96xx USB 10/100 ethernet devices");
MODULE_LICENSE("GPL");
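
The padding added to dm9601_tx_fixup() above reduces to a small calculation: grow the total frame length (hardware header included) until it is even and not an exact multiple of the USB packet size, then append that many zero bytes. A standalone sketch of the calculation; DM_TX_OVERHEAD and the maxpacket value are assumptions for illustration, not taken from the driver:

/* Not part of the patch: models the dm962x tx padding arithmetic. */
#include <stdio.h>

#define DM_TX_OVERHEAD 2                /* assumed 2-byte length header */

static unsigned int tx_pad_bytes(unsigned int skb_len, unsigned int maxpacket)
{
        unsigned int len = skb_len + DM_TX_OVERHEAD;

        /* avoid odd lengths and exact multiples of the USB packet size */
        while ((len & 1) || !(len % maxpacket))
                len++;

        return len - DM_TX_OVERHEAD - skb_len;  /* zero bytes to append */
}

int main(void)
{
        printf("pad for a 62-byte skb at 64-byte packets: %u\n",
               tx_pad_bytes(62, 64));           /* prints 2 */
        return 0;
}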
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 86292e6..1a48234 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -185,7 +185,6 @@ enum rx_ctrl_state{
#define BM_REQUEST_TYPE (0xa1)
#define B_NOTIFICATION (0x20)
#define W_VALUE (0x0)
-#define W_INDEX (0x2)
#define W_LENGTH (0x2)
#define B_OVERRUN (0x1<<6)
@@ -1487,6 +1486,7 @@ static void tiocmget_intr_callback(struct urb *urb)
struct uart_icount *icount;
struct hso_serial_state_notification *serial_state_notification;
struct usb_device *usb;
+ int if_num;
/* Sanity checks */
if (!serial)
@@ -1495,15 +1495,24 @@ static void tiocmget_intr_callback(struct urb *urb)
handle_usb_error(status, __func__, serial->parent);
return;
}
+
+ /* tiocmget is only supported on HSO_PORT_MODEM */
tiocmget = serial->tiocmget;
if (!tiocmget)
return;
+ BUG_ON((serial->parent->port_spec & HSO_PORT_MASK) != HSO_PORT_MODEM);
+
usb = serial->parent->usb;
+ if_num = serial->parent->interface->altsetting->desc.bInterfaceNumber;
+
+ /* wIndex should be the USB interface number of the port to which the
+ * notification applies, which should always be the Modem port.
+ */
serial_state_notification = &tiocmget->serial_state_notification;
if (serial_state_notification->bmRequestType != BM_REQUEST_TYPE ||
serial_state_notification->bNotification != B_NOTIFICATION ||
le16_to_cpu(serial_state_notification->wValue) != W_VALUE ||
- le16_to_cpu(serial_state_notification->wIndex) != W_INDEX ||
+ le16_to_cpu(serial_state_notification->wIndex) != if_num ||
le16_to_cpu(serial_state_notification->wLength) != W_LENGTH) {
dev_warn(&usb->dev,
"hso received invalid serial state notification\n");
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index 03832d3..f546378 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -117,7 +117,6 @@ enum {
struct mcs7830_data {
u8 multi_filter[8];
u8 config;
- u8 link_counter;
};
static const char driver_name[] = "MOSCHIP usb-ethernet driver";
@@ -561,26 +560,16 @@ static void mcs7830_status(struct usbnet *dev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
bool link, link_changed;
- struct mcs7830_data *data = mcs7830_get_data(dev);
if (urb->actual_length < 16)
return;
- link = !(buf[1] & 0x20);
+ link = !(buf[1] == 0x20);
link_changed = netif_carrier_ok(dev->net) != link;
if (link_changed) {
- data->link_counter++;
- /*
- track link state 20 times to guard against erroneous
- link state changes reported sometimes by the chip
- */
- if (data->link_counter > 20) {
- data->link_counter = 0;
- usbnet_link_change(dev, link, 0);
- netdev_dbg(dev->net, "Link Status is: %d\n", link);
- }
- } else
- data->link_counter = 0;
+ usbnet_link_change(dev, link, 0);
+ netdev_dbg(dev->net, "Link Status is: %d\n", link);
+ }
}
static const struct driver_info moschip_info = {
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 916241d..5d77644 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -426,10 +426,10 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
pr_debug("%s: short packet %i\n", dev->name, len);
dev->stats.rx_length_errors++;
- if (vi->big_packets)
- give_pages(rq, buf);
- else if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(rq, buf);
else
dev_kfree_skb(buf);
return;
@@ -1367,6 +1367,11 @@ static void virtnet_config_changed(struct virtio_device *vdev)
static void virtnet_free_queues(struct virtnet_info *vi)
{
+ int i;
+
+ for (i = 0; i < vi->max_queue_pairs; i++)
+ netif_napi_del(&vi->rq[i].napi);
+
kfree(vi->rq);
kfree(vi->sq);
}
@@ -1396,10 +1401,10 @@ static void free_unused_bufs(struct virtnet_info *vi)
struct virtqueue *vq = vi->rq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (vi->big_packets)
- give_pages(&vi->rq[i], buf);
- else if (vi->mergeable_rx_bufs)
+ if (vi->mergeable_rx_bufs)
put_page(virt_to_head_page(buf));
+ else if (vi->big_packets)
+ give_pages(&vi->rq[i], buf);
else
dev_kfree_skb(buf);
--vi->rq[i].num;
@@ -1792,16 +1797,17 @@ static int virtnet_restore(struct virtio_device *vdev)
if (err)
return err;
- if (netif_running(vi->dev))
+ if (netif_running(vi->dev)) {
+ for (i = 0; i < vi->curr_queue_pairs; i++)
+ if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
+ schedule_delayed_work(&vi->refill, 0);
+
for (i = 0; i < vi->max_queue_pairs; i++)
virtnet_napi_enable(&vi->rq[i]);
+ }
netif_device_attach(vi->dev);
- for (i = 0; i < vi->curr_queue_pairs; i++)
- if (!try_fill_recv(&vi->rq[i], GFP_KERNEL))
- schedule_delayed_work(&vi->refill, 0);
-
mutex_lock(&vi->config_lock);
vi->config_enable = true;
mutex_unlock(&vi->config_lock);
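
On the virtio_net reordering above: when mergeable receive buffers are enabled the big_packets flag may be set as well, but the queued buffers are single pages rather than page chains, so the mergeable case has to be tested before the big-packets case when freeing. A stand-in model of the corrected dispatch order (the flags and buffer kinds are placeholders, not the driver's types):

/* Illustration only: ordering of the buffer-free dispatch. */
#include <stdio.h>
#include <stdbool.h>

enum buf_kind { BUF_SKB, BUF_PAGE, BUF_PAGE_CHAIN };

static enum buf_kind classify(bool mergeable_rx_bufs, bool big_packets)
{
        if (mergeable_rx_bufs)          /* must win even if big_packets is set */
                return BUF_PAGE;
        if (big_packets)
                return BUF_PAGE_CHAIN;
        return BUF_SKB;
}

int main(void)
{
        /* both flags set: the buffer is still a single mergeable page */
        printf("%d\n", classify(true, true) == BUF_PAGE);
        return 0;
}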
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 0358c07..ed384fe 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1668,7 +1668,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
netdev_dbg(dev, "circular route to %pI4\n",
&dst->sin.sin_addr.s_addr);
dev->stats.collisions++;
- goto tx_error;
+ goto rt_tx_error;
}
/* Bypass encapsulation if the destination is local */
@@ -2440,7 +2440,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
/* update header length based on lower device */
dev->hard_header_len = lowerdev->hard_header_len +
(use_ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM);
- }
+ } else if (use_ipv6)
+ vxlan->flags |= VXLAN_F_IPV6;
if (data[IFLA_VXLAN_TOS])
vxlan->tos = nla_get_u8(data[IFLA_VXLAN_TOS]);
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 8d78253..a366d6b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
mask2 |= ATH9K_INT_CST;
if (isr2 & AR_ISR_S2_TSFOOR)
mask2 |= ATH9K_INT_TSFOOR;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S2, isr2);
+ isr &= ~AR_ISR_BCNMISC;
+ }
}
- isr = REG_READ(ah, AR_ISR_RAC);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)
+ isr = REG_READ(ah, AR_ISR_RAC);
+
if (isr == 0xffffffff) {
*masked = 0;
return false;
@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= ATH9K_INT_TX;
- s0_s = REG_READ(ah, AR_ISR_S0_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s0_s = REG_READ(ah, AR_ISR_S0_S);
+ s1_s = REG_READ(ah, AR_ISR_S1_S);
+ } else {
+ s0_s = REG_READ(ah, AR_ISR_S0);
+ REG_WRITE(ah, AR_ISR_S0, s0_s);
+ s1_s = REG_READ(ah, AR_ISR_S1);
+ REG_WRITE(ah, AR_ISR_S1, s1_s);
+
+ isr &= ~(AR_ISR_TXOK |
+ AR_ISR_TXDESC |
+ AR_ISR_TXERR |
+ AR_ISR_TXEOL);
+ }
+
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK);
ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC);
-
- s1_s = REG_READ(ah, AR_ISR_S1_S);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR);
ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL);
}
@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
*masked |= mask2;
}
- if (AR_SREV_9100(ah))
- return true;
-
- if (isr & AR_ISR_GENTMR) {
+ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) {
u32 s5_s;
- s5_s = REG_READ(ah, AR_ISR_S5_S);
+ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) {
+ s5_s = REG_READ(ah, AR_ISR_S5_S);
+ } else {
+ s5_s = REG_READ(ah, AR_ISR_S5);
+ }
+
ah->intr_gen_timer_trigger =
MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
*masked |= ATH9K_INT_TIM_TIMER;
+
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR_S5, s5_s);
+ isr &= ~AR_ISR_GENTMR;
+ }
}
+ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) {
+ REG_WRITE(ah, AR_ISR, isr);
+ REG_READ(ah, AR_ISR);
+ }
+
+ if (AR_SREV_9100(ah))
+ return true;
+
if (sync_cause) {
ath9k_debug_sync_cause(common, sync_cause);
fatal_int =
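
The ar9002_hw_get_isr() rework above applies one pattern throughout: chips with read-and-clear (RAC) support read the shadow *_S registers, which acknowledges the status as a side effect, while other chips read the primary register and write the value back to clear it, masking the corresponding bits out of the main ISR. A schematic of that pattern with hypothetical register accessors, not the ath9k API:

/* Illustration only: read-and-clear versus read-then-write-back. */
#include <stdint.h>
#include <stdbool.h>

struct fake_regs {
        uint32_t isr_s0;                /* primary status (write-1-to-clear) */
        uint32_t isr_s0_shadow;         /* RAC shadow; reading it clears it */
};

static uint32_t read_status(struct fake_regs *r, bool rac_supported)
{
        uint32_t v;

        if (rac_supported) {
                v = r->isr_s0_shadow;
                r->isr_s0_shadow = 0;   /* hardware clears on read */
        } else {
                v = r->isr_s0;
                r->isr_s0 &= ~v;        /* acknowledge by writing the value back */
        }
        return v;
}

int main(void)
{
        struct fake_regs r = { .isr_s0 = 0x5, .isr_s0_shadow = 0x5 };
        return read_status(&r, false) != 0x5;
}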
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index 1ec5235..130657d 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3984,18 +3984,20 @@ static void ar9003_hw_quick_drop_apply(struct ath_hw *ah, u16 freq)
int quick_drop;
s32 t[3], f[3] = {5180, 5500, 5785};
- if (!(pBase->miscConfiguration & BIT(1)))
+ if (!(pBase->miscConfiguration & BIT(4)))
return;
- if (freq < 4000)
- quick_drop = eep->modalHeader2G.quick_drop;
- else {
- t[0] = eep->base_ext1.quick_drop_low;
- t[1] = eep->modalHeader5G.quick_drop;
- t[2] = eep->base_ext1.quick_drop_high;
- quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ if (AR_SREV_9300(ah) || AR_SREV_9580(ah) || AR_SREV_9340(ah)) {
+ if (freq < 4000) {
+ quick_drop = eep->modalHeader2G.quick_drop;
+ } else {
+ t[0] = eep->base_ext1.quick_drop_low;
+ t[1] = eep->modalHeader5G.quick_drop;
+ t[2] = eep->base_ext1.quick_drop_high;
+ quick_drop = ar9003_hw_power_interpolate(freq, f, t, 3);
+ }
+ REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}
- REG_RMW_FIELD(ah, AR_PHY_AGC, AR_PHY_AGC_QUICK_DROP, quick_drop);
}
static void ar9003_hw_txend_to_xpa_off_apply(struct ath_hw *ah, bool is2ghz)
@@ -4035,7 +4037,7 @@ static void ar9003_hw_xlna_bias_strength_apply(struct ath_hw *ah, bool is2ghz)
struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
u8 bias;
- if (!(eep->baseEepHeader.featureEnable & 0x40))
+ if (!(eep->baseEepHeader.miscConfiguration & 0x40))
return;
if (!AR_SREV_9300(ah))
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 9a2657f..608d739 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -127,21 +127,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
struct ath9k_vif_iter_data *iter_data = data;
int i;
- for (i = 0; i < ETH_ALEN; i++)
- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ if (iter_data->hw_macaddr != NULL) {
+ for (i = 0; i < ETH_ALEN; i++)
+ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]);
+ } else {
+ iter_data->hw_macaddr = mac;
+ }
}
-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
+static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath9k_hw_common(priv->ah);
struct ath9k_vif_iter_data iter_data;
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
- iter_data.hw_macaddr = common->macaddr;
+ iter_data.hw_macaddr = NULL;
memset(&iter_data.mask, 0xff, ETH_ALEN);
if (vif)
@@ -153,6 +158,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv,
ath9k_htc_bssid_iter, &iter_data);
memcpy(common->bssidmask, iter_data.mask, ETH_ALEN);
+
+ if (iter_data.hw_macaddr)
+ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN);
+
ath_hw_setbssidmask(common);
}
@@ -1063,7 +1072,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
goto out;
}
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
priv->vif_slot |= (1 << avp->index);
priv->nvifs++;
@@ -1128,7 +1137,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
ath9k_htc_set_opmode(priv);
- ath9k_htc_set_bssid_mask(priv, vif);
+ ath9k_htc_set_mac_bssid_mask(priv, vif);
/*
* Stop ANI only if there are no associated station interfaces.
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 54b0415..8918035 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -146,10 +146,9 @@ static void ath9k_hw_set_clockrate(struct ath_hw *ah)
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
- if (IS_CHAN_HT40(chan))
- clockrate *= 2;
-
- if (ah->curchan) {
+ if (chan) {
+ if (IS_CHAN_HT40(chan))
+ clockrate *= 2;
if (IS_CHAN_HALF_RATE(chan))
clockrate /= 2;
if (IS_CHAN_QUARTER_RATE(chan))
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 74f452c..21aa09e 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -965,8 +965,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw,
struct ath_common *common = ath9k_hw_common(ah);
/*
- * Use the hardware MAC address as reference, the hardware uses it
- * together with the BSSID mask when matching addresses.
+ * Pick the MAC address of the first interface as the new hardware
+ * MAC address. The hardware will use it together with the BSSID mask
+ * when matching addresses.
*/
memset(iter_data, 0, sizeof(*iter_data));
memset(&iter_data->mask, 0xff, ETH_ALEN);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 09cdbcd..b5a19e0 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -1276,6 +1276,10 @@ static void ath_tx_fill_desc(struct ath_softc *sc, struct ath_buf *bf,
if (!rts_thresh || (len > rts_thresh))
rts = true;
}
+
+ if (!aggr)
+ len = fi->framelen;
+
ath_buf_set_rate(sc, bf, &info, len, rts);
}
diff --git a/drivers/net/wireless/ath/wcn36xx/smd.c b/drivers/net/wireless/ath/wcn36xx/smd.c
index de9eb2c..3663394 100644
--- a/drivers/net/wireless/ath/wcn36xx/smd.c
+++ b/drivers/net/wireless/ath/wcn36xx/smd.c
@@ -2041,13 +2041,20 @@ static void wcn36xx_smd_rsp_process(struct wcn36xx *wcn, void *buf, size_t len)
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
mutex_lock(&wcn->hal_ind_mutex);
msg_ind = kmalloc(sizeof(*msg_ind), GFP_KERNEL);
- msg_ind->msg_len = len;
- msg_ind->msg = kmalloc(len, GFP_KERNEL);
- memcpy(msg_ind->msg, buf, len);
- list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
- queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
- wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ if (msg_ind) {
+ msg_ind->msg_len = len;
+ msg_ind->msg = kmalloc(len, GFP_KERNEL);
+ memcpy(msg_ind->msg, buf, len);
+ list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
+ queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
+ wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
+ }
mutex_unlock(&wcn->hal_ind_mutex);
+ if (msg_ind)
+ break;
+ /* FIXME: Do something smarter than just printing an error. */
+ wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
+ msg_header->msg_type);
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
diff --git a/drivers/net/wireless/brcm80211/Kconfig b/drivers/net/wireless/brcm80211/Kconfig
index b00a7e9..54e36fc 100644
--- a/drivers/net/wireless/brcm80211/Kconfig
+++ b/drivers/net/wireless/brcm80211/Kconfig
@@ -5,6 +5,8 @@ config BRCMSMAC
tristate "Broadcom IEEE802.11n PCIe SoftMAC WLAN driver"
depends on MAC80211
depends on BCMA
+ select NEW_LEDS if BCMA_DRIVER_GPIO
+ select LEDS_CLASS if BCMA_DRIVER_GPIO
select BRCMUTIL
select FW_LOADER
select CRC_CCITT
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
index 905704e..abc9cec 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh_sdmmc.c
@@ -109,6 +109,8 @@ static inline int brcmf_sdioh_f0_write_byte(struct brcmf_sdio_dev *sdiodev,
brcmf_err("Disable F2 failed:%d\n",
err_ret);
}
+ } else {
+ err_ret = -ENOENT;
}
} else if ((regaddr == SDIO_CCCR_ABORT) ||
(regaddr == SDIO_CCCR_IENx)) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-7000.c b/drivers/net/wireless/iwlwifi/iwl-7000.c
index 85879db..3c34a72 100644
--- a/drivers/net/wireless/iwlwifi/iwl-7000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-7000.c
@@ -67,8 +67,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
-#define IWL7260_UCODE_API_MAX 7
-#define IWL3160_UCODE_API_MAX 7
+#define IWL7260_UCODE_API_MAX 8
+#define IWL3160_UCODE_API_MAX 8
/* Oldest version we won't warn about */
#define IWL7260_UCODE_API_OK 7
@@ -130,6 +130,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
@@ -140,6 +141,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl7260_2n_cfg = {
@@ -149,6 +151,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl7260_n_cfg = {
@@ -158,6 +161,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl3160_2ac_cfg = {
@@ -167,6 +171,7 @@ const struct iwl_cfg iwl3160_2ac_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl3160_2n_cfg = {
@@ -176,6 +181,7 @@ const struct iwl_cfg iwl3160_2n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl3160_n_cfg = {
@@ -185,6 +191,7 @@ const struct iwl_cfg iwl3160_n_cfg = {
.ht_params = &iwl7000_ht_params,
.nvm_ver = IWL3160_NVM_VERSION,
.nvm_calib_ver = IWL3160_TX_POWER_VERSION,
+ .host_interrupt_operation_mode = true,
};
const struct iwl_cfg iwl7265_2ac_cfg = {
@@ -196,5 +203,23 @@ const struct iwl_cfg iwl7265_2ac_cfg = {
.nvm_calib_ver = IWL7265_TX_POWER_VERSION,
};
+const struct iwl_cfg iwl7265_2n_cfg = {
+ .name = "Intel(R) Dual Band Wireless N 7265",
+ .fw_name_pre = IWL7265_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7265_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
+const struct iwl_cfg iwl7265_n_cfg = {
+ .name = "Intel(R) Wireless N 7265",
+ .fw_name_pre = IWL7265_FW_PRE,
+ IWL_DEVICE_7000,
+ .ht_params = &iwl7000_ht_params,
+ .nvm_ver = IWL7265_NVM_VERSION,
+ .nvm_calib_ver = IWL7265_TX_POWER_VERSION,
+};
+
MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
diff --git a/drivers/net/wireless/iwlwifi/iwl-config.h b/drivers/net/wireless/iwlwifi/iwl-config.h
index 18f232e..03fd9aa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-config.h
+++ b/drivers/net/wireless/iwlwifi/iwl-config.h
@@ -207,6 +207,8 @@ struct iwl_eeprom_params {
* @rx_with_siso_diversity: 1x1 device with rx antenna diversity
* @internal_wimax_coex: internal wifi/wimax combo device
* @high_temp: Is this NIC is designated to be in high temperature.
+ * @host_interrupt_operation_mode: device needs host interrupt operation
+ * mode set
*
* We enable the driver to be backward compatible wrt. hardware features.
* API differences in uCode shouldn't be handled here but through TLVs
@@ -235,6 +237,7 @@ struct iwl_cfg {
enum iwl_led_mode led_mode;
const bool rx_with_siso_diversity;
const bool internal_wimax_coex;
+ const bool host_interrupt_operation_mode;
bool high_temp;
};
@@ -294,6 +297,8 @@ extern const struct iwl_cfg iwl3160_2ac_cfg;
extern const struct iwl_cfg iwl3160_2n_cfg;
extern const struct iwl_cfg iwl3160_n_cfg;
extern const struct iwl_cfg iwl7265_2ac_cfg;
+extern const struct iwl_cfg iwl7265_2n_cfg;
+extern const struct iwl_cfg iwl7265_n_cfg;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 54a4fdc..da4eca8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -495,14 +495,11 @@ enum secure_load_status_reg {
* the CSR_INT_COALESCING is an 8 bit register in 32-usec unit
*
* default interrupt coalescing timer is 64 x 32 = 2048 usecs
- * default interrupt coalescing calibration timer is 16 x 32 = 512 usecs
*/
#define IWL_HOST_INT_TIMEOUT_MAX (0xFF)
#define IWL_HOST_INT_TIMEOUT_DEF (0x40)
#define IWL_HOST_INT_TIMEOUT_MIN (0x0)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MAX (0xFF)
-#define IWL_HOST_INT_CALIB_TIMEOUT_DEF (0x10)
-#define IWL_HOST_INT_CALIB_TIMEOUT_MIN (0x0)
+#define IWL_HOST_INT_OPER_MODE BIT(31)
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
diff --git a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
index 5d066cb..75b72a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/bt-coex.c
@@ -391,7 +391,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_LUT |
BT_VALID_WIFI_RX_SW_PRIO_BOOST |
BT_VALID_WIFI_TX_SW_PRIO_BOOST |
- BT_VALID_MULTI_PRIO_LUT |
BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40 |
BT_VALID_ANT_ISOLATION |
@@ -842,6 +841,11 @@ static void iwl_mvm_bt_rssi_iterator(void *_data, u8 *mac,
sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id],
lockdep_is_held(&mvm->mutex));
+
+ /* This can happen if the station has just been removed */
+ if (IS_ERR_OR_NULL(sta))
+ return;
+
mvmsta = (void *)sta->drv_priv;
data->num_bss_ifaces++;
diff --git a/drivers/net/wireless/iwlwifi/mvm/d3.c b/drivers/net/wireless/iwlwifi/mvm/d3.c
index 6f45966..b9b81e8 100644
--- a/drivers/net/wireless/iwlwifi/mvm/d3.c
+++ b/drivers/net/wireless/iwlwifi/mvm/d3.c
@@ -895,7 +895,7 @@ static int iwl_mvm_get_last_nonqos_seq(struct iwl_mvm *mvm,
/* new API returns next, not last-used seqno */
if (mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_D3_CONTINUITY_API)
- err -= 0x10;
+ err = (u16) (err - 0x10);
}
iwl_free_resp(&cmd);
@@ -1549,7 +1549,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
if (gtkdata.unhandled_cipher)
return false;
if (!gtkdata.num_keys)
- return true;
+ goto out;
if (!gtkdata.last_gtk)
return false;
@@ -1600,6 +1600,7 @@ static bool iwl_mvm_setup_connection_keep(struct iwl_mvm *mvm,
(void *)&replay_ctr, GFP_KERNEL);
}
+out:
mvmvif->seqno_valid = true;
/* +0x10 because the set API expects next-to-use, not last-used */
mvmvif->seqno = le16_to_cpu(status->non_qos_seq_ctr) + 0x10;
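
A short note on the err = (u16) (err - 0x10) change above: the non-QoS sequence counter is a 16-bit quantity, so converting the firmware's next-to-use value back to last-used must wrap inside u16 space when the value is below 0x10, instead of going negative in a wider int. The arithmetic in isolation:

/* Illustration only: 16-bit wrap when subtracting the 0x10 offset. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
        int next = 0x0008;                        /* reported value below 0x10 */
        int broken = next - 0x10;                 /* -8: not a valid counter */
        uint16_t fixed = (uint16_t)(next - 0x10); /* 0xfff8: wraps within u16 */

        printf("broken=%d fixed=0x%04x\n", broken, fixed);
        return 0;
}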
diff --git a/drivers/net/wireless/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
index 9864d71..a8fe6b4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/debugfs.c
+++ b/drivers/net/wireless/iwlwifi/mvm/debugfs.c
@@ -119,6 +119,10 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct file *file,
if (sscanf(buf, "%d %d", &sta_id, &drain) != 2)
return -EINVAL;
+ if (sta_id < 0 || sta_id >= IWL_MVM_STATION_COUNT)
+ return -EINVAL;
+ if (drain < 0 || drain > 1)
+ return -EINVAL;
mutex_lock(&mvm->mutex);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index 33cf56f..95ce4b6 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -176,8 +176,11 @@ static void iwl_mvm_te_handle_notif(struct iwl_mvm *mvm,
* P2P Device discoveribility, while there are other higher priority
* events in the system).
*/
- if (WARN_ONCE(!le32_to_cpu(notif->status),
- "Failed to schedule time event\n")) {
+ if (!le32_to_cpu(notif->status)) {
+ bool start = le32_to_cpu(notif->action) &
+ TE_V2_NOTIF_HOST_EVENT_START;
+ IWL_WARN(mvm, "Time Event %s notification failure\n",
+ start ? "start" : "end");
if (iwl_mvm_te_check_disconnect(mvm, te_data->vif, NULL)) {
iwl_mvm_te_clear_data(mvm, te_data);
return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 941c0c8..8660502 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -353,6 +353,27 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
/* 7265 Series */
{IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5110, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5310, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5302, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5012, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x500A, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5200, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5002, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5202, iwl7265_n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9010, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5020, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x502A, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5420, iwl7265_2n_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5090, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095B, 0x5290, iwl7265_2ac_cfg)},
+ {IWL_PCI_DEVICE(0x095A, 0x5490, iwl7265_2ac_cfg)},
#endif /* CONFIG_IWLMVM */
{0}
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index fa22639..051268c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -477,4 +477,12 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
+static inline void iwl_nic_error(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ set_bit(STATUS_FW_ERROR, &trans_pcie->status);
+ iwl_op_mode_nic_error(trans->op_mode);
+}
+
#endif /* __iwl_trans_int_pcie_h__ */
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 3f237b4..be3995a 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -489,6 +489,10 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
/* Set interrupt coalescing timer to default (2048 usecs) */
iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
+
+ /* W/A for interrupt coalescing bug in 7260 and 3160 */
+ if (trans->cfg->host_interrupt_operation_mode)
+ iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}
static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
@@ -796,12 +800,13 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
iwl_pcie_dump_csr(trans);
iwl_dump_fh(trans, NULL);
+ /* set the ERROR bit before we wake up the caller */
set_bit(STATUS_FW_ERROR, &trans_pcie->status);
clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
wake_up(&trans_pcie->wait_command_queue);
local_bh_disable();
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_nic_error(trans);
local_bh_enable();
}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 5d9337b..cde9c16 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -279,9 +279,6 @@ static int iwl_pcie_nic_init(struct iwl_trans *trans)
spin_lock_irqsave(&trans_pcie->irq_lock, flags);
iwl_pcie_apm_init(trans);
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
iwl_pcie_set_pwr(trans, false);
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 059c5ac..0adde91 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -207,7 +207,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
IWL_ERR(trans, "scratch %d = 0x%08x\n", i,
le32_to_cpu(txq->scratchbufs[i].scratch));
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_nic_error(trans);
}
/*
@@ -1023,7 +1023,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, q->write_ptr, q->read_ptr);
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_nic_error(trans);
}
}
@@ -1562,7 +1562,7 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
get_cmd_string(trans_pcie, cmd->id));
ret = -ETIMEDOUT;
- iwl_op_mode_nic_error(trans->op_mode);
+ iwl_nic_error(trans);
goto cancel;
}
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 9df7bc9..c72438b 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -383,6 +383,14 @@ struct hwsim_radiotap_hdr {
__le16 rt_chbitmask;
} __packed;
+struct hwsim_radiotap_ack_hdr {
+ struct ieee80211_radiotap_header hdr;
+ u8 rt_flags;
+ u8 pad;
+ __le16 rt_channel;
+ __le16 rt_chbitmask;
+} __packed;
+
/* MAC80211_HWSIM netlinf family */
static struct genl_family hwsim_genl_family = {
.id = GENL_ID_GENERATE,
@@ -500,7 +508,7 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
const u8 *addr)
{
struct sk_buff *skb;
- struct hwsim_radiotap_hdr *hdr;
+ struct hwsim_radiotap_ack_hdr *hdr;
u16 flags;
struct ieee80211_hdr *hdr11;
@@ -511,14 +519,14 @@ static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan,
if (skb == NULL)
return;
- hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr));
+ hdr = (struct hwsim_radiotap_ack_hdr *) skb_put(skb, sizeof(*hdr));
hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
hdr->hdr.it_pad = 0;
hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
(1 << IEEE80211_RADIOTAP_CHANNEL));
hdr->rt_flags = 0;
- hdr->rt_rate = 0;
+ hdr->pad = 0;
hdr->rt_channel = cpu_to_le16(chan->center_freq);
flags = IEEE80211_CHAN_2GHZ;
hdr->rt_chbitmask = cpu_to_le16(flags);
@@ -1230,7 +1238,7 @@ static void mac80211_hwsim_bss_info_changed(struct ieee80211_hw *hw,
HRTIMER_MODE_REL);
} else if (!info->enable_beacon) {
unsigned int count = 0;
- ieee80211_iterate_active_interfaces(
+ ieee80211_iterate_active_interfaces_atomic(
data->hw, IEEE80211_IFACE_ITER_NORMAL,
mac80211_hwsim_bcn_en_iter, &count);
wiphy_debug(hw->wiphy, " beaconing vifs remaining: %u",
diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c
index c8e029d..a09398f 100644
--- a/drivers/net/wireless/mwifiex/sta_ioctl.c
+++ b/drivers/net/wireless/mwifiex/sta_ioctl.c
@@ -319,8 +319,8 @@ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss,
if (bss_desc && bss_desc->ssid.ssid_len &&
(!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor.
ssid, &bss_desc->ssid))) {
- kfree(bss_desc);
- return 0;
+ ret = 0;
+ goto done;
}
/* Exit Adhoc mode first */
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 0f49444..5a53195 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -740,6 +740,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
};
int index = rtlpci->rx_ring[rx_queue_idx].idx;
+ if (rtlpci->driver_is_goingto_unload)
+ return;
/*RX NORMAL PKT */
while (count--) {
/*rx descriptor */
@@ -1636,6 +1638,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
*/
set_hal_stop(rtlhal);
+ rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->disable_interrupt(hw);
cancel_work_sync(&rtlpriv->works.lps_change_work);
@@ -1653,7 +1656,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw)
ppsc->rfchange_inprogress = true;
spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
- rtlpci->driver_is_goingto_unload = true;
rtlpriv->cfg->ops->hw_disable(hw);
/* some things are not needed if firmware not available */
if (!rtlpriv->max_fw_size)
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 08ae01b..c47794b 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -101,6 +101,13 @@ struct xenvif_rx_meta {
#define MAX_PENDING_REQS 256
+/* It's possible for an skb to have a maximal number of frags
+ * but still be less than MAX_BUFFER_OFFSET in size. Thus the
+ * worst-case number of copy operations is MAX_SKB_FRAGS per
+ * ring slot.
+ */
+#define MAX_GRANT_COPY_OPS (MAX_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
+
struct xenvif {
/* Unique identifier for this interface. */
domid_t domid;
@@ -143,13 +150,13 @@ struct xenvif {
*/
RING_IDX rx_req_cons_peek;
- /* Given MAX_BUFFER_OFFSET of 4096 the worst case is that each
- * head/fragment page uses 2 copy operations because it
- * straddles two buffers in the frontend.
- */
- struct gnttab_copy grant_copy_op[2*XEN_NETIF_RX_RING_SIZE];
- struct xenvif_rx_meta meta[2*XEN_NETIF_RX_RING_SIZE];
+ /* This array is allocated separately as it is large */
+ struct gnttab_copy *grant_copy_op;
+ /* We create one meta structure per ring request we consume, so
+ * the maximum number is the same as the ring size.
+ */
+ struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];
u8 fe_dev_addr[6];
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 2329ccc..fff8cdd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/if_vlan.h>
+#include <linux/vmalloc.h>
#include <xen/events.h>
#include <asm/xen/hypercall.h>
@@ -307,6 +308,15 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
SET_NETDEV_DEV(dev, parent);
vif = netdev_priv(dev);
+
+ vif->grant_copy_op = vmalloc(sizeof(struct gnttab_copy) *
+ MAX_GRANT_COPY_OPS);
+ if (vif->grant_copy_op == NULL) {
+ pr_warn("Could not allocate grant copy space for %s\n", name);
+ free_netdev(dev);
+ return ERR_PTR(-ENOMEM);
+ }
+
vif->domid = domid;
vif->handle = handle;
vif->can_sg = 1;
@@ -368,11 +378,11 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
unsigned long rx_ring_ref, unsigned int tx_evtchn,
unsigned int rx_evtchn)
{
+ struct task_struct *task;
int err = -ENOMEM;
- /* Already connected through? */
- if (vif->tx_irq)
- return 0;
+ BUG_ON(vif->tx_irq);
+ BUG_ON(vif->task);
err = xenvif_map_frontend_rings(vif, tx_ring_ref, rx_ring_ref);
if (err < 0)
@@ -411,14 +421,16 @@ int xenvif_connect(struct xenvif *vif, unsigned long tx_ring_ref,
}
init_waitqueue_head(&vif->wq);
- vif->task = kthread_create(xenvif_kthread,
- (void *)vif, "%s", vif->dev->name);
- if (IS_ERR(vif->task)) {
+ task = kthread_create(xenvif_kthread,
+ (void *)vif, "%s", vif->dev->name);
+ if (IS_ERR(task)) {
pr_warn("Could not allocate kthread for %s\n", vif->dev->name);
- err = PTR_ERR(vif->task);
+ err = PTR_ERR(task);
goto err_rx_unbind;
}
+ vif->task = task;
+
rtnl_lock();
if (!vif->can_sg && vif->dev->mtu > ETH_DATA_LEN)
dev_set_mtu(vif->dev, ETH_DATA_LEN);
@@ -461,8 +473,10 @@ void xenvif_disconnect(struct xenvif *vif)
if (netif_carrier_ok(vif->dev))
xenvif_carrier_off(vif);
- if (vif->task)
+ if (vif->task) {
kthread_stop(vif->task);
+ vif->task = NULL;
+ }
if (vif->tx_irq) {
if (vif->tx_irq == vif->rx_irq)
@@ -483,6 +497,7 @@ void xenvif_free(struct xenvif *vif)
unregister_netdev(vif->dev);
+ vfree(vif->grant_copy_op);
free_netdev(vif->dev);
module_put(THIS_MODULE);
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 64f0e0d..7842555 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -452,7 +452,7 @@ static int xenvif_gop_skb(struct sk_buff *skb,
}
/* Set up a GSO prefix descriptor, if necessary */
- if ((1 << skb_shinfo(skb)->gso_type) & vif->gso_prefix_mask) {
+ if ((1 << gso_type) & vif->gso_prefix_mask) {
req = RING_GET_REQUEST(&vif->rx, vif->rx.req_cons++);
meta = npo->meta + npo->meta_prod++;
meta->gso_type = gso_type;
@@ -608,7 +608,7 @@ void xenvif_rx_action(struct xenvif *vif)
if (!npo.copy_prod)
return;
- BUG_ON(npo.copy_prod > ARRAY_SIZE(vif->grant_copy_op));
+ BUG_ON(npo.copy_prod > MAX_GRANT_COPY_OPS);
gnttab_batch_copy(vif->grant_copy_op, npo.copy_prod);
while ((skb = __skb_dequeue(&rxq)) != NULL) {
@@ -1149,75 +1149,99 @@ static int xenvif_set_skb_gso(struct xenvif *vif,
return 0;
}
-static inline void maybe_pull_tail(struct sk_buff *skb, unsigned int len)
+static inline int maybe_pull_tail(struct sk_buff *skb, unsigned int len,
+ unsigned int max)
{
- if (skb_is_nonlinear(skb) && skb_headlen(skb) < len) {
- /* If we need to pullup then pullup to the max, so we
- * won't need to do it again.
- */
- int target = min_t(int, skb->len, MAX_TCP_HEADER);
- __pskb_pull_tail(skb, target - skb_headlen(skb));
- }
+ if (skb_headlen(skb) >= len)
+ return 0;
+
+ /* If we need to pullup then pullup to the max, so we
+ * won't need to do it again.
+ */
+ if (max > skb->len)
+ max = skb->len;
+
+ if (__pskb_pull_tail(skb, max - skb_headlen(skb)) == NULL)
+ return -ENOMEM;
+
+ if (skb_headlen(skb) < len)
+ return -EPROTO;
+
+ return 0;
}
+/* This value should be large enough to cover a tagged ethernet header plus
+ * maximally sized IP and TCP or UDP headers.
+ */
+#define MAX_IP_HDR_LEN 128
+
static int checksum_setup_ip(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
- struct iphdr *iph = (void *)skb->data;
- unsigned int header_size;
unsigned int off;
- int err = -EPROTO;
+ bool fragment;
+ int err;
- off = sizeof(struct iphdr);
+ fragment = false;
- header_size = skb->network_header + off + MAX_IPOPTLEN;
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ sizeof(struct iphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
- off = iph->ihl * 4;
+ if (ip_hdr(skb)->frag_off & htons(IP_OFFSET | IP_MF))
+ fragment = true;
- switch (iph->protocol) {
- case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
- goto out;
+ off = ip_hdrlen(skb);
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
+ err = -EPROTO;
+
+ if (fragment)
+ goto out;
- header_size = skb->network_header +
- off +
- sizeof(struct tcphdr);
- maybe_pull_tail(skb, header_size);
+ switch (ip_hdr(skb)->protocol) {
+ case IPPROTO_TCP:
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
+ goto out;
- tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ tcp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IP_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct udphdr);
- maybe_pull_tail(skb, header_size);
-
- udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ udp_hdr(skb)->check =
+ ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
+ ip_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
break;
default:
- if (net_ratelimit())
- netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, "
- "dropping a protocol %d packet\n",
- iph->protocol);
goto out;
}
@@ -1227,121 +1251,142 @@ out:
return err;
}
+/* This value should be large enough to cover a tagged ethernet header plus
+ * an IPv6 header, all options, and a maximal TCP or UDP header.
+ */
+#define MAX_IPV6_HDR_LEN 256
+
+#define OPT_HDR(type, skb, off) \
+ (type *)(skb_network_header(skb) + (off))
+
static int checksum_setup_ipv6(struct xenvif *vif, struct sk_buff *skb,
int recalculate_partial_csum)
{
- int err = -EPROTO;
- struct ipv6hdr *ipv6h = (void *)skb->data;
+ int err;
u8 nexthdr;
- unsigned int header_size;
unsigned int off;
+ unsigned int len;
bool fragment;
bool done;
+ fragment = false;
done = false;
off = sizeof(struct ipv6hdr);
- header_size = skb->network_header + off;
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb, off, MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
- nexthdr = ipv6h->nexthdr;
+ nexthdr = ipv6_hdr(skb)->nexthdr;
- while ((off <= sizeof(struct ipv6hdr) + ntohs(ipv6h->payload_len)) &&
- !done) {
+ len = sizeof(struct ipv6hdr) + ntohs(ipv6_hdr(skb)->payload_len);
+ while (off <= len && !done) {
switch (nexthdr) {
case IPPROTO_DSTOPTS:
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING: {
- struct ipv6_opt_hdr *hp = (void *)(skb->data + off);
+ struct ipv6_opt_hdr *hp;
- header_size = skb->network_header +
- off +
- sizeof(struct ipv6_opt_hdr);
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct ipv6_opt_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+ hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
nexthdr = hp->nexthdr;
off += ipv6_optlen(hp);
break;
}
case IPPROTO_AH: {
- struct ip_auth_hdr *hp = (void *)(skb->data + off);
+ struct ip_auth_hdr *hp;
- header_size = skb->network_header +
- off +
- sizeof(struct ip_auth_hdr);
- maybe_pull_tail(skb, header_size);
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct ip_auth_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+ hp = OPT_HDR(struct ip_auth_hdr, skb, off);
nexthdr = hp->nexthdr;
- off += (hp->hdrlen+2)<<2;
+ off += ipv6_authlen(hp);
+ break;
+ }
+ case IPPROTO_FRAGMENT: {
+ struct frag_hdr *hp;
+
+ err = maybe_pull_tail(skb,
+ off +
+ sizeof(struct frag_hdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
+ goto out;
+
+ hp = OPT_HDR(struct frag_hdr, skb, off);
+
+ if (hp->frag_off & htons(IP6_OFFSET | IP6_MF))
+ fragment = true;
+
+ nexthdr = hp->nexthdr;
+ off += sizeof(struct frag_hdr);
break;
}
- case IPPROTO_FRAGMENT:
- fragment = true;
- /* fall through */
default:
done = true;
break;
}
}
- if (!done) {
- if (net_ratelimit())
- netdev_err(vif->dev, "Failed to parse packet header\n");
- goto out;
- }
+ err = -EPROTO;
- if (fragment) {
- if (net_ratelimit())
- netdev_err(vif->dev, "Packet is a fragment!\n");
+ if (!done || fragment)
goto out;
- }
switch (nexthdr) {
case IPPROTO_TCP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct tcphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct tcphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct tcphdr *tcph = tcp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct tcphdr);
- maybe_pull_tail(skb, header_size);
-
- tcph->check = ~csum_ipv6_magic(&ipv6h->saddr,
- &ipv6h->daddr,
- skb->len - off,
- IPPROTO_TCP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct tcphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ tcp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_TCP, 0);
break;
case IPPROTO_UDP:
- if (!skb_partial_csum_set(skb, off,
- offsetof(struct udphdr, check)))
+ err = maybe_pull_tail(skb,
+ off + sizeof(struct udphdr),
+ MAX_IPV6_HDR_LEN);
+ if (err < 0)
goto out;
- if (recalculate_partial_csum) {
- struct udphdr *udph = udp_hdr(skb);
-
- header_size = skb->network_header +
- off +
- sizeof(struct udphdr);
- maybe_pull_tail(skb, header_size);
-
- udph->check = ~csum_ipv6_magic(&ipv6h->saddr,
- &ipv6h->daddr,
- skb->len - off,
- IPPROTO_UDP, 0);
+ if (!skb_partial_csum_set(skb, off,
+ offsetof(struct udphdr, check))) {
+ err = -EPROTO;
+ goto out;
}
+
+ if (recalculate_partial_csum)
+ udp_hdr(skb)->check =
+ ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+ &ipv6_hdr(skb)->daddr,
+ skb->len - off,
+ IPPROTO_UDP, 0);
break;
default:
- if (net_ratelimit())
- netdev_err(vif->dev,
- "Attempting to checksum a non-TCP/UDP packet, "
- "dropping a protocol %d packet\n",
- nexthdr);
goto out;
}
@@ -1411,14 +1456,15 @@ static bool tx_credit_exceeded(struct xenvif *vif, unsigned size)
return false;
}
-static unsigned xenvif_tx_build_gops(struct xenvif *vif)
+static unsigned xenvif_tx_build_gops(struct xenvif *vif, int budget)
{
struct gnttab_copy *gop = vif->tx_copy_ops, *request_gop;
struct sk_buff *skb;
int ret;
while ((nr_pending_reqs(vif) + XEN_NETBK_LEGACY_SLOTS_MAX
- < MAX_PENDING_REQS)) {
+ < MAX_PENDING_REQS) &&
+ (skb_queue_len(&vif->tx_queue) < budget)) {
struct xen_netif_tx_request txreq;
struct xen_netif_tx_request txfrags[XEN_NETBK_LEGACY_SLOTS_MAX];
struct page *page;
@@ -1440,7 +1486,7 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
continue;
}
- RING_FINAL_CHECK_FOR_REQUESTS(&vif->tx, work_to_do);
+ work_to_do = RING_HAS_UNCONSUMED_REQUESTS(&vif->tx);
if (!work_to_do)
break;
@@ -1580,14 +1626,13 @@ static unsigned xenvif_tx_build_gops(struct xenvif *vif)
}
-static int xenvif_tx_submit(struct xenvif *vif, int budget)
+static int xenvif_tx_submit(struct xenvif *vif)
{
struct gnttab_copy *gop = vif->tx_copy_ops;
struct sk_buff *skb;
int work_done = 0;
- while (work_done < budget &&
- (skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
+ while ((skb = __skb_dequeue(&vif->tx_queue)) != NULL) {
struct xen_netif_tx_request *txp;
u16 pending_idx;
unsigned data_len;
@@ -1662,14 +1707,14 @@ int xenvif_tx_action(struct xenvif *vif, int budget)
if (unlikely(!tx_work_todo(vif)))
return 0;
- nr_gops = xenvif_tx_build_gops(vif);
+ nr_gops = xenvif_tx_build_gops(vif, budget);
if (nr_gops == 0)
return 0;
gnttab_batch_copy(vif->tx_copy_ops, nr_gops);
- work_done = xenvif_tx_submit(vif, nr_gops);
+ work_done = xenvif_tx_submit(vif);
return work_done;
}
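
The new maybe_pull_tail() above captures the validation pattern used throughout the reworked checksum code: pull up to a bounded maximum so the linear area only has to be extended once, then distinguish an allocation failure from a packet that is simply too short. A userspace model of that control flow, with a plain struct standing in for the skb and the pull itself simulated:

/* Illustration only: bounded pull-up with distinct error paths.
 * A real implementation would also return -ENOMEM if the pull failed.
 */
#include <errno.h>
#include <stdio.h>

struct fake_skb {
        unsigned int headlen;           /* stands in for skb_headlen() */
        unsigned int pkt_len;           /* stands in for skb->len */
};

static int maybe_pull_tail(struct fake_skb *skb, unsigned int len,
                           unsigned int max)
{
        if (skb->headlen >= len)
                return 0;               /* header is already linear */

        if (max > skb->pkt_len)
                max = skb->pkt_len;     /* never pull more than the packet holds */

        skb->headlen = max;             /* simulate a successful __pskb_pull_tail() */

        if (skb->headlen < len)
                return -EPROTO;         /* packet really is too short */

        return 0;
}

int main(void)
{
        struct fake_skb skb = { .headlen = 14, .pkt_len = 60 };

        printf("%d\n", maybe_pull_tail(&skb, 34, 128));  /* 0 */
        printf("%d\n", maybe_pull_tail(&skb, 200, 128)); /* -EPROTO */
        return 0;
}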
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index de6f899..c6973f1 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -20,7 +20,7 @@ config OF_SELFTEST
depends on OF_IRQ
help
This option builds in test cases for the device tree infrastructure
- that are executed one at boot time, and the results dumped to the
+ that are executed once at boot time, and the results dumped to the
console.
If unsure, say N here, but this option is safe to enable.
diff --git a/drivers/of/address.c b/drivers/of/address.c
index 4b9317b..d3dd41c 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -69,14 +69,6 @@ static u64 of_bus_default_map(__be32 *addr, const __be32 *range,
(unsigned long long)cp, (unsigned long long)s,
(unsigned long long)da);
- /*
- * If the number of address cells is larger than 2 we assume the
- * mapping doesn't specify a physical address. Rather, the address
- * specifies an identifier that must match exactly.
- */
- if (na > 2 && memcmp(range, addr, na * 4) != 0)
- return OF_BAD_ADDR;
-
if (da < cp || da >= (cp + s))
return OF_BAD_ADDR;
return da - cp;
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 2fa024b..758b4f8 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -922,8 +922,16 @@ void __init unflatten_device_tree(void)
*/
void __init unflatten_and_copy_device_tree(void)
{
- int size = __be32_to_cpu(initial_boot_params->totalsize);
- void *dt = early_init_dt_alloc_memory_arch(size,
+ int size;
+ void *dt;
+
+ if (!initial_boot_params) {
+ pr_warn("No valid device tree found, continuing without\n");
+ return;
+ }
+
+ size = __be32_to_cpu(initial_boot_params->totalsize);
+ dt = early_init_dt_alloc_memory_arch(size,
__alignof__(struct boot_param_header));
if (dt) {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 786b0b4..2721240 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -165,7 +165,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
if (of_get_property(ipar, "interrupt-controller", NULL) !=
NULL) {
pr_debug(" -> got it !\n");
- of_node_put(old);
return 0;
}
@@ -250,8 +249,7 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
* Successfully parsed an interrrupt-map translation; copy new
* interrupt specifier into the out_irq structure
*/
- of_node_put(out_irq->np);
- out_irq->np = of_node_get(newpar);
+ out_irq->np = newpar;
match_array = imap - newaddrsize - newintsize;
for (i = 0; i < newintsize; i++)
@@ -268,7 +266,6 @@ int of_irq_parse_raw(const __be32 *addr, struct of_phandle_args *out_irq)
}
fail:
of_node_put(ipar);
- of_node_put(out_irq->np);
of_node_put(newpar);
return -EINVAL;
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index c269e43..2aa7b77c 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -447,6 +447,11 @@ static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
*value = 0;
break;
+ case PCI_INTERRUPT_LINE:
+ /* LINE PIN MIN_GNT MAX_LAT */
+ *value = 0;
+ break;
+
default:
*value = 0xffffffff;
return PCIBIOS_BAD_REGISTER_NUMBER;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 1cf605f..e864392 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -279,7 +279,9 @@ static acpi_status register_slot(acpi_handle handle, u32 lvl, void *data,
status = acpi_evaluate_integer(handle, "_ADR", NULL, &adr);
if (ACPI_FAILURE(status)) {
- acpi_handle_warn(handle, "can't evaluate _ADR (%#x)\n", status);
+ if (status != AE_NOT_FOUND)
+ acpi_handle_warn(handle,
+ "can't evaluate _ADR (%#x)\n", status);
return AE_OK;
}
@@ -643,6 +645,24 @@ static void disable_slot(struct acpiphp_slot *slot)
slot->flags &= (~SLOT_ENABLED);
}
+static bool acpiphp_no_hotplug(acpi_handle handle)
+{
+ struct acpi_device *adev = NULL;
+
+ acpi_bus_get_device(handle, &adev);
+ return adev && adev->flags.no_hotplug;
+}
+
+static bool slot_no_hotplug(struct acpiphp_slot *slot)
+{
+ struct acpiphp_func *func;
+
+ list_for_each_entry(func, &slot->funcs, sibling)
+ if (acpiphp_no_hotplug(func_to_handle(func)))
+ return true;
+
+ return false;
+}
/**
* get_slot_status - get ACPI slot status
@@ -701,7 +721,8 @@ static void trim_stale_devices(struct pci_dev *dev)
unsigned long long sta;
status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
- alive = ACPI_SUCCESS(status) && sta == ACPI_STA_ALL;
+ alive = (ACPI_SUCCESS(status) && sta == ACPI_STA_ALL)
+ || acpiphp_no_hotplug(handle);
}
if (!alive) {
u32 v;
@@ -741,8 +762,9 @@ static void acpiphp_check_bridge(struct acpiphp_bridge *bridge)
struct pci_dev *dev, *tmp;
mutex_lock(&slot->crit_sect);
- /* wake up all functions */
- if (get_slot_status(slot) == ACPI_STA_ALL) {
+ if (slot_no_hotplug(slot)) {
+ ; /* do nothing */
+ } else if (get_slot_status(slot) == ACPI_STA_ALL) {
/* remove stale devices if any */
list_for_each_entry_safe(dev, tmp, &bus->devices,
bus_list)
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 577074e..f7ebdba 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -330,29 +330,32 @@ static int acpi_pci_find_device(struct device *dev, acpi_handle *handle)
static void pci_acpi_setup(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
- if (acpi_bus_get_device(handle, &adev) || !adev->wakeup.flags.valid)
+ if (!adev)
+ return;
+
+ pci_acpi_add_pm_notifier(adev, pci_dev);
+ if (!adev->wakeup.flags.valid)
return;
device_set_wakeup_capable(dev, true);
acpi_pci_sleep_wake(pci_dev, false);
-
- pci_acpi_add_pm_notifier(adev, pci_dev);
if (adev->wakeup.flags.run_wake)
device_set_run_wake(dev, true);
}
static void pci_acpi_cleanup(struct device *dev)
{
- acpi_handle handle = ACPI_HANDLE(dev);
- struct acpi_device *adev;
+ struct acpi_device *adev = ACPI_COMPANION(dev);
+
+ if (!adev)
+ return;
- if (!acpi_bus_get_device(handle, &adev) && adev->wakeup.flags.valid) {
+ pci_acpi_remove_pm_notifier(adev);
+ if (adev->wakeup.flags.valid) {
device_set_wakeup_capable(dev, false);
device_set_run_wake(dev, false);
- pci_acpi_remove_pm_notifier(adev);
}
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 9042fdb..25f0bc6 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
+#include <linux/kexec.h>
#include "pci.h"
struct pci_dynid {
@@ -288,12 +289,27 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
int error, node;
struct drv_dev_and_id ddi = { drv, dev, id };
- /* Execute driver initialization on node where the device's
- bus is attached to. This way the driver likely allocates
- its local memory on the right node without any need to
- change it. */
+ /*
+ * Execute driver initialization on node where the device is
+ * attached. This way the driver likely allocates its local memory
+ * on the right node.
+ */
node = dev_to_node(&dev->dev);
- if (node >= 0) {
+
+ /*
+ * On NUMA systems, we are likely to call a PF probe function using
+ * work_on_cpu(). If that probe calls pci_enable_sriov() (which
+ * adds the VF devices via pci_bus_add_device()), we may re-enter
+ * this function to call the VF probe function. Calling
+ * work_on_cpu() again will cause a lockdep warning. Since VFs are
+ * always on the same node as the PF, we can work around this by
+ * avoiding work_on_cpu() when we're already on the correct node.
+ *
+ * Preemption is enabled, so it's theoretically unsafe to use
+ * numa_node_id(), but even if we run the probe function on the
+ * wrong node, it should be functionally correct.
+ */
+ if (node >= 0 && node != numa_node_id()) {
int cpu;
get_online_cpus();
@@ -305,6 +321,7 @@ static int pci_call_probe(struct pci_driver *drv, struct pci_dev *dev,
put_online_cpus();
} else
error = local_pci_probe(&ddi);
+
return error;
}
@@ -399,12 +416,17 @@ static void pci_device_shutdown(struct device *dev)
pci_msi_shutdown(pci_dev);
pci_msix_shutdown(pci_dev);
+#ifdef CONFIG_KEXEC
/*
- * Turn off Bus Master bit on the device to tell it to not
- * continue to do DMA. Don't touch devices in D3cold or unknown states.
+	 * If this is a kexec reboot, turn off the Bus Master bit on the
+	 * device to tell it not to continue doing DMA. Don't touch
+	 * devices in D3cold or unknown states.
+	 * If it is not a kexec reboot, the firmware will hit the PCI
+	 * devices with a big hammer and stop their DMA anyway.
*/
- if (pci_dev->current_state <= PCI_D3hot)
+ if (kexec_in_progress && (pci_dev->current_state <= PCI_D3hot))
pci_clear_master(pci_dev);
+#endif
}
#ifdef CONFIG_PM
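The pci_call_probe() comment above says work_on_cpu() is only worth the trip when the current CPU sits on a different NUMA node than the device, which also avoids re-entering work_on_cpu() when an SR-IOV PF probe adds VFs. A minimal, hypothetical user-space C model of that decision (not part of the patch; run_on_node() and local_probe() stand in for work_on_cpu() and local_pci_probe()):

#include <stdio.h>

static int local_probe(void)                        /* stands in for local_pci_probe() */
{
        return 0;
}

static int run_on_node(int node, int (*fn)(void))   /* stands in for work_on_cpu() */
{
        printf("bouncing probe to node %d\n", node);
        return fn();
}

static int call_probe(int dev_node, int cur_node)
{
        /* Only pay the bounce (and risk re-entering it for VF probes)
         * when we are on the wrong node; -1 means "no node affinity". */
        if (dev_node >= 0 && dev_node != cur_node)
                return run_on_node(dev_node, local_probe);
        return local_probe();
}

int main(void)
{
        printf("wrong node -> %d\n", call_probe(1, 0));
        printf("right node -> %d\n", call_probe(0, 0));
        return 0;
}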
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 33120d1..07369f3 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -4165,6 +4165,14 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
return 0;
}
+bool pci_device_is_present(struct pci_dev *pdev)
+{
+ u32 v;
+
+ return pci_bus_read_dev_vendor_id(pdev->bus, pdev->devfn, &v, 0);
+}
+EXPORT_SYMBOL_GPL(pci_device_is_present);
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
static DEFINE_SPINLOCK(resource_alignment_lock);
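The new pci_device_is_present() simply asks whether a vendor/device ID config read still succeeds. A rough user-space C model of the idea (not part of the patch); it only treats the all-ones and all-zeros patterns as absent, while the real pci_bus_read_dev_vendor_id() also handles CRS and other invalid ID patterns:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* A surprise-removed device answers config reads with all-ones. */
static bool device_is_present(uint32_t id_dword)
{
        return id_dword != 0xffffffff && id_dword != 0x00000000;
}

int main(void)
{
        printf("0x153b1043 -> %d\n", device_is_present(0x153b1043)); /* arbitrary valid IDs */
        printf("0xffffffff -> %d\n", device_is_present(0xffffffff)); /* device gone */
        return 0;
}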
diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c
index 1576851..cc9337a 100644
--- a/drivers/pci/remove.c
+++ b/drivers/pci/remove.c
@@ -24,7 +24,7 @@ static void pci_stop_dev(struct pci_dev *dev)
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
- device_del(&dev->dev);
+ device_release_driver(&dev->dev);
dev->is_added = 0;
}
@@ -34,6 +34,8 @@ static void pci_stop_dev(struct pci_dev *dev)
static void pci_destroy_dev(struct pci_dev *dev)
{
+ device_del(&dev->dev);
+
down_write(&pci_bus_sem);
list_del(&dev->bus_list);
up_write(&pci_bus_sem);
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index a344f3d..330ef2d 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -24,8 +24,8 @@ config PHY_EXYNOS_MIPI_VIDEO
config OMAP_USB2
tristate "OMAP USB2 PHY Driver"
depends on ARCH_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
select OMAP_CONTROL_USB
help
Enable this to support the transceiver that is part of the SoC. This
@@ -36,8 +36,8 @@ config OMAP_USB2
config TWL4030_USB
tristate "TWL4030 USB Transceiver Driver"
depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
+ depends on USB_PHY
select GENERIC_PHY
- select USB_PHY
help
Enable this to support the USB OTG transceiver on TWL4030
family chips (including the TWL5030 and TPS659x0 devices).
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 03cf8fb..58e0e97 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -437,23 +437,18 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
int id;
struct phy *phy;
- if (!dev) {
- dev_WARN(dev, "no device provided for PHY\n");
- ret = -EINVAL;
- goto err0;
- }
+ if (WARN_ON(!dev))
+ return ERR_PTR(-EINVAL);
phy = kzalloc(sizeof(*phy), GFP_KERNEL);
- if (!phy) {
- ret = -ENOMEM;
- goto err0;
- }
+ if (!phy)
+ return ERR_PTR(-ENOMEM);
id = ida_simple_get(&phy_ida, 0, 0, GFP_KERNEL);
if (id < 0) {
dev_err(dev, "unable to get id\n");
ret = id;
- goto err0;
+ goto free_phy;
}
device_initialize(&phy->dev);
@@ -468,11 +463,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
ret = dev_set_name(&phy->dev, "phy-%s.%d", dev_name(dev), id);
if (ret)
- goto err1;
+ goto put_dev;
ret = device_add(&phy->dev);
if (ret)
- goto err1;
+ goto put_dev;
if (pm_runtime_enabled(dev)) {
pm_runtime_enable(&phy->dev);
@@ -481,12 +476,11 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
return phy;
-err1:
- ida_remove(&phy_ida, phy->id);
+put_dev:
put_device(&phy->dev);
+ ida_remove(&phy_ida, phy->id);
+free_phy:
kfree(phy);
-
-err0:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(phy_create);
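The phy_create() rework above renames the numbered error labels to descriptive ones (free_phy, put_dev) and unwinds in reverse order of setup. A tiny self-contained C sketch of that goto-unwind shape (illustrative names only, not kernel APIs):

#include <stdlib.h>

static int setup_two_buffers(char **pa, char **pb)
{
        char *a, *b;

        a = malloc(16);
        if (!a)
                return -1;

        b = malloc(32);
        if (!b)
                goto free_a;            /* undo only what already succeeded */

        *pa = a;
        *pb = b;
        return 0;

free_a:
        free(a);
        return -1;
}

int main(void)
{
        char *a, *b;

        if (setup_two_buffers(&a, &b))
                return 1;
        free(b);
        free(a);
        return 0;
}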
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index 2832576..114f5ef 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -512,6 +512,7 @@ static const struct dev_pm_ops byt_gpio_pm_ops = {
static const struct acpi_device_id byt_gpio_acpi_match[] = {
{ "INT33B2", 0 },
+ { "INT33FC", 0 },
{ }
};
MODULE_DEVICE_TABLE(acpi, byt_gpio_acpi_match);
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index 11bd0d9..e214295 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -254,7 +254,7 @@ struct sh_pfc_soc_info {
#define PINMUX_GPIO(_pin) \
[GPIO_##_pin] = { \
.pin = (u16)-1, \
- .name = __stringify(name), \
+ .name = __stringify(GPIO_##_pin), \
.enum_id = _pin##_DATA, \
}
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 5e2054a..85ad58c 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -196,6 +196,7 @@ config BATTERY_MAX17040
config BATTERY_MAX17042
tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
depends on I2C
+ select REGMAP_I2C
help
MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
in handheld and portable equipment. The MAX17042 is configured
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 00e6672..557af94 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -511,6 +511,10 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
dev_set_drvdata(dev, psy);
psy->dev = dev;
+ rc = dev_set_name(dev, "%s", psy->name);
+ if (rc)
+ goto dev_set_name_failed;
+
INIT_WORK(&psy->changed_work, power_supply_changed_work);
rc = power_supply_check_supplies(psy);
@@ -524,10 +528,6 @@ int power_supply_register(struct device *parent, struct power_supply *psy)
if (rc)
goto wakeup_init_failed;
- rc = kobject_set_name(&dev->kobj, "%s", psy->name);
- if (rc)
- goto kobject_set_name_failed;
-
rc = device_add(dev);
if (rc)
goto device_add_failed;
@@ -553,11 +553,11 @@ create_triggers_failed:
register_cooler_failed:
psy_unregister_thermal(psy);
register_thermal_failed:
-wakeup_init_failed:
device_del(dev);
-kobject_set_name_failed:
device_add_failed:
+wakeup_init_failed:
check_supplies_failed:
+dev_set_name_failed:
put_device(dev);
success:
return rc;
diff --git a/drivers/powercap/intel_rapl.c b/drivers/powercap/intel_rapl.c
index 2a786c5..3c67683 100644
--- a/drivers/powercap/intel_rapl.c
+++ b/drivers/powercap/intel_rapl.c
@@ -833,6 +833,11 @@ static int rapl_write_data_raw(struct rapl_domain *rd,
return 0;
}
+static const struct x86_cpu_id energy_unit_quirk_ids[] = {
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
+ {}
+};
+
static int rapl_check_unit(struct rapl_package *rp, int cpu)
{
u64 msr_val;
@@ -853,8 +858,11 @@ static int rapl_check_unit(struct rapl_package *rp, int cpu)
* time unit: 1/time_unit_divisor Seconds
*/
value = (msr_val & ENERGY_UNIT_MASK) >> ENERGY_UNIT_OFFSET;
- rp->energy_unit_divisor = 1 << value;
-
+	/* some CPUs have a different way to calculate the energy unit */
+ if (x86_match_cpu(energy_unit_quirk_ids))
+ rp->energy_unit_divisor = 1000000 / (1 << value);
+ else
+ rp->energy_unit_divisor = 1 << value;
value = (msr_val & POWER_UNIT_MASK) >> POWER_UNIT_OFFSET;
rp->power_unit_divisor = 1 << value;
@@ -941,6 +949,7 @@ static void package_power_limit_irq_restore(int package_id)
static const struct x86_cpu_id rapl_ids[] = {
{ X86_VENDOR_INTEL, 6, 0x2a},/* SNB */
{ X86_VENDOR_INTEL, 6, 0x2d},/* SNB EP */
+ { X86_VENDOR_INTEL, 6, 0x37},/* VLV */
{ X86_VENDOR_INTEL, 6, 0x3a},/* IVB */
{ X86_VENDOR_INTEL, 6, 0x45},/* HSW */
/* TODO: Add more CPU IDs after testing */
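The Valleyview quirk above changes how the ENERGY_UNIT field becomes a divisor for raw energy counter values. A small stand-alone C illustration of the arithmetic, with value = 5 chosen purely as an example field value:

#include <stdio.h>

int main(void)
{
        unsigned int value = 5;                               /* example ENERGY_UNIT field */
        unsigned long std_div = 1UL << value;                 /* 32: unit = 1/32 J */
        unsigned long vlv_div = 1000000UL / (1UL << value);   /* 31250: unit = 32 uJ */

        printf("standard divisor %lu -> unit %.6f J\n", std_div, 1.0 / std_div);
        printf("VLV quirk divisor %lu -> unit %.6f J\n", vlv_div, 1.0 / vlv_div);
        return 0;
}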
diff --git a/drivers/regulator/as3722-regulator.c b/drivers/regulator/as3722-regulator.c
index 5917fe3..b9f1d24 100644
--- a/drivers/regulator/as3722-regulator.c
+++ b/drivers/regulator/as3722-regulator.c
@@ -590,8 +590,8 @@ static int as3722_sd016_set_current_limit(struct regulator_dev *rdev,
default:
return -EINVAL;
}
+ ret <<= ffs(mask) - 1;
val = ret & mask;
- val <<= ffs(mask) - 1;
return as3722_update_bits(as3722, reg, mask, val);
}
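The as3722 fix above shifts the selected field value into position before masking; masking the unshifted value first zeroes out small settings. A self-contained C demonstration with a hypothetical two-bit field in bits 7:6:

#include <stdio.h>
#include <strings.h>    /* ffs() */

int main(void)
{
        unsigned int mask = 0xC0;               /* hypothetical field mask, bits 7:6 */
        unsigned int ret = 2;                   /* value picked by the switch statement */
        unsigned int shift = ffs(mask) - 1;     /* 6 */

        unsigned int buggy = (ret & mask) << shift;     /* 0x00: masked before shifting */
        unsigned int fixed = (ret << shift) & mask;     /* 0x80: shifted, then masked */

        printf("buggy val = 0x%02x, fixed val = 0x%02x\n", buggy, fixed);
        return 0;
}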
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 3fe1313..d85f313 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -119,6 +119,11 @@ static const char *rdev_get_name(struct regulator_dev *rdev)
return "";
}
+static bool have_full_constraints(void)
+{
+ return has_full_constraints || of_have_populated_dt();
+}
+
/**
* of_get_regulator - get a regulator device node based on supply name
* @dev: Device pointer for the consumer (of regulator) device
@@ -1340,7 +1345,7 @@ static struct regulator *_regulator_get(struct device *dev, const char *id,
* Assume that a regulator is physically present and enabled
* even if it isn't hooked up and just provide a dummy.
*/
- if (has_full_constraints && allow_dummy) {
+ if (have_full_constraints() && allow_dummy) {
pr_warn("%s supply %s not found, using dummy regulator\n",
devname, id);
@@ -3627,7 +3632,7 @@ int regulator_suspend_finish(void)
if (error)
ret = error;
} else {
- if (!has_full_constraints)
+ if (!have_full_constraints())
goto unlock;
if (!ops->disable)
goto unlock;
@@ -3825,7 +3830,7 @@ static int __init regulator_init_complete(void)
if (!enabled)
goto unlock;
- if (has_full_constraints) {
+ if (have_full_constraints()) {
/* We log since this may kill the system if it
* goes wrong. */
rdev_info(rdev, "disabling\n");
diff --git a/drivers/regulator/pfuze100-regulator.c b/drivers/regulator/pfuze100-regulator.c
index 032df37..8b5e4c7 100644
--- a/drivers/regulator/pfuze100-regulator.c
+++ b/drivers/regulator/pfuze100-regulator.c
@@ -38,7 +38,7 @@
#define PFUZE100_DEVICEID 0x0
#define PFUZE100_REVID 0x3
-#define PFUZE100_FABID 0x3
+#define PFUZE100_FABID 0x4
#define PFUZE100_SW1ABVOL 0x20
#define PFUZE100_SW1CVOL 0x2e
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 333677d..9e61922 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -438,7 +438,7 @@ common_reg:
platform_set_drvdata(pdev, s2mps11);
config.dev = &pdev->dev;
- config.regmap = iodev->regmap;
+ config.regmap = iodev->regmap_pmic;
config.driver_data = s2mps11;
for (i = 0; i < S2MPS11_REGULATOR_MAX; i++) {
if (!reg_np) {
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index cbf91e2..aeb40aa 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -925,7 +925,7 @@ static int s5m8767_pmic_probe(struct platform_device *pdev)
config.dev = s5m8767->dev;
config.init_data = pdata->regulators[i].initdata;
config.driver_data = s5m8767;
- config.regmap = iodev->regmap;
+ config.regmap = iodev->regmap_pmic;
config.of_node = pdata->regulators[i].reg_node;
rdev[i] = devm_regulator_register(&pdev->dev, &regulators[id],
diff --git a/drivers/rtc/rtc-at91rm9200.c b/drivers/rtc/rtc-at91rm9200.c
index c0da95e..3281c90 100644
--- a/drivers/rtc/rtc-at91rm9200.c
+++ b/drivers/rtc/rtc-at91rm9200.c
@@ -220,6 +220,8 @@ static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
at91_alarm_year = tm.tm_year;
+ tm.tm_mon = alrm->time.tm_mon;
+ tm.tm_mday = alrm->time.tm_mday;
tm.tm_hour = alrm->time.tm_hour;
tm.tm_min = alrm->time.tm_min;
tm.tm_sec = alrm->time.tm_sec;
diff --git a/drivers/rtc/rtc-s5m.c b/drivers/rtc/rtc-s5m.c
index b7fd02b..ae8119d 100644
--- a/drivers/rtc/rtc-s5m.c
+++ b/drivers/rtc/rtc-s5m.c
@@ -28,10 +28,20 @@
#include <linux/mfd/samsung/irq.h>
#include <linux/mfd/samsung/rtc.h>
+/*
+ * Maximum number of retries for checking changes in the UDR field of the
+ * SEC_RTC_UDR_CON register (to limit a possible endless loop).
+ *
+ * After writing to the RTC registers (setting the time or an alarm), read the
+ * UDR field in the SEC_RTC_UDR_CON register. UDR is auto-cleared when the
+ * data have been transferred.
+ */
+#define UDR_READ_RETRY_CNT 5
+
struct s5m_rtc_info {
struct device *dev;
struct sec_pmic_dev *s5m87xx;
- struct regmap *rtc;
+ struct regmap *regmap;
struct rtc_device *rtc_dev;
int irq;
int device_type;
@@ -84,12 +94,31 @@ static int s5m8767_tm_to_data(struct rtc_time *tm, u8 *data)
}
}
+/*
+ * Read RTC_UDR_CON register and wait till UDR field is cleared.
+ * This indicates that time/alarm update ended.
+ */
+static inline int s5m8767_wait_for_udr_update(struct s5m_rtc_info *info)
+{
+ int ret, retry = UDR_READ_RETRY_CNT;
+ unsigned int data;
+
+ do {
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
+ } while (--retry && (data & RTC_UDR_MASK) && !ret);
+
+ if (!retry)
+ dev_err(info->dev, "waiting for UDR update, reached max number of retries\n");
+
+ return ret;
+}
+
static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
{
int ret;
unsigned int data;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
if (ret < 0) {
dev_err(info->dev, "failed to read update reg(%d)\n", ret);
return ret;
@@ -98,15 +127,13 @@ static inline int s5m8767_rtc_set_time_reg(struct s5m_rtc_info *info)
data |= RTC_TIME_EN_MASK;
data |= RTC_UDR_MASK;
- ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
if (ret < 0) {
dev_err(info->dev, "failed to write update reg(%d)\n", ret);
return ret;
}
- do {
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
- } while ((data & RTC_UDR_MASK) && !ret);
+ ret = s5m8767_wait_for_udr_update(info);
return ret;
}
@@ -116,7 +143,7 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
int ret;
unsigned int data;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read update reg(%d)\n",
__func__, ret);
@@ -126,16 +153,14 @@ static inline int s5m8767_rtc_set_alarm_reg(struct s5m_rtc_info *info)
data &= ~RTC_TIME_EN_MASK;
data |= RTC_UDR_MASK;
- ret = regmap_write(info->rtc, SEC_RTC_UDR_CON, data);
+ ret = regmap_write(info->regmap, SEC_RTC_UDR_CON, data);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write update reg(%d)\n",
__func__, ret);
return ret;
}
- do {
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &data);
- } while ((data & RTC_UDR_MASK) && !ret);
+ ret = s5m8767_wait_for_udr_update(info);
return ret;
}
@@ -178,7 +203,7 @@ static int s5m_rtc_read_time(struct device *dev, struct rtc_time *tm)
u8 data[8];
int ret;
- ret = regmap_bulk_read(info->rtc, SEC_RTC_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_RTC_SEC, data, 8);
if (ret < 0)
return ret;
@@ -226,7 +251,7 @@ static int s5m_rtc_set_time(struct device *dev, struct rtc_time *tm)
1900 + tm->tm_year, 1 + tm->tm_mon, tm->tm_mday,
tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday);
- ret = regmap_raw_write(info->rtc, SEC_RTC_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_RTC_SEC, data, 8);
if (ret < 0)
return ret;
@@ -242,20 +267,20 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
unsigned int val;
int ret, i;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
switch (info->device_type) {
case S5M8763X:
s5m8763_data_to_tm(data, &alrm->time);
- ret = regmap_read(info->rtc, SEC_ALARM0_CONF, &val);
+ ret = regmap_read(info->regmap, SEC_ALARM0_CONF, &val);
if (ret < 0)
return ret;
alrm->enabled = !!val;
- ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
if (ret < 0)
return ret;
@@ -278,7 +303,7 @@ static int s5m_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
}
alrm->pending = 0;
- ret = regmap_read(info->rtc, SEC_RTC_STATUS, &val);
+ ret = regmap_read(info->regmap, SEC_RTC_STATUS, &val);
if (ret < 0)
return ret;
break;
@@ -301,7 +326,7 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
int ret, i;
struct rtc_time tm;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -312,14 +337,14 @@ static int s5m_rtc_stop_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
- ret = regmap_write(info->rtc, SEC_ALARM0_CONF, 0);
+ ret = regmap_write(info->regmap, SEC_ALARM0_CONF, 0);
break;
case S5M8767X:
for (i = 0; i < 7; i++)
data[i] &= ~ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -341,7 +366,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
u8 alarm0_conf;
struct rtc_time tm;
- ret = regmap_bulk_read(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_bulk_read(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -353,7 +378,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
switch (info->device_type) {
case S5M8763X:
alarm0_conf = 0x77;
- ret = regmap_write(info->rtc, SEC_ALARM0_CONF, alarm0_conf);
+ ret = regmap_write(info->regmap, SEC_ALARM0_CONF, alarm0_conf);
break;
case S5M8767X:
@@ -368,7 +393,7 @@ static int s5m_rtc_start_alarm(struct s5m_rtc_info *info)
if (data[RTC_YEAR1] & 0x7f)
data[RTC_YEAR1] |= ALARM_ENABLE_MASK;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
ret = s5m8767_rtc_set_alarm_reg(info);
@@ -410,7 +435,7 @@ static int s5m_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
if (ret < 0)
return ret;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_SEC, data, 8);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_SEC, data, 8);
if (ret < 0)
return ret;
@@ -455,7 +480,7 @@ static const struct rtc_class_ops s5m_rtc_ops = {
static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
WTSR_ENABLE_MASK,
enable ? WTSR_ENABLE_MASK : 0);
if (ret < 0)
@@ -466,7 +491,7 @@ static void s5m_rtc_enable_wtsr(struct s5m_rtc_info *info, bool enable)
static void s5m_rtc_enable_smpl(struct s5m_rtc_info *info, bool enable)
{
int ret;
- ret = regmap_update_bits(info->rtc, SEC_WTSR_SMPL_CNTL,
+ ret = regmap_update_bits(info->regmap, SEC_WTSR_SMPL_CNTL,
SMPL_ENABLE_MASK,
enable ? SMPL_ENABLE_MASK : 0);
if (ret < 0)
@@ -481,7 +506,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
int ret;
struct rtc_time tm;
- ret = regmap_read(info->rtc, SEC_RTC_UDR_CON, &tp_read);
+ ret = regmap_read(info->regmap, SEC_RTC_UDR_CON, &tp_read);
if (ret < 0) {
dev_err(info->dev, "%s: fail to read control reg(%d)\n",
__func__, ret);
@@ -493,7 +518,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
data[1] = (0 << BCD_EN_SHIFT) | (1 << MODEL24_SHIFT);
info->rtc_24hr_mode = 1;
- ret = regmap_raw_write(info->rtc, SEC_ALARM0_CONF, data, 2);
+ ret = regmap_raw_write(info->regmap, SEC_ALARM0_CONF, data, 2);
if (ret < 0) {
dev_err(info->dev, "%s: fail to write controlm reg(%d)\n",
__func__, ret);
@@ -515,7 +540,7 @@ static int s5m8767_rtc_init_reg(struct s5m_rtc_info *info)
ret = s5m_rtc_set_time(info->dev, &tm);
}
- ret = regmap_update_bits(info->rtc, SEC_RTC_UDR_CON,
+ ret = regmap_update_bits(info->regmap, SEC_RTC_UDR_CON,
RTC_TCON_MASK, tp_read | RTC_TCON_MASK);
if (ret < 0)
dev_err(info->dev, "%s: fail to update TCON reg(%d)\n",
@@ -542,17 +567,19 @@ static int s5m_rtc_probe(struct platform_device *pdev)
info->dev = &pdev->dev;
info->s5m87xx = s5m87xx;
- info->rtc = s5m87xx->rtc;
+ info->regmap = s5m87xx->regmap_rtc;
info->device_type = s5m87xx->device_type;
info->wtsr_smpl = s5m87xx->wtsr_smpl;
switch (pdata->device_type) {
case S5M8763X:
- info->irq = s5m87xx->irq_base + S5M8763_IRQ_ALARM0;
+ info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+ S5M8763_IRQ_ALARM0);
break;
case S5M8767X:
- info->irq = s5m87xx->irq_base + S5M8767_IRQ_RTCA1;
+ info->irq = regmap_irq_get_virq(s5m87xx->irq_data,
+ S5M8767_IRQ_RTCA1);
break;
default:
@@ -596,7 +623,7 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
if (info->wtsr_smpl) {
for (i = 0; i < 3; i++) {
s5m_rtc_enable_wtsr(info, false);
- regmap_read(info->rtc, SEC_WTSR_SMPL_CNTL, &val);
+ regmap_read(info->regmap, SEC_WTSR_SMPL_CNTL, &val);
pr_debug("%s: WTSR_SMPL reg(0x%02x)\n", __func__, val);
if (val & WTSR_ENABLE_MASK)
pr_emerg("%s: fail to disable WTSR\n",
@@ -612,6 +639,30 @@ static void s5m_rtc_shutdown(struct platform_device *pdev)
s5m_rtc_enable_smpl(info, false);
}
+static int s5m_rtc_resume(struct device *dev)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = disable_irq_wake(info->irq);
+
+ return ret;
+}
+
+static int s5m_rtc_suspend(struct device *dev)
+{
+ struct s5m_rtc_info *info = dev_get_drvdata(dev);
+ int ret = 0;
+
+ if (device_may_wakeup(dev))
+ ret = enable_irq_wake(info->irq);
+
+ return ret;
+}
+
+static SIMPLE_DEV_PM_OPS(s5m_rtc_pm_ops, s5m_rtc_suspend, s5m_rtc_resume);
+
static const struct platform_device_id s5m_rtc_id[] = {
{ "s5m-rtc", 0 },
};
@@ -620,6 +671,7 @@ static struct platform_driver s5m_rtc_driver = {
.driver = {
.name = "s5m-rtc",
.owner = THIS_MODULE,
+ .pm = &s5m_rtc_pm_ops,
},
.probe = s5m_rtc_probe,
.shutdown = s5m_rtc_shutdown,
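s5m8767_wait_for_udr_update() above bounds the previously unbounded UDR polling loop with UDR_READ_RETRY_CNT. A self-contained C sketch of the same shape (fake_read_udr() stands in for regmap_read() and bit 0 for RTC_UDR_MASK, both purely illustrative):

#include <stdio.h>

#define UDR_READ_RETRY_CNT 5

/* Pretend the busy bit clears on the third read. */
static int fake_read_udr(unsigned int *data)
{
        static int reads;

        *data = (++reads < 3) ? 0x1 : 0x0;
        return 0;
}

int main(void)
{
        int ret, retry = UDR_READ_RETRY_CNT;
        unsigned int data;

        do {
                ret = fake_read_udr(&data);
        } while (--retry && (data & 0x1) && !ret);

        if (!retry)
                printf("gave up after %d reads\n", UDR_READ_RETRY_CNT);
        else
                printf("busy bit cleared, %d retries left\n", retry);
        return ret;
}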
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f649217..f224d59 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -87,7 +87,6 @@ void dasd_gendisk_free(struct dasd_block *block)
{
if (block->gdp) {
del_gendisk(block->gdp);
- block->gdp->queue = NULL;
block->gdp->private_data = NULL;
put_disk(block->gdp);
block->gdp = NULL;
diff --git a/drivers/s390/char/sclp_early.c b/drivers/s390/char/sclp_early.c
index f7aa080..1465e95 100644
--- a/drivers/s390/char/sclp_early.c
+++ b/drivers/s390/char/sclp_early.c
@@ -35,7 +35,6 @@ struct read_info_sccb {
u8 _reserved5[4096 - 112]; /* 112-4095 */
} __packed __aligned(PAGE_SIZE);
-static __initdata struct init_sccb early_event_mask_sccb __aligned(PAGE_SIZE);
static __initdata struct read_info_sccb early_read_info_sccb;
static __initdata char sccb_early[PAGE_SIZE] __aligned(PAGE_SIZE);
static unsigned long sclp_hsa_size;
@@ -113,7 +112,7 @@ static void __init sclp_facilities_detect(void)
bool __init sclp_has_linemode(void)
{
- struct init_sccb *sccb = &early_event_mask_sccb;
+ struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
@@ -126,7 +125,7 @@ bool __init sclp_has_linemode(void)
bool __init sclp_has_vt220(void)
{
- struct init_sccb *sccb = &early_event_mask_sccb;
+ struct init_sccb *sccb = (void *) &sccb_early;
if (sccb->header.response_code != 0x20)
return 0;
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c
index 3f4ca4e..34629ea 100644
--- a/drivers/s390/char/tty3270.c
+++ b/drivers/s390/char/tty3270.c
@@ -942,7 +942,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty)
return rc;
}
- tp->screen = tty3270_alloc_screen(tp->view.cols, tp->view.rows);
+ tp->screen = tty3270_alloc_screen(tp->view.rows, tp->view.cols);
if (IS_ERR(tp->screen)) {
rc = PTR_ERR(tp->screen);
raw3270_put_view(&tp->view);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 5964800..38a1257 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -471,7 +471,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
schedule_delayed_work(&tgt->sess_del_work, 0);
else
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - jiffies);
}
/* ha->hardware_lock supposed to be held on entry */
@@ -550,13 +550,14 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
struct scsi_qla_host *vha = tgt->vha;
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess;
- unsigned long flags;
+ unsigned long flags, elapsed;
spin_lock_irqsave(&ha->hardware_lock, flags);
while (!list_empty(&tgt->del_sess_list)) {
sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
del_list_entry);
- if (time_after_eq(jiffies, sess->expires)) {
+ elapsed = jiffies;
+ if (time_after_eq(elapsed, sess->expires)) {
qlt_undelete_sess(sess);
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
@@ -566,7 +567,7 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
ha->tgt.tgt_ops->put_sess(sess);
} else {
schedule_delayed_work(&tgt->sess_del_work,
- jiffies - sess->expires);
+ sess->expires - elapsed);
break;
}
}
@@ -4290,6 +4291,7 @@ int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
if (rc != 0) {
ha->tgt.tgt_ops = NULL;
ha->tgt.target_lport_ptr = NULL;
+ scsi_host_put(host);
}
mutex_unlock(&qla_tgt_mutex);
return rc;
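The qla_target change above passes the remaining time (sess->expires - jiffies) to schedule_delayed_work(); the reversed subtraction underflows to a huge unsigned delay whenever the expiry is still in the future. A small C illustration of that unsigned arithmetic:

#include <stdio.h>

int main(void)
{
        unsigned long jiffies = 1000000UL;      /* pretend current tick count */
        unsigned long expires = jiffies + 100;  /* session expires 100 ticks from now */

        unsigned long buggy = jiffies - expires;        /* underflows to ~ULONG_MAX */
        unsigned long fixed = expires - jiffies;        /* 100, the intended delay */

        printf("buggy delay = %lu\nfixed delay = %lu\n", buggy, fixed);
        return 0;
}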
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 8f02bf6..4964d2a 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -446,7 +446,7 @@ int comedi_load_firmware(struct comedi_device *dev,
release_firmware(fw);
}
- return ret;
+ return ret < 0 ? ret : 0;
}
EXPORT_SYMBOL_GPL(comedi_load_firmware);
diff --git a/drivers/staging/comedi/drivers/8255_pci.c b/drivers/staging/comedi/drivers/8255_pci.c
index 432e3f9..c55f234 100644
--- a/drivers/staging/comedi/drivers/8255_pci.c
+++ b/drivers/staging/comedi/drivers/8255_pci.c
@@ -63,7 +63,8 @@ enum pci_8255_boardid {
BOARD_ADLINK_PCI7296,
BOARD_CB_PCIDIO24,
BOARD_CB_PCIDIO24H,
- BOARD_CB_PCIDIO48H,
+ BOARD_CB_PCIDIO48H_OLD,
+ BOARD_CB_PCIDIO48H_NEW,
BOARD_CB_PCIDIO96H,
BOARD_NI_PCIDIO96,
BOARD_NI_PCIDIO96B,
@@ -106,11 +107,16 @@ static const struct pci_8255_boardinfo pci_8255_boards[] = {
.dio_badr = 2,
.n_8255 = 1,
},
- [BOARD_CB_PCIDIO48H] = {
+ [BOARD_CB_PCIDIO48H_OLD] = {
.name = "cb_pci-dio48h",
.dio_badr = 1,
.n_8255 = 2,
},
+ [BOARD_CB_PCIDIO48H_NEW] = {
+ .name = "cb_pci-dio48h",
+ .dio_badr = 2,
+ .n_8255 = 2,
+ },
[BOARD_CB_PCIDIO96H] = {
.name = "cb_pci-dio96h",
.dio_badr = 2,
@@ -263,7 +269,10 @@ static DEFINE_PCI_DEVICE_TABLE(pci_8255_pci_table) = {
{ PCI_VDEVICE(ADLINK, 0x7296), BOARD_ADLINK_PCI7296 },
{ PCI_VDEVICE(CB, 0x0028), BOARD_CB_PCIDIO24 },
{ PCI_VDEVICE(CB, 0x0014), BOARD_CB_PCIDIO24H },
- { PCI_VDEVICE(CB, 0x000b), BOARD_CB_PCIDIO48H },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, 0x0000, 0x0000),
+ .driver_data = BOARD_CB_PCIDIO48H_OLD },
+ { PCI_DEVICE_SUB(PCI_VENDOR_ID_CB, 0x000b, PCI_VENDOR_ID_CB, 0x000b),
+ .driver_data = BOARD_CB_PCIDIO48H_NEW },
{ PCI_VDEVICE(CB, 0x0017), BOARD_CB_PCIDIO96H },
{ PCI_VDEVICE(NI, 0x0160), BOARD_NI_PCIDIO96 },
{ PCI_VDEVICE(NI, 0x1630), BOARD_NI_PCIDIO96B },
diff --git a/drivers/staging/iio/magnetometer/hmc5843.c b/drivers/staging/iio/magnetometer/hmc5843.c
index 99421f9..0485d7f 100644
--- a/drivers/staging/iio/magnetometer/hmc5843.c
+++ b/drivers/staging/iio/magnetometer/hmc5843.c
@@ -451,7 +451,12 @@ done:
.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
BIT(IIO_CHAN_INFO_SAMP_FREQ), \
.scan_index = idx, \
- .scan_type = IIO_ST('s', 16, 16, IIO_BE), \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_BE, \
+ }, \
}
static const struct iio_chan_spec hmc5843_channels[] = {
diff --git a/drivers/staging/imx-drm/imx-drm-core.c b/drivers/staging/imx-drm/imx-drm-core.c
index 6bd015a..96e4eee 100644
--- a/drivers/staging/imx-drm/imx-drm-core.c
+++ b/drivers/staging/imx-drm/imx-drm-core.c
@@ -88,8 +88,9 @@ static int imx_drm_driver_unload(struct drm_device *drm)
imx_drm_device_put();
- drm_mode_config_cleanup(imxdrm->drm);
+ drm_vblank_cleanup(imxdrm->drm);
drm_kms_helper_poll_fini(imxdrm->drm);
+ drm_mode_config_cleanup(imxdrm->drm);
return 0;
}
@@ -199,8 +200,8 @@ static void imx_drm_driver_preclose(struct drm_device *drm,
if (!file->is_master)
return;
- for (i = 0; i < 4; i++)
- imx_drm_disable_vblank(drm , i);
+ for (i = 0; i < MAX_CRTC; i++)
+ imx_drm_disable_vblank(drm, i);
}
static const struct file_operations imx_drm_driver_fops = {
@@ -376,8 +377,6 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
struct imx_drm_device *imxdrm = __imx_drm_device();
int ret;
- drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
- imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
ret = drm_mode_crtc_set_gamma_size(imx_drm_crtc->crtc, 256);
if (ret)
return ret;
@@ -385,6 +384,9 @@ static int imx_drm_crtc_register(struct imx_drm_crtc *imx_drm_crtc)
drm_crtc_helper_add(imx_drm_crtc->crtc,
imx_drm_crtc->imx_drm_helper_funcs.crtc_helper_funcs);
+ drm_crtc_init(imxdrm->drm, imx_drm_crtc->crtc,
+ imx_drm_crtc->imx_drm_helper_funcs.crtc_funcs);
+
drm_mode_group_reinit(imxdrm->drm);
return 0;
@@ -428,11 +430,11 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
ret = drm_mode_group_init_legacy_group(imxdrm->drm,
&imxdrm->drm->primary->mode_group);
if (ret)
- goto err_init;
+ goto err_kms;
ret = drm_vblank_init(imxdrm->drm, MAX_CRTC);
if (ret)
- goto err_init;
+ goto err_kms;
/*
* with vblank_disable_allowed = true, vblank interrupt will be disabled
@@ -441,12 +443,19 @@ static int imx_drm_driver_load(struct drm_device *drm, unsigned long flags)
*/
imxdrm->drm->vblank_disable_allowed = true;
- if (!imx_drm_device_get())
+ if (!imx_drm_device_get()) {
ret = -EINVAL;
+ goto err_vblank;
+ }
- ret = 0;
+ mutex_unlock(&imxdrm->mutex);
+ return 0;
-err_init:
+err_vblank:
+ drm_vblank_cleanup(drm);
+err_kms:
+ drm_kms_helper_poll_fini(drm);
+ drm_mode_config_cleanup(drm);
mutex_unlock(&imxdrm->mutex);
return ret;
@@ -492,6 +501,15 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
mutex_lock(&imxdrm->mutex);
+ /*
+ * The vblank arrays are dimensioned by MAX_CRTC - we can't
+ * pass IDs greater than this to those functions.
+ */
+ if (imxdrm->pipes >= MAX_CRTC) {
+ ret = -EINVAL;
+ goto err_busy;
+ }
+
if (imxdrm->drm->open_count) {
ret = -EBUSY;
goto err_busy;
@@ -528,6 +546,7 @@ int imx_drm_add_crtc(struct drm_crtc *crtc,
return 0;
err_register:
+ list_del(&imx_drm_crtc->list);
kfree(imx_drm_crtc);
err_alloc:
err_busy:
diff --git a/drivers/staging/imx-drm/imx-tve.c b/drivers/staging/imx-drm/imx-tve.c
index 680f4c8..2c44fef 100644
--- a/drivers/staging/imx-drm/imx-tve.c
+++ b/drivers/staging/imx-drm/imx-tve.c
@@ -114,7 +114,6 @@ struct imx_tve {
struct drm_encoder encoder;
struct imx_drm_encoder *imx_drm_encoder;
struct device *dev;
- spinlock_t enable_lock; /* serializes tve_enable/disable */
spinlock_t lock; /* register lock */
bool enabled;
int mode;
@@ -146,10 +145,8 @@ __releases(&tve->lock)
static void tve_enable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (!tve->enabled) {
tve->enabled = true;
clk_prepare_enable(tve->clk);
@@ -169,23 +166,18 @@ static void tve_enable(struct imx_tve *tve)
TVE_CD_SM_IEN |
TVE_CD_LM_IEN |
TVE_CD_MON_END_IEN);
-
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static void tve_disable(struct imx_tve *tve)
{
- unsigned long flags;
int ret;
- spin_lock_irqsave(&tve->enable_lock, flags);
if (tve->enabled) {
tve->enabled = false;
ret = regmap_update_bits(tve->regmap, TVE_COM_CONF_REG,
TVE_IPU_CLK_EN | TVE_EN, 0);
clk_disable_unprepare(tve->clk);
}
- spin_unlock_irqrestore(&tve->enable_lock, flags);
}
static int tve_setup_tvout(struct imx_tve *tve)
@@ -601,7 +593,6 @@ static int imx_tve_probe(struct platform_device *pdev)
tve->dev = &pdev->dev;
spin_lock_init(&tve->lock);
- spin_lock_init(&tve->enable_lock);
ddc_node = of_parse_phandle(np, "ddc", 0);
if (ddc_node) {
diff --git a/drivers/staging/imx-drm/ipu-v3/ipu-common.c b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
index 7a22ce6..97ca692 100644
--- a/drivers/staging/imx-drm/ipu-v3/ipu-common.c
+++ b/drivers/staging/imx-drm/ipu-v3/ipu-common.c
@@ -996,35 +996,35 @@ static const struct ipu_platform_reg client_reg[] = {
},
};
+static DEFINE_MUTEX(ipu_client_id_mutex);
static int ipu_client_id;
-static int ipu_add_subdevice_pdata(struct device *dev,
- const struct ipu_platform_reg *reg)
-{
- struct platform_device *pdev;
-
- pdev = platform_device_register_data(dev, reg->name, ipu_client_id++,
- &reg->pdata, sizeof(struct ipu_platform_reg));
-
- return PTR_ERR_OR_ZERO(pdev);
-}
-
static int ipu_add_client_devices(struct ipu_soc *ipu)
{
- int ret;
- int i;
+ struct device *dev = ipu->dev;
+ unsigned i;
+ int id, ret;
+
+ mutex_lock(&ipu_client_id_mutex);
+ id = ipu_client_id;
+ ipu_client_id += ARRAY_SIZE(client_reg);
+ mutex_unlock(&ipu_client_id_mutex);
for (i = 0; i < ARRAY_SIZE(client_reg); i++) {
const struct ipu_platform_reg *reg = &client_reg[i];
- ret = ipu_add_subdevice_pdata(ipu->dev, reg);
- if (ret)
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_data(dev, reg->name,
+ id++, &reg->pdata, sizeof(reg->pdata));
+
+ if (IS_ERR(pdev))
goto err_register;
}
return 0;
err_register:
- platform_device_unregister_children(to_platform_device(ipu->dev));
+ platform_device_unregister_children(to_platform_device(dev));
return ret;
}
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index d70e911..0086719 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -465,6 +465,7 @@ int iscsit_del_np(struct iscsi_np *np)
*/
send_sig(SIGINT, np->np_thread, 1);
kthread_stop(np->np_thread);
+ np->np_thread = NULL;
}
np->np_transport->iscsit_free_np(np);
@@ -823,24 +824,22 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
(hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
/*
- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
- * that adds support for RESERVE/RELEASE. There is a bug
- * add with this new functionality that sets R/W bits when
- * neither CDB carries any READ or WRITE datapayloads.
+ * From RFC-3720 Section 10.3.1:
+ *
+ * "Either or both of R and W MAY be 1 when either the
+ * Expected Data Transfer Length and/or Bidirectional Read
+ * Expected Data Transfer Length are 0"
+ *
+		 * For this case, go ahead and clear the unnecessary bits
+ * to avoid any confusion with ->data_direction.
*/
- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
- hdr->flags &= ~ISCSI_FLAG_CMD_READ;
- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- goto done;
- }
+ hdr->flags &= ~ISCSI_FLAG_CMD_READ;
+ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
+ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
" set when Expected Data Transfer Length is 0 for"
- " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_BOOKMARK_INVALID, buf);
+ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
}
-done:
if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
!(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index e3318ed..1c0088f 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -474,7 +474,8 @@ static ssize_t __iscsi_##prefix##_store_##name( \
\
if (!capable(CAP_SYS_ADMIN)) \
return -EPERM; \
- \
+ if (count >= sizeof(auth->name)) \
+ return -EINVAL; \
snprintf(auth->name, sizeof(auth->name), "%s", page); \
if (!strncmp("NULL", auth->name, 4)) \
auth->naf_flags &= ~flags; \
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 4eb93b2..e29279e 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1403,11 +1403,6 @@ old_sess_out:
out:
stop = kthread_should_stop();
- if (!stop && signal_pending(current)) {
- spin_lock_bh(&np->np_thread_lock);
- stop = (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN);
- spin_unlock_bh(&np->np_thread_lock);
- }
/* Wait for another socket.. */
if (!stop)
return 1;
@@ -1415,7 +1410,6 @@ exit:
iscsi_stop_login_thread_timer(np);
spin_lock_bh(&np->np_thread_lock);
np->np_thread_state = ISCSI_NP_THREAD_EXIT;
- np->np_thread = NULL;
spin_unlock_bh(&np->np_thread_lock);
return 0;
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 207b340..d06de84 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -1106,6 +1106,11 @@ int se_dev_set_block_size(struct se_device *dev, u32 block_size)
dev->dev_attrib.block_size = block_size;
pr_debug("dev[%p]: SE Device block_size changed to %u\n",
dev, block_size);
+
+ if (dev->dev_attrib.max_bytes_per_io)
+ dev->dev_attrib.hw_max_sectors =
+ dev->dev_attrib.max_bytes_per_io / block_size;
+
return 0;
}
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 0e34cda..78241a5 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -66,9 +66,8 @@ static int fd_attach_hba(struct se_hba *hba, u32 host_id)
pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
TARGET_CORE_MOD_VERSION);
- pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
- " MaxSectors: %u\n",
- hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
+ pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
+ hba->hba_id, fd_host->fd_host_id);
return 0;
}
@@ -220,7 +219,8 @@ static int fd_configure_device(struct se_device *dev)
}
dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
- dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
+ dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
+ dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 37ffc5b..d7772c1 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -7,7 +7,10 @@
#define FD_DEVICE_QUEUE_DEPTH 32
#define FD_MAX_DEVICE_QUEUE_DEPTH 128
#define FD_BLOCKSIZE 512
-#define FD_MAX_SECTORS 2048
+/*
+ * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
+ */
+#define FD_MAX_BYTES 8388608
#define RRF_EMULATE_CDB 0x01
#define RRF_GOT_LBA 0x02
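The new FD_MAX_BYTES matches 2048 iovecs of one page each, assuming 4 KiB pages (an assumption, not stated in the patch), and hw_max_sectors is now derived from it per block size. The arithmetic as a small C program:

#include <stdio.h>

int main(void)
{
        unsigned long max_iov = 2048;           /* iovec limit named in the comment */
        unsigned long page_size = 4096;         /* assumed 4 KiB pages */
        unsigned long fd_max_bytes = max_iov * page_size;

        printf("FD_MAX_BYTES            = %lu\n", fd_max_bytes);        /* 8388608 */
        printf("hw_max_sectors @ 512 B  = %lu\n", fd_max_bytes / 512);  /* 16384 */
        printf("hw_max_sectors @ 4096 B = %lu\n", fd_max_bytes / 4096); /* 2048 */
        return 0;
}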
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index f697f8b..2a573de 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -278,7 +278,6 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
acl->dynamic_node_acl = 1;
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -406,7 +405,6 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(
snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
acl->se_tpg = tpg;
acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
- spin_lock_init(&acl->stats_lock);
tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@ -658,15 +656,9 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
spin_lock_init(&lun->lun_sep_lock);
init_completion(&lun->lun_ref_comp);
- ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
- if (ret < 0)
- return ret;
-
ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
- if (ret < 0) {
- percpu_ref_cancel_init(&lun->lun_ref);
+ if (ret < 0)
return ret;
- }
return 0;
}
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 268b627..34aacaa 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -93,6 +93,7 @@ struct n_tty_data {
size_t canon_head;
size_t echo_head;
size_t echo_commit;
+ size_t echo_mark;
DECLARE_BITMAP(char_map, 256);
/* private to n_tty_receive_overrun (single-threaded) */
@@ -336,6 +337,7 @@ static void reset_buffer_flags(struct n_tty_data *ldata)
{
ldata->read_head = ldata->canon_head = ldata->read_tail = 0;
ldata->echo_head = ldata->echo_tail = ldata->echo_commit = 0;
+ ldata->echo_mark = 0;
ldata->line_start = 0;
ldata->erasing = 0;
@@ -787,6 +789,7 @@ static void commit_echoes(struct tty_struct *tty)
size_t head;
head = ldata->echo_head;
+ ldata->echo_mark = head;
old = ldata->echo_commit - ldata->echo_tail;
/* Process committed echoes if the accumulated # of bytes
@@ -811,10 +814,11 @@ static void process_echoes(struct tty_struct *tty)
size_t echoed;
if ((!L_ECHO(tty) && !L_ECHONL(tty)) ||
- ldata->echo_commit == ldata->echo_tail)
+ ldata->echo_mark == ldata->echo_tail)
return;
mutex_lock(&ldata->output_lock);
+ ldata->echo_commit = ldata->echo_mark;
echoed = __process_echoes(tty);
mutex_unlock(&ldata->output_lock);
@@ -822,6 +826,7 @@ static void process_echoes(struct tty_struct *tty)
tty->ops->flush_chars(tty);
}
+/* NB: echo_mark and echo_head should be equivalent here */
static void flush_echoes(struct tty_struct *tty)
{
struct n_tty_data *ldata = tty->disc_data;
diff --git a/drivers/tty/serial/8250/8250_dw.c b/drivers/tty/serial/8250/8250_dw.c
index 4658e3e..06525f1 100644
--- a/drivers/tty/serial/8250/8250_dw.c
+++ b/drivers/tty/serial/8250/8250_dw.c
@@ -96,7 +96,8 @@ static void dw8250_serial_out(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writeb(value, p->membase + (UART_LCR << p->regshift));
@@ -132,7 +133,8 @@ static void dw8250_serial_out32(struct uart_port *p, int offset, int value)
if (offset == UART_LCR) {
int tries = 1000;
while (tries--) {
- if (value == p->serial_in(p, UART_LCR))
+ unsigned int lcr = p->serial_in(p, UART_LCR);
+ if ((value & ~UART_LCR_SPAR) == (lcr & ~UART_LCR_SPAR))
return;
dw8250_force_idle(p);
writel(value, p->membase + (UART_LCR << p->regshift));
@@ -455,6 +457,8 @@ MODULE_DEVICE_TABLE(of, dw8250_of_match);
static const struct acpi_device_id dw8250_acpi_match[] = {
{ "INT33C4", 0 },
{ "INT33C5", 0 },
+ { "INT3434", 0 },
+ { "INT3435", 0 },
{ "80860F0A", 0 },
{ },
};
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index e46e9f3..f619ad5 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -240,6 +240,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
continue;
}
+#ifdef SUPPORT_SYSRQ
/*
* uart_handle_sysrq_char() doesn't work if
* spinlocked, for some reason
@@ -253,6 +254,7 @@ static irqreturn_t xuartps_isr(int irq, void *dev_id)
}
spin_lock(&port->lock);
}
+#endif
port->icount.rx++;
diff --git a/drivers/tty/tty_ldsem.c b/drivers/tty/tty_ldsem.c
index 22fad8a..d8a55e8 100644
--- a/drivers/tty/tty_ldsem.c
+++ b/drivers/tty/tty_ldsem.c
@@ -86,11 +86,21 @@ static inline long ldsem_atomic_update(long delta, struct ld_semaphore *sem)
return atomic_long_add_return(delta, (atomic_long_t *)&sem->count);
}
+/*
+ * ldsem_cmpxchg() updates @*old with the last-known sem->count value.
+ * Returns 1 if the count was successfully changed; @*old will have the @new value.
+ * Returns 0 if the count was not changed; @*old will have the most recent
+ * sem->count value.
+ */
static inline int ldsem_cmpxchg(long *old, long new, struct ld_semaphore *sem)
{
- long tmp = *old;
- *old = atomic_long_cmpxchg(&sem->count, *old, new);
- return *old == tmp;
+ long tmp = atomic_long_cmpxchg(&sem->count, *old, new);
+ if (tmp == *old) {
+ *old = new;
+ return 1;
+ } else {
+ *old = tmp;
+ return 0;
+ }
}
/*
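The reworked ldsem_cmpxchg() above follows the usual compare-exchange contract: on failure it refreshes *old with the current count so the caller can retry without an extra read. C11's atomic_compare_exchange_strong() has the same semantics; a minimal stand-alone demo of the retry pattern:

#include <stdio.h>
#include <stdatomic.h>

int main(void)
{
        atomic_long count = 10;
        long old = atomic_load(&count);

        /* Keep trying to add 5 on top of whatever value is current;
         * on failure, old is refreshed with the latest count. */
        while (!atomic_compare_exchange_strong(&count, &old, old + 5))
                ;

        printf("count = %ld\n", atomic_load(&count));
        return 0;
}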
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 5d8981c..6e73f8c 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -642,6 +642,10 @@ static int ci_hdrc_probe(struct platform_device *pdev)
: CI_ROLE_GADGET;
}
+ /* only update vbus status for peripheral */
+ if (ci->role == CI_ROLE_GADGET)
+ ci_handle_vbus_change(ci);
+
ret = ci_role_start(ci, ci->role);
if (ret) {
dev_err(dev, "can't start %s role\n", ci_role(ci)->name);
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 59e6020..526cd77 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -88,7 +88,8 @@ static int host_start(struct ci_hdrc *ci)
return ret;
disable_reg:
- regulator_disable(ci->platdata->reg_vbus);
+ if (ci->platdata->reg_vbus)
+ regulator_disable(ci->platdata->reg_vbus);
put_hcd:
usb_put_hcd(hcd);
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index b34c819..69d20fb 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1795,9 +1795,6 @@ static int udc_start(struct ci_hdrc *ci)
pm_runtime_no_callbacks(&ci->gadget.dev);
pm_runtime_enable(&ci->gadget.dev);
- /* Update ci->vbus_active */
- ci_handle_vbus_change(ci);
-
return retval;
destroy_eps:
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 4d38759..0b23a86 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -854,13 +854,11 @@ static int wdm_manage_power(struct usb_interface *intf, int on)
{
/* need autopm_get/put here to ensure the usbcore sees the new value */
int rv = usb_autopm_get_interface(intf);
- if (rv < 0)
- goto err;
intf->needs_remote_wakeup = on;
- usb_autopm_put_interface(intf);
-err:
- return rv;
+ if (!rv)
+ usb_autopm_put_interface(intf);
+ return 0;
}
static int wdm_probe(struct usb_interface *intf, const struct usb_device_id *id)
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 74f9cf0..a49217a 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -455,9 +455,6 @@ static int dwc3_probe(struct platform_device *pdev)
if (IS_ERR(regs))
return PTR_ERR(regs);
- usb_phy_set_suspend(dwc->usb2_phy, 0);
- usb_phy_set_suspend(dwc->usb3_phy, 0);
-
spin_lock_init(&dwc->lock);
platform_set_drvdata(pdev, dwc);
@@ -488,6 +485,9 @@ static int dwc3_probe(struct platform_device *pdev)
goto err0;
}
+ usb_phy_set_suspend(dwc->usb2_phy, 0);
+ usb_phy_set_suspend(dwc->usb3_phy, 0);
+
ret = dwc3_event_buffers_setup(dwc);
if (ret) {
dev_err(dwc->dev, "failed to setup event buffers\n");
@@ -569,6 +569,8 @@ err2:
dwc3_event_buffers_cleanup(dwc);
err1:
+ usb_phy_set_suspend(dwc->usb2_phy, 1);
+ usb_phy_set_suspend(dwc->usb3_phy, 1);
dwc3_core_exit(dwc);
err0:
diff --git a/drivers/usb/host/ohci-at91.c b/drivers/usb/host/ohci-at91.c
index 418444e..8c356af 100644
--- a/drivers/usb/host/ohci-at91.c
+++ b/drivers/usb/host/ohci-at91.c
@@ -136,23 +136,27 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
struct ohci_hcd *ohci;
int retval;
struct usb_hcd *hcd = NULL;
-
- if (pdev->num_resources != 2) {
- pr_debug("hcd probe: invalid num_resources");
- return -ENODEV;
+ struct device *dev = &pdev->dev;
+ struct resource *res;
+ int irq;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_dbg(dev, "hcd probe: missing memory resource\n");
+ return -ENXIO;
}
- if ((pdev->resource[0].flags != IORESOURCE_MEM)
- || (pdev->resource[1].flags != IORESOURCE_IRQ)) {
- pr_debug("hcd probe: invalid resource type\n");
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_dbg(dev, "hcd probe: missing irq resource\n");
+ return irq;
}
hcd = usb_create_hcd(driver, &pdev->dev, "at91");
if (!hcd)
return -ENOMEM;
- hcd->rsrc_start = pdev->resource[0].start;
- hcd->rsrc_len = resource_size(&pdev->resource[0]);
+ hcd->rsrc_start = res->start;
+ hcd->rsrc_len = resource_size(res);
if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) {
pr_debug("request_mem_region failed\n");
@@ -199,7 +203,7 @@ static int usb_hcd_at91_probe(const struct hc_driver *driver,
ohci->num_ports = board->ports;
at91_start_hc(pdev);
- retval = usb_add_hcd(hcd, pdev->resource[1].start, IRQF_SHARED);
+ retval = usb_add_hcd(hcd, irq, IRQF_SHARED);
if (retval == 0)
return retval;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index b8dffd5..73f5208 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,7 +128,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
* any other sleep) on Haswell machines with LPT and LPT-LP
* with the new Intel BIOS
*/
- xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
+ /* Limit the quirk to only known vendors, as this triggers
+ * yet another BIOS bug on some other machines
+ * https://bugzilla.kernel.org/show_bug.cgi?id=66171
+ */
+ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
+ xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 08e2f39..2b41c63 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -19,8 +19,9 @@ config AB8500_USB
in host mode, low speed.
config FSL_USB2_OTG
- bool "Freescale USB OTG Transceiver Driver"
+ tristate "Freescale USB OTG Transceiver Driver"
depends on USB_EHCI_FSL && USB_FSL_USB2 && PM_RUNTIME
+ depends on USB
select USB_OTG
select USB_PHY
help
@@ -29,6 +30,7 @@ config FSL_USB2_OTG
config ISP1301_OMAP
tristate "Philips ISP1301 with OMAP OTG"
depends on I2C && ARCH_OMAP_OTG
+ depends on USB
select USB_PHY
help
If you say yes here you get support for the Philips ISP1301
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 82232ac..bbe4f8e 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -876,7 +876,7 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
- if (!tegra_phy->regs) {
+ if (!tegra_phy->pad_regs) {
dev_err(&pdev->dev, "Failed to remap UTMI Pad regs\n");
return -ENOMEM;
}
diff --git a/drivers/usb/phy/phy-twl6030-usb.c b/drivers/usb/phy/phy-twl6030-usb.c
index 30e8a61..bad57ce 100644
--- a/drivers/usb/phy/phy-twl6030-usb.c
+++ b/drivers/usb/phy/phy-twl6030-usb.c
@@ -127,7 +127,8 @@ static inline int twl6030_writeb(struct twl6030_usb *twl, u8 module,
static inline u8 twl6030_readb(struct twl6030_usb *twl, u8 module, u8 address)
{
- u8 data, ret = 0;
+ u8 data;
+ int ret;
ret = twl_i2c_read_u8(module, &data, address);
if (ret >= 0)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 496b7e39..cc7a241 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -251,6 +251,7 @@ static void option_instat_callback(struct urb *urb);
#define ZTE_PRODUCT_MF628 0x0015
#define ZTE_PRODUCT_MF626 0x0031
#define ZTE_PRODUCT_MC2718 0xffe8
+#define ZTE_PRODUCT_AC2726 0xfff1
#define BENQ_VENDOR_ID 0x04a5
#define BENQ_PRODUCT_H10 0x4068
@@ -1453,6 +1454,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x01) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x02, 0x05) },
{ USB_VENDOR_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0xff, 0x86, 0x10) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
diff --git a/drivers/usb/serial/zte_ev.c b/drivers/usb/serial/zte_ev.c
index fca4c75..eae2c87 100644
--- a/drivers/usb/serial/zte_ev.c
+++ b/drivers/usb/serial/zte_ev.c
@@ -281,8 +281,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x19d2, 0xfffd) },
{ USB_DEVICE(0x19d2, 0xfffc) },
{ USB_DEVICE(0x19d2, 0xfffb) },
- /* AC2726, AC8710_V3 */
- { USB_DEVICE_AND_INTERFACE_INFO(0x19d2, 0xfff1, 0xff, 0xff, 0xff) },
+ /* AC8710_V3 */
{ USB_DEVICE(0x19d2, 0xfff6) },
{ USB_DEVICE(0x19d2, 0xfff7) },
{ USB_DEVICE(0x19d2, 0xfff8) },
diff --git a/drivers/video/offb.c b/drivers/video/offb.c
index 9dbea22..7d44d66 100644
--- a/drivers/video/offb.c
+++ b/drivers/video/offb.c
@@ -91,6 +91,15 @@ extern boot_infos_t *boot_infos;
#define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN 0x6cd4
#define AVIVO_DC_LUTB_WHITE_OFFSET_RED 0x6cd8
+#define FB_RIGHT_POS(p, bpp) (fb_be_math(p) ? 0 : (32 - (bpp)))
+
+static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
+{
+ u32 bpp = info->var.bits_per_pixel;
+
+ return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
+}
+
/*
* Set a single color register. The values supplied are already
* rounded down to the hardware's capabilities (according to the
@@ -120,7 +129,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
mask <<= info->var.transp.offset;
value |= mask;
}
- pal[regno] = value;
+ pal[regno] = offb_cmap_byteswap(info, value);
return 0;
}
@@ -301,7 +310,7 @@ static struct fb_ops offb_ops = {
static void __iomem *offb_map_reg(struct device_node *np, int index,
unsigned long offset, unsigned long size)
{
- const u32 *addrp;
+ const __be32 *addrp;
u64 asize, taddr;
unsigned int flags;
@@ -369,7 +378,11 @@ static void offb_init_palette_hacks(struct fb_info *info, struct device_node *dp
}
of_node_put(pciparent);
} else if (dp && of_device_is_compatible(dp, "qemu,std-vga")) {
- const u32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+#ifdef __BIG_ENDIAN
+ const __be32 io_of_addr[3] = { 0x01000000, 0x0, 0x0 };
+#else
+ const __be32 io_of_addr[3] = { 0x00000001, 0x0, 0x0 };
+#endif
u64 io_addr = of_translate_address(dp, io_of_addr);
if (io_addr != OF_BAD_ADDR) {
par->cmap_adr = ioremap(io_addr + 0x3c8, 2);
@@ -535,7 +548,7 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
unsigned int flags, rsize, addr_prop = 0;
unsigned long max_size = 0;
u64 rstart, address = OF_BAD_ADDR;
- const u32 *pp, *addrp, *up;
+ const __be32 *pp, *addrp, *up;
u64 asize;
int foreign_endian = 0;
@@ -551,25 +564,25 @@ static void __init offb_init_nodriver(struct device_node *dp, int no_real_node)
if (pp == NULL)
pp = of_get_property(dp, "depth", &len);
if (pp && len == sizeof(u32))
- depth = *pp;
+ depth = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-width", &len);
if (pp == NULL)
pp = of_get_property(dp, "width", &len);
if (pp && len == sizeof(u32))
- width = *pp;
+ width = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-height", &len);
if (pp == NULL)
pp = of_get_property(dp, "height", &len);
if (pp && len == sizeof(u32))
- height = *pp;
+ height = be32_to_cpup(pp);
pp = of_get_property(dp, "linux,bootx-linebytes", &len);
if (pp == NULL)
pp = of_get_property(dp, "linebytes", &len);
if (pp && len == sizeof(u32) && (*pp != 0xffffffffu))
- pitch = *pp;
+ pitch = be32_to_cpup(pp);
else
pitch = width * ((depth + 7) / 8);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index c444654..5c4a95b 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -285,7 +285,7 @@ static void update_balloon_size(struct virtio_balloon *vb)
{
__le32 actual = cpu_to_le32(vb->num_pages);
- virtio_cwrite(vb->vdev, struct virtio_balloon_config, num_pages,
+ virtio_cwrite(vb->vdev, struct virtio_balloon_config, actual,
&actual);
}
diff --git a/drivers/watchdog/bcm2835_wdt.c b/drivers/watchdog/bcm2835_wdt.c
index a6a2ceb..cafa973 100644
--- a/drivers/watchdog/bcm2835_wdt.c
+++ b/drivers/watchdog/bcm2835_wdt.c
@@ -19,7 +19,6 @@
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
-#include <linux/miscdevice.h>
#define PM_RSTC 0x1c
#define PM_WDOG 0x24
diff --git a/drivers/watchdog/ep93xx_wdt.c b/drivers/watchdog/ep93xx_wdt.c
index 833e813..d1d07f2 100644
--- a/drivers/watchdog/ep93xx_wdt.c
+++ b/drivers/watchdog/ep93xx_wdt.c
@@ -28,7 +28,6 @@
#include <linux/platform_device.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/timer.h>
#include <linux/io.h>
diff --git a/drivers/watchdog/ie6xx_wdt.c b/drivers/watchdog/ie6xx_wdt.c
index 70a2402..07f88f5 100644
--- a/drivers/watchdog/ie6xx_wdt.c
+++ b/drivers/watchdog/ie6xx_wdt.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/watchdog.h>
-#include <linux/miscdevice.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
diff --git a/drivers/watchdog/jz4740_wdt.c b/drivers/watchdog/jz4740_wdt.c
index 2de486a..3aa50cf 100644
--- a/drivers/watchdog/jz4740_wdt.c
+++ b/drivers/watchdog/jz4740_wdt.c
@@ -17,7 +17,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/kempld_wdt.c b/drivers/watchdog/kempld_wdt.c
index a1a3638..20dc738 100644
--- a/drivers/watchdog/kempld_wdt.c
+++ b/drivers/watchdog/kempld_wdt.c
@@ -26,7 +26,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/max63xx_wdt.c b/drivers/watchdog/max63xx_wdt.c
index 6d4f399..bdb3f4a 100644
--- a/drivers/watchdog/max63xx_wdt.c
+++ b/drivers/watchdog/max63xx_wdt.c
@@ -19,7 +19,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/bitops.h>
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 44edca6..f7722a4 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -16,7 +16,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/watchdog.h>
#include <linux/init.h>
diff --git a/drivers/watchdog/pnx4008_wdt.c b/drivers/watchdog/pnx4008_wdt.c
index 1bdcc31..5bec20f 100644
--- a/drivers/watchdog/pnx4008_wdt.c
+++ b/drivers/watchdog/pnx4008_wdt.c
@@ -23,7 +23,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/rt2880_wdt.c b/drivers/watchdog/rt2880_wdt.c
index 53d37fe..d92c2d5 100644
--- a/drivers/watchdog/rt2880_wdt.c
+++ b/drivers/watchdog/rt2880_wdt.c
@@ -16,7 +16,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/watchdog.h>
-#include <linux/miscdevice.h>
#include <linux/moduleparam.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/sc1200wdt.c b/drivers/watchdog/sc1200wdt.c
index 3b9fff9..131193a 100644
--- a/drivers/watchdog/sc1200wdt.c
+++ b/drivers/watchdog/sc1200wdt.c
@@ -409,8 +409,9 @@ static int __init sc1200wdt_init(void)
#if defined CONFIG_PNP
/* now that the user has specified an IO port and we haven't detected
* any devices, disable pnp support */
+ if (isapnp)
+ pnp_unregister_driver(&scl200wdt_pnp_driver);
isapnp = 0;
- pnp_unregister_driver(&scl200wdt_pnp_driver);
#endif
if (!request_region(io, io_len, SC1200_MODULE_NAME)) {
diff --git a/drivers/watchdog/shwdt.c b/drivers/watchdog/shwdt.c
index f9b8e06..af3528f 100644
--- a/drivers/watchdog/shwdt.c
+++ b/drivers/watchdog/shwdt.c
@@ -26,7 +26,6 @@
#include <linux/init.h>
#include <linux/types.h>
#include <linux/spinlock.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/pm_runtime.h>
#include <linux/fs.h>
diff --git a/drivers/watchdog/softdog.c b/drivers/watchdog/softdog.c
index ef2638f..c04a1aa 100644
--- a/drivers/watchdog/softdog.c
+++ b/drivers/watchdog/softdog.c
@@ -42,7 +42,6 @@
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/timer.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/notifier.h>
#include <linux/reboot.h>
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index d667f6b..bb64ae3 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -12,7 +12,6 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/platform_device.h>
#include <linux/stmp3xxx_rtc_wdt.h>
diff --git a/drivers/watchdog/txx9wdt.c b/drivers/watchdog/txx9wdt.c
index 0fd0e8a..6a447e3 100644
--- a/drivers/watchdog/txx9wdt.c
+++ b/drivers/watchdog/txx9wdt.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
-#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/init.h>
#include <linux/platform_device.h>
diff --git a/drivers/watchdog/ux500_wdt.c b/drivers/watchdog/ux500_wdt.c
index e029b57..5aed9d7 100644
--- a/drivers/watchdog/ux500_wdt.c
+++ b/drivers/watchdog/ux500_wdt.c
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/moduleparam.h>
-#include <linux/miscdevice.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 55ea73f7..4c02e2b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -350,17 +350,19 @@ static enum bp_state increase_reservation(unsigned long nr_pages)
pfn = page_to_pfn(page);
- set_phys_to_machine(pfn, frame_list[i]);
-
#ifdef CONFIG_XEN_HAVE_PVMMU
- /* Link back into the page tables if not highmem. */
- if (xen_pv_domain() && !PageHighMem(page)) {
- int ret;
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- mfn_pte(frame_list[i], PAGE_KERNEL),
- 0);
- BUG_ON(ret);
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ set_phys_to_machine(pfn, frame_list[i]);
+
+ /* Link back into the page tables if not highmem. */
+ if (!PageHighMem(page)) {
+ int ret;
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ mfn_pte(frame_list[i], PAGE_KERNEL),
+ 0);
+ BUG_ON(ret);
+ }
}
#endif
@@ -378,7 +380,6 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
enum bp_state state = BP_DONE;
unsigned long pfn, i;
struct page *page;
- struct page *scratch_page;
int ret;
struct xen_memory_reservation reservation = {
.address_bits = 0,
@@ -411,27 +412,29 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
scrub_page(page);
+#ifdef CONFIG_XEN_HAVE_PVMMU
/*
* Ballooned out frames are effectively replaced with
* a scratch frame. Ensure direct mappings and the
* p2m are consistent.
*/
- scratch_page = get_balloon_scratch_page();
-#ifdef CONFIG_XEN_HAVE_PVMMU
- if (xen_pv_domain() && !PageHighMem(page)) {
- ret = HYPERVISOR_update_va_mapping(
- (unsigned long)__va(pfn << PAGE_SHIFT),
- pfn_pte(page_to_pfn(scratch_page),
- PAGE_KERNEL_RO), 0);
- BUG_ON(ret);
- }
-#endif
if (!xen_feature(XENFEAT_auto_translated_physmap)) {
unsigned long p;
+ struct page *scratch_page = get_balloon_scratch_page();
+
+ if (!PageHighMem(page)) {
+ ret = HYPERVISOR_update_va_mapping(
+ (unsigned long)__va(pfn << PAGE_SHIFT),
+ pfn_pte(page_to_pfn(scratch_page),
+ PAGE_KERNEL_RO), 0);
+ BUG_ON(ret);
+ }
p = page_to_pfn(scratch_page);
__set_phys_to_machine(pfn, pfn_to_mfn(p));
+
+ put_balloon_scratch_page();
}
- put_balloon_scratch_page();
+#endif
balloon_append(pfn_to_page(pfn));
}
@@ -627,15 +630,17 @@ static int __init balloon_init(void)
if (!xen_domain())
return -ENODEV;
- for_each_online_cpu(cpu)
- {
- per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
- if (per_cpu(balloon_scratch_page, cpu) == NULL) {
- pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
- return -ENOMEM;
+ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
+ for_each_online_cpu(cpu)
+ {
+ per_cpu(balloon_scratch_page, cpu) = alloc_page(GFP_KERNEL);
+ if (per_cpu(balloon_scratch_page, cpu) == NULL) {
+ pr_warn("Failed to allocate balloon_scratch_page for cpu %d\n", cpu);
+ return -ENOMEM;
+ }
}
+ register_cpu_notifier(&balloon_cpu_notifier);
}
- register_cpu_notifier(&balloon_cpu_notifier);
pr_info("Initialising balloon driver\n");
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 02838719..aa846a4 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -1176,7 +1176,8 @@ static int gnttab_setup(void)
gnttab_shared.addr = xen_remap(xen_hvm_resume_frames,
PAGE_SIZE * max_nr_gframes);
if (gnttab_shared.addr == NULL) {
- pr_warn("Failed to ioremap gnttab share frames!\n");
+ pr_warn("Failed to ioremap gnttab share frames (addr=0x%08lx)!\n",
+ xen_hvm_resume_frames);
return -ENOMEM;
}
}
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8e74590..569a13b 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -533,12 +533,17 @@ static void privcmd_close(struct vm_area_struct *vma)
{
struct page **pages = vma->vm_private_data;
int numpgs = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+ int rc;
if (!xen_feature(XENFEAT_auto_translated_physmap) || !numpgs || !pages)
return;
- xen_unmap_domain_mfn_range(vma, numpgs, pages);
- free_xenballooned_pages(numpgs, pages);
+ rc = xen_unmap_domain_mfn_range(vma, numpgs, pages);
+ if (rc == 0)
+ free_xenballooned_pages(numpgs, pages);
+ else
+ pr_crit("unable to unmap MFN range: leaking %d pages. rc=%d\n",
+ numpgs, rc);
kfree(pages);
}
diff --git a/fs/aio.c b/fs/aio.c
index 6efb7f6..062a5f6 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -244,9 +244,14 @@ static void aio_free_ring(struct kioctx *ctx)
int i;
for (i = 0; i < ctx->nr_pages; i++) {
+ struct page *page;
pr_debug("pid(%d) [%d] page->count=%d\n", current->pid, i,
page_count(ctx->ring_pages[i]));
- put_page(ctx->ring_pages[i]);
+ page = ctx->ring_pages[i];
+ if (!page)
+ continue;
+ ctx->ring_pages[i] = NULL;
+ put_page(page);
}
put_aio_ring_file(ctx);
@@ -280,18 +285,38 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
unsigned long flags;
int rc;
+ rc = 0;
+
+ /* Make sure the old page hasn't already been changed */
+ spin_lock(&mapping->private_lock);
+ ctx = mapping->private_data;
+ if (ctx) {
+ pgoff_t idx;
+ spin_lock_irqsave(&ctx->completion_lock, flags);
+ idx = old->index;
+ if (idx < (pgoff_t)ctx->nr_pages) {
+ if (ctx->ring_pages[idx] != old)
+ rc = -EAGAIN;
+ } else
+ rc = -EINVAL;
+ spin_unlock_irqrestore(&ctx->completion_lock, flags);
+ } else
+ rc = -EINVAL;
+ spin_unlock(&mapping->private_lock);
+
+ if (rc != 0)
+ return rc;
+
/* Writeback must be complete */
BUG_ON(PageWriteback(old));
- put_page(old);
+ get_page(new);
- rc = migrate_page_move_mapping(mapping, new, old, NULL, mode);
+ rc = migrate_page_move_mapping(mapping, new, old, NULL, mode, 1);
if (rc != MIGRATEPAGE_SUCCESS) {
- get_page(old);
+ put_page(new);
return rc;
}
- get_page(new);
-
/* We can potentially race against kioctx teardown here. Use the
* address_space's private data lock to protect the mapping's
* private_data.
@@ -303,13 +328,24 @@ static int aio_migratepage(struct address_space *mapping, struct page *new,
spin_lock_irqsave(&ctx->completion_lock, flags);
migrate_page_copy(new, old);
idx = old->index;
- if (idx < (pgoff_t)ctx->nr_pages)
- ctx->ring_pages[idx] = new;
+ if (idx < (pgoff_t)ctx->nr_pages) {
+ /* And only do the move if things haven't changed */
+ if (ctx->ring_pages[idx] == old)
+ ctx->ring_pages[idx] = new;
+ else
+ rc = -EAGAIN;
+ } else
+ rc = -EINVAL;
spin_unlock_irqrestore(&ctx->completion_lock, flags);
} else
rc = -EBUSY;
spin_unlock(&mapping->private_lock);
+ if (rc == MIGRATEPAGE_SUCCESS)
+ put_page(old);
+ else
+ put_page(new);
+
return rc;
}
#endif
@@ -326,7 +362,7 @@ static int aio_setup_ring(struct kioctx *ctx)
struct aio_ring *ring;
unsigned nr_events = ctx->max_reqs;
struct mm_struct *mm = current->mm;
- unsigned long size, populate;
+ unsigned long size, unused;
int nr_pages;
int i;
struct file *file;
@@ -347,6 +383,20 @@ static int aio_setup_ring(struct kioctx *ctx)
return -EAGAIN;
}
+ ctx->aio_ring_file = file;
+ nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
+ / sizeof(struct io_event);
+
+ ctx->ring_pages = ctx->internal_pages;
+ if (nr_pages > AIO_RING_PAGES) {
+ ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
+ GFP_KERNEL);
+ if (!ctx->ring_pages) {
+ put_aio_ring_file(ctx);
+ return -ENOMEM;
+ }
+ }
+
for (i = 0; i < nr_pages; i++) {
struct page *page;
page = find_or_create_page(file->f_inode->i_mapping,
@@ -358,19 +408,14 @@ static int aio_setup_ring(struct kioctx *ctx)
SetPageUptodate(page);
SetPageDirty(page);
unlock_page(page);
+
+ ctx->ring_pages[i] = page;
}
- ctx->aio_ring_file = file;
- nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring))
- / sizeof(struct io_event);
+ ctx->nr_pages = i;
- ctx->ring_pages = ctx->internal_pages;
- if (nr_pages > AIO_RING_PAGES) {
- ctx->ring_pages = kcalloc(nr_pages, sizeof(struct page *),
- GFP_KERNEL);
- if (!ctx->ring_pages) {
- put_aio_ring_file(ctx);
- return -ENOMEM;
- }
+ if (unlikely(i != nr_pages)) {
+ aio_free_ring(ctx);
+ return -EAGAIN;
}
ctx->mmap_size = nr_pages * PAGE_SIZE;
@@ -379,9 +424,9 @@ static int aio_setup_ring(struct kioctx *ctx)
down_write(&mm->mmap_sem);
ctx->mmap_base = do_mmap_pgoff(ctx->aio_ring_file, 0, ctx->mmap_size,
PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_POPULATE, 0, &populate);
+ MAP_SHARED, 0, &unused);
+ up_write(&mm->mmap_sem);
if (IS_ERR((void *)ctx->mmap_base)) {
- up_write(&mm->mmap_sem);
ctx->mmap_size = 0;
aio_free_ring(ctx);
return -EAGAIN;
@@ -389,27 +434,6 @@ static int aio_setup_ring(struct kioctx *ctx)
pr_debug("mmap address: 0x%08lx\n", ctx->mmap_base);
- /* We must do this while still holding mmap_sem for write, as we
- * need to be protected against userspace attempting to mremap()
- * or munmap() the ring buffer.
- */
- ctx->nr_pages = get_user_pages(current, mm, ctx->mmap_base, nr_pages,
- 1, 0, ctx->ring_pages, NULL);
-
- /* Dropping the reference here is safe as the page cache will hold
- * onto the pages for us. It is also required so that page migration
- * can unmap the pages and get the right reference count.
- */
- for (i = 0; i < ctx->nr_pages; i++)
- put_page(ctx->ring_pages[i]);
-
- up_write(&mm->mmap_sem);
-
- if (unlikely(ctx->nr_pages != nr_pages)) {
- aio_free_ring(ctx);
- return -EAGAIN;
- }
-
ctx->user_id = ctx->mmap_base;
ctx->nr_events = nr_events; /* trusted copy */
@@ -652,7 +676,8 @@ static struct kioctx *ioctx_alloc(unsigned nr_events)
aio_nr += ctx->max_reqs;
spin_unlock(&aio_nr_lock);
- percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+ percpu_ref_get(&ctx->users); /* io_setup() will drop this ref */
+ percpu_ref_get(&ctx->reqs); /* free_ioctx_users() will drop this */
err = ioctx_add_table(ctx, mm);
if (err)
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 45d98d0..9c01509 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -767,20 +767,19 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
if (!path)
return -ENOMEM;
- if (metadata) {
- key.objectid = bytenr;
- key.type = BTRFS_METADATA_ITEM_KEY;
- key.offset = offset;
- } else {
- key.objectid = bytenr;
- key.type = BTRFS_EXTENT_ITEM_KEY;
- key.offset = offset;
- }
-
if (!trans) {
path->skip_locking = 1;
path->search_commit_root = 1;
}
+
+search_again:
+ key.objectid = bytenr;
+ key.offset = offset;
+ if (metadata)
+ key.type = BTRFS_METADATA_ITEM_KEY;
+ else
+ key.type = BTRFS_EXTENT_ITEM_KEY;
+
again:
ret = btrfs_search_slot(trans, root->fs_info->extent_root,
&key, path, 0, 0);
@@ -788,7 +787,6 @@ again:
goto out_free;
if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
- metadata = 0;
if (path->slots[0]) {
path->slots[0]--;
btrfs_item_key_to_cpu(path->nodes[0], &key,
@@ -855,7 +853,7 @@ again:
mutex_lock(&head->mutex);
mutex_unlock(&head->mutex);
btrfs_put_delayed_ref(&head->node);
- goto again;
+ goto search_again;
}
if (head->extent_op && head->extent_op->update_flags)
extent_flags |= head->extent_op->flags_to_set;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index a1116225..21da576 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2121,7 +2121,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
err = mutex_lock_killable_nested(&dir->i_mutex, I_MUTEX_PARENT);
if (err == -EINTR)
- goto out;
+ goto out_drop_write;
dentry = lookup_one_len(vol_args->name, parent, namelen);
if (IS_ERR(dentry)) {
err = PTR_ERR(dentry);
@@ -2284,6 +2284,7 @@ out_dput:
dput(dentry);
out_unlock_dir:
mutex_unlock(&dir->i_mutex);
+out_drop_write:
mnt_drop_write_file(file);
out:
kfree(vol_args);
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
index ce459a7..429c73c 100644
--- a/fs/btrfs/relocation.c
+++ b/fs/btrfs/relocation.c
@@ -571,7 +571,9 @@ static int is_cowonly_root(u64 root_objectid)
root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
root_objectid == BTRFS_DEV_TREE_OBJECTID ||
root_objectid == BTRFS_TREE_LOG_OBJECTID ||
- root_objectid == BTRFS_CSUM_TREE_OBJECTID)
+ root_objectid == BTRFS_CSUM_TREE_OBJECTID ||
+ root_objectid == BTRFS_UUID_TREE_OBJECTID ||
+ root_objectid == BTRFS_QUOTA_TREE_OBJECTID)
return 1;
return 0;
}
@@ -1264,10 +1266,10 @@ static int __must_check __add_reloc_root(struct btrfs_root *root)
}
/*
- * helper to update/delete the 'address of tree root -> reloc tree'
+ * helper to delete the 'address of tree root -> reloc tree'
* mapping
*/
-static int __update_reloc_root(struct btrfs_root *root, int del)
+static void __del_reloc_root(struct btrfs_root *root)
{
struct rb_node *rb_node;
struct mapping_node *node = NULL;
@@ -1275,7 +1277,7 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
spin_lock(&rc->reloc_root_tree.lock);
rb_node = tree_search(&rc->reloc_root_tree.rb_root,
- root->commit_root->start);
+ root->node->start);
if (rb_node) {
node = rb_entry(rb_node, struct mapping_node, rb_node);
rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
@@ -1283,23 +1285,45 @@ static int __update_reloc_root(struct btrfs_root *root, int del)
spin_unlock(&rc->reloc_root_tree.lock);
if (!node)
- return 0;
+ return;
BUG_ON((struct btrfs_root *)node->data != root);
- if (!del) {
- spin_lock(&rc->reloc_root_tree.lock);
- node->bytenr = root->node->start;
- rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
- node->bytenr, &node->rb_node);
- spin_unlock(&rc->reloc_root_tree.lock);
- if (rb_node)
- backref_tree_panic(rb_node, -EEXIST, node->bytenr);
- } else {
- spin_lock(&root->fs_info->trans_lock);
- list_del_init(&root->root_list);
- spin_unlock(&root->fs_info->trans_lock);
- kfree(node);
+ spin_lock(&root->fs_info->trans_lock);
+ list_del_init(&root->root_list);
+ spin_unlock(&root->fs_info->trans_lock);
+ kfree(node);
+}
+
+/*
+ * helper to update the 'address of tree root -> reloc tree'
+ * mapping
+ */
+static int __update_reloc_root(struct btrfs_root *root, u64 new_bytenr)
+{
+ struct rb_node *rb_node;
+ struct mapping_node *node = NULL;
+ struct reloc_control *rc = root->fs_info->reloc_ctl;
+
+ spin_lock(&rc->reloc_root_tree.lock);
+ rb_node = tree_search(&rc->reloc_root_tree.rb_root,
+ root->node->start);
+ if (rb_node) {
+ node = rb_entry(rb_node, struct mapping_node, rb_node);
+ rb_erase(&node->rb_node, &rc->reloc_root_tree.rb_root);
}
+ spin_unlock(&rc->reloc_root_tree.lock);
+
+ if (!node)
+ return 0;
+ BUG_ON((struct btrfs_root *)node->data != root);
+
+ spin_lock(&rc->reloc_root_tree.lock);
+ node->bytenr = new_bytenr;
+ rb_node = tree_insert(&rc->reloc_root_tree.rb_root,
+ node->bytenr, &node->rb_node);
+ spin_unlock(&rc->reloc_root_tree.lock);
+ if (rb_node)
+ backref_tree_panic(rb_node, -EEXIST, node->bytenr);
return 0;
}
@@ -1420,7 +1444,6 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
{
struct btrfs_root *reloc_root;
struct btrfs_root_item *root_item;
- int del = 0;
int ret;
if (!root->reloc_root)
@@ -1432,11 +1455,9 @@ int btrfs_update_reloc_root(struct btrfs_trans_handle *trans,
if (root->fs_info->reloc_ctl->merge_reloc_tree &&
btrfs_root_refs(root_item) == 0) {
root->reloc_root = NULL;
- del = 1;
+ __del_reloc_root(reloc_root);
}
- __update_reloc_root(reloc_root, del);
-
if (reloc_root->commit_root != reloc_root->node) {
btrfs_set_root_node(root_item, reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
@@ -2287,7 +2308,7 @@ void free_reloc_roots(struct list_head *list)
while (!list_empty(list)) {
reloc_root = list_entry(list->next, struct btrfs_root,
root_list);
- __update_reloc_root(reloc_root, 1);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
kfree(reloc_root);
@@ -2332,7 +2353,7 @@ again:
ret = merge_reloc_root(rc, root);
if (ret) {
- __update_reloc_root(reloc_root, 1);
+ __del_reloc_root(reloc_root);
free_extent_buffer(reloc_root->node);
free_extent_buffer(reloc_root->commit_root);
kfree(reloc_root);
@@ -2388,6 +2409,13 @@ out:
btrfs_std_error(root->fs_info, ret);
if (!list_empty(&reloc_roots))
free_reloc_roots(&reloc_roots);
+
+ /* new reloc root may be added */
+ mutex_lock(&root->fs_info->reloc_mutex);
+ list_splice_init(&rc->reloc_roots, &reloc_roots);
+ mutex_unlock(&root->fs_info->reloc_mutex);
+ if (!list_empty(&reloc_roots))
+ free_reloc_roots(&reloc_roots);
}
BUG_ON(!RB_EMPTY_ROOT(&rc->reloc_root_tree.rb_root));
@@ -4522,6 +4550,11 @@ int btrfs_reloc_cow_block(struct btrfs_trans_handle *trans,
BUG_ON(rc->stage == UPDATE_DATA_PTRS &&
root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID);
+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
+ if (buf == root->node)
+ __update_reloc_root(root, cow->start);
+ }
+
level = btrfs_header_level(buf);
if (btrfs_header_generation(buf) <=
btrfs_root_last_snapshot(&root->root_item))
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 6837fe8..945d1db 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -4723,8 +4723,8 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
}
if (!access_ok(VERIFY_READ, arg->clone_sources,
- sizeof(*arg->clone_sources *
- arg->clone_sources_count))) {
+ sizeof(*arg->clone_sources) *
+ arg->clone_sources_count)) {
ret = -EFAULT;
goto out;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 2d8ac1b..d71a11d 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -432,7 +432,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
} else {
printk(KERN_INFO "btrfs: setting nodatacow\n");
}
- info->compress_type = BTRFS_COMPRESS_NONE;
btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
btrfs_set_opt(info->mount_opt, NODATACOW);
@@ -461,7 +460,6 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_fs_incompat(info, COMPRESS_LZO);
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
- info->compress_type = BTRFS_COMPRESS_NONE;
btrfs_clear_opt(info->mount_opt, COMPRESS);
btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
compress_force = false;
@@ -474,9 +472,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
btrfs_set_opt(info->mount_opt, FORCE_COMPRESS);
pr_info("btrfs: force %s compression\n",
compress_type);
- } else
+ } else if (btrfs_test_opt(root, COMPRESS)) {
pr_info("btrfs: use %s compression\n",
compress_type);
+ }
break;
case Opt_ssd:
printk(KERN_INFO "btrfs: use ssd allocation scheme\n");
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 1e561c0..ec3ba43 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -210,9 +210,13 @@ static int readpage_nounlock(struct file *filp, struct page *page)
if (err < 0) {
SetPageError(page);
goto out;
- } else if (err < PAGE_CACHE_SIZE) {
+ } else {
+ if (err < PAGE_CACHE_SIZE) {
/* zero fill remainder of page */
- zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ zero_user_segment(page, err, PAGE_CACHE_SIZE);
+ } else {
+ flush_dcache_page(page);
+ }
}
SetPageUptodate(page);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 9a8e396..278fd28 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -978,7 +978,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
struct ceph_mds_reply_inode *ininfo;
struct ceph_vino vino;
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
- int i = 0;
int err = 0;
dout("fill_trace %p is_dentry %d is_target %d\n", req,
@@ -1039,6 +1038,29 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
}
}
+ if (rinfo->head->is_target) {
+ vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
+ vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
+
+ in = ceph_get_inode(sb, vino);
+ if (IS_ERR(in)) {
+ err = PTR_ERR(in);
+ goto done;
+ }
+ req->r_target_inode = in;
+
+ err = fill_inode(in, &rinfo->targeti, NULL,
+ session, req->r_request_started,
+ (le32_to_cpu(rinfo->head->result) == 0) ?
+ req->r_fmode : -1,
+ &req->r_caps_reservation);
+ if (err < 0) {
+ pr_err("fill_inode badness %p %llx.%llx\n",
+ in, ceph_vinop(in));
+ goto done;
+ }
+ }
+
/*
* ignore null lease/binding on snapdir ENOENT, or else we
* will have trouble splicing in the virtual snapdir later
@@ -1108,7 +1130,6 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
ceph_dentry(req->r_old_dentry)->offset);
dn = req->r_old_dentry; /* use old_dentry */
- in = dn->d_inode;
}
/* null dentry? */
@@ -1130,44 +1151,28 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
}
/* attach proper inode */
- ininfo = rinfo->targeti.in;
- vino.ino = le64_to_cpu(ininfo->ino);
- vino.snap = le64_to_cpu(ininfo->snapid);
- in = dn->d_inode;
- if (!in) {
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- pr_err("fill_trace bad get_inode "
- "%llx.%llx\n", vino.ino, vino.snap);
- err = PTR_ERR(in);
- d_drop(dn);
- goto done;
- }
+ if (!dn->d_inode) {
+ ihold(in);
dn = splice_dentry(dn, in, &have_lease, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
}
req->r_dentry = dn; /* may have spliced */
- ihold(in);
- } else if (ceph_ino(in) == vino.ino &&
- ceph_snap(in) == vino.snap) {
- ihold(in);
- } else {
+ } else if (dn->d_inode && dn->d_inode != in) {
dout(" %p links to %p %llx.%llx, not %llx.%llx\n",
- dn, in, ceph_ino(in), ceph_snap(in),
- vino.ino, vino.snap);
+ dn, dn->d_inode, ceph_vinop(dn->d_inode),
+ ceph_vinop(in));
have_lease = false;
- in = NULL;
}
if (have_lease)
update_dentry_lease(dn, rinfo->dlease, session,
req->r_request_started);
dout(" final dn %p\n", dn);
- i++;
- } else if ((req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
- req->r_op == CEPH_MDS_OP_MKSNAP) && !req->r_aborted) {
+ } else if (!req->r_aborted &&
+ (req->r_op == CEPH_MDS_OP_LOOKUPSNAP ||
+ req->r_op == CEPH_MDS_OP_MKSNAP)) {
struct dentry *dn = req->r_dentry;
/* fill out a snapdir LOOKUPSNAP dentry */
@@ -1177,52 +1182,15 @@ int ceph_fill_trace(struct super_block *sb, struct ceph_mds_request *req,
ininfo = rinfo->targeti.in;
vino.ino = le64_to_cpu(ininfo->ino);
vino.snap = le64_to_cpu(ininfo->snapid);
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- pr_err("fill_inode get_inode badness %llx.%llx\n",
- vino.ino, vino.snap);
- err = PTR_ERR(in);
- d_delete(dn);
- goto done;
- }
dout(" linking snapped dir %p to dn %p\n", in, dn);
+ ihold(in);
dn = splice_dentry(dn, in, NULL, true);
if (IS_ERR(dn)) {
err = PTR_ERR(dn);
goto done;
}
req->r_dentry = dn; /* may have spliced */
- ihold(in);
- rinfo->head->is_dentry = 1; /* fool notrace handlers */
- }
-
- if (rinfo->head->is_target) {
- vino.ino = le64_to_cpu(rinfo->targeti.in->ino);
- vino.snap = le64_to_cpu(rinfo->targeti.in->snapid);
-
- if (in == NULL || ceph_ino(in) != vino.ino ||
- ceph_snap(in) != vino.snap) {
- in = ceph_get_inode(sb, vino);
- if (IS_ERR(in)) {
- err = PTR_ERR(in);
- goto done;
- }
- }
- req->r_target_inode = in;
-
- err = fill_inode(in,
- &rinfo->targeti, NULL,
- session, req->r_request_started,
- (le32_to_cpu(rinfo->head->result) == 0) ?
- req->r_fmode : -1,
- &req->r_caps_reservation);
- if (err < 0) {
- pr_err("fill_inode badness %p %llx.%llx\n",
- in, ceph_vinop(in));
- goto done;
- }
}
-
done:
dout("fill_trace done err=%d\n", err);
return err;
@@ -1272,7 +1240,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
struct qstr dname;
struct dentry *dn;
struct inode *in;
- int err = 0, i;
+ int err = 0, ret, i;
struct inode *snapdir = NULL;
struct ceph_mds_request_head *rhead = req->r_request->front.iov_base;
struct ceph_dentry_info *di;
@@ -1305,6 +1273,7 @@ int ceph_readdir_prepopulate(struct ceph_mds_request *req,
ceph_fill_dirfrag(parent->d_inode, rinfo->dir_dir);
}
+ /* FIXME: release caps/leases if error occurs */
for (i = 0; i < rinfo->dir_nr; i++) {
struct ceph_vino vino;
@@ -1329,9 +1298,10 @@ retry_lookup:
err = -ENOMEM;
goto out;
}
- err = ceph_init_dentry(dn);
- if (err < 0) {
+ ret = ceph_init_dentry(dn);
+ if (ret < 0) {
dput(dn);
+ err = ret;
goto out;
}
} else if (dn->d_inode &&
@@ -1351,9 +1321,6 @@ retry_lookup:
spin_unlock(&parent->d_lock);
}
- di = dn->d_fsdata;
- di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
-
/* inode */
if (dn->d_inode) {
in = dn->d_inode;
@@ -1366,26 +1333,39 @@ retry_lookup:
err = PTR_ERR(in);
goto out;
}
- dn = splice_dentry(dn, in, NULL, false);
- if (IS_ERR(dn))
- dn = NULL;
}
if (fill_inode(in, &rinfo->dir_in[i], NULL, session,
req->r_request_started, -1,
&req->r_caps_reservation) < 0) {
pr_err("fill_inode badness on %p\n", in);
+ if (!dn->d_inode)
+ iput(in);
+ d_drop(dn);
goto next_item;
}
- if (dn)
- update_dentry_lease(dn, rinfo->dir_dlease[i],
- req->r_session,
- req->r_request_started);
+
+ if (!dn->d_inode) {
+ dn = splice_dentry(dn, in, NULL, false);
+ if (IS_ERR(dn)) {
+ err = PTR_ERR(dn);
+ dn = NULL;
+ goto next_item;
+ }
+ }
+
+ di = dn->d_fsdata;
+ di->offset = ceph_make_fpos(frag, i + r_readdir_offset);
+
+ update_dentry_lease(dn, rinfo->dir_dlease[i],
+ req->r_session,
+ req->r_request_started);
next_item:
if (dn)
dput(dn);
}
- req->r_did_prepopulate = true;
+ if (err == 0)
+ req->r_did_prepopulate = true;
out:
if (snapdir) {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index aa33976..2c29db6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -477,9 +477,10 @@ extern int CIFSGetExtAttr(const unsigned int xid, struct cifs_tcon *tcon,
const int netfid, __u64 *pExtAttrBits, __u64 *pMask);
extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb);
extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr);
-extern int CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid);
+extern int CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb,
+ struct cifs_fattr *fattr,
+ const unsigned char *path);
extern int mdfour(unsigned char *, unsigned char *, int);
extern int E_md4hash(const unsigned char *passwd, unsigned char *p16,
const struct nls_table *codepage);
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 124aa02..d707edb 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -4010,7 +4010,7 @@ QFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+ cifs_dbg(FYI, "Send error in QFileInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4179,7 +4179,7 @@ UnixQFileInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+ cifs_dbg(FYI, "Send error in UnixQFileInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
@@ -4263,7 +4263,7 @@ UnixQPathInfoRetry:
rc = SendReceive(xid, tcon->ses, (struct smb_hdr *) pSMB,
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
- cifs_dbg(FYI, "Send error in QPathInfo = %d\n", rc);
+ cifs_dbg(FYI, "Send error in UnixQPathInfo = %d", rc);
} else { /* decode response */
rc = validate_t2((struct smb_t2_rsp *)pSMBr);
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 11ff5f1..a514e0a 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -193,7 +193,7 @@ check_name(struct dentry *direntry)
static int
cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
struct tcon_link *tlink, unsigned oflags, umode_t mode,
- __u32 *oplock, struct cifs_fid *fid, int *created)
+ __u32 *oplock, struct cifs_fid *fid)
{
int rc = -ENOENT;
int create_options = CREATE_NOT_DIR;
@@ -349,7 +349,6 @@ cifs_do_create(struct inode *inode, struct dentry *direntry, unsigned int xid,
.device = 0,
};
- *created |= FILE_CREATED;
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) {
args.uid = current_fsuid();
if (inode->i_mode & S_ISGID)
@@ -480,13 +479,16 @@ cifs_atomic_open(struct inode *inode, struct dentry *direntry,
cifs_add_pending_open(&fid, tlink, &open);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid, opened);
+ &oplock, &fid);
if (rc) {
cifs_del_pending_open(&open);
goto out;
}
+ if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+ *opened |= FILE_CREATED;
+
rc = finish_open(file, direntry, generic_file_open, opened);
if (rc) {
if (server->ops->close)
@@ -529,7 +531,6 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
struct TCP_Server_Info *server;
struct cifs_fid fid;
__u32 oplock;
- int created = FILE_CREATED;
cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %s and dentry = 0x%p\n",
inode, direntry->d_name.name, direntry);
@@ -546,7 +547,7 @@ int cifs_create(struct inode *inode, struct dentry *direntry, umode_t mode,
server->ops->new_lease_key(&fid);
rc = cifs_do_create(inode, direntry, xid, tlink, oflags, mode,
- &oplock, &fid, &created);
+ &oplock, &fid);
if (!rc && server->ops->close)
server->ops->close(xid, tcon, &fid);
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 36f9ebb..49719b8 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -383,7 +383,8 @@ int cifs_get_inode_info_unix(struct inode **pinode,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
- int tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ int tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+ full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}
@@ -799,7 +800,8 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
/* check for Minshall+French symlinks */
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MF_SYMLINKS) {
- tmprc = CIFSCheckMFSymlink(&fattr, full_path, cifs_sb, xid);
+ tmprc = CIFSCheckMFSymlink(xid, tcon, cifs_sb, &fattr,
+ full_path);
if (tmprc)
cifs_dbg(FYI, "CIFSCheckMFSymlink: %d\n", tmprc);
}
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index cc023471..92aee08 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -354,34 +354,30 @@ open_query_close_cifs_symlink(const unsigned char *path, char *pbuf,
int
-CIFSCheckMFSymlink(struct cifs_fattr *fattr,
- const unsigned char *path,
- struct cifs_sb_info *cifs_sb, unsigned int xid)
+CIFSCheckMFSymlink(unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, struct cifs_fattr *fattr,
+ const unsigned char *path)
{
- int rc = 0;
+ int rc;
u8 *buf = NULL;
unsigned int link_len = 0;
unsigned int bytes_read = 0;
- struct cifs_tcon *ptcon;
if (!CIFSCouldBeMFSymlink(fattr))
/* it's not a symlink */
return 0;
buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL);
- if (!buf) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!buf)
+ return -ENOMEM;
- ptcon = tlink_tcon(cifs_sb_tlink(cifs_sb));
- if ((ptcon->ses) && (ptcon->ses->server->ops->query_mf_symlink))
- rc = ptcon->ses->server->ops->query_mf_symlink(path, buf,
- &bytes_read, cifs_sb, xid);
+ if (tcon->ses->server->ops->query_mf_symlink)
+ rc = tcon->ses->server->ops->query_mf_symlink(path, buf,
+ &bytes_read, cifs_sb, xid);
else
- goto out;
+ rc = -ENOSYS;
- if (rc != 0)
+ if (rc)
goto out;
if (bytes_read == 0) /* not a symlink */
diff --git a/fs/dcache.c b/fs/dcache.c
index 4bdb300..6055d61 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -192,7 +192,7 @@ static inline int dentry_string_cmp(const unsigned char *cs, const unsigned char
if (!tcount)
return 0;
}
- mask = ~(~0ul << tcount*8);
+ mask = bytemask_from_count(tcount);
return unlikely(!!((a ^ b) & mask));
}
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 8b5e258..af90312 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1907,10 +1907,6 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd,
}
}
}
- if (op == EPOLL_CTL_DEL && is_file_epoll(tf.file)) {
- tep = tf.file->private_data;
- mutex_lock_nested(&tep->mtx, 1);
- }
/*
* Try to lookup the file inside our RB tree, Since we grabbed "mtx"
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 2885349..20d6697 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -1493,6 +1493,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
sb->s_blocksize - offset : towrite;
tmp_bh.b_state = 0;
+ tmp_bh.b_size = sb->s_blocksize;
err = ext2_get_block(inode, blk, &tmp_bh, 1);
if (err < 0)
goto out;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index e618503..ece5556 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -268,6 +268,16 @@ struct ext4_io_submit {
/* Translate # of blks to # of clusters */
#define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \
(sbi)->s_cluster_bits)
+/* Mask out the low bits to get the starting block of the cluster */
+#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \
+ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \
+ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1))
+/* Get the cluster offset */
+#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \
+ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1))
+#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \
+ ((ext4_lblk_t) (s)->s_cluster_ratio - 1))
/*
* Structure of a blocks group descriptor
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 17ac112..3fe29de 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -259,6 +259,15 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
if (WARN_ON_ONCE(err)) {
ext4_journal_abort_handle(where, line, __func__, bh,
handle, err);
+ ext4_error_inode(inode, where, line,
+ bh->b_blocknr,
+ "journal_dirty_metadata failed: "
+ "handle type %u started at line %u, "
+ "credits %u/%u, errcode %d",
+ handle->h_type,
+ handle->h_line_no,
+ handle->h_requested_credits,
+ handle->h_buffer_credits, err);
}
} else {
if (inode)
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 35f65cf..3384dc4 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -360,8 +360,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
{
ext4_fsblk_t block = ext4_ext_pblock(ext);
int len = ext4_ext_get_actual_len(ext);
+ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
+ ext4_lblk_t last = lblock + len - 1;
- if (len == 0)
+ if (lblock > last)
return 0;
return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
}
@@ -387,11 +389,26 @@ static int ext4_valid_extent_entries(struct inode *inode,
if (depth == 0) {
/* leaf entries */
struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
+ ext4_lblk_t lblock = 0;
+ ext4_lblk_t prev = 0;
+ int len = 0;
while (entries) {
if (!ext4_valid_extent(inode, ext))
return 0;
+
+ /* Check for overlapping extents */
+ lblock = le32_to_cpu(ext->ee_block);
+ len = ext4_ext_get_actual_len(ext);
+ if ((lblock <= prev) && prev) {
+ pblock = ext4_ext_pblock(ext);
+ es->s_last_error_block = cpu_to_le64(pblock);
+ return 0;
+ }
ext++;
entries--;
+ prev = lblock + len - 1;
}
} else {
struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
@@ -1834,8 +1851,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
depth = ext_depth(inode);
if (!path[depth].p_ext)
goto out;
- b2 = le32_to_cpu(path[depth].p_ext->ee_block);
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block));
/*
* get the next allocated block if the extent in the path
@@ -1845,7 +1861,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi,
b2 = ext4_ext_next_allocated_block(path);
if (b2 == EXT_MAX_BLOCKS)
goto out;
- b2 &= ~(sbi->s_cluster_ratio - 1);
+ b2 = EXT4_LBLK_CMASK(sbi, b2);
}
/* check for wrap through zero on extent logical start block*/
@@ -2504,7 +2520,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
* extent, we have to mark the cluster as used (store negative
* cluster number in partial_cluster).
*/
- unaligned = pblk & (sbi->s_cluster_ratio - 1);
+ unaligned = EXT4_PBLK_COFF(sbi, pblk);
if (unaligned && (ee_len == num) &&
(*partial_cluster != -((long long)EXT4_B2C(sbi, pblk))))
*partial_cluster = EXT4_B2C(sbi, pblk);
@@ -2598,7 +2614,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
* accidentally freeing it later on
*/
pblk = ext4_ext_pblock(ex);
- if (pblk & (sbi->s_cluster_ratio - 1))
+ if (EXT4_PBLK_COFF(sbi, pblk))
*partial_cluster =
-((long long)EXT4_B2C(sbi, pblk));
ex--;
@@ -3753,7 +3769,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk)
{
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
ext4_lblk_t lblk_start, lblk_end;
- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1));
+ lblk_start = EXT4_LBLK_CMASK(sbi, lblk);
lblk_end = lblk_start + sbi->s_cluster_ratio - 1;
return ext4_find_delalloc_range(inode, lblk_start, lblk_end);
@@ -3812,9 +3828,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks);
/* Check towards left side */
- c_offset = lblk_start & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start);
if (c_offset) {
- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1));
+ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start);
lblk_to = lblk_from + c_offset - 1;
if (ext4_find_delalloc_range(inode, lblk_from, lblk_to))
@@ -3822,7 +3838,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
}
/* Now check towards right. */
- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1);
+ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks);
if (allocated_clusters && c_offset) {
lblk_from = lblk_start + num_blks;
lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1;
@@ -4030,7 +4046,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
struct ext4_ext_path *path)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ext4_lblk_t ex_cluster_start, ex_cluster_end;
ext4_lblk_t rr_cluster_start;
ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block);
@@ -4048,8 +4064,7 @@ static int get_implied_cluster_alloc(struct super_block *sb,
(rr_cluster_start == ex_cluster_start)) {
if (rr_cluster_start == ex_cluster_end)
ee_start += ee_len - 1;
- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) +
- c_offset;
+ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset;
map->m_len = min(map->m_len,
(unsigned) sbi->s_cluster_ratio - c_offset);
/*
@@ -4203,7 +4218,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
*/
map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
newex.ee_block = cpu_to_le32(map->m_lblk);
- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1);
+ cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
/*
* If we are doing bigalloc, check to see if the extent returned
@@ -4271,7 +4286,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
* needed so that future calls to get_implied_cluster_alloc()
* work correctly.
*/
- offset = map->m_lblk & (sbi->s_cluster_ratio - 1);
+ offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
ar.len = EXT4_NUM_B2C(sbi, offset+allocated);
ar.goal -= offset;
ar.logical -= offset;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 0757634..61d49ff 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1206,7 +1206,6 @@ static int ext4_journalled_write_end(struct file *file,
*/
static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1218,7 +1217,6 @@ static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1238,10 +1236,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
return -ENOSPC;
}
ei->i_reserved_meta_blocks += md_needed;
@@ -1255,7 +1249,6 @@ repeat:
*/
static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
{
- int retries = 0;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct ext4_inode_info *ei = EXT4_I(inode);
unsigned int md_needed;
@@ -1277,7 +1270,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
* in order to allocate nrblocks
* worse case is one extent per block
*/
-repeat:
spin_lock(&ei->i_block_reservation_lock);
/*
* ext4_calc_metadata_amount() has side effects, which we have
@@ -1297,10 +1289,6 @@ repeat:
ei->i_da_metadata_calc_len = save_len;
ei->i_da_metadata_calc_last_lblock = save_last_lblock;
spin_unlock(&ei->i_block_reservation_lock);
- if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
- cond_resched();
- goto repeat;
- }
dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
return -ENOSPC;
}
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 4d113ef..04a5c75 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3442,6 +3442,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head)
{
struct ext4_prealloc_space *pa;
pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu);
+
+ BUG_ON(atomic_read(&pa->pa_count));
+ BUG_ON(pa->pa_deleted == 0);
kmem_cache_free(ext4_pspace_cachep, pa);
}
@@ -3455,11 +3458,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
ext4_group_t grp;
ext4_fsblk_t grp_blk;
- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
- return;
-
/* in this short window concurrent discard can set pa_deleted */
spin_lock(&pa->pa_lock);
+ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) {
+ spin_unlock(&pa->pa_lock);
+ return;
+ }
+
if (pa->pa_deleted == 1) {
spin_unlock(&pa->pa_lock);
return;
@@ -4121,7 +4126,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac,
ext4_get_group_no_and_offset(sb, goal, &group, &block);
/* set up allocation goals */
- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1);
+ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical);
ac->ac_status = AC_STATUS_CONTINUE;
ac->ac_sb = sb;
ac->ac_inode = ar->inode;
@@ -4663,7 +4668,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
* blocks at the beginning or the end unless we are explicitly
* requested to avoid doing so.
*/
- overflow = block & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_PBLK_COFF(sbi, block);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) {
overflow = sbi->s_cluster_ratio - overflow;
@@ -4677,7 +4682,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode,
count += overflow;
}
}
- overflow = count & (sbi->s_cluster_ratio - 1);
+ overflow = EXT4_LBLK_COFF(sbi, count);
if (overflow) {
if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) {
if (count > overflow)
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index c977f4e..1f7784d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -792,7 +792,7 @@ static void ext4_put_super(struct super_block *sb)
}
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
ext4_release_system_zone(sb);
ext4_mb_release(sb);
ext4_ext_release(sb);
@@ -3316,11 +3316,19 @@ int ext4_calculate_overhead(struct super_block *sb)
}
-static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
+static ext4_fsblk_t ext4_calculate_resv_clusters(struct super_block *sb)
{
ext4_fsblk_t resv_clusters;
/*
+ * There's no need to reserve anything when we aren't using extents.
+ * The space estimates are exact, there are no unwritten extents,
+ * hole punching doesn't need new metadata... This is needed especially
+ * to keep ext2/3 backward compatibility.
+ */
+ if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS))
+ return 0;
+ /*
* By default we reserve 2% or 4096 clusters, whichever is smaller.
* This should cover the situations where we can not afford to run
* out of space like for example punch hole, or converting
@@ -3328,7 +3336,8 @@ static ext4_fsblk_t ext4_calculate_resv_clusters(struct ext4_sb_info *sbi)
* allocation would require 1, or 2 blocks, higher numbers are
* very rare.
*/
- resv_clusters = ext4_blocks_count(sbi->s_es) >> sbi->s_cluster_bits;
+ resv_clusters = ext4_blocks_count(EXT4_SB(sb)->s_es) >>
+ EXT4_SB(sb)->s_cluster_bits;
do_div(resv_clusters, 50);
resv_clusters = min_t(ext4_fsblk_t, resv_clusters, 4096);
@@ -4071,10 +4080,10 @@ no_journal:
"available");
}
- err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sbi));
+ err = ext4_reserve_clusters(sbi, ext4_calculate_resv_clusters(sb));
if (err) {
ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
- "reserved pool", ext4_calculate_resv_clusters(sbi));
+ "reserved pool", ext4_calculate_resv_clusters(sb));
goto failed_mount4a;
}
@@ -4184,7 +4193,7 @@ failed_mount_wq:
}
failed_mount3:
ext4_es_unregister_shrinker(sbi);
- del_timer(&sbi->s_err_report);
+ del_timer_sync(&sbi->s_err_report);
if (sbi->s_flex_groups)
ext4_kvfree(sbi->s_flex_groups);
percpu_counter_destroy(&sbi->s_freeclusters_counter);
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index b7fc035..73f3e4e 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -986,6 +986,7 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
{
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
+ struct address_space *mapping = inode->i_mapping;
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_holder gh;
int rv;
@@ -1006,6 +1007,35 @@ static ssize_t gfs2_direct_IO(int rw, struct kiocb *iocb,
if (rv != 1)
goto out; /* dio not valid, fall back to buffered i/o */
+ /*
+ * Now since we are holding a deferred (CW) lock at this point, you
+ * might be wondering why this is ever needed. There is a case however
+ * where we've granted a deferred local lock against a cached exclusive
+ * glock. That is ok provided all granted local locks are deferred, but
+ * it also means that it is possible to encounter pages which are
+ * cached and possibly also mapped. So here we check for that and sort
+ * them out ahead of the dio. The glock state machine will take care of
+ * everything else.
+ *
+ * If in fact the cached glock state (gl->gl_state) is deferred (CW) in
+ * the first place, mapping->nrpages will always be zero.
+ */
+ if (mapping->nrpages) {
+ loff_t lstart = offset & (PAGE_CACHE_SIZE - 1);
+ loff_t len = iov_length(iov, nr_segs);
+ loff_t end = PAGE_ALIGN(offset + len) - 1;
+
+ rv = 0;
+ if (len == 0)
+ goto out;
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len);
+ rv = filemap_write_and_wait_range(mapping, lstart, end);
+ if (rv)
+ return rv;
+ truncate_inode_pages_range(mapping, lstart, end);
+ }
+
rv = __blockdev_direct_IO(rw, iocb, inode, inode->i_sb->s_bdev, iov,
offset, nr_segs, gfs2_get_block_direct,
NULL, NULL, 0);
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c8420f7..6f7a47c 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -1655,6 +1655,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
struct task_struct *gh_owner = NULL;
char flags_buf[32];
+ rcu_read_lock();
if (gh->gh_owner_pid)
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %pS\n",
@@ -1664,6 +1665,7 @@ static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
gh_owner ? gh_owner->comm : "(ended)",
(void *)gh->gh_ip);
+ rcu_read_unlock();
return 0;
}
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index db908f6..f88dcd9 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -192,8 +192,11 @@ static void inode_go_sync(struct gfs2_glock *gl)
if (ip && !S_ISREG(ip->i_inode.i_mode))
ip = NULL;
- if (ip && test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
- unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ if (ip) {
+ if (test_and_clear_bit(GIF_SW_PAGED, &ip->i_flags))
+ unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
+ inode_dio_wait(&ip->i_inode);
+ }
if (!test_and_clear_bit(GLF_DIRTY, &gl->gl_flags))
return;
@@ -410,6 +413,9 @@ static int inode_go_lock(struct gfs2_holder *gh)
return error;
}
+ if (gh->gh_state != LM_ST_DEFERRED)
+ inode_dio_wait(&ip->i_inode);
+
if ((ip->i_diskflags & GFS2_DIF_TRUNC_IN_PROG) &&
(gl->gl_state == LM_ST_EXCLUSIVE) &&
(gh->gh_state == LM_ST_EXCLUSIVE)) {
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 610613f..9dcb977 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -551,10 +551,10 @@ void gfs2_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
struct buffer_head *bh = bd->bd_bh;
struct gfs2_glock *gl = bd->bd_gl;
- gfs2_remove_from_ail(bd);
- bd->bd_bh = NULL;
bh->b_private = NULL;
bd->bd_blkno = bh->b_blocknr;
+ gfs2_remove_from_ail(bd); /* drops ref on bh */
+ bd->bd_bh = NULL;
bd->bd_ops = &gfs2_revoke_lops;
sdp->sd_log_num_revoke++;
atomic_inc(&gl->gl_revokes);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 9324150..52f177b 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -258,6 +258,7 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
struct address_space *mapping = bh->b_page->mapping;
struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
struct gfs2_bufdata *bd = bh->b_private;
+ int was_pinned = 0;
if (test_clear_buffer_pinned(bh)) {
trace_gfs2_pin(bd, 0);
@@ -273,12 +274,16 @@ void gfs2_remove_from_journal(struct buffer_head *bh, struct gfs2_trans *tr, int
tr->tr_num_databuf_rm++;
}
tr->tr_touched = 1;
+ was_pinned = 1;
brelse(bh);
}
if (bd) {
spin_lock(&sdp->sd_ail_lock);
if (bd->bd_tr) {
gfs2_trans_add_revoke(sdp, bd);
+ } else if (was_pinned) {
+ bh->b_private = NULL;
+ kmem_cache_free(gfs2_bufdata_cachep, bd);
}
spin_unlock(&sdp->sd_ail_lock);
}
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b4..52fa883 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -1366,8 +1366,18 @@ static struct dentry *gfs2_mount(struct file_system_type *fs_type, int flags,
if (IS_ERR(s))
goto error_bdev;
- if (s->s_root)
+ if (s->s_root) {
+ /*
+ * s_umount nests inside bd_mutex during
+ * __invalidate_device(). blkdev_put() acquires
+ * bd_mutex and can't be called under s_umount. Drop
+ * s_umount temporarily. This is safe as we're
+ * holding an active reference.
+ */
+ up_write(&s->s_umount);
blkdev_put(bdev, mode);
+ down_write(&s->s_umount);
+ }
memset(&args, 0, sizeof(args));
args.ar_quota = GFS2_QUOTA_DEFAULT;
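
The comment above describes a classic lock-ordering constraint: if the established order is bd_mutex before s_umount, a path already holding s_umount must drop it around a call that takes bd_mutex. A hedged pthread sketch of the same shape (lock names are illustrative stand-ins):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t bd_mutex = PTHREAD_MUTEX_INITIALIZER;   /* outer lock */
static pthread_rwlock_t s_umount = PTHREAD_RWLOCK_INITIALIZER; /* inner lock */

static void put_block_device(void)
{
	/* this helper takes the outer lock internally */
	pthread_mutex_lock(&bd_mutex);
	/* ... release device resources ... */
	pthread_mutex_unlock(&bd_mutex);
}

int main(void)
{
	pthread_rwlock_wrlock(&s_umount);
	/* ... superblock work that needs s_umount ... */

	/* drop the inner lock before a call that takes the outer one */
	pthread_rwlock_unlock(&s_umount);
	put_block_device();
	pthread_rwlock_wrlock(&s_umount);

	/* ... continue with s_umount held ... */
	pthread_rwlock_unlock(&s_umount);
	printf("done without inverting bd_mutex -> s_umount\n");
	return 0;
}
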
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 5203264..5fa344a 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -702,7 +702,7 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
read_lock(&journal->j_state_lock);
#ifdef CONFIG_JBD2_DEBUG
if (!tid_geq(journal->j_commit_request, tid)) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
@@ -718,10 +718,8 @@ int jbd2_log_wait_commit(journal_t *journal, tid_t tid)
}
read_unlock(&journal->j_state_lock);
- if (unlikely(is_journal_aborted(journal))) {
- printk(KERN_EMERG "journal commit I/O error\n");
+ if (unlikely(is_journal_aborted(journal)))
err = -EIO;
- }
return err;
}
@@ -1527,13 +1525,13 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
/* Can't have checksum v1 and v2 on at the same time! */
- printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
+ printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
"at the same time!\n");
goto out;
}
if (!jbd2_verify_csum_type(journal, sb)) {
- printk(KERN_ERR "JBD: Unknown checksum type\n");
+ printk(KERN_ERR "JBD2: Unknown checksum type\n");
goto out;
}
@@ -1541,7 +1539,7 @@ static int journal_get_superblock(journal_t *journal)
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
+ printk(KERN_ERR "JBD2: Cannot load crc32c driver.\n");
err = PTR_ERR(journal->j_chksum_driver);
journal->j_chksum_driver = NULL;
goto out;
@@ -1550,7 +1548,7 @@ static int journal_get_superblock(journal_t *journal)
/* Check superblock checksum */
if (!jbd2_superblock_csum_verify(journal, sb)) {
- printk(KERN_ERR "JBD: journal checksum error\n");
+ printk(KERN_ERR "JBD2: journal checksum error\n");
goto out;
}
@@ -1836,7 +1834,7 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
journal->j_chksum_driver = crypto_alloc_shash("crc32c",
0, 0);
if (IS_ERR(journal->j_chksum_driver)) {
- printk(KERN_ERR "JBD: Cannot load crc32c "
+ printk(KERN_ERR "JBD2: Cannot load crc32c "
"driver.\n");
journal->j_chksum_driver = NULL;
return 0;
@@ -2645,7 +2643,7 @@ static void __exit journal_exit(void)
#ifdef CONFIG_JBD2_DEBUG
int n = atomic_read(&nr_journal_heads);
if (n)
- printk(KERN_EMERG "JBD2: leaked %d journal_heads!\n", n);
+ printk(KERN_ERR "JBD2: leaked %d journal_heads!\n", n);
#endif
jbd2_remove_jbd_stats_proc_entry();
jbd2_journal_destroy_caches();
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 3929c50..3b6bb19 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -594,7 +594,7 @@ static int do_one_pass(journal_t *journal,
be32_to_cpu(tmp->h_sequence))) {
brelse(obh);
success = -EIO;
- printk(KERN_ERR "JBD: Invalid "
+ printk(KERN_ERR "JBD2: Invalid "
"checksum recovering "
"block %llu in log\n",
blocknr);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 7aa9a32..8360674 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -932,7 +932,7 @@ repeat:
jbd2_alloc(jh2bh(jh)->b_size,
GFP_NOFS);
if (!frozen_buffer) {
- printk(KERN_EMERG
+ printk(KERN_ERR
"%s: OOM for frozen_buffer\n",
__func__);
JBUFFER_TRACE(jh, "oom!");
@@ -1166,7 +1166,7 @@ repeat:
if (!jh->b_committed_data) {
committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS);
if (!committed_data) {
- printk(KERN_EMERG "%s: No memory for committed data\n",
+ printk(KERN_ERR "%s: No memory for committed data\n",
__func__);
err = -ENOMEM;
goto out;
@@ -1290,7 +1290,10 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
* once a transaction -bzzz
*/
jh->b_modified = 1;
- J_ASSERT_JH(jh, handle->h_buffer_credits > 0);
+ if (handle->h_buffer_credits <= 0) {
+ ret = -ENOSPC;
+ goto out_unlock_bh;
+ }
handle->h_buffer_credits--;
}
@@ -1305,7 +1308,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "fastpath");
if (unlikely(jh->b_transaction !=
journal->j_running_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_running_transaction (%p, %u)",
journal->j_devname,
@@ -1332,7 +1335,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
JBUFFER_TRACE(jh, "already on other transaction");
if (unlikely(jh->b_transaction !=
journal->j_committing_transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_transaction (%llu, %p, %u) != "
"journal->j_committing_transaction (%p, %u)",
journal->j_devname,
@@ -1345,7 +1348,7 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
ret = -EINVAL;
}
if (unlikely(jh->b_next_transaction != transaction)) {
- printk(KERN_EMERG "JBD: %s: "
+ printk(KERN_ERR "JBD2: %s: "
"jh->b_next_transaction (%llu, %p, %u) != "
"transaction (%p, %u)",
journal->j_devname,
@@ -1373,7 +1376,6 @@ out_unlock_bh:
jbd2_journal_put_journal_head(jh);
out:
JBUFFER_TRACE(jh, "exit");
- WARN_ON(ret); /* All errors are bugs, so dump the stack */
return ret;
}
diff --git a/fs/namei.c b/fs/namei.c
index c53d3a9..3531dee 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1598,11 +1598,6 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
* do a "get_unaligned()" if this helps and is sufficiently
* fast.
*
- * - Little-endian machines (so that we can generate the mask
- * of low bytes efficiently). Again, we *could* do a byte
- * swapping load on big-endian architectures if that is not
- * expensive enough to make the optimization worthless.
- *
* - non-CONFIG_DEBUG_PAGEALLOC configurations (so that we
* do not trap on the (extremely unlikely) case of a page
* crossing operation.
@@ -1646,7 +1641,7 @@ unsigned int full_name_hash(const unsigned char *name, unsigned int len)
if (!len)
goto done;
}
- mask = ~(~0ul << len*8);
+ mask = bytemask_from_count(len);
hash += mask & a;
done:
return fold_hash(hash);
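
On little-endian machines the replacement bytemask_from_count(cnt) expands to ~(~0ul << (cnt)*8), a mask covering the low cnt bytes of a word, which is exactly what the final partial word of full_name_hash() needs. A small sketch, assuming an 8-byte little-endian unsigned long (the remainder handled there is always 1..7 bytes):

#include <stdio.h>

#define bytemask_from_count(cnt)	(~(~0ul << (cnt) * 8))

int main(void)
{
	for (unsigned int cnt = 1; cnt < sizeof(unsigned long); cnt++)
		printf("cnt=%u mask=%016lx\n", cnt, bytemask_from_count(cnt));
	return 0;
}
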
diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
index 9186c7c..b6af150 100644
--- a/fs/nfsd/nfscache.c
+++ b/fs/nfsd/nfscache.c
@@ -132,6 +132,13 @@ nfsd_reply_cache_alloc(void)
}
static void
+nfsd_reply_cache_unhash(struct svc_cacherep *rp)
+{
+ hlist_del_init(&rp->c_hash);
+ list_del_init(&rp->c_lru);
+}
+
+static void
nfsd_reply_cache_free_locked(struct svc_cacherep *rp)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
@@ -417,7 +424,7 @@ nfsd_cache_lookup(struct svc_rqst *rqstp)
rp = list_first_entry(&lru_head, struct svc_cacherep, c_lru);
if (nfsd_cache_entry_expired(rp) ||
num_drc_entries >= max_drc_entries) {
- lru_put_end(rp);
+ nfsd_reply_cache_unhash(rp);
prune_cache_entries();
goto search_cache;
}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 28955d4..124fc43 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -292,16 +292,20 @@ proc_reg_get_unmapped_area(struct file *file, unsigned long orig_addr,
{
struct proc_dir_entry *pde = PDE(file_inode(file));
unsigned long rv = -EIO;
- unsigned long (*get_area)(struct file *, unsigned long, unsigned long,
- unsigned long, unsigned long) = NULL;
+
if (use_pde(pde)) {
+ typeof(proc_reg_get_unmapped_area) *get_area;
+
+ get_area = pde->proc_fops->get_unmapped_area;
#ifdef CONFIG_MMU
- get_area = current->mm->get_unmapped_area;
+ if (!get_area)
+ get_area = current->mm->get_unmapped_area;
#endif
- if (pde->proc_fops->get_unmapped_area)
- get_area = pde->proc_fops->get_unmapped_area;
+
if (get_area)
rv = get_area(file, orig_addr, len, pgoff, flags);
+ else
+ rv = orig_addr;
unuse_pde(pde);
}
return rv;
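
The rewrite above declares a function pointer with typeof() of the enclosing function and then falls back through candidates: the entry's own hook, the mm default on MMU systems, or finally the original address. A hedged userspace sketch of the same selection pattern, using GNU C typeof as the kernel does (helper names are made up):

#include <stdio.h>
#include <stddef.h>

static unsigned long default_get_area(unsigned long addr, unsigned long len)
{
	return addr + len;	/* stand-in for the mm-provided default */
}

static unsigned long pick_area(unsigned long orig_addr, unsigned long len,
			       unsigned long (*hook)(unsigned long, unsigned long))
{
	/* same pointer type as the hook parameter, via typeof */
	typeof(hook) get_area = hook;

	if (!get_area)
		get_area = default_get_area;	/* MMU-style fallback */
	if (get_area)
		return get_area(orig_addr, len);
	return orig_addr;			/* no-MMU-style fallback */
}

int main(void)
{
	printf("%lu\n", pick_area(0x1000, 0x200, NULL));
	return 0;
}
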
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b8e93a4..78c3c20 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -443,8 +443,11 @@ int pstore_register(struct pstore_info *psi)
pstore_get_records(0);
kmsg_dump_register(&pstore_dumper);
- pstore_register_console();
- pstore_register_ftrace();
+
+ if ((psi->flags & PSTORE_FLAGS_FRAGILE) == 0) {
+ pstore_register_console();
+ pstore_register_ftrace();
+ }
if (pstore_update_ms >= 0) {
pstore_timer.expires = jiffies +
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index b94f936..35e7d08 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -609,7 +609,7 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
struct sysfs_open_file *of;
- bool has_read, has_write, has_mmap;
+ bool has_read, has_write;
int error = -EACCES;
/* need attr_sd for attr and ops, its parent for kobj */
@@ -621,7 +621,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
has_read = battr->read || battr->mmap;
has_write = battr->write || battr->mmap;
- has_mmap = battr->mmap;
} else {
const struct sysfs_ops *ops = sysfs_file_ops(attr_sd);
@@ -633,7 +632,6 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
has_read = ops->show;
has_write = ops->store;
- has_mmap = false;
}
/* check perms and supported operations */
@@ -661,9 +659,9 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
* open file has a separate mutex, it's okay as long as those don't
* happen on the same file. At this point, we can't easily give
* each file a separate locking class. Let's differentiate on
- * whether the file has mmap or not for now.
+ * whether the file is bin or not for now.
*/
- if (has_mmap)
+ if (sysfs_is_bin(attr_sd))
mutex_init(&of->mutex);
else
mutex_init(&of->mutex);
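
The two textually identical mutex_init() calls above are not redundant: each expansion of mutex_init() references a static key defined at that call site, so lockdep assigns the bin-file and regular-file mutexes different classes. A minimal sketch of the per-call-site mechanism (this models the idea only, not the kernel's lockdep internals):

#include <stdio.h>

struct lock { const void *class_key; };

/* each expansion defines its own static object, so each call site
 * hands the lock a distinct identity */
#define lock_init(l) do { static char __key; (l)->class_key = &__key; } while (0)

int main(void)
{
	struct lock a, b;

	lock_init(&a);	/* call site 1 */
	lock_init(&b);	/* call site 2: textually identical, different key */

	printf("distinct classes: %s\n",
	       a.class_key != b.class_key ? "yes" : "no");
	return 0;
}
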
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 3ef11b2..3b2c14b 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -1635,7 +1635,7 @@ xfs_bmap_last_extent(
* blocks at the end of the file which do not start at the previous data block,
* we will try to align the new blocks at stripe unit boundaries.
*
- * Returns 0 in bma->aeof if the file (fork) is empty as any new write will be
+ * Returns 1 in bma->aeof if the file (fork) is empty as any new write will be
* at, or past the EOF.
*/
STATIC int
@@ -1650,9 +1650,14 @@ xfs_bmap_isaeof(
bma->aeof = 0;
error = xfs_bmap_last_extent(NULL, bma->ip, whichfork, &rec,
&is_empty);
- if (error || is_empty)
+ if (error)
return error;
+ if (is_empty) {
+ bma->aeof = 1;
+ return 0;
+ }
+
/*
	 * Check if we are allocating at or past the last extent, or at least into
* the last delayed allocated extent.
@@ -3643,10 +3648,19 @@ xfs_bmap_btalloc(
int isaligned;
int tryagain;
int error;
+ int stripe_align;
ASSERT(ap->length);
mp = ap->ip->i_mount;
+
+ /* stripe alignment for allocation is determined by mount parameters */
+ stripe_align = 0;
+ if (mp->m_swidth && (mp->m_flags & XFS_MOUNT_SWALLOC))
+ stripe_align = mp->m_swidth;
+ else if (mp->m_dalign)
+ stripe_align = mp->m_dalign;
+
align = ap->userdata ? xfs_get_extsz_hint(ap->ip) : 0;
if (unlikely(align)) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
@@ -3655,6 +3669,8 @@ xfs_bmap_btalloc(
ASSERT(!error);
ASSERT(ap->length);
}
+
+
nullfb = *ap->firstblock == NULLFSBLOCK;
fb_agno = nullfb ? NULLAGNUMBER : XFS_FSB_TO_AGNO(mp, *ap->firstblock);
if (nullfb) {
@@ -3730,7 +3746,7 @@ xfs_bmap_btalloc(
*/
if (!ap->flist->xbf_low && ap->aeof) {
if (!ap->offset) {
- args.alignment = mp->m_dalign;
+ args.alignment = stripe_align;
atype = args.type;
isaligned = 1;
/*
@@ -3755,13 +3771,13 @@ xfs_bmap_btalloc(
* of minlen+alignment+slop doesn't go up
* between the calls.
*/
- if (blen > mp->m_dalign && blen <= args.maxlen)
- nextminlen = blen - mp->m_dalign;
+ if (blen > stripe_align && blen <= args.maxlen)
+ nextminlen = blen - stripe_align;
else
nextminlen = args.minlen;
- if (nextminlen + mp->m_dalign > args.minlen + 1)
+ if (nextminlen + stripe_align > args.minlen + 1)
args.minalignslop =
- nextminlen + mp->m_dalign -
+ nextminlen + stripe_align -
args.minlen - 1;
else
args.minalignslop = 0;
@@ -3783,7 +3799,7 @@ xfs_bmap_btalloc(
*/
args.type = atype;
args.fsbno = ap->blkno;
- args.alignment = mp->m_dalign;
+ args.alignment = stripe_align;
args.minlen = nextminlen;
args.minalignslop = 0;
isaligned = 1;
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 5887e41..1394106 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1187,7 +1187,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNWRITE(bp);
XFS_BUF_READ(bp);
XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
- xfsbdstrat(mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
@@ -1200,7 +1205,12 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNDONE(bp);
XFS_BUF_UNREAD(bp);
XFS_BUF_WRITE(bp);
- xfsbdstrat(mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = XFS_ERROR(EIO);
+ break;
+ }
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp,
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77..afe7645 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -698,7 +698,11 @@ xfs_buf_read_uncached(
bp->b_flags |= XBF_READ;
bp->b_ops = ops;
- xfsbdstrat(target->bt_mount, bp);
+ if (XFS_FORCED_SHUTDOWN(target->bt_mount)) {
+ xfs_buf_relse(bp);
+ return NULL;
+ }
+ xfs_buf_iorequest(bp);
xfs_buf_iowait(bp);
return bp;
}
@@ -1089,7 +1093,7 @@ xfs_bioerror(
* This is meant for userdata errors; metadata bufs come with
* iodone functions attached, so that we can track down errors.
*/
-STATIC int
+int
xfs_bioerror_relse(
struct xfs_buf *bp)
{
@@ -1152,7 +1156,7 @@ xfs_bwrite(
ASSERT(xfs_buf_islocked(bp));
bp->b_flags |= XBF_WRITE;
- bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q);
+ bp->b_flags &= ~(XBF_ASYNC | XBF_READ | _XBF_DELWRI_Q | XBF_WRITE_FAIL);
xfs_bdstrat_cb(bp);
@@ -1164,25 +1168,6 @@ xfs_bwrite(
return error;
}
-/*
- * Wrapper around bdstrat so that we can stop data from going to disk in case
- * we are shutting down the filesystem. Typically user data goes thru this
- * path; one of the exceptions is the superblock.
- */
-void
-xfsbdstrat(
- struct xfs_mount *mp,
- struct xfs_buf *bp)
-{
- if (XFS_FORCED_SHUTDOWN(mp)) {
- trace_xfs_bdstrat_shut(bp, _RET_IP_);
- xfs_bioerror_relse(bp);
- return;
- }
-
- xfs_buf_iorequest(bp);
-}
-
STATIC void
_xfs_buf_ioend(
xfs_buf_t *bp,
@@ -1516,6 +1501,12 @@ xfs_wait_buftarg(
struct xfs_buf *bp;
bp = list_first_entry(&dispose, struct xfs_buf, b_lru);
list_del_init(&bp->b_lru);
+ if (bp->b_flags & XBF_WRITE_FAIL) {
+ xfs_alert(btp->bt_mount,
+"Corruption Alert: Buffer at block 0x%llx had permanent write failures!\n"
+"Please run xfs_repair to determine the extent of the problem.",
+ (long long)bp->b_bn);
+ }
xfs_buf_rele(bp);
}
if (loop++ != 0)
@@ -1799,7 +1790,7 @@ __xfs_buf_delwri_submit(
blk_start_plug(&plug);
list_for_each_entry_safe(bp, n, io_list, b_list) {
- bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC);
+ bp->b_flags &= ~(_XBF_DELWRI_Q | XBF_ASYNC | XBF_WRITE_FAIL);
bp->b_flags |= XBF_WRITE;
if (!wait) {
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index e656833..1cf21a4 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -45,6 +45,7 @@ typedef enum {
#define XBF_ASYNC (1 << 4) /* initiator will not wait for completion */
#define XBF_DONE (1 << 5) /* all pages in the buffer uptodate */
#define XBF_STALE (1 << 6) /* buffer has been staled, do not find it */
+#define XBF_WRITE_FAIL (1 << 24)/* async writes have failed on this buffer */
/* I/O hints for the BIO layer */
#define XBF_SYNCIO (1 << 10)/* treat this buffer as synchronous I/O */
@@ -70,6 +71,7 @@ typedef unsigned int xfs_buf_flags_t;
{ XBF_ASYNC, "ASYNC" }, \
{ XBF_DONE, "DONE" }, \
{ XBF_STALE, "STALE" }, \
+ { XBF_WRITE_FAIL, "WRITE_FAIL" }, \
{ XBF_SYNCIO, "SYNCIO" }, \
{ XBF_FUA, "FUA" }, \
{ XBF_FLUSH, "FLUSH" }, \
@@ -80,6 +82,7 @@ typedef unsigned int xfs_buf_flags_t;
{ _XBF_DELWRI_Q, "DELWRI_Q" }, \
{ _XBF_COMPOUND, "COMPOUND" }
+
/*
* Internal state flags.
*/
@@ -269,9 +272,6 @@ extern void xfs_buf_unlock(xfs_buf_t *);
/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);
-
-extern void xfsbdstrat(struct xfs_mount *, struct xfs_buf *);
-
extern void xfs_buf_ioend(xfs_buf_t *, int);
extern void xfs_buf_ioerror(xfs_buf_t *, int);
extern void xfs_buf_ioerror_alert(struct xfs_buf *, const char *func);
@@ -282,6 +282,8 @@ extern void xfs_buf_iomove(xfs_buf_t *, size_t, size_t, void *,
#define xfs_buf_zero(bp, off, len) \
xfs_buf_iomove((bp), (off), (len), NULL, XBRW_ZERO)
+extern int xfs_bioerror_relse(struct xfs_buf *);
+
static inline int xfs_buf_geterror(xfs_buf_t *bp)
{
return bp ? bp->b_error : ENOMEM;
@@ -301,7 +303,8 @@ extern void xfs_buf_terminate(void);
#define XFS_BUF_ZEROFLAGS(bp) \
((bp)->b_flags &= ~(XBF_READ|XBF_WRITE|XBF_ASYNC| \
- XBF_SYNCIO|XBF_FUA|XBF_FLUSH))
+ XBF_SYNCIO|XBF_FUA|XBF_FLUSH| \
+ XBF_WRITE_FAIL))
void xfs_buf_stale(struct xfs_buf *bp);
#define XFS_BUF_UNSTALE(bp) ((bp)->b_flags &= ~XBF_STALE)
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index a64f67b..2227b9b 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -496,6 +496,14 @@ xfs_buf_item_unpin(
}
}
+/*
+ * Buffer I/O error rate limiting. Limit it to no more than 10 messages per 30
+ * seconds, so as not to spam the logs on repeated detection of the same
+ * buffer being bad.
+ */
+
+DEFINE_RATELIMIT_STATE(xfs_buf_write_fail_rl_state, 30 * HZ, 10);
+
STATIC uint
xfs_buf_item_push(
struct xfs_log_item *lip,
@@ -524,6 +532,14 @@ xfs_buf_item_push(
trace_xfs_buf_item_push(bip);
+ /* has a previous flush failed due to IO errors? */
+ if ((bp->b_flags & XBF_WRITE_FAIL) &&
+ ___ratelimit(&xfs_buf_write_fail_rl_state, "XFS:")) {
+ xfs_warn(bp->b_target->bt_mount,
+"Detected failing async write on buffer block 0x%llx. Retrying async write.\n",
+ (long long)bp->b_bn);
+ }
+
if (!xfs_buf_delwri_queue(bp, buffer_list))
rval = XFS_ITEM_FLUSHING;
xfs_buf_unlock(bp);
@@ -1096,8 +1112,9 @@ xfs_buf_iodone_callbacks(
xfs_buf_ioerror(bp, 0); /* errno of 0 unsets the flag */
- if (!XFS_BUF_ISSTALE(bp)) {
- bp->b_flags |= XBF_WRITE | XBF_ASYNC | XBF_DONE;
+ if (!(bp->b_flags & (XBF_STALE|XBF_WRITE_FAIL))) {
+ bp->b_flags |= XBF_WRITE | XBF_ASYNC |
+ XBF_DONE | XBF_WRITE_FAIL;
xfs_buf_iorequest(bp);
} else {
xfs_buf_relse(bp);
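
DEFINE_RATELIMIT_STATE(..., 30 * HZ, 10) above allows at most 10 messages per 30-second window before the warning is suppressed. A hedged userspace sketch of the same interval/burst idea (the kernel's ___ratelimit() is more elaborate; this only models the windowed limit):

#include <stdio.h>
#include <time.h>

struct ratelimit {
	time_t window_start;
	int interval;	/* seconds per window */
	int burst;	/* max messages per window */
	int printed;
};

static int ratelimit_ok(struct ratelimit *rl)
{
	time_t now = time(NULL);

	if (now - rl->window_start >= rl->interval) {
		rl->window_start = now;	/* open a new window */
		rl->printed = 0;
	}
	if (rl->printed >= rl->burst)
		return 0;		/* suppressed */
	rl->printed++;
	return 1;
}

int main(void)
{
	struct ratelimit rl = { .window_start = 0, .interval = 30, .burst = 10 };

	for (int i = 0; i < 25; i++)
		if (ratelimit_ok(&rl))
			printf("retrying failed async write (msg %d)\n", i);
	return 0;
}
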
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 56369d4..48c7d18 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -2067,12 +2067,12 @@ xfs_dir2_node_lookup(
*/
int /* error */
xfs_dir2_node_removename(
- xfs_da_args_t *args) /* operation arguments */
+ struct xfs_da_args *args) /* operation arguments */
{
- xfs_da_state_blk_t *blk; /* leaf block */
+ struct xfs_da_state_blk *blk; /* leaf block */
int error; /* error return value */
int rval; /* operation return value */
- xfs_da_state_t *state; /* btree cursor */
+ struct xfs_da_state *state; /* btree cursor */
trace_xfs_dir2_node_removename(args);
@@ -2084,19 +2084,18 @@ xfs_dir2_node_removename(
state->mp = args->dp->i_mount;
state->blocksize = state->mp->m_dirblksize;
state->node_ents = state->mp->m_dir_node_ents;
- /*
- * Look up the entry we're deleting, set up the cursor.
- */
+
+ /* Look up the entry we're deleting, set up the cursor. */
error = xfs_da3_node_lookup_int(state, &rval);
if (error)
- rval = error;
- /*
- * Didn't find it, upper layer screwed up.
- */
+ goto out_free;
+
+ /* Didn't find it, upper layer screwed up. */
if (rval != EEXIST) {
- xfs_da_state_free(state);
- return rval;
+ error = rval;
+ goto out_free;
}
+
blk = &state->path.blk[state->path.active - 1];
ASSERT(blk->magic == XFS_DIR2_LEAFN_MAGIC);
ASSERT(state->extravalid);
@@ -2107,7 +2106,7 @@ xfs_dir2_node_removename(
error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
&state->extrablk, &rval);
if (error)
- return error;
+ goto out_free;
/*
* Fix the hash values up the btree.
*/
@@ -2122,6 +2121,7 @@ xfs_dir2_node_removename(
*/
if (!error)
error = xfs_dir2_node_to_leaf(state);
+out_free:
xfs_da_state_free(state);
return error;
}
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 8367d6d..4f11ef0 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -157,7 +157,7 @@ xfs_ioc_trim(
struct xfs_mount *mp,
struct fstrim_range __user *urange)
{
- struct request_queue *q = mp->m_ddev_targp->bt_bdev->bd_disk->queue;
+ struct request_queue *q = bdev_get_queue(mp->m_ddev_targp->bt_bdev);
unsigned int granularity = q->limits.discard_granularity;
struct fstrim_range range;
xfs_daddr_t start, end, minlen;
@@ -180,7 +180,8 @@ xfs_ioc_trim(
* matter as trimming blocks is an advisory interface.
*/
if (range.start >= XFS_FSB_TO_B(mp, mp->m_sb.sb_dblocks) ||
- range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)))
+ range.minlen > XFS_FSB_TO_B(mp, XFS_ALLOC_AG_MAX_USABLE(mp)) ||
+ range.len < mp->m_sb.sb_blocksize)
return -XFS_ERROR(EINVAL);
start = BTOBB(range.start);
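
The extra check above rejects FITRIM requests shorter than one filesystem block, alongside the existing start and minlen bounds. A hedged sketch of the validation as it reads after the patch (sizes and constants are made up for illustration):

#include <stdio.h>
#include <stdint.h>
#include <errno.h>

struct trim_range { uint64_t start, len, minlen; };

static int validate_trim(const struct trim_range *r,
			 uint64_t fs_size_bytes, uint64_t block_size,
			 uint64_t max_extent_bytes)
{
	if (r->start >= fs_size_bytes ||
	    r->minlen > max_extent_bytes ||
	    r->len < block_size)
		return -EINVAL;
	return 0;
}

int main(void)
{
	struct trim_range r = { .start = 0, .len = 512, .minlen = 0 };

	/* 1 GiB fs with 4 KiB blocks: a 512-byte trim request is rejected */
	printf("rv=%d\n", validate_trim(&r, 1ULL << 30, 4096, 1ULL << 28));
	return 0;
}
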
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index a6e54b3..02fb943 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -220,6 +220,8 @@ xfs_growfs_data_private(
*/
nfree = 0;
for (agno = nagcount - 1; agno >= oagcount; agno--, new -= agsize) {
+ __be32 *agfl_bno;
+
/*
* AG freespace header block
*/
@@ -279,8 +281,10 @@ xfs_growfs_data_private(
agfl->agfl_seqno = cpu_to_be32(agno);
uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_uuid);
}
+
+ agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
for (bucket = 0; bucket < XFS_AGFL_SIZE(mp); bucket++)
- agfl->agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
+ agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
error = xfs_bwrite(bp);
xfs_buf_relse(bp);
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 4d61340..33ad9a7 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -442,7 +442,8 @@ xfs_attrlist_by_handle(
return -XFS_ERROR(EPERM);
if (copy_from_user(&al_hreq, arg, sizeof(xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT);
- if (al_hreq.buflen > XATTR_LIST_MAX)
+ if (al_hreq.buflen < sizeof(struct attrlist) ||
+ al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL);
/*
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index e8fb123..a7992f8 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -356,7 +356,8 @@ xfs_compat_attrlist_by_handle(
if (copy_from_user(&al_hreq, arg,
sizeof(compat_xfs_fsop_attrlist_handlereq_t)))
return -XFS_ERROR(EFAULT);
- if (al_hreq.buflen > XATTR_LIST_MAX)
+ if (al_hreq.buflen < sizeof(struct attrlist) ||
+ al_hreq.buflen > XATTR_LIST_MAX)
return -XFS_ERROR(EINVAL);
/*
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 27e0e54..104455b 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -618,7 +618,8 @@ xfs_setattr_nonsize(
}
if (!gid_eq(igid, gid)) {
if (XFS_IS_QUOTA_RUNNING(mp) && XFS_IS_GQUOTA_ON(mp)) {
- ASSERT(!XFS_IS_PQUOTA_ON(mp));
+ ASSERT(xfs_sb_version_has_pquotino(&mp->m_sb) ||
+ !XFS_IS_PQUOTA_ON(mp));
ASSERT(mask & ATTR_GID);
ASSERT(gdqp);
olddquot2 = xfs_qm_vop_chown(tp, ip,
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index b6b669d..eae1692 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -193,7 +193,10 @@ xlog_bread_noalign(
bp->b_io_length = nbblks;
bp->b_error = 0;
- xfsbdstrat(log->l_mp, bp);
+ if (XFS_FORCED_SHUTDOWN(log->l_mp))
+ return XFS_ERROR(EIO);
+
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error)
xfs_buf_ioerror_alert(bp, __func__);
@@ -4397,7 +4400,13 @@ xlog_do_recover(
XFS_BUF_READ(bp);
XFS_BUF_UNASYNC(bp);
bp->b_ops = &xfs_sb_buf_ops;
- xfsbdstrat(log->l_mp, bp);
+
+ if (XFS_FORCED_SHUTDOWN(log->l_mp)) {
+ xfs_buf_relse(bp);
+ return XFS_ERROR(EIO);
+ }
+
+ xfs_buf_iorequest(bp);
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 14a4996..dd88f0e 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -134,8 +134,6 @@ xfs_qm_dqpurge(
{
struct xfs_mount *mp = dqp->q_mount;
struct xfs_quotainfo *qi = mp->m_quotainfo;
- struct xfs_dquot *gdqp = NULL;
- struct xfs_dquot *pdqp = NULL;
xfs_dqlock(dqp);
if ((dqp->dq_flags & XFS_DQ_FREEING) || dqp->q_nrefs != 0) {
@@ -143,21 +141,6 @@ xfs_qm_dqpurge(
return EAGAIN;
}
- /*
- * If this quota has a hint attached, prepare for releasing it now.
- */
- gdqp = dqp->q_gdquot;
- if (gdqp) {
- xfs_dqlock(gdqp);
- dqp->q_gdquot = NULL;
- }
-
- pdqp = dqp->q_pdquot;
- if (pdqp) {
- xfs_dqlock(pdqp);
- dqp->q_pdquot = NULL;
- }
-
dqp->dq_flags |= XFS_DQ_FREEING;
xfs_dqflock(dqp);
@@ -206,11 +189,47 @@ xfs_qm_dqpurge(
XFS_STATS_DEC(xs_qm_dquot_unused);
xfs_qm_dqdestroy(dqp);
+ return 0;
+}
+
+/*
+ * Release the group or project dquot pointers the user dquots may be carrying
+ * around as a hint, and proceed to purge the user dquot cache if requested.
+ */
+STATIC int
+xfs_qm_dqpurge_hints(
+ struct xfs_dquot *dqp,
+ void *data)
+{
+ struct xfs_dquot *gdqp = NULL;
+ struct xfs_dquot *pdqp = NULL;
+ uint flags = *((uint *)data);
+
+ xfs_dqlock(dqp);
+ if (dqp->dq_flags & XFS_DQ_FREEING) {
+ xfs_dqunlock(dqp);
+ return EAGAIN;
+ }
+
+ /* If this quota has a hint attached, prepare for releasing it now */
+ gdqp = dqp->q_gdquot;
+ if (gdqp)
+ dqp->q_gdquot = NULL;
+
+ pdqp = dqp->q_pdquot;
+ if (pdqp)
+ dqp->q_pdquot = NULL;
+
+ xfs_dqunlock(dqp);
if (gdqp)
- xfs_qm_dqput(gdqp);
+ xfs_qm_dqrele(gdqp);
if (pdqp)
- xfs_qm_dqput(pdqp);
+ xfs_qm_dqrele(pdqp);
+
+ if (flags & XFS_QMOPT_UQUOTA)
+ return xfs_qm_dqpurge(dqp, NULL);
+
return 0;
}
@@ -222,8 +241,18 @@ xfs_qm_dqpurge_all(
struct xfs_mount *mp,
uint flags)
{
- if (flags & XFS_QMOPT_UQUOTA)
- xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge, NULL);
+ /*
+	 * We have to release the group/project dquot hints from the user
+	 * dquots first if they are present; otherwise we would run into an
+	 * infinite loop while walking the radix tree to purge the other types
+	 * of dquots, since their refcount is not zero while a user dquot
+	 * still refers to them as a hint.
+	 *
+	 * Calling the special xfs_qm_dqpurge_hints() will also end up going
+	 * through the general xfs_qm_dqpurge() for the user dquot cache if
+	 * requested.
+ */
+ xfs_qm_dquot_walk(mp, XFS_DQ_USER, xfs_qm_dqpurge_hints, &flags);
+
if (flags & XFS_QMOPT_GQUOTA)
xfs_qm_dquot_walk(mp, XFS_DQ_GROUP, xfs_qm_dqpurge, NULL);
if (flags & XFS_QMOPT_PQUOTA)
@@ -2082,24 +2111,21 @@ xfs_qm_vop_create_dqattach(
ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
ASSERT(XFS_IS_QUOTA_RUNNING(mp));
- if (udqp) {
+ if (udqp && XFS_IS_UQUOTA_ON(mp)) {
ASSERT(ip->i_udquot == NULL);
- ASSERT(XFS_IS_UQUOTA_ON(mp));
ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));
ip->i_udquot = xfs_qm_dqhold(udqp);
xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
}
- if (gdqp) {
+ if (gdqp && XFS_IS_GQUOTA_ON(mp)) {
ASSERT(ip->i_gdquot == NULL);
- ASSERT(XFS_IS_GQUOTA_ON(mp));
ASSERT(ip->i_d.di_gid == be32_to_cpu(gdqp->q_core.d_id));
ip->i_gdquot = xfs_qm_dqhold(gdqp);
xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
}
- if (pdqp) {
+ if (pdqp && XFS_IS_PQUOTA_ON(mp)) {
ASSERT(ip->i_pdquot == NULL);
- ASSERT(XFS_IS_PQUOTA_ON(mp));
ASSERT(xfs_get_projid(ip) == be32_to_cpu(pdqp->q_core.d_id));
ip->i_pdquot = xfs_qm_dqhold(pdqp);
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index c035d11..647b6f1 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -314,7 +314,18 @@ xfs_trans_read_buf_map(
ASSERT(bp->b_iodone == NULL);
XFS_BUF_READ(bp);
bp->b_ops = ops;
- xfsbdstrat(tp->t_mountp, bp);
+
+ /*
+ * XXX(hch): clean up the error handling here to be less
+	 * of a mess.
+ */
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ trace_xfs_bdstrat_shut(bp, _RET_IP_);
+ xfs_bioerror_relse(bp);
+ } else {
+ xfs_buf_iorequest(bp);
+ }
+
error = xfs_buf_iowait(bp);
if (error) {
xfs_buf_ioerror_alert(bp, __func__);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index c602c77..ddabed1 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -169,7 +169,8 @@ struct acpi_device_flags {
u32 ejectable:1;
u32 power_manageable:1;
u32 match_driver:1;
- u32 reserved:27;
+ u32 no_hotplug:1;
+ u32 reserved:26;
};
/* File System */
@@ -344,6 +345,7 @@ extern struct kobject *acpi_kobj;
extern int acpi_bus_generate_netlink_event(const char*, const char*, u8, int);
void acpi_bus_private_data_handler(acpi_handle, void *);
int acpi_bus_get_private_data(acpi_handle, void **);
+void acpi_bus_no_hotplug(acpi_handle handle);
extern int acpi_notifier_call_chain(struct acpi_device *, u32, u32);
extern int register_acpi_notifier(struct notifier_block *);
extern int unregister_acpi_notifier(struct notifier_block *);
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f330d28..db09234 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -217,7 +217,7 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
#endif
#ifndef pte_accessible
-# define pte_accessible(pte) ((void)(pte),1)
+# define pte_accessible(mm, pte) ((void)(pte), 1)
#endif
#ifndef flush_tlb_fix_spurious_fault
@@ -599,11 +599,10 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
barrier();
#endif
- if (pmd_none(pmdval))
+ if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
return 1;
if (unlikely(pmd_bad(pmdval))) {
- if (!pmd_trans_huge(pmdval))
- pmd_clear_bad(pmd);
+ pmd_clear_bad(pmd);
return 1;
}
return 0;
diff --git a/include/asm-generic/preempt.h b/include/asm-generic/preempt.h
index ddf2b42..1cd3f5d 100644
--- a/include/asm-generic/preempt.h
+++ b/include/asm-generic/preempt.h
@@ -3,13 +3,11 @@
#include <linux/thread_info.h>
-/*
- * We mask the PREEMPT_NEED_RESCHED bit so as not to confuse all current users
- * that think a non-zero value indicates we cannot preempt.
- */
+#define PREEMPT_ENABLED (0)
+
static __always_inline int preempt_count(void)
{
- return current_thread_info()->preempt_count & ~PREEMPT_NEED_RESCHED;
+ return current_thread_info()->preempt_count;
}
static __always_inline int *preempt_count_ptr(void)
@@ -17,11 +15,6 @@ static __always_inline int *preempt_count_ptr(void)
return &current_thread_info()->preempt_count;
}
-/*
- * We now loose PREEMPT_NEED_RESCHED and cause an extra reschedule; however the
- * alternative is loosing a reschedule. Better schedule too often -- also this
- * should be a very rare operation.
- */
static __always_inline void preempt_count_set(int pc)
{
*preempt_count_ptr() = pc;
@@ -41,28 +34,17 @@ static __always_inline void preempt_count_set(int pc)
task_thread_info(p)->preempt_count = PREEMPT_ENABLED; \
} while (0)
-/*
- * We fold the NEED_RESCHED bit into the preempt count such that
- * preempt_enable() can decrement and test for needing to reschedule with a
- * single instruction.
- *
- * We invert the actual bit, so that when the decrement hits 0 we know we both
- * need to resched (the bit is cleared) and can resched (no preempt count).
- */
-
static __always_inline void set_preempt_need_resched(void)
{
- *preempt_count_ptr() &= ~PREEMPT_NEED_RESCHED;
}
static __always_inline void clear_preempt_need_resched(void)
{
- *preempt_count_ptr() |= PREEMPT_NEED_RESCHED;
}
static __always_inline bool test_preempt_need_resched(void)
{
- return !(*preempt_count_ptr() & PREEMPT_NEED_RESCHED);
+ return false;
}
/*
@@ -81,7 +63,12 @@ static __always_inline void __preempt_count_sub(int val)
static __always_inline bool __preempt_count_dec_and_test(void)
{
- return !--*preempt_count_ptr();
+ /*
+	 * Because load-store architectures cannot do per-cpu atomic
+	 * operations, we cannot use PREEMPT_NEED_RESCHED here; it might
+	 * get lost.
+ */
+ return !--*preempt_count_ptr() && tif_need_resched();
}
/*
@@ -89,7 +76,7 @@ static __always_inline bool __preempt_count_dec_and_test(void)
*/
static __always_inline bool should_resched(void)
{
- return unlikely(!*preempt_count_ptr());
+ return unlikely(!preempt_count() && tif_need_resched());
}
#ifdef CONFIG_PREEMPT
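
After this change, __preempt_count_dec_and_test() reports "reschedule now" only when the count drops to zero and TIF_NEED_RESCHED is set, rather than folding the flag into the count itself. A hedged single-threaded sketch of that predicate (the real code operates on per-task thread_info state):

#include <stdio.h>
#include <stdbool.h>

static int preempt_count = 1;		/* preemption currently disabled once */
static bool tif_need_resched = true;	/* scheduler has asked for a resched */

static bool preempt_count_dec_and_test(void)
{
	/* resched only when we drop the last disable *and* a resched is pending */
	return !--preempt_count && tif_need_resched;
}

int main(void)
{
	if (preempt_count_dec_and_test())
		printf("count reached zero with need_resched set: schedule()\n");
	else
		printf("no reschedule needed\n");
	return 0;
}
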
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
index 3f21f1b..d3909ef 100644
--- a/include/asm-generic/word-at-a-time.h
+++ b/include/asm-generic/word-at-a-time.h
@@ -49,4 +49,12 @@ static inline bool has_zero(unsigned long val, unsigned long *data, const struct
return (val + c->high_bits) & ~rhs;
}
+#ifndef zero_bytemask
+#ifdef CONFIG_64BIT
+#define zero_bytemask(mask) (~0ul << fls64(mask))
+#else
+#define zero_bytemask(mask) (~0ul << fls(mask))
+#endif /* CONFIG_64BIT */
+#endif /* zero_bytemask */
+
#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 87578c1..49376ae 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -600,7 +600,7 @@
{0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
+ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
{0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
diff --git a/include/linux/assoc_array.h b/include/linux/assoc_array.h
index 9a193b8..a89df3b 100644
--- a/include/linux/assoc_array.h
+++ b/include/linux/assoc_array.h
@@ -41,10 +41,10 @@ struct assoc_array_ops {
/* Is this the object we're looking for? */
bool (*compare_object)(const void *object, const void *index_key);
- /* How different are two objects, to a bit position in their keys? (or
- * -1 if they're the same)
+ /* How different is an object from an index key, to a bit position in
+ * their keys? (or -1 if they're the same)
*/
- int (*diff_objects)(const void *a, const void *b);
+ int (*diff_objects)(const void *object, const void *index_key);
/* Method to free an object. */
void (*free_object)(void *object);
diff --git a/include/linux/auxvec.h b/include/linux/auxvec.h
index 669fef5..3e0fbe4 100644
--- a/include/linux/auxvec.h
+++ b/include/linux/auxvec.h
@@ -3,6 +3,6 @@
#include <uapi/linux/auxvec.h>
-#define AT_VECTOR_SIZE_BASE 19 /* NEW_AUX_ENT entries in auxiliary table */
+#define AT_VECTOR_SIZE_BASE 20 /* NEW_AUX_ENT entries in auxiliary table */
/* number of "#define AT_.*" above, minus {AT_NULL, AT_IGNORE, AT_NOTELF} */
#endif /* _LINUX_AUXVEC_H */
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 973ce10..dc1bd3d 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -28,8 +28,6 @@
#endif
-#define uninitialized_var(x) x
-
#ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 57e87e7..bf72e9a 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -29,8 +29,10 @@ struct vfsmount;
/* The hash is always the low bits of hash_len */
#ifdef __LITTLE_ENDIAN
#define HASH_LEN_DECLARE u32 hash; u32 len;
+ #define bytemask_from_count(cnt) (~(~0ul << (cnt)*8))
#else
#define HASH_LEN_DECLARE u32 len; u32 hash;
+ #define bytemask_from_count(cnt) (~(~0ul >> (cnt)*8))
#endif
/*
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 9649ff0..bd7e987 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -142,7 +142,10 @@ static inline int dequeue_hwpoisoned_huge_page(struct page *page)
return 0;
}
-#define isolate_huge_page(p, l) false
+static inline bool isolate_huge_page(struct page *page, struct list_head *list)
+{
+ return false;
+}
#define putback_active_hugepage(p) do {} while (0)
#define is_hugepage_active(x) false
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 5d89d1b..c56c350 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -4,6 +4,7 @@
#include <uapi/linux/ipv6.h>
#define ipv6_optlen(p) (((p)->hdrlen+1) << 3)
+#define ipv6_authlen(p) (((p)->hdrlen+2) << 2)
/*
* This structure contains configuration options per IPv6 link.
*/
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d4e98d1..ecb8754 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -193,7 +193,8 @@ extern int _cond_resched(void);
(__x < 0) ? -__x : __x; \
})
-#if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
+#if defined(CONFIG_MMU) && \
+ (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
void might_fault(void);
#else
static inline void might_fault(void) { }
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d78d28a..5fd33dc 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -198,6 +198,9 @@ extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
extern size_t vmcoreinfo_size;
extern size_t vmcoreinfo_max_size;
+/* flag to track if kexec reboot is in progress */
+extern bool kexec_in_progress;
+
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
unsigned long long *crash_size, unsigned long long *crash_base);
int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 0e23c26..9b50337 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -418,6 +418,7 @@ enum {
ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */
ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */
ATA_HORKAGE_ATAPI_DMADIR = (1 << 18), /* device requires dmadir */
+ ATA_HORKAGE_NO_NCQ_TRIM = (1 << 19), /* don't use queued TRIM */
/* DMA mask for user DMA control: User visible values; DO NOT
renumber */
diff --git a/include/linux/lockref.h b/include/linux/lockref.h
index c8929c3..4bfde0e 100644
--- a/include/linux/lockref.h
+++ b/include/linux/lockref.h
@@ -19,7 +19,7 @@
#define USE_CMPXCHG_LOCKREF \
(IS_ENABLED(CONFIG_ARCH_USE_CMPXCHG_LOCKREF) && \
- IS_ENABLED(CONFIG_SMP) && !BLOATED_SPINLOCKS)
+ IS_ENABLED(CONFIG_SMP) && SPINLOCK_SIZE <= 4)
struct lockref {
union {
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 69ed5f5..c45c089 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -133,4 +133,34 @@ __iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
return ret;
}
+#if defined(CONFIG_ARCH_SUPPORTS_INT128) && defined(__SIZEOF_INT128__)
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u32_shr */
+
+#else
+
+#ifndef mul_u64_u32_shr
+static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
+{
+ u32 ah, al;
+ u64 ret;
+
+ al = a;
+ ah = a >> 32;
+
+ ret = ((u64)al * mul) >> shift;
+ if (ah)
+ ret += ((u64)ah * mul) << (32 - shift);
+
+ return ret;
+}
+#endif /* mul_u64_u32_shr */
+
+#endif
+
#endif /* _LINUX_MATH64_H */
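
mul_u64_u32_shr() computes (a * mul) >> shift with a wide intermediate: when the toolchain provides __int128 the cast does it directly, otherwise the fallback splits a into 32-bit halves and shifts each partial product. A hedged test that the two paths agree (requires a 64-bit GCC/Clang for __int128; the split identity holds for shift values from 1 to 32):

#include <stdio.h>
#include <stdint.h>

static uint64_t mul_shr_128(uint64_t a, uint32_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

static uint64_t mul_shr_split(uint64_t a, uint32_t mul, unsigned int shift)
{
	uint32_t ah = a >> 32, al = (uint32_t)a;
	uint64_t ret = ((uint64_t)al * mul) >> shift;

	/* the high half contributes (ah * mul) << (32 - shift) */
	if (ah)
		ret += ((uint64_t)ah * mul) << (32 - shift);
	return ret;
}

int main(void)
{
	uint64_t a = 0x123456789abcdefULL;
	uint32_t mul = 1000000;

	for (unsigned int shift = 1; shift <= 32; shift++)
		if (mul_shr_128(a, mul, shift) != mul_shr_split(a, mul, shift))
			printf("mismatch at shift %u\n", shift);
	printf("done\n");
	return 0;
}
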
diff --git a/include/linux/mfd/samsung/core.h b/include/linux/mfd/samsung/core.h
index 2d0c907..cab2dd2 100644
--- a/include/linux/mfd/samsung/core.h
+++ b/include/linux/mfd/samsung/core.h
@@ -39,7 +39,8 @@ enum sec_device_type {
struct sec_pmic_dev {
struct device *dev;
struct sec_platform_data *pdata;
- struct regmap *regmap;
+ struct regmap *regmap_pmic;
+ struct regmap *regmap_rtc;
struct i2c_client *i2c;
struct i2c_client *rtc;
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index ad05ce6..2e5b194 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -22,6 +22,8 @@
#define PHY_ID_KSZ8021 0x00221555
#define PHY_ID_KSZ8031 0x00221556
#define PHY_ID_KSZ8041 0x00221510
+/* undocumented */
+#define PHY_ID_KSZ8041RNLI 0x00221537
#define PHY_ID_KSZ8051 0x00221550
/* same id: ks8001 Rev. A/B, and ks8721 Rev 3. */
#define PHY_ID_KSZ8001 0x0022161A
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index f5096b5..f015c05 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -55,7 +55,8 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page);
extern int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode);
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count);
#else
static inline void putback_lru_pages(struct list_head *l) {}
@@ -90,10 +91,19 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
#ifdef CONFIG_NUMA_BALANCING
+extern bool pmd_trans_migrating(pmd_t pmd);
+extern void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd);
extern int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node);
extern bool migrate_ratelimited(int node);
#else
+static inline bool pmd_trans_migrating(pmd_t pmd)
+{
+ return false;
+}
+static inline void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+}
static inline int migrate_misplaced_page(struct page *page,
struct vm_area_struct *vma, int node)
{
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 1cedd00..3552717 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1317,7 +1317,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
extern bool ptlock_alloc(struct page *page);
extern void ptlock_free(struct page *page);
@@ -1325,7 +1325,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return page->ptl;
}
-#else /* BLOATED_SPINLOCKS */
+#else /* ALLOC_SPLIT_PTLOCKS */
static inline bool ptlock_alloc(struct page *page)
{
return true;
@@ -1339,7 +1339,7 @@ static inline spinlock_t *ptlock_ptr(struct page *page)
{
return &page->ptl;
}
-#endif /* BLOATED_SPINLOCKS */
+#endif /* ALLOC_SPLIT_PTLOCKS */
static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
{
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index bd29941..290901a 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -26,6 +26,7 @@ struct address_space;
#define USE_SPLIT_PTE_PTLOCKS (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)
#define USE_SPLIT_PMD_PTLOCKS (USE_SPLIT_PTE_PTLOCKS && \
IS_ENABLED(CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK))
+#define ALLOC_SPLIT_PTLOCKS (SPINLOCK_SIZE > BITS_PER_LONG/8)
/*
* Each physical page in the system has a struct page associated with
@@ -155,7 +156,7 @@ struct page {
* system if PG_buddy is set.
*/
#if USE_SPLIT_PTE_PTLOCKS
-#if BLOATED_SPINLOCKS
+#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
#else
spinlock_t ptl;
@@ -443,6 +444,14 @@ struct mm_struct {
/* numa_scan_seq prevents two threads setting pte_numa */
int numa_scan_seq;
#endif
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+ /*
+ * An operation with batched TLB flushing is going on. Anything that
+ * can move process memory needs to flush the TLB when moving a
+ * PROT_NONE or PROT_NUMA mapped page.
+ */
+ bool tlb_flush_pending;
+#endif
struct uprobes_state uprobes_state;
};
@@ -459,4 +468,45 @@ static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
return mm->cpu_vm_mask_var;
}
+#if defined(CONFIG_NUMA_BALANCING) || defined(CONFIG_COMPACTION)
+/*
+ * Memory barriers to keep this state in sync are graciously provided by
+ * the page table locks, outside of which no page table modifications happen.
+ * The barriers below prevent the compiler from re-ordering the instructions
+ * around the memory barriers that are already present in the code.
+ */
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ return mm->tlb_flush_pending;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+ mm->tlb_flush_pending = true;
+
+ /*
+ * Guarantee that the tlb_flush_pending store does not leak into the
+ * critical section updating the page tables
+ */
+ smp_mb__before_spinlock();
+}
+/* Clearing is done after a TLB flush, which also provides a barrier. */
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+ barrier();
+ mm->tlb_flush_pending = false;
+}
+#else
+static inline bool mm_tlb_flush_pending(struct mm_struct *mm)
+{
+ return false;
+}
+static inline void set_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+static inline void clear_tlb_flush_pending(struct mm_struct *mm)
+{
+}
+#endif
+
#endif /* _LINUX_MM_TYPES_H */
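
The new helpers amount to a flag plus ordering: set_tlb_flush_pending() must be visible before the page-table updates that follow under the lock, and readers check it while holding the same lock. A hedged C11 sketch of that shape, using atomics and a mutex purely as stand-ins for the kernel's barrier()/smp_mb__before_spinlock() and page-table lock:

#include <stdatomic.h>
#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

struct mm {
	pthread_mutex_t page_table_lock;
	atomic_bool tlb_flush_pending;
};

static void set_tlb_flush_pending(struct mm *mm)
{
	/* publish the flag before entering the critical section */
	atomic_store_explicit(&mm->tlb_flush_pending, true, memory_order_release);
}

static bool mm_tlb_flush_pending(struct mm *mm)
{
	/* callers hold page_table_lock, mirroring the kernel helper */
	return atomic_load_explicit(&mm->tlb_flush_pending, memory_order_acquire);
}

static void clear_tlb_flush_pending(struct mm *mm)
{
	/* cleared only after the TLB flush has completed */
	atomic_store_explicit(&mm->tlb_flush_pending, false, memory_order_release);
}

int main(void)
{
	struct mm mm;

	pthread_mutex_init(&mm.page_table_lock, NULL);
	atomic_init(&mm.tlb_flush_pending, false);

	set_tlb_flush_pending(&mm);
	pthread_mutex_lock(&mm.page_table_lock);
	printf("flush pending while updating ptes: %d\n", mm_tlb_flush_pending(&mm));
	pthread_mutex_unlock(&mm.page_table_lock);
	clear_tlb_flush_pending(&mm);
	return 0;
}
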
diff --git a/include/linux/net.h b/include/linux/net.h
index 4bcee94..69be3e6 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -181,7 +181,7 @@ struct proto_ops {
int offset, size_t size, int flags);
ssize_t (*splice_read)(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len, unsigned int flags);
- void (*set_peek_off)(struct sock *sk, int val);
+ int (*set_peek_off)(struct sock *sk, int val);
};
#define DECLARE_SOCKADDR(type, dst, src) \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 7f0ed42..5faaadb 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -1255,7 +1255,7 @@ struct net_device {
unsigned char perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
unsigned char addr_assign_type; /* hw address assignment type */
unsigned char addr_len; /* hardware address length */
- unsigned char neigh_priv_len;
+ unsigned short neigh_priv_len;
unsigned short dev_id; /* Used to differentiate devices
* that share the same link
* layer address
@@ -1912,6 +1912,15 @@ static inline int dev_parse_header(const struct sk_buff *skb,
return dev->header_ops->parse(skb, haddr);
}
+static inline int dev_rebuild_header(struct sk_buff *skb)
+{
+ const struct net_device *dev = skb->dev;
+
+ if (!dev->header_ops || !dev->header_ops->rebuild)
+ return 0;
+ return dev->header_ops->rebuild(skb);
+}
+
typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int len);
int register_gifconf(unsigned int family, gifconf_func_t *gifconf);
static inline int unregister_gifconf(unsigned int family)
@@ -3008,6 +3017,19 @@ static inline void netif_set_gso_max_size(struct net_device *dev,
dev->gso_max_size = size;
}
+static inline void skb_gso_error_unwind(struct sk_buff *skb, __be16 protocol,
+ int pulled_hlen, u16 mac_offset,
+ int mac_len)
+{
+ skb->protocol = protocol;
+ skb->encapsulation = 1;
+ skb_push(skb, pulled_hlen);
+ skb_reset_transport_header(skb);
+ skb->mac_header = mac_offset;
+ skb->network_header = skb->mac_header + mac_len;
+ skb->mac_len = mac_len;
+}
+
static inline bool netif_is_macvlan(struct net_device *dev)
{
return dev->priv_flags & IFF_MACVLAN;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 1084a15..a13d682 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -960,6 +960,7 @@ void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
+bool pci_device_is_present(struct pci_dev *pdev);
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
@@ -1567,65 +1568,65 @@ enum pci_fixup_pass {
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
- static const struct pci_fixup __pci_fixup_##name __used \
+ static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_HEADER(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_FINAL(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_ENABLE(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, class, class_shift, hook)
+ hook, vendor, device, class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, class, \
+ resume##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_RESUME_EARLY(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
class, class_shift, hook)
#define DECLARE_PCI_FIXUP_CLASS_SUSPEND(vendor, device, class, \
class_shift, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, class, \
+ suspend##hook, vendor, device, class, \
class_shift, hook)
#define DECLARE_PCI_FIXUP_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_early, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_HEADER(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_header, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_FINAL(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_final, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_ENABLE(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_enable, \
- vendor##device##hook, vendor, device, PCI_ANY_ID, 0, hook)
+ hook, vendor, device, PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume, \
- resume##vendor##device##hook, vendor, device, \
+ resume##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_RESUME_EARLY(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_resume_early, \
- resume_early##vendor##device##hook, vendor, device, \
+ resume_early##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#define DECLARE_PCI_FIXUP_SUSPEND(vendor, device, hook) \
DECLARE_PCI_FIXUP_SECTION(.pci_fixup_suspend, \
- suspend##vendor##device##hook, vendor, device, \
+ suspend##hook, vendor, device, \
PCI_ANY_ID, 0, hook)
#ifdef CONFIG_PCI_QUIRKS
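
The macro change above stops pasting vendor/device IDs into the fixup's variable name and instead relies on __PASTE(..., __LINE__) to keep the static struct names unique, so the same hook can appear in several declarations without clashing. A hedged sketch of the line-number pasting trick (toy types, not the PCI machinery):

#include <stdio.h>

#define __PASTE2(a, b) a##b
#define __PASTE(a, b) __PASTE2(a, b)

struct fixup { unsigned vendor, device; void (*hook)(void); };

static void quirk(void) { puts("quirk applied"); }

/* each use defines a distinct static object, even with the same hook */
#define DECLARE_FIXUP(vendor, device, hook) \
	static const struct fixup \
	__PASTE(__fixup_##hook##_, __LINE__) __attribute__((used)) = \
		{ vendor, device, hook }

DECLARE_FIXUP(0x8086, 0x1234, quirk);
DECLARE_FIXUP(0x8086, 0x5678, quirk);	/* no redefinition: different __LINE__ */

int main(void)
{
	quirk();
	return 0;
}
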
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index 57e890a..a5fc7d0 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -69,6 +69,7 @@
__PCPU_DUMMY_ATTRS char __pcpu_scope_##name; \
extern __PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
__PCPU_DUMMY_ATTRS char __pcpu_unique_##name; \
+ extern __PCPU_ATTRS(sec) __typeof__(type) name; \
__PCPU_ATTRS(sec) PER_CPU_DEF_ATTRIBUTES __weak \
__typeof__(type) name
#else
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index abd437d..ece0c6b 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -51,6 +51,7 @@ struct pstore_info {
char *buf;
size_t bufsize;
struct mutex read_mutex; /* serialize open/read/close */
+ int flags;
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
ssize_t (*read)(u64 *id, enum pstore_type_id *type,
@@ -70,6 +71,8 @@ struct pstore_info {
void *data;
};
+#define PSTORE_FLAGS_FRAGILE 1
+
#ifdef CONFIG_PSTORE
extern int pstore_register(struct pstore_info *);
extern bool pstore_cannot_block_path(enum kmsg_dump_reason reason);
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 8e00f9f..9e7db9e 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -43,6 +43,7 @@ extern int unregister_reboot_notifier(struct notifier_block *);
* Architecture-specific implementations of sys_reboot commands.
*/
+extern void migrate_to_reboot_cpu(void);
extern void machine_restart(char *cmd);
extern void machine_halt(void);
extern void machine_power_off(void);
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 939428a..8e3e66a 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -24,6 +24,11 @@ extern int rtnl_trylock(void);
extern int rtnl_is_locked(void);
#ifdef CONFIG_PROVE_LOCKING
extern int lockdep_rtnl_is_held(void);
+#else
+static inline int lockdep_rtnl_is_held(void)
+{
+ return 1;
+}
#endif /* #ifdef CONFIG_PROVE_LOCKING */
/**
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 768b037..53f97eb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -440,8 +440,6 @@ struct task_cputime {
.sum_exec_runtime = 0, \
}
-#define PREEMPT_ENABLED (PREEMPT_NEED_RESCHED)
-
#ifdef CONFIG_PREEMPT_COUNT
#define PREEMPT_DISABLED (1 + PREEMPT_ENABLED)
#else
@@ -932,7 +930,8 @@ struct pipe_inode_info;
struct uts_namespace;
struct load_weight {
- unsigned long weight, inv_weight;
+ unsigned long weight;
+ u32 inv_weight;
};
struct sched_avg {
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 30aa0dc..9d55438 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -47,6 +47,8 @@ extern int shmem_init(void);
extern int shmem_fill_super(struct super_block *sb, void *data, int silent);
extern struct file *shmem_file_setup(const char *name,
loff_t size, unsigned long flags);
+extern struct file *shmem_kernel_file_setup(const char *name, loff_t size,
+ unsigned long flags);
extern int shmem_zero_setup(struct vm_area_struct *);
extern int shmem_lock(struct file *file, int lock, struct user_struct *user);
extern void shmem_unlock_mapping(struct address_space *mapping);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index bec1cc7..6f69b3f 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1638,6 +1638,11 @@ static inline void skb_set_mac_header(struct sk_buff *skb, const int offset)
skb->mac_header += offset;
}
+static inline void skb_pop_mac_header(struct sk_buff *skb)
+{
+ skb->mac_header = skb->network_header;
+}
+
static inline void skb_probe_transport_header(struct sk_buff *skb,
const int offset_hint)
{
@@ -2263,6 +2268,24 @@ static inline void skb_postpull_rcsum(struct sk_buff *skb,
unsigned char *skb_pull_rcsum(struct sk_buff *skb, unsigned int len);
+/**
+ * pskb_trim_rcsum - trim received skb and update checksum
+ * @skb: buffer to trim
+ * @len: new length
+ *
+ * This is exactly the same as pskb_trim except that it ensures the
+ * checksum of received packets is still valid after the operation.
+ */
+
+static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
+{
+ if (likely(len >= skb->len))
+ return 0;
+ if (skb->ip_summed == CHECKSUM_COMPLETE)
+ skb->ip_summed = CHECKSUM_NONE;
+ return __pskb_trim(skb, len);
+}
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -2360,27 +2383,6 @@ __wsum __skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum skb_checksum(const struct sk_buff *skb, int offset, int len,
__wsum csum);
-/**
- * pskb_trim_rcsum - trim received skb and update checksum
- * @skb: buffer to trim
- * @len: new length
- *
- * This is exactly the same as pskb_trim except that it ensures the
- * checksum of received packets are still valid after the operation.
- */
-
-static inline int pskb_trim_rcsum(struct sk_buff *skb, unsigned int len)
-{
- if (likely(len >= skb->len))
- return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE) {
- __wsum adj = skb_checksum(skb, len, skb->len - len, 0);
-
- skb->csum = csum_sub(skb->csum, adj);
- }
- return __pskb_trim(skb, len);
-}
-
static inline void *skb_header_pointer(const struct sk_buff *skb, int offset,
int len, void *buffer)
{
@@ -2529,6 +2531,10 @@ static inline void sw_tx_timestamp(struct sk_buff *skb)
* Ethernet MAC Drivers should call this function in their hard_xmit()
* function immediately before giving the sk_buff to the MAC hardware.
*
+ * Specifically, one should make absolutely sure that this function is
+ * called before TX completion of this packet can trigger. Otherwise
+ * the packet could potentially already be freed.
+ *
* @skb: A socket buffer.
*/
static inline void skb_tx_timestamp(struct sk_buff *skb)
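
The pskb_trim_rcsum() move above also changes its behaviour: instead of subtracting the trimmed tail's contribution from a CHECKSUM_COMPLETE value with csum_sub(), the helper now simply drops to CHECKSUM_NONE and lets the stack recompute later. The arithmetic the old variant relied on is plain one's-complement addition; the standalone sketch below uses an illustrative 16-bit fold, not the kernel's csum_partial(), to show why removing the tail's sum recovers the checksum of the kept part.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

/* Fold a buffer into a 16-bit one's-complement sum (big-endian word order). */
static uint16_t csum(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)sum;
}

/* One's-complement subtraction: add the complement, then fold the carry. */
static uint16_t csum_sub(uint16_t a, uint16_t b)
{
        uint32_t res = a + (uint16_t)~b;

        while (res >> 16)
                res = (res & 0xffff) + (res >> 16);
        return (uint16_t)res;
}

int main(void)
{
        uint8_t pkt[] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 };
        size_t keep = 4;                       /* trim the last two bytes */

        uint16_t full = csum(pkt, sizeof(pkt));
        uint16_t tail = csum(pkt + keep, sizeof(pkt) - keep);
        uint16_t head = csum(pkt, keep);

        /* Removing the tail's contribution recovers the trimmed checksum. */
        printf("%#x == %#x\n", (unsigned)csum_sub(full, tail), (unsigned)head);
        return 0;
}
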
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index bd8218b..941055e 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -83,7 +83,7 @@ struct vb2_fileio_data;
struct vb2_mem_ops {
void *(*alloc)(void *alloc_ctx, unsigned long size, gfp_t gfp_flags);
void (*put)(void *buf_priv);
- struct dma_buf *(*get_dmabuf)(void *buf_priv);
+ struct dma_buf *(*get_dmabuf)(void *buf_priv, unsigned long flags);
void *(*get_userptr)(void *alloc_ctx, unsigned long vaddr,
unsigned long size, int write);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index eb198ac..488316e 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -110,7 +110,8 @@ struct frag_hdr {
__be32 identification;
};
-#define IP6_MF 0x0001
+#define IP6_MF 0x0001
+#define IP6_OFFSET 0xFFF8
#include <net/sock.h>
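
IP6_OFFSET pairs with IP6_MF for picking apart the Fragment Header's frag_off field once it is in host order: the low bit is the more-fragments flag, and masking with 0xFFF8 keeps the 13-bit fragment offset already scaled to bytes (the field counts 8-octet units). A small userspace illustration; the field value below is made up.

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define IP6_MF     0x0001
#define IP6_OFFSET 0xFFF8

int main(void)
{
        /* Hypothetical frag_off: 13-bit offset 185 (1480 bytes), MF bit set. */
        uint16_t frag_off_be = htons((185 << 3) | IP6_MF);

        uint16_t frag_off = ntohs(frag_off_be);
        unsigned int byte_offset = frag_off & IP6_OFFSET;  /* 1480 */
        int more = frag_off & IP6_MF;                      /* 1 */

        printf("offset=%u bytes, MF=%d\n", byte_offset, more);
        return 0;
}
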
diff --git a/include/net/llc_pdu.h b/include/net/llc_pdu.h
index 31e2de7..c0f0a13 100644
--- a/include/net/llc_pdu.h
+++ b/include/net/llc_pdu.h
@@ -142,7 +142,7 @@
#define LLC_S_PF_IS_1(pdu) ((pdu->ctrl_2 & LLC_S_PF_BIT_MASK) ? 1 : 0)
#define PDU_SUPV_GET_Nr(pdu) ((pdu->ctrl_2 & 0xFE) >> 1)
-#define PDU_GET_NEXT_Vr(sn) (++sn & ~LLC_2_SEQ_NBR_MODULO)
+#define PDU_GET_NEXT_Vr(sn) (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)
/* FRMR information field macros */
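
The PDU_GET_NEXT_Vr() rewrite removes a side effect: the old ++sn form modified the caller's variable, while (sn) + 1 only reads it. A compile-and-run comparison, with LLC_2_SEQ_NBR_MODULO assumed to be 128 as in the 7-bit LLC sequence space:

#include <stdio.h>

#define LLC_2_SEQ_NBR_MODULO 128U   /* assumed value, per the 7-bit sequence space */

#define PDU_GET_NEXT_Vr_OLD(sn)  (++sn & ~LLC_2_SEQ_NBR_MODULO)
#define PDU_GET_NEXT_Vr_NEW(sn)  (((sn) + 1) & ~LLC_2_SEQ_NBR_MODULO)

int main(void)
{
        unsigned int vr = 127;

        unsigned int next = PDU_GET_NEXT_Vr_NEW(vr);  /* wraps to 0, vr untouched */
        printf("new: vr=%u next=%u\n", vr, next);

        vr = 127;
        next = PDU_GET_NEXT_Vr_OLD(vr);               /* also 0, but vr is now 128 */
        printf("old: vr=%u next=%u\n", vr, next);
        return 0;
}
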
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index ea0ca5f..0a248b3 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1046,9 +1046,6 @@ struct sctp_outq {
/* Corked? */
char cork;
-
- /* Is this structure empty? */
- char empty;
};
void sctp_outq_init(struct sctp_association *, struct sctp_outq *);
@@ -1726,12 +1723,6 @@ struct sctp_association {
/* How many duplicated TSNs have we seen? */
int numduptsns;
- /* Number of seconds of idle time before an association is closed.
- * In the association context, this is really used as a boolean
- * since the real timeout is stored in the timeouts array
- */
- __u32 autoclose;
-
/* These are to support
* "SCTP Extensions for Dynamic Reconfiguration of IP Addresses
* and Enforcement of Flow and Message Limits"
diff --git a/include/net/sock.h b/include/net/sock.h
index e3a18ff..2ef3c3e 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1035,7 +1035,6 @@ enum cg_proto_flags {
};
struct cg_proto {
- void (*enter_memory_pressure)(struct sock *sk);
struct res_counter memory_allocated; /* Current allocated memory. */
struct percpu_counter sockets_allocated; /* Current number of sockets. */
int memory_pressure;
@@ -1155,8 +1154,7 @@ static inline void sk_leave_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- if (cg_proto->memory_pressure)
- cg_proto->memory_pressure = 0;
+ cg_proto->memory_pressure = 0;
}
}
@@ -1171,7 +1169,7 @@ static inline void sk_enter_memory_pressure(struct sock *sk)
struct proto *prot = sk->sk_prot;
for (; cg_proto; cg_proto = parent_cg_proto(prot, cg_proto))
- cg_proto->enter_memory_pressure(sk);
+ cg_proto->memory_pressure = 1;
}
sk->sk_prot->enter_memory_pressure(sk);
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 979874c..61e1935 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -978,7 +978,7 @@ struct ib_uobject {
};
struct ib_udata {
- void __user *inbuf;
+ const void __user *inbuf;
void __user *outbuf;
size_t inlen;
size_t outlen;
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index af99839..5f73785 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -108,7 +108,7 @@ static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
{
struct snd_sg_buf *sgbuf = dmab->private_data;
dma_addr_t addr = sgbuf->table[offset >> PAGE_SHIFT].addr;
- addr &= PAGE_MASK;
+ addr &= ~((dma_addr_t)PAGE_SIZE - 1);
return addr + offset % PAGE_SIZE;
}
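
The snd_sgbuf_get_addr() change matters when dma_addr_t is wider than unsigned long: PAGE_MASK is an unsigned long, so ANDing it against a 64-bit DMA address on a 32-bit kernel wipes out the upper 32 bits, whereas ~((dma_addr_t)PAGE_SIZE - 1) is computed at the full width. A sketch of the failure mode, with the 32-bit mask simulated by a cast:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))         /* 0xfffff000 when long is 32-bit */

int main(void)
{
        uint64_t addr = 0x1fffff234ULL;      /* DMA address above 4 GiB */

        /* Simulate the 32-bit unsigned long case: only the low 32 bits are set. */
        uint64_t truncated = addr & (uint32_t)PAGE_MASK;
        uint64_t correct   = addr & ~((uint64_t)PAGE_SIZE - 1);

        printf("truncated=%#llx correct=%#llx\n",
               (unsigned long long)truncated, (unsigned long long)correct);
        return 0;
}
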
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 45412a6..321301c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -517,10 +517,6 @@ struct se_node_acl {
u32 acl_index;
#define MAX_ACL_TAG_SIZE 64
char acl_tag[MAX_ACL_TAG_SIZE];
- u64 num_cmds;
- u64 read_bytes;
- u64 write_bytes;
- spinlock_t stats_lock;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
atomic_t acl_pr_ref_count;
struct se_dev_entry **device_list;
@@ -624,6 +620,7 @@ struct se_dev_attrib {
u32 unmap_granularity;
u32 unmap_granularity_alignment;
u32 max_write_same_len;
+ u32 max_bytes_per_io;
struct se_device *da_dev;
struct config_group da_group;
};
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 2f3f7ea..fe421e8 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -983,6 +983,8 @@ struct drm_radeon_cs {
#define RADEON_INFO_SI_CP_DMA_COMPUTE 0x17
/* CIK macrotile mode array */
#define RADEON_INFO_CIK_MACROTILE_MODE_ARRAY 0x18
+/* query the number of render backends */
+#define RADEON_INFO_SI_BACKEND_ENABLED_MASK 0x19
struct drm_radeon_info {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index bcb0912..f854ca4 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -75,6 +75,7 @@
#define DRM_VMW_PARAM_FIFO_CAPS 4
#define DRM_VMW_PARAM_MAX_FB_SIZE 5
#define DRM_VMW_PARAM_FIFO_HW_VERSION 6
+#define DRM_VMW_PARAM_MAX_SURF_MEMORY 7
/**
* struct drm_vmw_getparam_arg
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index ecc8859..bd24470 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -464,7 +464,8 @@ struct input_keymap_entry {
#define KEY_BRIGHTNESS_ZERO 244 /* brightness off, use ambient */
#define KEY_DISPLAY_OFF 245 /* display device to off state */
-#define KEY_WIMAX 246
+#define KEY_WWAN 246 /* Wireless WAN (LTE, UMTS, GSM, etc.) */
+#define KEY_WIMAX KEY_WWAN
#define KEY_RFKILL 247 /* Key that controls all radios */
#define KEY_MICMUTE 248 /* Mute / unmute the microphone */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e1802d6..959d454 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -679,6 +679,7 @@ enum perf_event_type {
*
* { u64 weight; } && PERF_SAMPLE_WEIGHT
* { u64 data_src; } && PERF_SAMPLE_DATA_SRC
+ * { u64 transaction; } && PERF_SAMPLE_TRANSACTION
* };
*/
PERF_RECORD_SAMPLE = 9,
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index d630163..5759810 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -30,7 +30,7 @@
#include <sound/compress_params.h>
-#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 1)
+#define SNDRV_COMPRESS_VERSION SNDRV_PROTOCOL_VERSION(0, 1, 2)
/**
* struct snd_compressed_buffer: compressed buffer
* @fragment_size: size of buffer fragment in bytes
@@ -67,8 +67,8 @@ struct snd_compr_params {
struct snd_compr_tstamp {
__u32 byte_offset;
__u32 copied_total;
- snd_pcm_uframes_t pcm_frames;
- snd_pcm_uframes_t pcm_io_frames;
+ __u32 pcm_frames;
+ __u32 pcm_io_frames;
__u32 sampling_rate;
};
diff --git a/include/xen/interface/io/blkif.h b/include/xen/interface/io/blkif.h
index 65e1209..ae665ac 100644
--- a/include/xen/interface/io/blkif.h
+++ b/include/xen/interface/io/blkif.h
@@ -146,7 +146,7 @@ struct blkif_request_segment_aligned {
struct blkif_request_rw {
uint8_t nr_segments; /* number of segments */
blkif_vdev_t handle; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_request,u.rw.id) == 8 */
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -163,7 +163,7 @@ struct blkif_request_discard {
uint8_t flag; /* BLKIF_DISCARD_SECURE or zero. */
#define BLKIF_DISCARD_SECURE (1<<0) /* ignored if discard-secure=0 */
blkif_vdev_t _pad1; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad2; /* offsetof(blkif_req..,u.discard.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -175,7 +175,7 @@ struct blkif_request_discard {
struct blkif_request_other {
uint8_t _pad1;
blkif_vdev_t _pad2; /* only for read/write requests */
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* offsetof(blkif_req..,u.other.id)==8*/
#endif
uint64_t id; /* private guest value, echoed in resp */
@@ -184,7 +184,7 @@ struct blkif_request_other {
struct blkif_request_indirect {
uint8_t indirect_op;
uint16_t nr_segments;
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad1; /* offsetof(blkif_...,u.indirect.id) == 8 */
#endif
uint64_t id;
@@ -192,7 +192,7 @@ struct blkif_request_indirect {
blkif_vdev_t handle;
uint16_t _pad2;
grant_ref_t indirect_grefs[BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST];
-#ifdef CONFIG_X86_64
+#ifndef CONFIG_X86_32
uint32_t _pad3; /* make it 64 byte aligned */
#else
uint64_t _pad3; /* make it 64 byte aligned */
diff --git a/init/Kconfig b/init/Kconfig
index 79383d3..4e5d96a 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -809,6 +809,12 @@ config GENERIC_SCHED_CLOCK
config ARCH_SUPPORTS_NUMA_BALANCING
bool
+#
+# For architectures that know their GCC __int128 support is sound
+#
+config ARCH_SUPPORTS_INT128
+ bool
+
# For architectures that (ab)use NUMA to represent different memory regions
# all cpu-local but of different latencies, such as SuperH.
#
diff --git a/kernel/.gitignore b/kernel/.gitignore
index b3097bd..790d83c 100644
--- a/kernel/.gitignore
+++ b/kernel/.gitignore
@@ -5,3 +5,4 @@ config_data.h
config_data.gz
timeconst.h
hz.bc
+x509_certificate_list
diff --git a/kernel/Makefile b/kernel/Makefile
index bbaf7d5..bc010ee 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -137,9 +137,10 @@ $(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
###############################################################################
ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYRING),y)
X509_CERTIFICATES-y := $(wildcard *.x509) $(wildcard $(srctree)/*.x509)
-X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += signing_key.x509
-X509_CERTIFICATES := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
+X509_CERTIFICATES-$(CONFIG_MODULE_SIG) += $(objtree)/signing_key.x509
+X509_CERTIFICATES-raw := $(sort $(foreach CERT,$(X509_CERTIFICATES-y), \
$(or $(realpath $(CERT)),$(CERT))))
+X509_CERTIFICATES := $(subst $(realpath $(objtree))/,,$(X509_CERTIFICATES-raw))
ifeq ($(X509_CERTIFICATES),)
$(warning *** No X.509 certificates found ***)
@@ -164,9 +165,9 @@ $(obj)/x509_certificate_list: $(X509_CERTIFICATES) $(obj)/.x509.list
targets += $(obj)/.x509.list
$(obj)/.x509.list:
@echo $(X509_CERTIFICATES) >$@
+endif
clean-files := x509_certificate_list .x509.list
-endif
ifeq ($(CONFIG_MODULE_SIG),y)
###############################################################################
diff --git a/kernel/bounds.c b/kernel/bounds.c
index 5253204..9fd4246 100644
--- a/kernel/bounds.c
+++ b/kernel/bounds.c
@@ -22,6 +22,6 @@ void foo(void)
#ifdef CONFIG_SMP
DEFINE(NR_CPUS_BITS, ilog2(CONFIG_NR_CPUS));
#endif
- DEFINE(BLOATED_SPINLOCKS, sizeof(spinlock_t) > sizeof(int));
+ DEFINE(SPINLOCK_SIZE, sizeof(spinlock_t));
/* End of constants */
}
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 8b729c2..bc1dcab 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -890,6 +890,16 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
struct cgroup *cgrp = dentry->d_fsdata;
BUG_ON(!(cgroup_is_dead(cgrp)));
+
+ /*
+ * XXX: cgrp->id is only used to look up css's. As cgroup
+ * and css's lifetimes will be decoupled, it should be made
+ * per-subsystem and moved to css->id so that lookups are
+ * successful until the target css is released.
+ */
+ idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
+ cgrp->id = -1;
+
call_rcu(&cgrp->rcu_head, cgroup_free_rcu);
} else {
struct cfent *cfe = __d_cfe(dentry);
@@ -4268,6 +4278,7 @@ static void css_release(struct percpu_ref *ref)
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
+ rcu_assign_pointer(css->cgroup->subsys[css->ss->subsys_id], NULL);
call_rcu(&css->rcu_head, css_free_rcu_fn);
}
@@ -4426,14 +4437,6 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
list_add_tail_rcu(&cgrp->sibling, &cgrp->parent->children);
root->number_of_cgroups++;
- /* each css holds a ref to the cgroup's dentry and the parent css */
- for_each_root_subsys(root, ss) {
- struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
-
- dget(dentry);
- css_get(css->parent);
- }
-
/* hold a ref to the parent's dentry */
dget(parent->dentry);
@@ -4445,6 +4448,13 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
if (err)
goto err_destroy;
+ /* each css holds a ref to the cgroup's dentry and parent css */
+ dget(dentry);
+ css_get(css->parent);
+
+ /* mark it consumed for error path */
+ css_ar[ss->subsys_id] = NULL;
+
if (ss->broken_hierarchy && !ss->warned_broken_hierarchy &&
parent->parent) {
pr_warning("cgroup: %s (%d) created nested cgroup for controller \"%s\" which has incomplete hierarchy support. Nested cgroups may change behavior in the future.\n",
@@ -4491,6 +4501,14 @@ err_free_cgrp:
return err;
err_destroy:
+ for_each_root_subsys(root, ss) {
+ struct cgroup_subsys_state *css = css_ar[ss->subsys_id];
+
+ if (css) {
+ percpu_ref_cancel_init(&css->refcnt);
+ ss->css_free(css);
+ }
+ }
cgroup_destroy_locked(cgrp);
mutex_unlock(&cgroup_mutex);
mutex_unlock(&dentry->d_inode->i_mutex);
@@ -4652,8 +4670,12 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
* will be invoked to perform the rest of destruction once the
* percpu refs of all css's are confirmed to be killed.
*/
- for_each_root_subsys(cgrp->root, ss)
- kill_css(cgroup_css(cgrp, ss));
+ for_each_root_subsys(cgrp->root, ss) {
+ struct cgroup_subsys_state *css = cgroup_css(cgrp, ss);
+
+ if (css)
+ kill_css(css);
+ }
/*
* Mark @cgrp dead. This prevents further task migration and child
@@ -4722,14 +4744,6 @@ static void cgroup_destroy_css_killed(struct cgroup *cgrp)
/* delete this cgroup from parent->children */
list_del_rcu(&cgrp->sibling);
- /*
- * We should remove the cgroup object from idr before its grace
- * period starts, so we won't be looking up a cgroup while the
- * cgroup is being freed.
- */
- idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
- cgrp->id = -1;
-
dput(d);
set_bit(CGRP_RELEASABLE, &parent->flags);
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 72348dc..f574401 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1396,6 +1396,8 @@ event_sched_out(struct perf_event *event,
if (event->state != PERF_EVENT_STATE_ACTIVE)
return;
+ perf_pmu_disable(event->pmu);
+
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
@@ -1412,6 +1414,8 @@ event_sched_out(struct perf_event *event,
ctx->nr_freq--;
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
+
+ perf_pmu_enable(event->pmu);
}
static void
@@ -1652,6 +1656,7 @@ event_sched_in(struct perf_event *event,
struct perf_event_context *ctx)
{
u64 tstamp = perf_event_time(event);
+ int ret = 0;
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;
@@ -1674,10 +1679,13 @@ event_sched_in(struct perf_event *event,
*/
smp_wmb();
+ perf_pmu_disable(event->pmu);
+
if (event->pmu->add(event, PERF_EF_START)) {
event->state = PERF_EVENT_STATE_INACTIVE;
event->oncpu = -1;
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto out;
}
event->tstamp_running += tstamp - event->tstamp_stopped;
@@ -1693,7 +1701,10 @@ event_sched_in(struct perf_event *event,
if (event->attr.exclusive)
cpuctx->exclusive = 1;
- return 0;
+out:
+ perf_pmu_enable(event->pmu);
+
+ return ret;
}
static int
@@ -2743,6 +2754,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
if (!event_filter_match(event))
continue;
+ perf_pmu_disable(event->pmu);
+
hwc = &event->hw;
if (hwc->interrupts == MAX_INTERRUPTS) {
@@ -2752,7 +2765,7 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
}
if (!event->attr.freq || !event->attr.sample_freq)
- continue;
+ goto next;
/*
* stop the event and update event->count
@@ -2774,6 +2787,8 @@ static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
perf_adjust_period(event, period, delta, false);
event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
+ next:
+ perf_pmu_enable(event->pmu);
}
perf_pmu_enable(ctx->pmu);
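
The perf changes above bracket per-event work with perf_pmu_disable()/perf_pmu_enable(), which is why early returns and continue statements become goto out / goto next: every exit path must re-enable the PMU. The error-handling shape, reduced to a userspace toy where a counter stands in for the disable depth (all names here are invented):

#include <stdio.h>

static int disable_depth;

static void pmu_disable(void) { disable_depth++; }
static void pmu_enable(void)  { disable_depth--; }

/* Stand-in for event->pmu->add(); fails when asked to. */
static int pmu_add(int fail) { return fail ? -1 : 0; }

static int event_sched_in(int fail)
{
        int ret = 0;

        pmu_disable();
        if (pmu_add(fail)) {
                ret = -1;
                goto out;        /* every exit re-enables the PMU */
        }
        /* ... program timestamps, counters ... */
out:
        pmu_enable();
        return ret;
}

int main(void)
{
        event_sched_in(1);
        event_sched_in(0);
        printf("disable_depth=%d\n", disable_depth);   /* balanced: prints 0 */
        return 0;
}
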
diff --git a/kernel/fork.c b/kernel/fork.c
index 728d5be..5721f0e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -537,6 +537,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
spin_lock_init(&mm->page_table_lock);
mm_init_aio(mm);
mm_init_owner(mm, p);
+ clear_tlb_flush_pending(mm);
if (likely(!mm_alloc_pgd(mm))) {
mm->def_flags = 0;
diff --git a/kernel/freezer.c b/kernel/freezer.c
index b462fa1..aa6a8aa 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -19,6 +19,12 @@ EXPORT_SYMBOL(system_freezing_cnt);
bool pm_freezing;
bool pm_nosig_freezing;
+/*
+ * Temporary export for the deadlock workaround in ata_scsi_hotplug().
+ * Remove once the hack becomes unnecessary.
+ */
+EXPORT_SYMBOL_GPL(pm_freezing);
+
/* protects freezing and frozen transitions */
static DEFINE_SPINLOCK(freezer_lock);
diff --git a/kernel/futex.c b/kernel/futex.c
index 80ba086..f6ff019 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -251,6 +251,9 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
return -EINVAL;
address -= key->both.offset;
+ if (unlikely(!access_ok(rw, uaddr, sizeof(u32))))
+ return -EFAULT;
+
/*
* PROCESS_PRIVATE futexes are fast.
* As the mm cannot disappear under us and the 'key' only needs
@@ -259,8 +262,6 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
* but access_ok() should be faster than find_vma()
*/
if (!fshared) {
- if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))))
- return -EFAULT;
key->private.mm = mm;
key->private.address = address;
get_futex_key_refs(key);
@@ -288,7 +289,7 @@ again:
put_page(page);
/* serialize against __split_huge_page_splitting() */
local_irq_disable();
- if (likely(__get_user_pages_fast(address, 1, 1, &page) == 1)) {
+ if (likely(__get_user_pages_fast(address, 1, !ro, &page) == 1)) {
page_head = compound_head(page);
/*
* page_head is valid pointer but we must pin
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 490afc0..9c97016 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -47,6 +47,9 @@ u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
size_t vmcoreinfo_size;
size_t vmcoreinfo_max_size = sizeof(vmcoreinfo_data);
+/* Flag to indicate we are going to kexec a new kernel */
+bool kexec_in_progress = false;
+
/* Location of the reserved area for the crash kernel */
struct resource crashk_res = {
.name = "Crash kernel",
@@ -1675,7 +1678,9 @@ int kernel_kexec(void)
} else
#endif
{
+ kexec_in_progress = true;
kernel_restart_prepare(NULL);
+ migrate_to_reboot_cpu();
printk(KERN_EMERG "Starting new kernel\n");
machine_shutdown();
}
diff --git a/kernel/power/console.c b/kernel/power/console.c
index 463aa673..eacb8bd 100644
--- a/kernel/power/console.c
+++ b/kernel/power/console.c
@@ -81,6 +81,7 @@ void pm_vt_switch_unregister(struct device *dev)
list_for_each_entry(tmp, &pm_vt_switch_list, head) {
if (tmp->dev == dev) {
list_del(&tmp->head);
+ kfree(tmp);
break;
}
}
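
The one-line kfree() in pm_vt_switch_unregister() plugs a leak: the entry was unlinked from the list but its allocation was never returned. The same shape in plain C with a singly linked list; the list layout and names are invented for the illustration.

#include <stdio.h>
#include <stdlib.h>

struct entry {
        struct entry *next;
        const void *dev;
};

static struct entry *head;

/* Mirrors the fix: the removed node must also be freed, or it leaks. */
static void unregister(const void *dev)
{
        struct entry **pp = &head;

        for (; *pp; pp = &(*pp)->next) {
                if ((*pp)->dev == dev) {
                        struct entry *victim = *pp;

                        *pp = victim->next;     /* list_del() equivalent */
                        free(victim);           /* the missing kfree() */
                        break;
                }
        }
}

int main(void)
{
        int a, b;
        struct entry *e1 = calloc(1, sizeof(*e1));
        struct entry *e2 = calloc(1, sizeof(*e2));

        e1->dev = &a;
        e2->dev = &b;
        e2->next = e1;
        head = e2;

        unregister(&a);
        unregister(&b);
        printf("head=%p\n", (void *)head);      /* NULL: all entries freed */
        return 0;
}
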
diff --git a/kernel/reboot.c b/kernel/reboot.c
index f813b34..662c83f 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -104,7 +104,7 @@ int unregister_reboot_notifier(struct notifier_block *nb)
}
EXPORT_SYMBOL(unregister_reboot_notifier);
-static void migrate_to_reboot_cpu(void)
+void migrate_to_reboot_cpu(void)
{
/* The boot cpu is always logical cpu 0 */
int cpu = reboot_cpu;
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e85cda2..a88f4a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4902,6 +4902,7 @@ DEFINE_PER_CPU(struct sched_domain *, sd_asym);
static void update_top_cache_domain(int cpu)
{
struct sched_domain *sd;
+ struct sched_domain *busy_sd = NULL;
int id = cpu;
int size = 1;
@@ -4909,9 +4910,9 @@ static void update_top_cache_domain(int cpu)
if (sd) {
id = cpumask_first(sched_domain_span(sd));
size = cpumask_weight(sched_domain_span(sd));
- sd = sd->parent; /* sd_busy */
+ busy_sd = sd->parent; /* sd_busy */
}
- rcu_assign_pointer(per_cpu(sd_busy, cpu), sd);
+ rcu_assign_pointer(per_cpu(sd_busy, cpu), busy_sd);
rcu_assign_pointer(per_cpu(sd_llc, cpu), sd);
per_cpu(sd_llc_size, cpu) = size;
@@ -5112,6 +5113,7 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
* die on a /0 trap.
*/
sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
+ sg->sgp->power_orig = sg->sgp->power;
/*
* Make sure the first group of this domain contains the
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fd773ad..c7395d9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -178,59 +178,61 @@ void sched_init_granularity(void)
update_sysctl();
}
-#if BITS_PER_LONG == 32
-# define WMULT_CONST (~0UL)
-#else
-# define WMULT_CONST (1UL << 32)
-#endif
-
+#define WMULT_CONST (~0U)
#define WMULT_SHIFT 32
-/*
- * Shift right and round:
- */
-#define SRR(x, y) (((x) + (1UL << ((y) - 1))) >> (y))
+static void __update_inv_weight(struct load_weight *lw)
+{
+ unsigned long w;
+
+ if (likely(lw->inv_weight))
+ return;
+
+ w = scale_load_down(lw->weight);
+
+ if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
+ lw->inv_weight = 1;
+ else if (unlikely(!w))
+ lw->inv_weight = WMULT_CONST;
+ else
+ lw->inv_weight = WMULT_CONST / w;
+}
/*
- * delta *= weight / lw
+ * delta_exec * weight / lw.weight
+ * OR
+ * (delta_exec * (weight * lw->inv_weight)) >> WMULT_SHIFT
+ *
+ * Either weight := NICE_0_LOAD and lw \e prio_to_wmult[], in which case
+ * we're guaranteed shift stays positive because inv_weight is guaranteed to
+ * fit 32 bits, and NICE_0_LOAD gives another 10 bits; therefore shift >= 22.
+ *
+ * Or, weight <= lw.weight (because lw.weight is the runqueue weight), thus
+ * weight/lw.weight <= 1, and therefore our shift will also be positive.
*/
-static unsigned long
-calc_delta_mine(unsigned long delta_exec, unsigned long weight,
- struct load_weight *lw)
+static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
{
- u64 tmp;
-
- /*
- * weight can be less than 2^SCHED_LOAD_RESOLUTION for task group sched
- * entities since MIN_SHARES = 2. Treat weight as 1 if less than
- * 2^SCHED_LOAD_RESOLUTION.
- */
- if (likely(weight > (1UL << SCHED_LOAD_RESOLUTION)))
- tmp = (u64)delta_exec * scale_load_down(weight);
- else
- tmp = (u64)delta_exec;
+ u64 fact = scale_load_down(weight);
+ int shift = WMULT_SHIFT;
- if (!lw->inv_weight) {
- unsigned long w = scale_load_down(lw->weight);
+ __update_inv_weight(lw);
- if (BITS_PER_LONG > 32 && unlikely(w >= WMULT_CONST))
- lw->inv_weight = 1;
- else if (unlikely(!w))
- lw->inv_weight = WMULT_CONST;
- else
- lw->inv_weight = WMULT_CONST / w;
+ if (unlikely(fact >> 32)) {
+ while (fact >> 32) {
+ fact >>= 1;
+ shift--;
+ }
}
- /*
- * Check whether we'd overflow the 64-bit multiplication:
- */
- if (unlikely(tmp > WMULT_CONST))
- tmp = SRR(SRR(tmp, WMULT_SHIFT/2) * lw->inv_weight,
- WMULT_SHIFT/2);
- else
- tmp = SRR(tmp * lw->inv_weight, WMULT_SHIFT);
+ /* hint to use a 32x32->64 mul */
+ fact = (u64)(u32)fact * lw->inv_weight;
+
+ while (fact >> 32) {
+ fact >>= 1;
+ shift--;
+ }
- return (unsigned long)min(tmp, (u64)(unsigned long)LONG_MAX);
+ return mul_u64_u32_shr(delta_exec, fact, shift);
}
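
__calc_delta() above computes delta_exec * weight / lw->weight without a 64-bit division: it multiplies by a precomputed 32-bit inverse weight (WMULT_CONST / w) and shifts, squeezing the factor back into 32 bits as needed so the final step is a single 64x32->64 multiply-and-shift. A self-contained sketch of that arithmetic for a 64-bit GCC/Clang host; mul_u64_u32_shr() is re-implemented here with __int128 purely for illustration, and the numbers in main() are made up.

#include <stdio.h>
#include <stdint.h>

#define WMULT_CONST (~0U)       /* 2^32 - 1 */
#define WMULT_SHIFT 32

/* Userspace stand-in for the kernel's mul_u64_u32_shr(). */
static uint64_t mul_u64_u32_shr(uint64_t a, uint32_t mul, unsigned int shift)
{
        return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

/* delta_exec * weight / lw_weight, shaped like __calc_delta(). */
static uint64_t calc_delta(uint64_t delta_exec, unsigned long weight,
                           unsigned long lw_weight)
{
        uint32_t inv_weight = WMULT_CONST / lw_weight;   /* lw->inv_weight */
        uint64_t fact = weight;
        int shift = WMULT_SHIFT;

        while (fact >> 32) {            /* keep the factor within 32 bits */
                fact >>= 1;
                shift--;
        }
        fact = (uint64_t)(uint32_t)fact * inv_weight;
        while (fact >> 32) {
                fact >>= 1;
                shift--;
        }
        return mul_u64_u32_shr(delta_exec, fact, shift);
}

int main(void)
{
        /* 3 ms of runtime at NICE_0 weight 1024 on a runqueue of weight 3072:
         * the entity is charged about one third of the wall-clock delta. */
        printf("%llu\n", (unsigned long long)calc_delta(3000000, 1024, 3072));
        return 0;
}
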
@@ -443,7 +445,7 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse)
#endif /* CONFIG_FAIR_GROUP_SCHED */
static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec);
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec);
/**************************************************************
* Scheduling class tree data structure manipulation methods:
@@ -612,11 +614,10 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
/*
* delta /= w
*/
-static inline unsigned long
-calc_delta_fair(unsigned long delta, struct sched_entity *se)
+static inline u64 calc_delta_fair(u64 delta, struct sched_entity *se)
{
if (unlikely(se->load.weight != NICE_0_LOAD))
- delta = calc_delta_mine(delta, NICE_0_LOAD, &se->load);
+ delta = __calc_delta(delta, NICE_0_LOAD, &se->load);
return delta;
}
@@ -665,7 +666,7 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
update_load_add(&lw, se->load.weight);
load = &lw;
}
- slice = calc_delta_mine(slice, se->load.weight, load);
+ slice = __calc_delta(slice, se->load.weight, load);
}
return slice;
}
@@ -703,47 +704,32 @@ void init_task_runnable_average(struct task_struct *p)
#endif
/*
- * Update the current task's runtime statistics. Skip current tasks that
- * are not in our scheduling class.
+ * Update the current task's runtime statistics.
*/
-static inline void
-__update_curr(struct cfs_rq *cfs_rq, struct sched_entity *curr,
- unsigned long delta_exec)
-{
- unsigned long delta_exec_weighted;
-
- schedstat_set(curr->statistics.exec_max,
- max((u64)delta_exec, curr->statistics.exec_max));
-
- curr->sum_exec_runtime += delta_exec;
- schedstat_add(cfs_rq, exec_clock, delta_exec);
- delta_exec_weighted = calc_delta_fair(delta_exec, curr);
-
- curr->vruntime += delta_exec_weighted;
- update_min_vruntime(cfs_rq);
-}
-
static void update_curr(struct cfs_rq *cfs_rq)
{
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
- unsigned long delta_exec;
+ u64 delta_exec;
if (unlikely(!curr))
return;
- /*
- * Get the amount of time the current task was running
- * since the last time we changed load (this cannot
- * overflow on 32 bits):
- */
- delta_exec = (unsigned long)(now - curr->exec_start);
- if (!delta_exec)
+ delta_exec = now - curr->exec_start;
+ if (unlikely((s64)delta_exec <= 0))
return;
- __update_curr(cfs_rq, curr, delta_exec);
curr->exec_start = now;
+ schedstat_set(curr->statistics.exec_max,
+ max(delta_exec, curr->statistics.exec_max));
+
+ curr->sum_exec_runtime += delta_exec;
+ schedstat_add(cfs_rq, exec_clock, delta_exec);
+
+ curr->vruntime += calc_delta_fair(delta_exec, curr);
+ update_min_vruntime(cfs_rq);
+
if (entity_is_task(curr)) {
struct task_struct *curtask = task_of(curr);
@@ -1752,6 +1738,13 @@ void task_numa_work(struct callback_head *work)
(vma->vm_file && (vma->vm_flags & (VM_READ|VM_WRITE)) == (VM_READ)))
continue;
+ /*
+ * Skip inaccessible VMAs to avoid any confusion between
+ * PROT_NONE and NUMA hinting ptes
+ */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
+ continue;
+
do {
start = max(start, vma->vm_start);
end = ALIGN(start + (pages << PAGE_SHIFT), HPAGE_SIZE);
@@ -3015,8 +3008,7 @@ static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq)
}
}
-static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec)
+static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
/* dock delta_exec before expiring quota (as it could span periods) */
cfs_rq->runtime_remaining -= delta_exec;
@@ -3034,7 +3026,7 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
}
static __always_inline
-void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec)
+void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec)
{
if (!cfs_bandwidth_used() || !cfs_rq->runtime_enabled)
return;
@@ -3574,8 +3566,7 @@ static inline u64 cfs_rq_clock_task(struct cfs_rq *cfs_rq)
return rq_clock_task(rq_of(cfs_rq));
}
-static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq,
- unsigned long delta_exec) {}
+static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, u64 delta_exec) {}
static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {}
static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {}
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 7d57275..1c40655 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -901,6 +901,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Change rq's cpupri only if rt_rq is the top queue.
+ */
+ if (&rq->rt != rt_rq)
+ return;
+#endif
if (rq->online && prio < prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, prio);
}
@@ -910,6 +917,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio)
{
struct rq *rq = rq_of_rt_rq(rt_rq);
+#ifdef CONFIG_RT_GROUP_SCHED
+ /*
+ * Change rq's cpupri only if rt_rq is the top queue.
+ */
+ if (&rq->rt != rt_rq)
+ return;
+#endif
if (rq->online && rt_rq->highest_prio.curr != prev_prio)
cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr);
}
diff --git a/kernel/system_certificates.S b/kernel/system_certificates.S
index 4aef390..3e9868d47 100644
--- a/kernel/system_certificates.S
+++ b/kernel/system_certificates.S
@@ -3,8 +3,18 @@
__INITRODATA
+ .align 8
.globl VMLINUX_SYMBOL(system_certificate_list)
VMLINUX_SYMBOL(system_certificate_list):
+__cert_list_start:
.incbin "kernel/x509_certificate_list"
- .globl VMLINUX_SYMBOL(system_certificate_list_end)
-VMLINUX_SYMBOL(system_certificate_list_end):
+__cert_list_end:
+
+ .align 8
+ .globl VMLINUX_SYMBOL(system_certificate_list_size)
+VMLINUX_SYMBOL(system_certificate_list_size):
+#ifdef CONFIG_64BIT
+ .quad __cert_list_end - __cert_list_start
+#else
+ .long __cert_list_end - __cert_list_start
+#endif
diff --git a/kernel/system_keyring.c b/kernel/system_keyring.c
index 564dd93..52ebc70 100644
--- a/kernel/system_keyring.c
+++ b/kernel/system_keyring.c
@@ -22,7 +22,7 @@ struct key *system_trusted_keyring;
EXPORT_SYMBOL_GPL(system_trusted_keyring);
extern __initconst const u8 system_certificate_list[];
-extern __initconst const u8 system_certificate_list_end[];
+extern __initconst const unsigned long system_certificate_list_size;
/*
* Load the compiled-in keys
@@ -60,8 +60,8 @@ static __init int load_system_certificate_list(void)
pr_notice("Loading compiled-in X.509 certificates\n");
- end = system_certificate_list_end;
p = system_certificate_list;
+ end = p + system_certificate_list_size;
while (p < end) {
/* Each cert begins with an ASN.1 SEQUENCE tag and must be more
* than 256 bytes in size.
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0e9f9ea..72a0f81 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -775,7 +775,7 @@ static int ftrace_profile_init(void)
int cpu;
int ret = 0;
- for_each_online_cpu(cpu) {
+ for_each_possible_cpu(cpu) {
ret = ftrace_profile_init_cpu(cpu);
if (ret)
break;
diff --git a/kernel/user.c b/kernel/user.c
index a3a0dbf..c006131 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -51,9 +51,9 @@ struct user_namespace init_user_ns = {
.owner = GLOBAL_ROOT_UID,
.group = GLOBAL_ROOT_GID,
.proc_inum = PROC_USER_INIT_INO,
-#ifdef CONFIG_KEYS_KERBEROS_CACHE
- .krb_cache_register_sem =
- __RWSEM_INITIALIZER(init_user_ns.krb_cache_register_sem),
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ .persistent_keyring_register_sem =
+ __RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
#endif
};
EXPORT_SYMBOL_GPL(init_user_ns);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index c66912be..b010eac 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2851,19 +2851,6 @@ already_gone:
return false;
}
-static bool __flush_work(struct work_struct *work)
-{
- struct wq_barrier barr;
-
- if (start_flush_work(work, &barr)) {
- wait_for_completion(&barr.done);
- destroy_work_on_stack(&barr.work);
- return true;
- } else {
- return false;
- }
-}
-
/**
* flush_work - wait for a work to finish executing the last queueing instance
* @work: the work to flush
@@ -2877,10 +2864,18 @@ static bool __flush_work(struct work_struct *work)
*/
bool flush_work(struct work_struct *work)
{
+ struct wq_barrier barr;
+
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
- return __flush_work(work);
+ if (start_flush_work(work, &barr)) {
+ wait_for_completion(&barr.done);
+ destroy_work_on_stack(&barr.work);
+ return true;
+ } else {
+ return false;
+ }
}
EXPORT_SYMBOL_GPL(flush_work);
@@ -4832,14 +4827,7 @@ long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
INIT_WORK_ONSTACK(&wfc.work, work_for_cpu_fn);
schedule_work_on(cpu, &wfc.work);
-
- /*
- * The work item is on-stack and can't lead to deadlock through
- * flushing. Use __flush_work() to avoid spurious lockdep warnings
- * when work_on_cpu()s are nested.
- */
- __flush_work(&wfc.work);
-
+ flush_work(&wfc.work);
return wfc.ret;
}
EXPORT_SYMBOL_GPL(work_on_cpu);
diff --git a/lib/assoc_array.c b/lib/assoc_array.c
index 17edeaf..1b6a44f 100644
--- a/lib/assoc_array.c
+++ b/lib/assoc_array.c
@@ -759,8 +759,8 @@ all_leaves_cluster_together:
pr_devel("all leaves cluster together\n");
diff = INT_MAX;
for (i = 0; i < ASSOC_ARRAY_FAN_OUT; i++) {
- int x = ops->diff_objects(assoc_array_ptr_to_leaf(edit->leaf),
- assoc_array_ptr_to_leaf(node->slots[i]));
+ int x = ops->diff_objects(assoc_array_ptr_to_leaf(node->slots[i]),
+ index_key);
if (x < diff) {
BUG_ON(x < 0);
diff = x;
diff --git a/mm/Kconfig b/mm/Kconfig
index eb69f35..723bbe0 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -543,7 +543,7 @@ config ZSWAP
config MEM_SOFT_DIRTY
bool "Track memory changes"
- depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY
+ depends on CHECKPOINT_RESTORE && HAVE_ARCH_SOFT_DIRTY && PROC_FS
select PROC_PAGE_MONITOR
help
This option enables memory changes tracking by introducing a
diff --git a/mm/compaction.c b/mm/compaction.c
index 805165b..f58bcd0 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -134,6 +134,10 @@ static void update_pageblock_skip(struct compact_control *cc,
bool migrate_scanner)
{
struct zone *zone = cc->zone;
+
+ if (cc->ignore_skip_hint)
+ return;
+
if (!page)
return;
diff --git a/mm/fremap.c b/mm/fremap.c
index 5bff081..bbc4d66 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -208,9 +208,10 @@ get_write_lock:
if (mapping_cap_account_dirty(mapping)) {
unsigned long addr;
struct file *file = get_file(vma->vm_file);
+ /* mmap_region may free vma; grab the info now */
+ vm_flags = vma->vm_flags;
- addr = mmap_region(file, start, size,
- vma->vm_flags, pgoff);
+ addr = mmap_region(file, start, size, vm_flags, pgoff);
fput(file);
if (IS_ERR_VALUE(addr)) {
err = addr;
@@ -218,7 +219,7 @@ get_write_lock:
BUG_ON(addr != start);
err = 0;
}
- goto out;
+ goto out_freed;
}
mutex_lock(&mapping->i_mmap_mutex);
flush_dcache_mmap_lock(mapping);
@@ -253,6 +254,7 @@ get_write_lock:
out:
if (vma)
vm_flags = vma->vm_flags;
+out_freed:
if (likely(!has_write_lock))
up_read(&mm->mmap_sem);
else
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index bccd5a6..9c0b172 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -882,6 +882,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
ret = 0;
goto out_unlock;
}
+
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(src_ptl);
@@ -1243,6 +1244,10 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
if ((flags & FOLL_DUMP) && is_huge_zero_pmd(*pmd))
return ERR_PTR(-EFAULT);
+ /* Full NUMA hinting faults to serialise migration in fault paths */
+ if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+ goto out;
+
page = pmd_page(*pmd);
VM_BUG_ON(!PageHead(page));
if (flags & FOLL_TOUCH) {
@@ -1295,6 +1300,17 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
if (unlikely(!pmd_same(pmd, *pmdp)))
goto out_unlock;
+ /*
+ * If there are potential migrations, wait for completion and retry
+ * without disrupting NUMA hinting information. Do not relock and
+ * check_same as the page may no longer be mapped.
+ */
+ if (unlikely(pmd_trans_migrating(*pmdp))) {
+ spin_unlock(ptl);
+ wait_migrate_huge_page(vma->anon_vma, pmdp);
+ goto out;
+ }
+
page = pmd_page(pmd);
BUG_ON(is_huge_zero_page(page));
page_nid = page_to_nid(page);
@@ -1323,23 +1339,22 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
/* If the page was locked, there are no parallel migrations */
if (page_locked)
goto clear_pmdnuma;
+ }
- /*
- * Otherwise wait for potential migrations and retry. We do
- * relock and check_same as the page may no longer be mapped.
- * As the fault is being retried, do not account for it.
- */
+ /* Migration could have started since the pmd_trans_migrating check */
+ if (!page_locked) {
spin_unlock(ptl);
wait_on_page_locked(page);
page_nid = -1;
goto out;
}
- /* Page is misplaced, serialise migrations and parallel THP splits */
+ /*
+ * Page is misplaced. Page lock serialises migrations. Acquire anon_vma
+ * to serialise splits
+ */
get_page(page);
spin_unlock(ptl);
- if (!page_locked)
- lock_page(page);
anon_vma = page_lock_anon_vma_read(page);
/* Confirm the PMD did not change while page_table_lock was released */
@@ -1351,6 +1366,13 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
goto out_unlock;
}
+ /* Bail if we fail to protect against THP splits for any reason */
+ if (unlikely(!anon_vma)) {
+ put_page(page);
+ page_nid = -1;
+ goto clear_pmdnuma;
+ }
+
/*
* Migrate the THP to the requested node, returns with page unlocked
* and pmd_numa cleared.
@@ -1481,8 +1503,18 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
pmd = pmdp_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
- if (new_ptl != old_ptl)
+ if (new_ptl != old_ptl) {
+ pgtable_t pgtable;
+
+ /*
+ * Move preallocated PTE page table if new_pmd is on
+ * different PMD page table.
+ */
+ pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
+ pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
+
spin_unlock(new_ptl);
+ }
spin_unlock(old_ptl);
}
out:
@@ -1507,6 +1539,8 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
ret = 1;
if (!prot_numa) {
entry = pmdp_get_and_clear(mm, addr, pmd);
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
entry = pmd_modify(entry, newprot);
ret = HPAGE_PMD_NR;
BUG_ON(pmd_write(entry));
@@ -1521,7 +1555,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
*/
if (!is_huge_zero_page(page) &&
!pmd_numa(*pmd)) {
- entry = pmdp_get_and_clear(mm, addr, pmd);
+ entry = *pmd;
entry = pmd_mknuma(entry);
ret = HPAGE_PMD_NR;
}
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f1a0ae6..7f1a356 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -338,7 +338,7 @@ struct mem_cgroup {
static size_t memcg_size(void)
{
return sizeof(struct mem_cgroup) +
- nr_node_ids * sizeof(struct mem_cgroup_per_node);
+ nr_node_ids * sizeof(struct mem_cgroup_per_node *);
}
/* internal only representation about the status of kmem accounting. */
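
The memcg_size() fix is the classic flexible-array sizing slip: the structure's tail holds per-node pointers, so the per-node term must be the size of a pointer, not of the pointed-to struct, and the old expression over-allocated every mem_cgroup. Reduced to a userspace sketch with invented struct names and sizes:

#include <stdio.h>
#include <stdlib.h>

struct per_node { long stats[32]; };

struct owner {
        long other_state[8];
        struct per_node *nodeinfo[];    /* one pointer per node */
};

int main(void)
{
        int nr_node_ids = 4;

        /* Over-allocates: counts whole per_node structs in the tail. */
        size_t wrong = sizeof(struct owner) + nr_node_ids * sizeof(struct per_node);
        /* Correct: the tail only stores pointers to separately allocated nodes. */
        size_t right = sizeof(struct owner) + nr_node_ids * sizeof(struct per_node *);

        printf("wrong=%zu right=%zu\n", wrong, right);

        struct owner *o = malloc(right);
        free(o);
        return 0;
}
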
@@ -2694,7 +2694,10 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
goto bypass;
if (unlikely(task_in_memcg_oom(current)))
- goto bypass;
+ goto nomem;
+
+ if (gfp_mask & __GFP_NOFAIL)
+ oom = false;
/*
* We always charge the cgroup the mm_struct belongs to.
@@ -6352,6 +6355,42 @@ static void mem_cgroup_css_offline(struct cgroup_subsys_state *css)
static void mem_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct mem_cgroup *memcg = mem_cgroup_from_css(css);
+ /*
+ * XXX: css_offline() would be where we should reparent all
+ * memory to prepare the cgroup for destruction. However,
+ * memcg does not do css_tryget() and res_counter charging
+ * under the same RCU lock region, which means that charging
+ * could race with offlining. Offlining only happens to
+ * cgroups with no tasks in them but charges can show up
+ * without any tasks from the swapin path when the target
+ * memcg is looked up from the swapout record and not from the
+ * current task as it usually is. A race like this can leak
+ * charges and put pages with stale cgroup pointers into
+ * circulation:
+ *
+ * #0 #1
+ * lookup_swap_cgroup_id()
+ * rcu_read_lock()
+ * mem_cgroup_lookup()
+ * css_tryget()
+ * rcu_read_unlock()
+ * disable css_tryget()
+ * call_rcu()
+ * offline_css()
+ * reparent_charges()
+ * res_counter_charge()
+ * css_put()
+ * css_free()
+ * pc->mem_cgroup = dead memcg
+ * add page to lru
+ *
+ * The bulk of the charges are still moved in offline_css() to
+ * avoid pinning a lot of pages in case a long-term reference
+ * like a swapout record is deferring the css_free() to long
+ * after offlining. But this makes sure we catch any charges
+ * made after offlining:
+ */
+ mem_cgroup_reparent_charges(memcg);
memcg_destroy_kmem(memcg);
__mem_cgroup_free(memcg);
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index b7c1716..fabe550 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -938,6 +938,16 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
BUG_ON(!PageHWPoison(p));
return SWAP_FAIL;
}
+ /*
+ * We pinned the head page for hwpoison handling,
+ * now we split the thp and we are interested in
+ * the hwpoisoned raw page, so move the refcount
+ * to it.
+ */
+ if (hpage != p) {
+ put_page(hpage);
+ get_page(p);
+ }
/* THP is split, so ppage should be the real poisoned page. */
ppage = p;
}
@@ -1505,10 +1515,16 @@ static int soft_offline_huge_page(struct page *page, int flags)
if (ret > 0)
ret = -EIO;
} else {
- set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
- &num_poisoned_pages);
+ /* overcommit hugetlb page will be freed to buddy */
+ if (PageHuge(page)) {
+ set_page_hwpoison_huge_page(hpage);
+ dequeue_hwpoisoned_huge_page(hpage);
+ atomic_long_add(1 << compound_order(hpage),
+ &num_poisoned_pages);
+ } else {
+ SetPageHWPoison(page);
+ atomic_long_inc(&num_poisoned_pages);
+ }
}
return ret;
}
diff --git a/mm/memory.c b/mm/memory.c
index 5d9025f..6768ce9 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4271,7 +4271,7 @@ void copy_user_huge_page(struct page *dst, struct page *src,
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
-#if USE_SPLIT_PTE_PTLOCKS && BLOATED_SPINLOCKS
+#if USE_SPLIT_PTE_PTLOCKS && ALLOC_SPLIT_PTLOCKS
bool ptlock_alloc(struct page *page)
{
spinlock_t *ptl;
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index eca4a31..0cd2c4d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1197,14 +1197,16 @@ static struct page *new_vma_page(struct page *page, unsigned long private, int *
break;
vma = vma->vm_next;
}
+
+ if (PageHuge(page)) {
+ if (vma)
+ return alloc_huge_page_noerr(vma, address, 1);
+ else
+ return NULL;
+ }
/*
- * queue_pages_range() confirms that @page belongs to some vma,
- * so vma shouldn't be NULL.
+ * if !vma, alloc_page_vma() will use task or system default policy
*/
- BUG_ON(!vma);
-
- if (PageHuge(page))
- return alloc_huge_page_noerr(vma, address, 1);
return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
}
#else
@@ -1318,7 +1320,7 @@ static long do_mbind(unsigned long start, unsigned long len,
if (nr_failed && (flags & MPOL_MF_STRICT))
err = -EIO;
} else
- putback_lru_pages(&pagelist);
+ putback_movable_pages(&pagelist);
up_write(&mm->mmap_sem);
mpol_out:
diff --git a/mm/migrate.c b/mm/migrate.c
index bb94004..9194375 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -36,6 +36,7 @@
#include <linux/hugetlb_cgroup.h>
#include <linux/gfp.h>
#include <linux/balloon_compaction.h>
+#include <linux/mmu_notifier.h>
#include <asm/tlbflush.h>
@@ -316,14 +317,15 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
*/
int migrate_page_move_mapping(struct address_space *mapping,
struct page *newpage, struct page *page,
- struct buffer_head *head, enum migrate_mode mode)
+ struct buffer_head *head, enum migrate_mode mode,
+ int extra_count)
{
- int expected_count = 0;
+ int expected_count = 1 + extra_count;
void **pslot;
if (!mapping) {
/* Anonymous page without mapping */
- if (page_count(page) != 1)
+ if (page_count(page) != expected_count)
return -EAGAIN;
return MIGRATEPAGE_SUCCESS;
}
@@ -333,7 +335,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
pslot = radix_tree_lookup_slot(&mapping->page_tree,
page_index(page));
- expected_count = 2 + page_has_private(page);
+ expected_count += 1 + page_has_private(page);
if (page_count(page) != expected_count ||
radix_tree_deref_slot_protected(pslot, &mapping->tree_lock) != page) {
spin_unlock_irq(&mapping->tree_lock);
@@ -583,7 +585,7 @@ int migrate_page(struct address_space *mapping,
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -610,7 +612,7 @@ int buffer_migrate_page(struct address_space *mapping,
head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
+ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
if (rc != MIGRATEPAGE_SUCCESS)
return rc;
@@ -1654,6 +1656,18 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
return 1;
}
+bool pmd_trans_migrating(pmd_t pmd)
+{
+ struct page *page = pmd_page(pmd);
+ return PageLocked(page);
+}
+
+void wait_migrate_huge_page(struct anon_vma *anon_vma, pmd_t *pmd)
+{
+ struct page *page = pmd_page(*pmd);
+ wait_on_page_locked(page);
+}
+
/*
* Attempt to migrate a misplaced page to the specified destination
* node. Caller is expected to have an elevated reference count on
@@ -1716,12 +1730,14 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
struct page *page, int node)
{
spinlock_t *ptl;
- unsigned long haddr = address & HPAGE_PMD_MASK;
pg_data_t *pgdat = NODE_DATA(node);
int isolated = 0;
struct page *new_page = NULL;
struct mem_cgroup *memcg = NULL;
int page_lru = page_is_file_cache(page);
+ unsigned long mmun_start = address & HPAGE_PMD_MASK;
+ unsigned long mmun_end = mmun_start + HPAGE_PMD_SIZE;
+ pmd_t orig_entry;
/*
* Rate-limit the amount of data that is being migrated to a node.
@@ -1744,6 +1760,9 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
goto out_fail;
}
+ if (mm_tlb_flush_pending(mm))
+ flush_tlb_range(vma, mmun_start, mmun_end);
+
/* Prepare a page as a migration target */
__set_page_locked(new_page);
SetPageSwapBacked(new_page);
@@ -1755,9 +1774,12 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
WARN_ON(PageLRU(new_page));
/* Recheck the target PMD */
+ mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
ptl = pmd_lock(mm, pmd);
- if (unlikely(!pmd_same(*pmd, entry))) {
+ if (unlikely(!pmd_same(*pmd, entry) || page_count(page) != 2)) {
+fail_putback:
spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
/* Reverse changes made by migrate_page_copy() */
if (TestClearPageActive(new_page))
@@ -1774,7 +1796,8 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
putback_lru_page(page);
mod_zone_page_state(page_zone(page),
NR_ISOLATED_ANON + page_lru, -HPAGE_PMD_NR);
- goto out_fail;
+
+ goto out_unlock;
}
/*
@@ -1786,16 +1809,35 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
mem_cgroup_prepare_migration(page, new_page, &memcg);
+ orig_entry = *pmd;
entry = mk_pmd(new_page, vma->vm_page_prot);
- entry = pmd_mknonnuma(entry);
- entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
+ entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
- pmdp_clear_flush(vma, haddr, pmd);
- set_pmd_at(mm, haddr, pmd, entry);
- page_add_new_anon_rmap(new_page, vma, haddr);
+ /*
+ * Clear the old entry under pagetable lock and establish the new PTE.
+ * Any parallel GUP will either observe the old page blocking on the
+ * page lock, block on the page table lock or observe the new page.
+ * The SetPageUptodate on the new page and page_add_new_anon_rmap
+ * guarantee the copy is visible before the pagetable update.
+ */
+ flush_cache_range(vma, mmun_start, mmun_end);
+ page_add_new_anon_rmap(new_page, vma, mmun_start);
+ pmdp_clear_flush(vma, mmun_start, pmd);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
update_mmu_cache_pmd(vma, address, &entry);
+
+ if (page_count(page) != 2) {
+ set_pmd_at(mm, mmun_start, pmd, orig_entry);
+ flush_tlb_range(vma, mmun_start, mmun_end);
+ update_mmu_cache_pmd(vma, address, &entry);
+ page_remove_rmap(new_page);
+ goto fail_putback;
+ }
+
page_remove_rmap(page);
+
/*
* Finish the charge transaction under the page table lock to
* prevent split_huge_page() from dividing up the charge
@@ -1803,6 +1845,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
*/
mem_cgroup_end_migration(memcg, page, new_page, true);
spin_unlock(ptl);
+ mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
unlock_page(new_page);
unlock_page(page);
@@ -1820,10 +1863,15 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
out_fail:
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
out_dropref:
- entry = pmd_mknonnuma(entry);
- set_pmd_at(mm, haddr, pmd, entry);
- update_mmu_cache_pmd(vma, address, &entry);
+ ptl = pmd_lock(mm, pmd);
+ if (pmd_same(*pmd, entry)) {
+ entry = pmd_mknonnuma(entry);
+ set_pmd_at(mm, mmun_start, pmd, entry);
+ update_mmu_cache_pmd(vma, address, &entry);
+ }
+ spin_unlock(ptl);
+out_unlock:
unlock_page(page);
put_page(page);
return 0;
diff --git a/mm/mlock.c b/mm/mlock.c
index d480cd6..192e6ee 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -133,7 +133,10 @@ static void __munlock_isolation_failed(struct page *page)
/**
* munlock_vma_page - munlock a vma page
- * @page - page to be unlocked
+ * @page - page to be unlocked, either a normal page or THP page head
+ *
+ * returns the size of the page as a page mask (0 for normal page,
+ * HPAGE_PMD_NR - 1 for THP head page)
*
* called from munlock()/munmap() path with page supposedly on the LRU.
* When we munlock a page, because the vma where we found the page is being
@@ -148,21 +151,30 @@ static void __munlock_isolation_failed(struct page *page)
*/
unsigned int munlock_vma_page(struct page *page)
{
- unsigned int page_mask = 0;
+ unsigned int nr_pages;
BUG_ON(!PageLocked(page));
if (TestClearPageMlocked(page)) {
- unsigned int nr_pages = hpage_nr_pages(page);
+ nr_pages = hpage_nr_pages(page);
mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
- page_mask = nr_pages - 1;
if (!isolate_lru_page(page))
__munlock_isolated_page(page);
else
__munlock_isolation_failed(page);
+ } else {
+ nr_pages = hpage_nr_pages(page);
}
- return page_mask;
+ /*
+ * Regardless of the original PageMlocked flag, we determine nr_pages
+ * after touching the flag. This leaves a possible race with a THP page
+ * split, such that a whole THP page was munlocked, but nr_pages == 1.
+ * Returning a smaller mask due to that is OK, the worst that can
+ * happen is subsequent useless scanning of the former tail pages.
+ * The NR_MLOCK accounting can however become broken.
+ */
+ return nr_pages - 1;
}
/**
@@ -286,10 +298,12 @@ static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
int i;
int nr = pagevec_count(pvec);
- int delta_munlocked = -nr;
+ int delta_munlocked;
struct pagevec pvec_putback;
int pgrescued = 0;
+ pagevec_init(&pvec_putback, 0);
+
/* Phase 1: page isolation */
spin_lock_irq(&zone->lru_lock);
for (i = 0; i < nr; i++) {
@@ -318,18 +332,21 @@ skip_munlock:
/*
* We won't be munlocking this page in the next phase
* but we still need to release the follow_page_mask()
- * pin.
+ * pin. We cannot do it under lru_lock however. If it's
+ * the last pin, __page_cache_release would deadlock.
*/
+ pagevec_add(&pvec_putback, pvec->pages[i]);
pvec->pages[i] = NULL;
- put_page(page);
- delta_munlocked++;
}
}
+ delta_munlocked = -nr + pagevec_count(&pvec_putback);
__mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
spin_unlock_irq(&zone->lru_lock);
+ /* Now we can release pins of pages that we are not munlocking */
+ pagevec_release(&pvec_putback);
+
/* Phase 2: page munlock */
- pagevec_init(&pvec_putback, 0);
for (i = 0; i < nr; i++) {
struct page *page = pvec->pages[i];
@@ -440,7 +457,8 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
while (start < end) {
struct page *page = NULL;
- unsigned int page_mask, page_increm;
+ unsigned int page_mask;
+ unsigned long page_increm;
struct pagevec pvec;
struct zone *zone;
int zoneid;
@@ -490,7 +508,9 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
goto next;
}
}
- page_increm = 1 + (~(start >> PAGE_SHIFT) & page_mask);
+ /* It's a bug to munlock in the middle of a THP page */
+ VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
+ page_increm = 1 + page_mask;
start += page_increm * PAGE_SIZE;
next:
cond_resched();
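
munlock_vma_page() now always reports the page's size as a mask, and munlock_vma_pages_range() turns that into the scan stride directly with page_increm = 1 + page_mask: one base page for a normal page, HPAGE_PMD_NR pages for a THP head, with the new VM_BUG_ON catching a munlock start that lands inside a huge page. The stride arithmetic spelled out, with HPAGE_PMD_NR assumed to be 512 as on x86-64 with 2 MiB THP:

#include <stdio.h>

#define PAGE_SIZE    4096UL
#define HPAGE_PMD_NR 512UL             /* assumed: 2 MiB huge pages / 4 KiB base */

int main(void)
{
        /* Normal page: munlock returns mask 0 -> stride of one base page. */
        unsigned long mask = 0;
        printf("normal page stride: %lu bytes\n", (1 + mask) * PAGE_SIZE);

        /* THP head: mask HPAGE_PMD_NR - 1 -> stride covers the whole huge page. */
        mask = HPAGE_PMD_NR - 1;
        printf("THP head stride:    %lu bytes\n", (1 + mask) * PAGE_SIZE);

        return 0;
}
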
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 26667971..bb53a65 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -52,17 +52,21 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
pte_t ptent;
bool updated = false;
- ptent = ptep_modify_prot_start(mm, addr, pte);
if (!prot_numa) {
+ ptent = ptep_modify_prot_start(mm, addr, pte);
+ if (pte_numa(ptent))
+ ptent = pte_mknonnuma(ptent);
ptent = pte_modify(ptent, newprot);
updated = true;
} else {
struct page *page;
+ ptent = *pte;
page = vm_normal_page(vma, addr, oldpte);
if (page) {
if (!pte_numa(oldpte)) {
ptent = pte_mknuma(ptent);
+ set_pte_at(mm, addr, pte, ptent);
updated = true;
}
}
@@ -79,7 +83,10 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
if (updated)
pages++;
- ptep_modify_prot_commit(mm, addr, pte, ptent);
+
+ /* Only !prot_numa always clears the pte */
+ if (!prot_numa)
+ ptep_modify_prot_commit(mm, addr, pte, ptent);
} else if (IS_ENABLED(CONFIG_MIGRATION) && !pte_file(oldpte)) {
swp_entry_t entry = pte_to_swp_entry(oldpte);
@@ -181,6 +188,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
BUG_ON(addr >= end);
pgd = pgd_offset(mm, addr);
flush_cache_range(vma, addr, end);
+ set_tlb_flush_pending(mm);
do {
next = pgd_addr_end(addr, end);
if (pgd_none_or_clear_bad(pgd))
@@ -192,6 +200,7 @@ static unsigned long change_protection_range(struct vm_area_struct *vma,
/* Only flush the TLB if we actually modified any entries: */
if (pages)
flush_tlb_range(vma, start, end);
+ clear_tlb_flush_pending(mm);
return pages;
}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 580a5f0..5248fe0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1816,7 +1816,7 @@ static void zlc_clear_zones_full(struct zonelist *zonelist)
static bool zone_local(struct zone *local_zone, struct zone *zone)
{
- return node_distance(local_zone->node, zone->node) == LOCAL_DISTANCE;
+ return local_zone->node == zone->node;
}
static bool zone_allows_reclaim(struct zone *local_zone, struct zone *zone)
@@ -1913,18 +1913,17 @@ zonelist_scan:
* page was allocated in should have no effect on the
* time the page has in memory before being reclaimed.
*
- * When zone_reclaim_mode is enabled, try to stay in
- * local zones in the fastpath. If that fails, the
- * slowpath is entered, which will do another pass
- * starting with the local zones, but ultimately fall
- * back to remote zones that do not partake in the
- * fairness round-robin cycle of this zonelist.
+ * Try to stay in local zones in the fastpath. If
+ * that fails, the slowpath is entered, which will do
+ * another pass starting with the local zones, but
+ * ultimately fall back to remote zones that do not
+ * partake in the fairness round-robin cycle of this
+ * zonelist.
*/
if (alloc_flags & ALLOC_WMARK_LOW) {
if (zone_page_state(zone, NR_ALLOC_BATCH) <= 0)
continue;
- if (zone_reclaim_mode &&
- !zone_local(preferred_zone, zone))
+ if (!zone_local(preferred_zone, zone))
continue;
}
/*
@@ -2390,7 +2389,7 @@ static void prepare_slowpath(gfp_t gfp_mask, unsigned int order,
* thrash fairness information for zones that are not
* actually part of this zonelist's round-robin cycle.
*/
- if (zone_reclaim_mode && !zone_local(preferred_zone, zone))
+ if (!zone_local(preferred_zone, zone))
continue;
mod_zone_page_state(zone, NR_ALLOC_BATCH,
high_wmark_pages(zone) -
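
zone_local() now means "same NUMA node" instead of "node_distance() == LOCAL_DISTANCE", which matters because some topologies report LOCAL_DISTANCE between distinct nodes. The small sketch below contrasts the two predicates; the fake distance table is an assumption made up for the demo.

#include <stdbool.h>
#include <stdio.h>

#define LOCAL_DISTANCE 10

struct zone { int node; };

static int node_distance(int a, int b)
{
        /* fake SLIT: adjacent nodes report "local" distance, as some
         * platforms do */
        return (a == b || a - b == 1 || b - a == 1) ? LOCAL_DISTANCE : 20;
}

static bool zone_local_old(struct zone *local, struct zone *z)
{
        return node_distance(local->node, z->node) == LOCAL_DISTANCE;
}

static bool zone_local_new(struct zone *local, struct zone *z)
{
        return local->node == z->node;
}

int main(void)
{
        struct zone a = { .node = 0 }, b = { .node = 1 };

        printf("old: %d new: %d (different nodes)\n",
               zone_local_old(&a, &b), zone_local_new(&a, &b));
        printf("old: %d new: %d (same node)\n",
               zone_local_old(&a, &a), zone_local_new(&a, &a));
        return 0;
}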
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index cbb3854..a8b9199 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -110,9 +110,10 @@ int pmdp_clear_flush_young(struct vm_area_struct *vma,
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
pte_t *ptep)
{
+ struct mm_struct *mm = vma->vm_mm;
pte_t pte;
- pte = ptep_get_and_clear((vma)->vm_mm, address, ptep);
- if (pte_accessible(pte))
+ pte = ptep_get_and_clear(mm, address, ptep);
+ if (pte_accessible(mm, pte))
flush_tlb_page(vma, address);
return pte;
}
@@ -191,6 +192,9 @@ pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
+ pmd_t entry = *pmdp;
+ if (pmd_numa(entry))
+ entry = pmd_mknonnuma(entry);
set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(*pmdp));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
}
diff --git a/mm/rmap.c b/mm/rmap.c
index 55c8b8d..068522d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -600,7 +600,11 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
spinlock_t *ptl;
if (unlikely(PageHuge(page))) {
+ /* when pud is not present, pte will be NULL */
pte = huge_pte_offset(mm, address);
+ if (!pte)
+ return NULL;
+
ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
goto check;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 8297623..902a148 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2918,13 +2918,8 @@ static struct dentry_operations anon_ops = {
.d_dname = simple_dname
};
-/**
- * shmem_file_setup - get an unlinked file living in tmpfs
- * @name: name for dentry (to be seen in /proc/<pid>/maps
- * @size: size to be set for the file
- * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
- */
-struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+static struct file *__shmem_file_setup(const char *name, loff_t size,
+ unsigned long flags, unsigned int i_flags)
{
struct file *res;
struct inode *inode;
@@ -2957,6 +2952,7 @@ struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags
if (!inode)
goto put_dentry;
+ inode->i_flags |= i_flags;
d_instantiate(path.dentry, inode);
inode->i_size = size;
clear_nlink(inode); /* It is unlinked */
@@ -2977,6 +2973,32 @@ put_memory:
shmem_unacct_size(flags, size);
return res;
}
+
+/**
+ * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
+ * kernel internal. There will be NO LSM permission checks against the
+ * underlying inode. So users of this interface must do LSM checks at a
+ * higher layer. The one user is the big_key implementation. LSM checks
+ * are provided at the key level rather than the inode level.
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_kernel_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, S_PRIVATE);
+}
+
+/**
+ * shmem_file_setup - get an unlinked file living in tmpfs
+ * @name: name for dentry (to be seen in /proc/<pid>/maps)
+ * @size: size to be set for the file
+ * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
+ */
+struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags)
+{
+ return __shmem_file_setup(name, size, flags, 0);
+}
EXPORT_SYMBOL_GPL(shmem_file_setup);
/**
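
The shmem.c change is a plain wrapper split: one internal helper gains an i_flags argument, and the new shmem_kernel_file_setup() passes S_PRIVATE so LSM checks are skipped for kernel-internal files. A stripped-down userspace sketch of that pattern follows; the demo_file type and the S_PRIVATE value used here are placeholders, not the kernel definitions.

#include <stdio.h>

#define S_PRIVATE 0x200          /* stand-in for the kernel's inode flag */

struct demo_file { const char *name; unsigned int i_flags; };

static struct demo_file __file_setup(const char *name, unsigned int i_flags)
{
        struct demo_file f = { .name = name, .i_flags = i_flags };
        return f;
}

static struct demo_file kernel_file_setup(const char *name)
{
        return __file_setup(name, S_PRIVATE);   /* kernel-internal: no LSM checks */
}

static struct demo_file file_setup(const char *name)
{
        return __file_setup(name, 0);           /* normal tmpfs-backed file */
}

int main(void)
{
        struct demo_file a = kernel_file_setup("big_key");
        struct demo_file b = file_setup("user");

        printf("%s: i_flags=%#x, %s: i_flags=%#x\n",
               a.name, a.i_flags, b.name, b.i_flags);
        return 0;
}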
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 762896e..47c908f 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -530,6 +530,23 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
+static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev,
+ unsigned short type,
+ const void *daddr, const void *saddr,
+ unsigned int len)
+{
+ struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+ struct net_device *real_dev = vlan->real_dev;
+
+ return dev_hard_header(skb, real_dev, type, daddr, saddr, len);
+}
+
+static const struct header_ops vlan_passthru_header_ops = {
+ .create = vlan_passthru_hard_header,
+ .rebuild = dev_rebuild_header,
+ .parse = eth_header_parse,
+};
+
static struct device_type vlan_type = {
.name = "vlan",
};
@@ -573,7 +590,7 @@ static int vlan_dev_init(struct net_device *dev)
dev->needed_headroom = real_dev->needed_headroom;
if (real_dev->features & NETIF_F_HW_VLAN_CTAG_TX) {
- dev->header_ops = real_dev->header_ops;
+ dev->header_ops = &vlan_passthru_header_ops;
dev->hard_header_len = real_dev->hard_header_len;
} else {
dev->header_ops = &vlan_header_ops;
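
The vlan_passthru_header_ops added above exists because copying real_dev->header_ops would make the lower device's ->create run with the VLAN device as its dev argument; the pass-through ->create instead resolves the real device and forwards the call. Below is a reduced sketch of that delegation with invented demo types.

#include <stdio.h>

struct demo_dev;

struct demo_header_ops {
        int (*create)(struct demo_dev *dev, const char *daddr);
};

struct demo_dev {
        const char *name;
        struct demo_dev *real_dev;              /* NULL for physical devices */
        const struct demo_header_ops *header_ops;
};

static int eth_create(struct demo_dev *dev, const char *daddr)
{
        printf("build header on %s for %s\n", dev->name, daddr);
        return 0;
}

static int vlan_passthru_create(struct demo_dev *dev, const char *daddr)
{
        /* delegate to the underlying device, as dev_hard_header() would */
        struct demo_dev *real = dev->real_dev;

        return real->header_ops->create(real, daddr);
}

static const struct demo_header_ops eth_ops = { .create = eth_create };
static const struct demo_header_ops vlan_passthru_ops = { .create = vlan_passthru_create };

int main(void)
{
        struct demo_dev eth0 = { .name = "eth0", .header_ops = &eth_ops };
        struct demo_dev vlan = { .name = "eth0.100", .real_dev = &eth0,
                                 .header_ops = &vlan_passthru_ops };

        return vlan.header_ops->create(&vlan, "aa:bb:cc:dd:ee:ff");
}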
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index a2b480a..b9c8a6e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -307,9 +307,9 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
hard_iface->bat_iv.ogm_buff = ogm_buff;
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
- batadv_ogm_packet->header.packet_type = BATADV_IV_OGM;
- batadv_ogm_packet->header.version = BATADV_COMPAT_VERSION;
- batadv_ogm_packet->header.ttl = 2;
+ batadv_ogm_packet->packet_type = BATADV_IV_OGM;
+ batadv_ogm_packet->version = BATADV_COMPAT_VERSION;
+ batadv_ogm_packet->ttl = 2;
batadv_ogm_packet->flags = BATADV_NO_FLAGS;
batadv_ogm_packet->reserved = 0;
batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE;
@@ -346,7 +346,7 @@ batadv_iv_ogm_primary_iface_set(struct batadv_hard_iface *hard_iface)
batadv_ogm_packet = (struct batadv_ogm_packet *)ogm_buff;
batadv_ogm_packet->flags = BATADV_PRIMARIES_FIRST_HOP;
- batadv_ogm_packet->header.ttl = BATADV_TTL;
+ batadv_ogm_packet->ttl = BATADV_TTL;
}
/* when do we schedule our own ogm to be sent */
@@ -435,7 +435,7 @@ static void batadv_iv_ogm_send_to_if(struct batadv_forw_packet *forw_packet,
fwd_str, (packet_num > 0 ? "aggregated " : ""),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl,
(batadv_ogm_packet->flags & BATADV_DIRECTLINK ?
"on" : "off"),
hard_iface->net_dev->name,
@@ -491,7 +491,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
/* multihomed peer assumed
* non-primary OGMs are only broadcasted on their interface
*/
- if ((directlink && (batadv_ogm_packet->header.ttl == 1)) ||
+ if ((directlink && (batadv_ogm_packet->ttl == 1)) ||
(forw_packet->own && (forw_packet->if_incoming != primary_if))) {
/* FIXME: what about aggregated packets ? */
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
@@ -499,7 +499,7 @@ static void batadv_iv_ogm_emit(struct batadv_forw_packet *forw_packet)
(forw_packet->own ? "Sending own" : "Forwarding"),
batadv_ogm_packet->orig,
ntohl(batadv_ogm_packet->seqno),
- batadv_ogm_packet->header.ttl,
+ batadv_ogm_packet->ttl,
forw_packet->if_incoming->net_dev->name,
forw_packet->if_incoming->net_dev->dev_addr);
@@ -572,7 +572,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
*/
if ((!directlink) &&
(!(batadv_ogm_packet->flags & BATADV_DIRECTLINK)) &&
- (batadv_ogm_packet->header.ttl != 1) &&
+ (batadv_ogm_packet->ttl != 1) &&
/* own packets originating non-primary
* interfaces leave only that interface
@@ -587,7 +587,7 @@ batadv_iv_ogm_can_aggregate(const struct batadv_ogm_packet *new_bat_ogm_packet,
* interface only - we still can aggregate
*/
if ((directlink) &&
- (new_bat_ogm_packet->header.ttl == 1) &&
+ (new_bat_ogm_packet->ttl == 1) &&
(forw_packet->if_incoming == if_incoming) &&
/* packets from direct neighbors or
@@ -778,7 +778,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
struct batadv_priv *bat_priv = netdev_priv(if_incoming->soft_iface);
uint16_t tvlv_len;
- if (batadv_ogm_packet->header.ttl <= 1) {
+ if (batadv_ogm_packet->ttl <= 1) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv, "ttl exceeded\n");
return;
}
@@ -798,7 +798,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
tvlv_len = ntohs(batadv_ogm_packet->tvlv_len);
- batadv_ogm_packet->header.ttl--;
+ batadv_ogm_packet->ttl--;
memcpy(batadv_ogm_packet->prev_sender, ethhdr->h_source, ETH_ALEN);
/* apply hop penalty */
@@ -807,7 +807,7 @@ static void batadv_iv_ogm_forward(struct batadv_orig_node *orig_node,
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Forwarding packet: tq: %i, ttl: %i\n",
- batadv_ogm_packet->tq, batadv_ogm_packet->header.ttl);
+ batadv_ogm_packet->tq, batadv_ogm_packet->ttl);
/* switch off primaries first hop flag when forwarding */
batadv_ogm_packet->flags &= ~BATADV_PRIMARIES_FIRST_HOP;
@@ -972,8 +972,8 @@ batadv_iv_ogm_orig_update(struct batadv_priv *bat_priv,
spin_unlock_bh(&neigh_node->bat_iv.lq_update_lock);
if (dup_status == BATADV_NO_DUP) {
- orig_node->last_ttl = batadv_ogm_packet->header.ttl;
- neigh_node->last_ttl = batadv_ogm_packet->header.ttl;
+ orig_node->last_ttl = batadv_ogm_packet->ttl;
+ neigh_node->last_ttl = batadv_ogm_packet->ttl;
}
batadv_bonding_candidate_add(bat_priv, orig_node, neigh_node);
@@ -1247,7 +1247,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* packet in an aggregation. Here we expect that the padding
* is always zero (or not 0x01)
*/
- if (batadv_ogm_packet->header.packet_type != BATADV_IV_OGM)
+ if (batadv_ogm_packet->packet_type != BATADV_IV_OGM)
return;
/* could be changed by schedule_own_packet() */
@@ -1267,8 +1267,8 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
if_incoming->net_dev->dev_addr, batadv_ogm_packet->orig,
batadv_ogm_packet->prev_sender,
ntohl(batadv_ogm_packet->seqno), batadv_ogm_packet->tq,
- batadv_ogm_packet->header.ttl,
- batadv_ogm_packet->header.version, has_directlink_flag);
+ batadv_ogm_packet->ttl,
+ batadv_ogm_packet->version, has_directlink_flag);
rcu_read_lock();
list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
@@ -1433,7 +1433,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
* seqno and similar ttl as the non-duplicate
*/
sameseq = orig_node->last_real_seqno == ntohl(batadv_ogm_packet->seqno);
- similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->header.ttl;
+ similar_ttl = orig_node->last_ttl - 3 <= batadv_ogm_packet->ttl;
if (is_bidirect && ((dup_status == BATADV_NO_DUP) ||
(sameseq && similar_ttl)))
batadv_iv_ogm_orig_update(bat_priv, orig_node, ethhdr,
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 6c8c393..b316a4c 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -349,7 +349,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- switch (unicast_4addr_packet->u.header.packet_type) {
+ switch (unicast_4addr_packet->u.packet_type) {
case BATADV_UNICAST:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within a UNICAST packet\n");
@@ -374,7 +374,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
break;
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv, "* type: Unknown (%u)!\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
break;
case BATADV_BCAST:
@@ -387,7 +387,7 @@ static void batadv_dbg_arp(struct batadv_priv *bat_priv, struct sk_buff *skb,
default:
batadv_dbg(BATADV_DBG_DAT, bat_priv,
"* encapsulated within an unknown packet type (0x%x)\n",
- unicast_4addr_packet->u.header.packet_type);
+ unicast_4addr_packet->u.packet_type);
}
}
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 271d321..6ddb614 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -355,7 +355,7 @@ bool batadv_frag_skb_fwd(struct sk_buff *skb,
batadv_add_counter(bat_priv, BATADV_CNT_FRAG_FWD_BYTES,
skb->len + ETH_HLEN);
- packet->header.ttl--;
+ packet->ttl--;
batadv_send_skb_packet(skb, neigh_node->if_incoming,
neigh_node->addr);
ret = true;
@@ -444,9 +444,9 @@ bool batadv_frag_send_packet(struct sk_buff *skb,
goto out_err;
/* Create one header to be copied to all fragments */
- frag_header.header.packet_type = BATADV_UNICAST_FRAG;
- frag_header.header.version = BATADV_COMPAT_VERSION;
- frag_header.header.ttl = BATADV_TTL;
+ frag_header.packet_type = BATADV_UNICAST_FRAG;
+ frag_header.version = BATADV_COMPAT_VERSION;
+ frag_header.ttl = BATADV_TTL;
frag_header.seqno = htons(atomic_inc_return(&bat_priv->frag_seqno));
frag_header.reserved = 0;
frag_header.no = 0;
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index 29ae4ef..130cc32 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -194,7 +194,7 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
goto free_skb;
}
- if (icmp_header->header.packet_type != BATADV_ICMP) {
+ if (icmp_header->packet_type != BATADV_ICMP) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Error - can't send packet from char device: got bogus packet type (expected: BAT_ICMP)\n");
len = -EINVAL;
@@ -243,9 +243,9 @@ static ssize_t batadv_socket_write(struct file *file, const char __user *buff,
icmp_header->uid = socket_client->index;
- if (icmp_header->header.version != BATADV_COMPAT_VERSION) {
+ if (icmp_header->version != BATADV_COMPAT_VERSION) {
icmp_header->msg_type = BATADV_PARAMETER_PROBLEM;
- icmp_header->header.version = BATADV_COMPAT_VERSION;
+ icmp_header->version = BATADV_COMPAT_VERSION;
batadv_socket_add_packet(socket_client, icmp_header,
packet_len);
goto free_skb;
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index c51a5e5..1511f64 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -383,17 +383,17 @@ int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev,
batadv_ogm_packet = (struct batadv_ogm_packet *)skb->data;
- if (batadv_ogm_packet->header.version != BATADV_COMPAT_VERSION) {
+ if (batadv_ogm_packet->version != BATADV_COMPAT_VERSION) {
batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
"Drop packet: incompatible batman version (%i)\n",
- batadv_ogm_packet->header.version);
+ batadv_ogm_packet->version);
goto err_free;
}
/* all receive handlers return whether they received or reused
* the supplied skb. if not, we have to free the skb.
*/
- idx = batadv_ogm_packet->header.packet_type;
+ idx = batadv_ogm_packet->packet_type;
ret = (*batadv_rx_handler[idx])(skb, hard_iface);
if (ret == NET_RX_DROP)
@@ -426,8 +426,8 @@ static void batadv_recv_handler_init(void)
BUILD_BUG_ON(offsetof(struct batadv_unicast_packet, dest) != 4);
BUILD_BUG_ON(offsetof(struct batadv_unicast_tvlv_packet, dst) != 4);
BUILD_BUG_ON(offsetof(struct batadv_frag_packet, dest) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, icmph.dst) != 4);
- BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, icmph.dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet, dst) != 4);
+ BUILD_BUG_ON(offsetof(struct batadv_icmp_packet_rr, dst) != 4);
/* broadcast packet */
batadv_rx_handler[BATADV_BCAST] = batadv_recv_bcast_packet;
@@ -1119,9 +1119,9 @@ void batadv_tvlv_unicast_send(struct batadv_priv *bat_priv, uint8_t *src,
skb_reserve(skb, ETH_HLEN);
tvlv_buff = skb_put(skb, sizeof(*unicast_tvlv_packet) + tvlv_len);
unicast_tvlv_packet = (struct batadv_unicast_tvlv_packet *)tvlv_buff;
- unicast_tvlv_packet->header.packet_type = BATADV_UNICAST_TVLV;
- unicast_tvlv_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_tvlv_packet->header.ttl = BATADV_TTL;
+ unicast_tvlv_packet->packet_type = BATADV_UNICAST_TVLV;
+ unicast_tvlv_packet->version = BATADV_COMPAT_VERSION;
+ unicast_tvlv_packet->ttl = BATADV_TTL;
unicast_tvlv_packet->reserved = 0;
unicast_tvlv_packet->tvlv_len = htons(tvlv_len);
unicast_tvlv_packet->align = 0;
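
The adjusted BUILD_BUG_ON()s keep asserting that the destination address still starts at byte offset 4 after the common header fields were inlined into each packet struct. A userspace equivalent using offsetof() and _Static_assert() on a simplified stand-in layout is shown below (field sizes assumed, not copied from packet.h).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#pragma pack(2)
struct demo_icmp_packet {
        uint8_t packet_type;
        uint8_t version;
        uint8_t ttl;
        uint8_t msg_type;
        uint8_t dst[6];
        uint8_t orig[6];
};
#pragma pack()

_Static_assert(offsetof(struct demo_icmp_packet, dst) == 4,
               "receive path assumes the address starts at offset 4");

int main(void)
{
        printf("offsetof(dst) = %zu\n", offsetof(struct demo_icmp_packet, dst));
        return 0;
}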
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index 351e199..511d7e1 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -722,7 +722,7 @@ static bool batadv_can_nc_with_orig(struct batadv_priv *bat_priv,
{
if (orig_node->last_real_seqno != ntohl(ogm_packet->seqno))
return false;
- if (orig_node->last_ttl != ogm_packet->header.ttl + 1)
+ if (orig_node->last_ttl != ogm_packet->ttl + 1)
return false;
if (!batadv_compare_eth(ogm_packet->orig, ogm_packet->prev_sender))
return false;
@@ -1082,9 +1082,9 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
coded_packet = (struct batadv_coded_packet *)skb_dest->data;
skb_reset_mac_header(skb_dest);
- coded_packet->header.packet_type = BATADV_CODED;
- coded_packet->header.version = BATADV_COMPAT_VERSION;
- coded_packet->header.ttl = packet1->header.ttl;
+ coded_packet->packet_type = BATADV_CODED;
+ coded_packet->version = BATADV_COMPAT_VERSION;
+ coded_packet->ttl = packet1->ttl;
/* Info about first unicast packet */
memcpy(coded_packet->first_source, first_source, ETH_ALEN);
@@ -1097,7 +1097,7 @@ static bool batadv_nc_code_packets(struct batadv_priv *bat_priv,
memcpy(coded_packet->second_source, second_source, ETH_ALEN);
memcpy(coded_packet->second_orig_dest, packet2->dest, ETH_ALEN);
coded_packet->second_crc = packet_id2;
- coded_packet->second_ttl = packet2->header.ttl;
+ coded_packet->second_ttl = packet2->ttl;
coded_packet->second_ttvn = packet2->ttvn;
coded_packet->coded_len = htons(coding_len);
@@ -1452,7 +1452,7 @@ bool batadv_nc_skb_forward(struct sk_buff *skb,
/* We only handle unicast packets */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Try to find a coding opportunity and send the skb if one is found */
@@ -1505,7 +1505,7 @@ void batadv_nc_skb_store_for_decoding(struct batadv_priv *bat_priv,
/* Check for supported packet type */
payload = skb_network_header(skb);
packet = (struct batadv_unicast_packet *)payload;
- if (packet->header.packet_type != BATADV_UNICAST)
+ if (packet->packet_type != BATADV_UNICAST)
goto out;
/* Find existing nc_path or create a new one */
@@ -1623,7 +1623,7 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
ttvn = coded_packet_tmp.second_ttvn;
} else {
orig_dest = coded_packet_tmp.first_orig_dest;
- ttl = coded_packet_tmp.header.ttl;
+ ttl = coded_packet_tmp.ttl;
ttvn = coded_packet_tmp.first_ttvn;
}
@@ -1648,9 +1648,9 @@ batadv_nc_skb_decode_packet(struct batadv_priv *bat_priv, struct sk_buff *skb,
/* Create decoded unicast packet */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.packet_type = BATADV_UNICAST;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
- unicast_packet->header.ttl = ttl;
+ unicast_packet->packet_type = BATADV_UNICAST;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
+ unicast_packet->ttl = ttl;
memcpy(unicast_packet->dest, orig_dest, ETH_ALEN);
unicast_packet->ttvn = ttvn;
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 207459b..2dd8f24 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -155,6 +155,7 @@ enum batadv_tvlv_type {
BATADV_TVLV_ROAM = 0x05,
};
+#pragma pack(2)
/* the destination hardware field in the ARP frame is used to
* transport the claim type and the group id
*/
@@ -163,24 +164,20 @@ struct batadv_bla_claim_dst {
uint8_t type; /* bla_claimframe */
__be16 group; /* group id */
};
-
-struct batadv_header {
- uint8_t packet_type;
- uint8_t version; /* batman version field */
- uint8_t ttl;
- /* the parent struct has to add a byte after the header to make
- * everything 4 bytes aligned again
- */
-};
+#pragma pack()
/**
* struct batadv_ogm_packet - ogm (routing protocol) packet
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @flags: contains routing relevant flags - see enum batadv_iv_flags
* @tvlv_len: length of tvlv data following the ogm header
*/
struct batadv_ogm_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t flags;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -196,29 +193,51 @@ struct batadv_ogm_packet {
#define BATADV_OGM_HLEN sizeof(struct batadv_ogm_packet)
/**
- * batadv_icmp_header - common ICMP header
- * @header: common batman header
+ * batadv_icmp_header - common members among all the ICMP packets
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @msg_type: ICMP packet type
* @dst: address of the destination node
* @orig: address of the source node
* @uid: local ICMP socket identifier
+ * @align: not used - useful for alignment purposes only
+ *
+ * This structure is used for ICMP packet parsing only and is never sent
+ * over the wire. The alignment field at the end is there to ensure that
+ * members are padded the same way as they are in real packets.
*/
struct batadv_icmp_header {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t msg_type; /* see ICMP message types above */
uint8_t dst[ETH_ALEN];
uint8_t orig[ETH_ALEN];
uint8_t uid;
+ uint8_t align[3];
};
/**
* batadv_icmp_packet - ICMP packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
* @reserved: not used - useful for alignment
* @seqno: ICMP sequence number
*/
struct batadv_icmp_packet {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t reserved;
__be16 seqno;
};
@@ -227,13 +246,25 @@ struct batadv_icmp_packet {
/**
* batadv_icmp_packet_rr - ICMP RouteRecord packet
- * @icmph: common ICMP header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @msg_type: ICMP packet type
+ * @dst: address of the destination node
+ * @orig: address of the source node
+ * @uid: local ICMP socket identifier
* @rr_cur: number of entries in the rr array
* @seqno: ICMP sequence number
* @rr: route record array
*/
struct batadv_icmp_packet_rr {
- struct batadv_icmp_header icmph;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
+ uint8_t msg_type; /* see ICMP message types above */
+ uint8_t dst[ETH_ALEN];
+ uint8_t orig[ETH_ALEN];
+ uint8_t uid;
uint8_t rr_cur;
__be16 seqno;
uint8_t rr[BATADV_RR_LEN][ETH_ALEN];
@@ -253,8 +284,18 @@ struct batadv_icmp_packet_rr {
*/
#pragma pack(2)
+/**
+ * struct batadv_unicast_packet - unicast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @ttvn: translation table version number
+ * @dest: originator destination of the unicast packet
+ */
struct batadv_unicast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version;
+ uint8_t ttl;
uint8_t ttvn; /* destination translation table version number */
uint8_t dest[ETH_ALEN];
/* "4 bytes boundary + 2 bytes" long to make the payload after the
@@ -280,7 +321,9 @@ struct batadv_unicast_4addr_packet {
/**
* struct batadv_frag_packet - fragmented packet
- * @header: common batman packet header with type, compatversion, and ttl
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @dest: final destination used when routing fragments
* @orig: originator of the fragment used when merging the packet
* @no: fragment number within this sequence
@@ -289,7 +332,9 @@ struct batadv_unicast_4addr_packet {
* @total_size: size of the merged packet
*/
struct batadv_frag_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
#if defined(__BIG_ENDIAN_BITFIELD)
uint8_t no:4;
uint8_t reserved:4;
@@ -305,8 +350,19 @@ struct batadv_frag_packet {
__be16 total_size;
};
+/**
+ * struct batadv_bcast_packet - broadcast packet for network payload
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
+ * @reserved: reserved byte for alignment
+ * @seqno: sequence identification
+ * @orig: originator of the broadcast packet
+ */
struct batadv_bcast_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
__be32 seqno;
uint8_t orig[ETH_ALEN];
@@ -315,11 +371,11 @@ struct batadv_bcast_packet {
*/
};
-#pragma pack()
-
/**
* struct batadv_coded_packet - network coded packet
- * @header: common batman packet header and ttl of first included packet
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: Align following fields to 2-byte boundaries
* @first_source: original source of first included packet
* @first_orig_dest: original destination of first included packet
@@ -334,7 +390,9 @@ struct batadv_bcast_packet {
* @coded_len: length of network coded part of the payload
*/
struct batadv_coded_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t first_ttvn;
/* uint8_t first_dest[ETH_ALEN]; - saved in mac header destination */
uint8_t first_source[ETH_ALEN];
@@ -349,9 +407,13 @@ struct batadv_coded_packet {
__be16 coded_len;
};
+#pragma pack()
+
/**
* struct batadv_unicast_tvlv - generic unicast packet with tvlv payload
- * @header: common batman packet header
+ * @packet_type: batman-adv packet type, part of the general header
+ * @version: batman-adv protocol version, part of the general header
+ * @ttl: time to live for this packet, part of the general header
* @reserved: reserved field (for packet alignment)
* @src: address of the source
* @dst: address of the destination
@@ -359,7 +421,9 @@ struct batadv_coded_packet {
* @align: 2 bytes to align the header to a 4 byte boundary
*/
struct batadv_unicast_tvlv_packet {
- struct batadv_header header;
+ uint8_t packet_type;
+ uint8_t version; /* batman version field */
+ uint8_t ttl;
uint8_t reserved;
uint8_t dst[ETH_ALEN];
uint8_t src[ETH_ALEN];
@@ -420,13 +484,13 @@ struct batadv_tvlv_tt_vlan_data {
* struct batadv_tvlv_tt_change - translation table diff data
* @flags: status indicators concerning the non-mesh client (see
* batadv_tt_client_flags)
- * @reserved: reserved field
+ * @reserved: reserved field - useful for alignment purposes only
* @addr: mac address of non-mesh client that triggered this tt change
* @vid: VLAN identifier
*/
struct batadv_tvlv_tt_change {
uint8_t flags;
- uint8_t reserved;
+ uint8_t reserved[3];
uint8_t addr[ETH_ALEN];
__be16 vid;
};
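
Flattening struct batadv_header into each packet struct only preserves the on-wire format because the definitions stay inside #pragma pack(2); without it the compiler may add trailing padding, and sizeof() is what the code uses as the header length. A quick demonstration on a cut-down stand-in struct (not the real batadv_bcast_packet):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct padded {
        uint8_t  packet_type;
        uint8_t  version;
        uint8_t  ttl;
        uint8_t  reserved;
        uint32_t seqno;
        uint8_t  orig[6];
};

#pragma pack(2)
struct packed2 {
        uint8_t  packet_type;
        uint8_t  version;
        uint8_t  ttl;
        uint8_t  reserved;
        uint32_t seqno;
        uint8_t  orig[6];
};
#pragma pack()

int main(void)
{
        /* default alignment adds trailing padding, pack(2) does not */
        printf("default: sizeof=%zu offsetof(orig)=%zu\n",
               sizeof(struct padded), offsetof(struct padded, orig));
        printf("pack(2): sizeof=%zu offsetof(orig)=%zu\n",
               sizeof(struct packed2), offsetof(struct packed2, orig));
        return 0;
}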
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index d4114d7..46278bf 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -308,7 +308,7 @@ static int batadv_recv_my_icmp_packet(struct batadv_priv *bat_priv,
memcpy(icmph->dst, icmph->orig, ETH_ALEN);
memcpy(icmph->orig, primary_if->net_dev->dev_addr, ETH_ALEN);
icmph->msg_type = BATADV_ECHO_REPLY;
- icmph->header.ttl = BATADV_TTL;
+ icmph->ttl = BATADV_TTL;
res = batadv_send_skb_to_orig(skb, orig_node, NULL);
if (res != NET_XMIT_DROP)
@@ -338,9 +338,9 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
/* send TTL exceeded if packet is an echo request (traceroute) */
- if (icmp_packet->icmph.msg_type != BATADV_ECHO_REQUEST) {
+ if (icmp_packet->msg_type != BATADV_ECHO_REQUEST) {
pr_debug("Warning - can't forward icmp packet from %pM to %pM: ttl exceeded\n",
- icmp_packet->icmph.orig, icmp_packet->icmph.dst);
+ icmp_packet->orig, icmp_packet->dst);
goto out;
}
@@ -349,7 +349,7 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
goto out;
/* get routing information */
- orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->icmph.orig);
+ orig_node = batadv_orig_hash_find(bat_priv, icmp_packet->orig);
if (!orig_node)
goto out;
@@ -359,11 +359,11 @@ static int batadv_recv_icmp_ttl_exceeded(struct batadv_priv *bat_priv,
icmp_packet = (struct batadv_icmp_packet *)skb->data;
- memcpy(icmp_packet->icmph.dst, icmp_packet->icmph.orig, ETH_ALEN);
- memcpy(icmp_packet->icmph.orig, primary_if->net_dev->dev_addr,
+ memcpy(icmp_packet->dst, icmp_packet->orig, ETH_ALEN);
+ memcpy(icmp_packet->orig, primary_if->net_dev->dev_addr,
ETH_ALEN);
- icmp_packet->icmph.msg_type = BATADV_TTL_EXCEEDED;
- icmp_packet->icmph.header.ttl = BATADV_TTL;
+ icmp_packet->msg_type = BATADV_TTL_EXCEEDED;
+ icmp_packet->ttl = BATADV_TTL;
if (batadv_send_skb_to_orig(skb, orig_node, NULL) != NET_XMIT_DROP)
ret = NET_RX_SUCCESS;
@@ -434,7 +434,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
return batadv_recv_my_icmp_packet(bat_priv, skb);
/* TTL exceeded */
- if (icmph->header.ttl < 2)
+ if (icmph->ttl < 2)
return batadv_recv_icmp_ttl_exceeded(bat_priv, skb);
/* get routing information */
@@ -449,7 +449,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,
icmph = (struct batadv_icmp_header *)skb->data;
/* decrement ttl */
- icmph->header.ttl--;
+ icmph->ttl--;
/* route it */
if (batadv_send_skb_to_orig(skb, orig_node, recv_if) != NET_XMIT_DROP)
@@ -709,7 +709,7 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
/* TTL exceeded */
- if (unicast_packet->header.ttl < 2) {
+ if (unicast_packet->ttl < 2) {
pr_debug("Warning - can't forward unicast packet from %pM to %pM: ttl exceeded\n",
ethhdr->h_source, unicast_packet->dest);
goto out;
@@ -727,9 +727,9 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
/* decrement ttl */
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.ttl--;
+ unicast_packet->ttl--;
- switch (unicast_packet->header.packet_type) {
+ switch (unicast_packet->packet_type) {
case BATADV_UNICAST_4ADDR:
hdr_len = sizeof(struct batadv_unicast_4addr_packet);
break;
@@ -970,7 +970,7 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,
unicast_packet = (struct batadv_unicast_packet *)skb->data;
unicast_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- is4addr = unicast_packet->header.packet_type == BATADV_UNICAST_4ADDR;
+ is4addr = unicast_packet->packet_type == BATADV_UNICAST_4ADDR;
/* the caller function should have already pulled 2 bytes */
if (is4addr)
hdr_size = sizeof(*unicast_4addr_packet);
@@ -1160,7 +1160,7 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,
if (batadv_is_my_mac(bat_priv, bcast_packet->orig))
goto out;
- if (bcast_packet->header.ttl < 2)
+ if (bcast_packet->ttl < 2)
goto out;
orig_node = batadv_orig_hash_find(bat_priv, bcast_packet->orig);
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index c83be5e..fba4dcf 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -161,11 +161,11 @@ batadv_send_skb_push_fill_unicast(struct sk_buff *skb, int hdr_size,
return false;
unicast_packet = (struct batadv_unicast_packet *)skb->data;
- unicast_packet->header.version = BATADV_COMPAT_VERSION;
+ unicast_packet->version = BATADV_COMPAT_VERSION;
/* batman packet type: unicast */
- unicast_packet->header.packet_type = BATADV_UNICAST;
+ unicast_packet->packet_type = BATADV_UNICAST;
/* set unicast ttl */
- unicast_packet->header.ttl = BATADV_TTL;
+ unicast_packet->ttl = BATADV_TTL;
/* copy the destination for faster routing */
memcpy(unicast_packet->dest, orig_node->orig, ETH_ALEN);
/* set the destination tt version number */
@@ -221,7 +221,7 @@ bool batadv_send_skb_prepare_unicast_4addr(struct batadv_priv *bat_priv,
goto out;
uc_4addr_packet = (struct batadv_unicast_4addr_packet *)skb->data;
- uc_4addr_packet->u.header.packet_type = BATADV_UNICAST_4ADDR;
+ uc_4addr_packet->u.packet_type = BATADV_UNICAST_4ADDR;
memcpy(uc_4addr_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
uc_4addr_packet->subtype = packet_subtype;
uc_4addr_packet->reserved = 0;
@@ -436,7 +436,7 @@ int batadv_add_bcast_packet_to_list(struct batadv_priv *bat_priv,
/* as we have a copy now, it is safe to decrease the TTL */
bcast_packet = (struct batadv_bcast_packet *)newskb->data;
- bcast_packet->header.ttl--;
+ bcast_packet->ttl--;
skb_reset_mac_header(newskb);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 36f0508..a8f99d1 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -264,11 +264,11 @@ static int batadv_interface_tx(struct sk_buff *skb,
goto dropped;
bcast_packet = (struct batadv_bcast_packet *)skb->data;
- bcast_packet->header.version = BATADV_COMPAT_VERSION;
- bcast_packet->header.ttl = BATADV_TTL;
+ bcast_packet->version = BATADV_COMPAT_VERSION;
+ bcast_packet->ttl = BATADV_TTL;
/* batman packet type: broadcast */
- bcast_packet->header.packet_type = BATADV_BCAST;
+ bcast_packet->packet_type = BATADV_BCAST;
bcast_packet->reserved = 0;
/* hw address of first interface is the orig mac because only
@@ -328,7 +328,7 @@ void batadv_interface_rx(struct net_device *soft_iface,
struct sk_buff *skb, struct batadv_hard_iface *recv_if,
int hdr_size, struct batadv_orig_node *orig_node)
{
- struct batadv_header *batadv_header = (struct batadv_header *)skb->data;
+ struct batadv_bcast_packet *batadv_bcast_packet;
struct batadv_priv *bat_priv = netdev_priv(soft_iface);
__be16 ethertype = htons(ETH_P_BATMAN);
struct vlan_ethhdr *vhdr;
@@ -336,7 +336,8 @@ void batadv_interface_rx(struct net_device *soft_iface,
unsigned short vid;
bool is_bcast;
- is_bcast = (batadv_header->packet_type == BATADV_BCAST);
+ batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data;
+ is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST);
/* check if enough space is available for pulling, and pull */
if (!pskb_may_pull(skb, hdr_size))
@@ -345,7 +346,12 @@ void batadv_interface_rx(struct net_device *soft_iface,
skb_pull_rcsum(skb, hdr_size);
skb_reset_mac_header(skb);
- vid = batadv_get_vid(skb, hdr_size);
+ /* clean the netfilter state now that the batman-adv header has been
+ * removed
+ */
+ nf_reset(skb);
+
+ vid = batadv_get_vid(skb, 0);
ethhdr = eth_hdr(skb);
switch (ntohs(ethhdr->h_proto)) {
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 4add57d..ff625fe 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -333,7 +333,8 @@ static void batadv_tt_local_event(struct batadv_priv *bat_priv,
return;
tt_change_node->change.flags = flags;
- tt_change_node->change.reserved = 0;
+ memset(tt_change_node->change.reserved, 0,
+ sizeof(tt_change_node->change.reserved));
memcpy(tt_change_node->change.addr, common->addr, ETH_ALEN);
tt_change_node->change.vid = htons(common->vid);
@@ -2221,7 +2222,8 @@ static void batadv_tt_tvlv_generate(struct batadv_priv *bat_priv,
ETH_ALEN);
tt_change->flags = tt_common_entry->flags;
tt_change->vid = htons(tt_common_entry->vid);
- tt_change->reserved = 0;
+ memset(tt_change->reserved, 0,
+ sizeof(tt_change->reserved));
tt_num_entries++;
tt_change++;
diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
index 6a6c8bb..7552f9e 100644
--- a/net/bluetooth/hci_sock.c
+++ b/net/bluetooth/hci_sock.c
@@ -940,8 +940,22 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
bt_cb(skb)->pkt_type = *((unsigned char *) skb->data);
skb_pull(skb, 1);
- if (hci_pi(sk)->channel == HCI_CHANNEL_RAW &&
- bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
+ if (hci_pi(sk)->channel == HCI_CHANNEL_USER) {
+ /* No permission check is needed for user channel
+ * since that gets enforced when binding the socket.
+ *
+ * However check that the packet type is valid.
+ */
+ if (bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
+ bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
+ bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
+ err = -EINVAL;
+ goto drop;
+ }
+
+ skb_queue_tail(&hdev->raw_q, skb);
+ queue_work(hdev->workqueue, &hdev->tx_work);
+ } else if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
u16 opcode = get_unaligned_le16(skb->data);
u16 ogf = hci_opcode_ogf(opcode);
u16 ocf = hci_opcode_ocf(opcode);
@@ -972,14 +986,6 @@ static int hci_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
goto drop;
}
- if (hci_pi(sk)->channel == HCI_CHANNEL_USER &&
- bt_cb(skb)->pkt_type != HCI_COMMAND_PKT &&
- bt_cb(skb)->pkt_type != HCI_ACLDATA_PKT &&
- bt_cb(skb)->pkt_type != HCI_SCODATA_PKT) {
- err = -EINVAL;
- goto drop;
- }
-
skb_queue_tail(&hdev->raw_q, skb);
queue_work(hdev->workqueue, &hdev->tx_work);
}
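
The reordered hci_sock_sendmsg() logic above validates the packet type first on the user channel and then queues the skb directly, rather than letting user-channel traffic fall through the raw-channel command filter. A condensed sketch of just the type check follows; the constants and the "queue" step are simplified for the demo.

#include <stdbool.h>
#include <stdio.h>

enum { HCI_COMMAND_PKT = 0x01, HCI_ACLDATA_PKT = 0x02, HCI_SCODATA_PKT = 0x03 };

static bool user_channel_type_ok(unsigned char pkt_type)
{
        switch (pkt_type) {
        case HCI_COMMAND_PKT:
        case HCI_ACLDATA_PKT:
        case HCI_SCODATA_PKT:
                return true;
        default:
                return false;
        }
}

int main(void)
{
        unsigned char types[] = { HCI_COMMAND_PKT, 0x7f };

        for (unsigned i = 0; i < sizeof(types); i++)
                printf("type %#x -> %s\n", types[i],
                       user_channel_type_ok(types[i]) ? "queue" : "-EINVAL");
        return 0;
}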
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 4c214b2..ef66365 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -1998,7 +1998,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
u32 old;
struct net_bridge_mdb_htable *mdb;
- spin_lock(&br->multicast_lock);
+ spin_lock_bh(&br->multicast_lock);
if (!netif_running(br->dev))
goto unlock;
@@ -2030,7 +2030,7 @@ rollback:
}
unlock:
- spin_unlock(&br->multicast_lock);
+ spin_unlock_bh(&br->multicast_lock);
return err;
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 229d820..045d56e 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -426,6 +426,16 @@ netdev_features_t br_features_recompute(struct net_bridge *br,
int br_handle_frame_finish(struct sk_buff *skb);
rx_handler_result_t br_handle_frame(struct sk_buff **pskb);
+static inline bool br_rx_handler_check_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler) == br_handle_frame;
+}
+
+static inline struct net_bridge_port *br_port_get_check_rcu(const struct net_device *dev)
+{
+ return br_rx_handler_check_rcu(dev) ? br_port_get_rcu(dev) : NULL;
+}
+
/* br_ioctl.c */
int br_dev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
int br_ioctl_deviceless_stub(struct net *net, unsigned int cmd,
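
br_port_get_check_rcu() only treats a device's handler-private data as a bridge port after confirming the installed rx_handler is br_handle_frame, which protects the STP receive path when the device is enslaved to something else. A reduced sketch of that identity check with made-up demo structures:

#include <stdio.h>

struct demo_dev {
        int (*rx_handler)(void);
        void *rx_handler_data;
};

static int br_handle_frame(void) { return 0; }
static int other_handler(void)   { return 1; }

static void *br_port_get_check(const struct demo_dev *dev)
{
        /* only the bridge's own handler stores a bridge port here */
        return dev->rx_handler == br_handle_frame ? dev->rx_handler_data : NULL;
}

int main(void)
{
        int port = 42;
        struct demo_dev bridged = { br_handle_frame, &port };
        struct demo_dev bonded  = { other_handler, &port };

        printf("bridged: %p, bonded: %p\n",
               br_port_get_check(&bridged), br_port_get_check(&bonded));
        return 0;
}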
diff --git a/net/bridge/br_stp_bpdu.c b/net/bridge/br_stp_bpdu.c
index 8660ea3..bdb459d 100644
--- a/net/bridge/br_stp_bpdu.c
+++ b/net/bridge/br_stp_bpdu.c
@@ -153,7 +153,7 @@ void br_stp_rcv(const struct stp_proto *proto, struct sk_buff *skb,
if (buf[0] != 0 || buf[1] != 0 || buf[2] != 0)
goto err;
- p = br_port_get_rcu(dev);
+ p = br_port_get_check_rcu(dev);
if (!p)
goto err;
diff --git a/net/core/dev.c b/net/core/dev.c
index ba3b7ea..4fc1722 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4500,7 +4500,7 @@ struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
{
struct netdev_adjacent *upper;
- WARN_ON_ONCE(!rcu_read_lock_held());
+ WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 9589718..e70301e 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -64,7 +64,6 @@ static struct genl_family net_drop_monitor_family = {
.hdrsize = 0,
.name = "NET_DM",
.version = 2,
- .maxattr = NET_DM_CMD_MAX,
};
static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data);
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index ca15f32..932c6d7 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1161,6 +1161,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
neigh->parms->reachable_time :
0)));
neigh->nud_state = new;
+ notify = 1;
}
if (lladdr != neigh->ha) {
@@ -1274,7 +1275,7 @@ int neigh_compat_output(struct neighbour *neigh, struct sk_buff *skb)
if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
skb->len) < 0 &&
- dev->header_ops->rebuild(skb))
+ dev_rebuild_header(skb))
return 0;
return dev_queue_xmit(skb);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 8f97199..3030978 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -386,8 +386,14 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
!vlan_hw_offload_capable(netif_skb_features(skb),
skb->vlan_proto)) {
skb = __vlan_put_tag(skb, skb->vlan_proto, vlan_tx_tag_get(skb));
- if (unlikely(!skb))
- break;
+ if (unlikely(!skb)) {
+ /* This is actually a packet drop, but we
+ * don't want the code at the end of this
+ * function to try and re-queue a NULL skb.
+ */
+ status = NETDEV_TX_OK;
+ goto unlock_txq;
+ }
skb->vlan_tci = 0;
}
@@ -395,6 +401,7 @@ void netpoll_send_skb_on_dev(struct netpoll *np, struct sk_buff *skb,
if (status == NETDEV_TX_OK)
txq_trans_update(txq);
}
+ unlock_txq:
__netif_tx_unlock(txq);
if (status == NETDEV_TX_OK)
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2718fed..06e72d3 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3584,6 +3584,7 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet)
skb->tstamp.tv64 = 0;
skb->pkt_type = PACKET_HOST;
skb->skb_iif = 0;
+ skb->local_df = 0;
skb_dst_drop(skb);
skb->mark = 0;
secpath_reset(skb);
diff --git a/net/core/sock.c b/net/core/sock.c
index ab20ed9..5393b4b 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -882,7 +882,7 @@ set_rcvbuf:
case SO_PEEK_OFF:
if (sock->ops->set_peek_off)
- sock->ops->set_peek_off(sk, val);
+ ret = sock->ops->set_peek_off(sk, val);
else
ret = -EOPNOTSUPP;
break;
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 4ac71ff..2b90a78 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -851,7 +851,6 @@ static int dccp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
diff --git a/net/dccp/probe.c b/net/dccp/probe.c
index 4c6bdf9..595ddf0 100644
--- a/net/dccp/probe.c
+++ b/net/dccp/probe.c
@@ -152,17 +152,6 @@ static const struct file_operations dccpprobe_fops = {
.llseek = noop_llseek,
};
-static __init int setup_jprobe(void)
-{
- int ret = register_jprobe(&dccp_send_probe);
-
- if (ret) {
- request_module("dccp");
- ret = register_jprobe(&dccp_send_probe);
- }
- return ret;
-}
-
static __init int dccpprobe_init(void)
{
int ret = -ENOMEM;
@@ -174,7 +163,13 @@ static __init int dccpprobe_init(void)
if (!proc_create(procname, S_IRUSR, init_net.proc_net, &dccpprobe_fops))
goto err0;
- ret = setup_jprobe();
+ ret = register_jprobe(&dccp_send_probe);
+ if (ret) {
+ ret = request_module("dccp");
+ if (!ret)
+ ret = register_jprobe(&dccp_send_probe);
+ }
+
if (ret)
goto err1;
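
The inlined retry above differs from the removed setup_jprobe() in that it only re-registers the probe when request_module("dccp") itself reported success. A stand-alone sketch of that try/load/retry flow with stubbed-out calls (the stubs are inventions of this example):

#include <stdio.h>

static int attempts;

static int register_probe(void)          /* fails until the "module" is loaded */
{
        return ++attempts < 2 ? -2 /* pretend -ENOENT */ : 0;
}

static int request_module_stub(const char *name)
{
        printf("modprobe %s\n", name);
        return 0;                        /* pretend the load worked */
}

int main(void)
{
        int ret = register_probe();

        if (ret) {
                ret = request_module_stub("dccp");
                if (!ret)
                        ret = register_probe();
        }
        printf("final ret = %d\n", ret);
        return ret;
}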
diff --git a/net/ieee802154/6lowpan.c b/net/ieee802154/6lowpan.c
index 459e200..a2d2456 100644
--- a/net/ieee802154/6lowpan.c
+++ b/net/ieee802154/6lowpan.c
@@ -547,7 +547,7 @@ static int lowpan_header_create(struct sk_buff *skb,
hc06_ptr += 3;
} else {
/* compress nothing */
- memcpy(hc06_ptr, &hdr, 4);
+ memcpy(hc06_ptr, hdr, 4);
/* replace the top byte with new ECN | DSCP format */
*hc06_ptr = tmp;
hc06_ptr += 4;
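
The one-character 6lowpan fix matters because hdr is already a pointer to the IPv6 header: &hdr copies the pointer variable's own bytes into the compressed header instead of the header contents. A self-contained demonstration of the difference:

#include <stdio.h>
#include <string.h>

int main(void)
{
        unsigned char header[4] = { 0x60, 0x12, 0x34, 0x56 };  /* fake IPv6 start */
        unsigned char *hdr = header;                           /* like the kernel's hdr */
        unsigned char wrong[4], right[4];

        memcpy(wrong, &hdr, 4);   /* bug: bytes of the pointer value itself */
        memcpy(right, hdr, 4);    /* fix: first 4 bytes of the header */

        printf("wrong: %02x %02x %02x %02x\n", wrong[0], wrong[1], wrong[2], wrong[3]);
        printf("right: %02x %02x %02x %02x\n", right[0], right[1], right[2], right[3]);
        return 0;
}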
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index 523be38..f2e1573 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -104,7 +104,10 @@ errout:
static bool fib4_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
{
struct fib_result *result = (struct fib_result *) arg->result;
- struct net_device *dev = result->fi->fib_dev;
+ struct net_device *dev = NULL;
+
+ if (result->fi)
+ dev = result->fi->fib_dev;
/* do not accept result if the route does
* not meet the required prefix length
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index e5d4361..2cd02f3 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -28,6 +28,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
netdev_features_t enc_features;
int ghl = GRE_HEADER_SECTION;
struct gre_base_hdr *greh;
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
__be16 protocol = skb->protocol;
int tnl_hlen;
@@ -58,13 +59,13 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
} else
csum = false;
+ if (unlikely(!pskb_may_pull(skb, ghl)))
+ goto out;
+
/* setup inner skb. */
skb->protocol = greh->protocol;
skb->encapsulation = 0;
- if (unlikely(!pskb_may_pull(skb, ghl)))
- goto out;
-
__skb_pull(skb, ghl);
skb_reset_mac_header(skb);
skb_set_network_header(skb, skb_inner_network_offset(skb));
@@ -73,8 +74,10 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
goto out;
+ }
skb = segs;
tnl_hlen = skb_tnl_header_len(skb);
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 56a964a..a0f52da 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -106,6 +106,10 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = inet->inet_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = inet->inet_rcv_saddr;
r->id.idiag_dst[0] = inet->inet_daddr;
@@ -240,12 +244,19 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw,
r->idiag_family = tw->tw_family;
r->idiag_retrans = 0;
+
r->id.idiag_if = tw->tw_bound_dev_if;
sock_diag_save_cookie(tw, r->id.idiag_cookie);
+
r->id.idiag_sport = tw->tw_sport;
r->id.idiag_dport = tw->tw_dport;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = tw->tw_rcv_saddr;
r->id.idiag_dst[0] = tw->tw_daddr;
+
r->idiag_state = tw->tw_substate;
r->idiag_timer = 3;
r->idiag_expires = jiffies_to_msecs(tmo);
@@ -726,8 +737,13 @@ static int inet_diag_fill_req(struct sk_buff *skb, struct sock *sk,
r->id.idiag_sport = inet->inet_sport;
r->id.idiag_dport = ireq->ir_rmt_port;
+
+ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
+ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
+
r->id.idiag_src[0] = ireq->ir_loc_addr;
r->id.idiag_dst[0] = ireq->ir_rmt_addr;
+
r->idiag_expires = jiffies_to_msecs(tmo);
r->idiag_rqueue = 0;
r->idiag_wqueue = 0;
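
The memset()s added to inet_diag matter because only element [0] of the 16-byte idiag_src/idiag_dst arrays is written for IPv4 sockets; without zeroing, the remaining bytes of the netlink buffer would reach userspace uninitialized. A reduced sketch of the fill pattern (structure and addresses invented for the demo):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct demo_diag_id {
        uint32_t idiag_src[4];   /* big enough for IPv6, IPv4 uses [0] only */
        uint32_t idiag_dst[4];
};

static void fill_ipv4(struct demo_diag_id *id, uint32_t saddr, uint32_t daddr)
{
        memset(id->idiag_src, 0, sizeof(id->idiag_src));
        memset(id->idiag_dst, 0, sizeof(id->idiag_dst));

        id->idiag_src[0] = saddr;
        id->idiag_dst[0] = daddr;
}

int main(void)
{
        struct demo_diag_id id;

        memset(&id, 0xAA, sizeof(id));          /* simulate stale buffer contents */
        fill_ipv4(&id, 0x0100007f, 0x0200007f);
        printf("src[1] after fill: %#x (must be 0)\n", (unsigned)id.idiag_src[1]);
        return 0;
}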
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index d7aea4c..e560ef3 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -217,6 +217,7 @@ static int ipgre_rcv(struct sk_buff *skb, const struct tnl_ptk_info *tpi)
iph->saddr, iph->daddr, tpi->key);
if (tunnel) {
+ skb_pop_mac_header(skb);
ip_tunnel_rcv(tunnel, skb, tpi, log_ecn_error);
return PACKET_RCVD;
}
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 9124027..df18461 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -828,7 +828,7 @@ static int __ip_append_data(struct sock *sk,
if (cork->length + length > maxnonfragsize - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
- mtu-exthdrlen);
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
@@ -1151,7 +1151,8 @@ ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
mtu : 0xFFFF;
if (cork->length + size > maxnonfragsize - fragheaderlen) {
- ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
+ ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
+ mtu - (opt ? opt->optlen : 0));
return -EMSGSIZE;
}
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index f13bd91..a313c3f 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -423,6 +423,7 @@ static void synproxy_tg4_destroy(const struct xt_tgdtor_param *par)
static struct xt_target synproxy_tg4_reg __read_mostly = {
.name = "SYNPROXY",
.family = NFPROTO_IPV4,
+ .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
.target = synproxy_tg4,
.targetsize = sizeof(struct xt_synproxy_info),
.checkentry = synproxy_tg4_check,
diff --git a/net/ipv4/netfilter/nft_reject_ipv4.c b/net/ipv4/netfilter/nft_reject_ipv4.c
index fff5ba1..4a5e94a 100644
--- a/net/ipv4/netfilter/nft_reject_ipv4.c
+++ b/net/ipv4/netfilter/nft_reject_ipv4.c
@@ -72,7 +72,7 @@ static int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
{
const struct nft_reject *priv = nft_expr_priv(expr);
- if (nla_put_be32(skb, NFTA_REJECT_TYPE, priv->type))
+ if (nla_put_be32(skb, NFTA_REJECT_TYPE, htonl(priv->type)))
goto nla_put_failure;
switch (priv->type) {
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 269a89e..f7e522c 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -6,13 +6,6 @@
#include <linux/memcontrol.h>
#include <linux/module.h>
-static void memcg_tcp_enter_memory_pressure(struct sock *sk)
-{
- if (sk->sk_cgrp->memory_pressure)
- sk->sk_cgrp->memory_pressure = 1;
-}
-EXPORT_SYMBOL(memcg_tcp_enter_memory_pressure);
-
int tcp_init_cgroup(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
{
/*
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 44f6a20..a7e4729 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -560,15 +560,11 @@ static inline struct sock *__udp4_lib_lookup_skb(struct sk_buff *skb,
__be16 sport, __be16 dport,
struct udp_table *udptable)
{
- struct sock *sk;
const struct iphdr *iph = ip_hdr(skb);
- if (unlikely(sk = skb_steal_sock(skb)))
- return sk;
- else
- return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
- iph->daddr, dport, inet_iif(skb),
- udptable);
+ return __udp4_lib_lookup(dev_net(skb_dst(skb)->dev), iph->saddr, sport,
+ iph->daddr, dport, inet_iif(skb),
+ udptable);
}
struct sock *udp4_lib_lookup(struct net *net, __be32 saddr, __be16 sport,
@@ -1603,12 +1599,16 @@ static void flush_stack(struct sock **stack, unsigned int count,
kfree_skb(skb1);
}
-static void udp_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
+/* For TCP sockets, sk_rx_dst is protected by the socket lock.
+ * For UDP, we use xchg() to guard against concurrent changes.
+ */
+static void udp_sk_rx_dst_set(struct sock *sk, struct dst_entry *dst)
{
- struct dst_entry *dst = skb_dst(skb);
+ struct dst_entry *old;
dst_hold(dst);
- sk->sk_rx_dst = dst;
+ old = xchg(&sk->sk_rx_dst, dst);
+ dst_release(old);
}
/*
@@ -1739,15 +1739,16 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
if (udp4_csum_init(skb, uh, proto))
goto csum_error;
- if (skb->sk) {
+ sk = skb_steal_sock(skb);
+ if (sk) {
+ struct dst_entry *dst = skb_dst(skb);
int ret;
- sk = skb->sk;
- if (unlikely(sk->sk_rx_dst == NULL))
- udp_sk_rx_dst_set(sk, skb);
+ if (unlikely(sk->sk_rx_dst != dst))
+ udp_sk_rx_dst_set(sk, dst);
ret = udp_queue_rcv_skb(sk, skb);
-
+ sock_put(sk);
/* a return value > 0 means to resubmit the input, but
* it wants the return to be -protocol, or 0
*/
@@ -1913,17 +1914,20 @@ static struct sock *__udp4_lib_demux_lookup(struct net *net,
void udp_v4_early_demux(struct sk_buff *skb)
{
- const struct iphdr *iph = ip_hdr(skb);
- const struct udphdr *uh = udp_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ const struct iphdr *iph;
+ const struct udphdr *uh;
struct sock *sk;
struct dst_entry *dst;
- struct net *net = dev_net(skb->dev);
int dif = skb->dev->ifindex;
/* validate the packet */
if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
return;
+ iph = ip_hdr(skb);
+ uh = udp_hdr(skb);
+
if (skb->pkt_type == PACKET_BROADCAST ||
skb->pkt_type == PACKET_MULTICAST)
sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
@@ -2474,6 +2478,7 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features)
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
+ u16 mac_offset = skb->mac_header;
int mac_len = skb->mac_len;
int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
__be16 protocol = skb->protocol;
@@ -2493,8 +2498,11 @@ struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
/* segment inner packet. */
enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
segs = skb_mac_gso_segment(skb, enc_features);
- if (!segs || IS_ERR(segs))
+ if (!segs || IS_ERR(segs)) {
+ skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+ mac_len);
goto out;
+ }
outer_hlen = skb_tnl_header_len(skb);
skb = segs;
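
udp_sk_rx_dst_set() now swaps the cached route in with xchg() and releases whatever was there before, so two CPUs racing on an unlocked UDP socket can neither leak nor double-free a dst. A userspace sketch of that pattern using C11 atomic_exchange(); the demo_dst type and the printf refcounting are stand-ins, not kernel interfaces.

#include <stdatomic.h>
#include <stdio.h>

struct demo_dst { const char *name; };

static _Atomic(struct demo_dst *) sk_rx_dst;

static void dst_hold(struct demo_dst *d)    { printf("hold %s\n", d->name); }
static void dst_release(struct demo_dst *d) { if (d) printf("release %s\n", d->name); }

static void udp_sk_rx_dst_set(struct demo_dst *dst)
{
        struct demo_dst *old;

        dst_hold(dst);
        old = atomic_exchange(&sk_rx_dst, dst);   /* kernel: xchg(&sk->sk_rx_dst, dst) */
        dst_release(old);
}

int main(void)
{
        struct demo_dst a = { "route-a" }, b = { "route-b" };

        udp_sk_rx_dst_set(&a);
        udp_sk_rx_dst_set(&b);      /* replaces and releases route-a */
        return 0;
}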
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 83206de..79c62bd 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -41,6 +41,14 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
{
struct sk_buff *segs = ERR_PTR(-EINVAL);
unsigned int mss;
+ int offset;
+ __wsum csum;
+
+ if (skb->encapsulation &&
+ skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) {
+ segs = skb_udp_tunnel_segment(skb, features);
+ goto out;
+ }
mss = skb_shinfo(skb)->gso_size;
if (unlikely(skb->len <= mss))
@@ -63,27 +71,20 @@ static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
goto out;
}
+ /* Do software UFO. Complete and fill in the UDP checksum as
+ * HW cannot do checksum of UDP packets sent as multiple
+ * IP fragments.
+ */
+ offset = skb_checksum_start_offset(skb);
+ csum = skb_checksum(skb, offset, skb->len - offset, 0);
+ offset += skb->csum_offset;
+ *(__sum16 *)(skb->data + offset) = csum_fold(csum);
+ skb->ip_summed = CHECKSUM_NONE;
+
/* Fragment the skb. IP headers of the fragments are updated in
* inet_gso_segment()
*/
- if (skb->encapsulation && skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL)
- segs = skb_udp_tunnel_segment(skb, features);
- else {
- int offset;
- __wsum csum;
-
- /* Do software UFO. Complete and fill in the UDP checksum as
- * HW cannot do checksum of UDP packets sent as multiple
- * IP fragments.
- */
- offset = skb_checksum_start_offset(skb);
- csum = skb_checksum(skb, offset, skb->len - offset, 0);
- offset += skb->csum_offset;
- *(__sum16 *)(skb->data + offset) = csum_fold(csum);
- skb->ip_summed = CHECKSUM_NONE;
-
- segs = skb_segment(skb, features);
- }
+ segs = skb_segment(skb, features);
out:
return segs;
}
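
The hoisted "software UFO" block completes the UDP checksum in software before the packet is split into IP fragments, since hardware cannot checksum across fragments. Below is a minimal userspace version of the sum-and-fold step, roughly what skb_checksum() plus csum_fold() accomplish; byte order and interfaces are simplified and are not the kernel helpers.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t csum_fold(uint32_t sum)
{
        /* fold carries back in, then take the one's complement */
        while (sum >> 16)
                sum = (sum & 0xffff) + (sum >> 16);
        return (uint16_t)~sum;
}

static uint32_t csum_partial(const uint8_t *buf, size_t len)
{
        uint32_t sum = 0;

        for (size_t i = 0; i + 1 < len; i += 2)
                sum += (uint32_t)buf[i] << 8 | buf[i + 1];
        if (len & 1)
                sum += (uint32_t)buf[len - 1] << 8;
        return sum;
}

int main(void)
{
        const uint8_t payload[] = { 0xde, 0xad, 0xbe, 0xef, 0x01 };

        printf("folded checksum: %#06x\n",
               (unsigned)csum_fold(csum_partial(payload, sizeof(payload))));
        return 0;
}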
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 12c97d8..f62c72b 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1671,7 +1671,7 @@ void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -1682,7 +1682,7 @@ static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
{
struct in6_addr addr;
- if (ifp->prefix_len == 127) /* RFC 6164 */
+ if (ifp->prefix_len >= 127) /* RFC 6164 */
return;
ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
if (ipv6_addr_any(&addr))
@@ -2613,7 +2613,7 @@ static void init_loopback(struct net_device *dev)
if (sp_ifa->rt)
continue;
- sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0);
+ sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
/* Failure cases are ignored */
if (!IS_ERR(sp_rt)) {
@@ -3456,7 +3456,12 @@ restart:
&inet6_addr_lst[i], addr_lst) {
unsigned long age;
- if (ifp->flags & IFA_F_PERMANENT)
+ /* An address with IFA_F_PERMANENT can still have a finite
+ * preferred lifetime: preferred_lft may be set to a value that is
+ * neither zero nor infinity while valid_lft stays infinite.
+ */
+ if ((ifp->flags & IFA_F_PERMANENT) &&
+ (ifp->prefered_lft == INFINITY_LIFE_TIME))
continue;
spin_lock(&ifp->lock);
@@ -3481,7 +3486,8 @@ restart:
ifp->flags |= IFA_F_DEPRECATED;
}
- if (time_before(ifp->tstamp + ifp->valid_lft * HZ, next))
+ if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
+ (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
next = ifp->tstamp + ifp->valid_lft * HZ;
spin_unlock(&ifp->lock);
@@ -3761,7 +3767,8 @@ static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
ifa->idev->dev->ifindex);
- if (!(ifa->flags&IFA_F_PERMANENT)) {
+ if (!((ifa->flags&IFA_F_PERMANENT) &&
+ (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
preferred = ifa->prefered_lft;
valid = ifa->valid_lft;
if (preferred != INFINITY_LIFE_TIME) {
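Editor's note: the three addrconf.c hunks above apply the same rule in different places: an address is only exempt from lifetime processing when it is both IFA_F_PERMANENT and its preferred lifetime is infinite. A small sketch of that predicate, with an illustrative struct standing in for struct inet6_ifaddr; the constants copy the kernel's values but are reproduced here only for illustration.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define INFINITY_LIFE_TIME 0xFFFFFFFFU   /* value used by the kernel */
#define IFA_F_PERMANENT    0x80U         /* UAPI flag bit, copied for illustration */

struct ifaddr_view {                     /* stand-in for struct inet6_ifaddr */
    unsigned int flags;
    uint32_t prefered_lft;
    uint32_t valid_lft;
};

/* True only for addresses that really never age out: permanent flag set
 * and an infinite preferred lifetime. Everything else must keep running
 * through the lifetime state machine (deprecation, expiry, netlink dump). */
static bool addr_is_truly_permanent(const struct ifaddr_view *ifa)
{
    return (ifa->flags & IFA_F_PERMANENT) &&
           ifa->prefered_lft == INFINITY_LIFE_TIME;
}

int main(void)
{
    struct ifaddr_view ifa = { .flags = IFA_F_PERMANENT,
                               .prefered_lft = 3600,
                               .valid_lft = INFINITY_LIFE_TIME };

    printf("skip lifetime handling: %s\n",
           addr_is_truly_permanent(&ifa) ? "yes" : "no");  /* prints "no" */
    return 0;
}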
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 8dfe1f4..93b1aa3 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -73,7 +73,6 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
}
}
diff --git a/net/ipv6/fib6_rules.c b/net/ipv6/fib6_rules.c
index e275916..3fd0a57 100644
--- a/net/ipv6/fib6_rules.c
+++ b/net/ipv6/fib6_rules.c
@@ -122,7 +122,11 @@ out:
static bool fib6_rule_suppress(struct fib_rule *rule, struct fib_lookup_arg *arg)
{
struct rt6_info *rt = (struct rt6_info *) arg->result;
- struct net_device *dev = rt->rt6i_idev->dev;
+ struct net_device *dev = NULL;
+
+ if (rt->rt6i_idev)
+ dev = rt->rt6i_idev->dev;
+
/* do not accept result if the route does
* not meet the required prefix length
*/
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 4acdb63..e6f9319 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1193,11 +1193,35 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
(opt ? opt->opt_nflen : 0);
- maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);
+ maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
+ sizeof(struct frag_hdr);
if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
- if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
- ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
+ unsigned int maxnonfragsize, headersize;
+
+ headersize = sizeof(struct ipv6hdr) +
+ (opt ? opt->tot_len : 0) +
+ (dst_allfrag(&rt->dst) ?
+ sizeof(struct frag_hdr) : 0) +
+ rt->rt6i_nfheader_len;
+
+ maxnonfragsize = (np->pmtudisc >= IPV6_PMTUDISC_DO) ?
+ mtu : sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
+
+ /* dontfrag active */
+ if ((cork->length + length > mtu - headersize) && dontfrag &&
+ (sk->sk_protocol == IPPROTO_UDP ||
+ sk->sk_protocol == IPPROTO_RAW)) {
+ ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
+ sizeof(struct ipv6hdr));
+ goto emsgsize;
+ }
+
+ if (cork->length + length > maxnonfragsize - headersize) {
+emsgsize:
+ ipv6_local_error(sk, EMSGSIZE, fl6,
+ mtu - headersize +
+ sizeof(struct ipv6hdr));
return -EMSGSIZE;
}
}
@@ -1222,12 +1246,6 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
* --yoshfuji
*/
- if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP ||
- sk->sk_protocol == IPPROTO_RAW)) {
- ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
- return -EMSGSIZE;
- }
-
skb = skb_peek_tail(&sk->sk_write_queue);
cork->length += length;
if (((length > mtu) ||
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index d606232..7881965 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -103,16 +103,25 @@ struct ip6_tnl_net {
static struct net_device_stats *ip6_get_stats(struct net_device *dev)
{
- struct pcpu_tstats sum = { 0 };
+ struct pcpu_tstats tmp, sum = { 0 };
int i;
for_each_possible_cpu(i) {
+ unsigned int start;
const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
+ do {
+ start = u64_stats_fetch_begin_bh(&tstats->syncp);
+ tmp.rx_packets = tstats->rx_packets;
+ tmp.rx_bytes = tstats->rx_bytes;
+ tmp.tx_packets = tstats->tx_packets;
+ tmp.tx_bytes = tstats->tx_bytes;
+ } while (u64_stats_fetch_retry_bh(&tstats->syncp, start));
+
+ sum.rx_packets += tmp.rx_packets;
+ sum.rx_bytes += tmp.rx_bytes;
+ sum.tx_packets += tmp.tx_packets;
+ sum.tx_bytes += tmp.tx_bytes;
}
dev->stats.rx_packets = sum.rx_packets;
dev->stats.rx_bytes = sum.rx_bytes;
@@ -824,8 +833,10 @@ static int ip6_tnl_rcv(struct sk_buff *skb, __u16 protocol,
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
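Editor's note: the ip6_tunnel.c hunks above switch the tunnel statistics to u64_stats_sync: the receive path brackets its per-CPU updates with u64_stats_update_begin()/u64_stats_update_end(), and the reader retries each CPU's snapshot with u64_stats_fetch_begin_bh()/u64_stats_fetch_retry_bh() before adding it to the total. Below is a minimal userspace sketch of the summation step only, assuming a fixed array stands in for the per-CPU allocation and leaving the seqcount retry loop out.

#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 4

struct tstats {                 /* stand-in for struct pcpu_tstats */
    uint64_t rx_packets;
    uint64_t rx_bytes;
    uint64_t tx_packets;
    uint64_t tx_bytes;
};

/* Fold the per-CPU counters into one device-wide total, the way
 * ip6_get_stats() does after taking a consistent snapshot of each CPU. */
static struct tstats sum_tstats(const struct tstats percpu[NR_CPUS])
{
    struct tstats sum = { 0 };

    for (int cpu = 0; cpu < NR_CPUS; cpu++) {
        sum.rx_packets += percpu[cpu].rx_packets;
        sum.rx_bytes   += percpu[cpu].rx_bytes;
        sum.tx_packets += percpu[cpu].tx_packets;
        sum.tx_bytes   += percpu[cpu].tx_bytes;
    }
    return sum;
}

int main(void)
{
    struct tstats percpu[NR_CPUS] = {
        { 10, 1500, 2, 128 }, { 4, 600, 0, 0 }, { 0, 0, 7, 9000 }, { 1, 64, 1, 64 },
    };
    struct tstats total = sum_tstats(percpu);

    printf("rx %llu pkts / %llu bytes, tx %llu pkts / %llu bytes\n",
           (unsigned long long)total.rx_packets, (unsigned long long)total.rx_bytes,
           (unsigned long long)total.tx_packets, (unsigned long long)total.tx_bytes);
    return 0;
}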
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index ed94ba6..a4564b0 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -75,26 +75,6 @@ struct vti6_net {
struct ip6_tnl __rcu **tnls[2];
};
-static struct net_device_stats *vti6_get_stats(struct net_device *dev)
-{
- struct pcpu_tstats sum = { 0 };
- int i;
-
- for_each_possible_cpu(i) {
- const struct pcpu_tstats *tstats = per_cpu_ptr(dev->tstats, i);
-
- sum.rx_packets += tstats->rx_packets;
- sum.rx_bytes += tstats->rx_bytes;
- sum.tx_packets += tstats->tx_packets;
- sum.tx_bytes += tstats->tx_bytes;
- }
- dev->stats.rx_packets = sum.rx_packets;
- dev->stats.rx_bytes = sum.rx_bytes;
- dev->stats.tx_packets = sum.tx_packets;
- dev->stats.tx_bytes = sum.tx_bytes;
- return &dev->stats;
-}
-
#define for_each_vti6_tunnel_rcu(start) \
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
@@ -331,8 +311,10 @@ static int vti6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(t->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
skb->mark = 0;
secpath_reset(skb);
@@ -716,7 +698,7 @@ static const struct net_device_ops vti6_netdev_ops = {
.ndo_start_xmit = vti6_tnl_xmit,
.ndo_do_ioctl = vti6_ioctl,
.ndo_change_mtu = vti6_change_mtu,
- .ndo_get_stats = vti6_get_stats,
+ .ndo_get_stats64 = ip_tunnel_get_stats64,
};
/**
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 3512177..3008651 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1277,6 +1277,9 @@ skip_linkparms:
ri->prefix_len == 0)
continue;
#endif
+ if (ri->prefix_len == 0 &&
+ !in6_dev->cnf.accept_ra_defrtr)
+ continue;
if (ri->prefix_len > in6_dev->cnf.accept_ra_rt_info_max_plen)
continue;
rt6_route_rcv(skb->dev, (u8*)p, (p->nd_opt_len) << 3,
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index f78f41a..a0d1727 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -446,6 +446,7 @@ static void synproxy_tg6_destroy(const struct xt_tgdtor_param *par)
static struct xt_target synproxy_tg6_reg __read_mostly = {
.name = "SYNPROXY",
.family = NFPROTO_IPV6,
+ .hooks = (1 << NF_INET_LOCAL_IN) | (1 << NF_INET_FORWARD),
.target = synproxy_tg6,
.targetsize = sizeof(struct xt_synproxy_info),
.checkentry = synproxy_tg6_check,
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 7fb4e14..b6bb87e 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -792,7 +792,6 @@ static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7faa9d5..4b4944c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -84,6 +84,8 @@ static int ip6_dst_gc(struct dst_ops *ops);
static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sk_buff *skb);
+static int ip6_pkt_prohibit(struct sk_buff *skb);
+static int ip6_pkt_prohibit_out(struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb, u32 mtu);
@@ -234,9 +236,6 @@ static const struct rt6_info ip6_null_entry_template = {
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-static int ip6_pkt_prohibit(struct sk_buff *skb);
-static int ip6_pkt_prohibit_out(struct sk_buff *skb);
-
static const struct rt6_info ip6_prohibit_entry_template = {
.dst = {
.__refcnt = ATOMIC_INIT(1),
@@ -1565,21 +1564,24 @@ int ip6_route_add(struct fib6_config *cfg)
goto out;
}
}
- rt->dst.output = ip6_pkt_discard_out;
- rt->dst.input = ip6_pkt_discard;
rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
switch (cfg->fc_type) {
case RTN_BLACKHOLE:
rt->dst.error = -EINVAL;
+ rt->dst.output = dst_discard;
+ rt->dst.input = dst_discard;
break;
case RTN_PROHIBIT:
rt->dst.error = -EACCES;
+ rt->dst.output = ip6_pkt_prohibit_out;
+ rt->dst.input = ip6_pkt_prohibit;
break;
case RTN_THROW:
- rt->dst.error = -EAGAIN;
- break;
default:
- rt->dst.error = -ENETUNREACH;
+ rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
+ : -ENETUNREACH;
+ rt->dst.output = ip6_pkt_discard_out;
+ rt->dst.input = ip6_pkt_discard;
break;
}
goto install_route;
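Editor's note: the switch above now pairs every reject route type with both an error code and the input/output handlers that generate the matching ICMPv6 response, rather than defaulting everything to the discard handlers. A compact sketch of that mapping, with an illustrative enum in place of the kernel's RTN_* values and plain strings in place of the handler function pointers.

#include <stdio.h>

enum reject_type { BLACKHOLE, PROHIBIT, THROW, UNREACHABLE };

struct reject_policy {
    int         error;      /* value stored in dst.error       */
    const char *handler;    /* which input/output pair is used */
};

/* Mirror of the ip6_route_add() switch: blackholes drop silently,
 * prohibit answers with "administratively prohibited", and both throw
 * and plain unreachable fall back to the no-route discard handlers. */
static struct reject_policy classify(enum reject_type t)
{
    switch (t) {
    case BLACKHOLE:
        return (struct reject_policy){ -22  /* -EINVAL      */, "dst_discard" };
    case PROHIBIT:
        return (struct reject_policy){ -13  /* -EACCES      */, "ip6_pkt_prohibit" };
    case THROW:
        return (struct reject_policy){ -11  /* -EAGAIN      */, "ip6_pkt_discard" };
    default:
        return (struct reject_policy){ -101 /* -ENETUNREACH */, "ip6_pkt_discard" };
    }
}

int main(void)
{
    struct reject_policy p = classify(PROHIBIT);

    printf("error %d, handler %s\n", p.error, p.handler);
    return 0;
}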
@@ -1903,9 +1905,7 @@ static struct rt6_info *ip6_rt_copy(struct rt6_info *ort,
else
rt->rt6i_gateway = *dest;
rt->rt6i_flags = ort->rt6i_flags;
- if ((ort->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF)) ==
- (RTF_DEFAULT | RTF_ADDRCONF))
- rt6_set_from(rt, ort);
+ rt6_set_from(rt, ort);
rt->rt6i_metric = 0;
#ifdef CONFIG_IPV6_SUBTREES
@@ -2144,8 +2144,6 @@ static int ip6_pkt_discard_out(struct sk_buff *skb)
return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}
-#ifdef CONFIG_IPV6_MULTIPLE_TABLES
-
static int ip6_pkt_prohibit(struct sk_buff *skb)
{
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
@@ -2157,8 +2155,6 @@ static int ip6_pkt_prohibit_out(struct sk_buff *skb)
return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
-#endif
-
/*
* Allocate a dst for local (unicast / anycast) address.
*/
@@ -2168,12 +2164,10 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
bool anycast)
{
struct net *net = dev_net(idev->dev);
- struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev, 0, NULL);
-
- if (!rt) {
- net_warn_ratelimited("Maximum number of routes reached, consider increasing route/max_size\n");
+ struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
+ DST_NOCOUNT, NULL);
+ if (!rt)
return ERR_PTR(-ENOMEM);
- }
in6_dev_hold(idev);
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 366fbba..d3005b3 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -702,8 +702,10 @@ static int ipip6_rcv(struct sk_buff *skb)
}
tstats = this_cpu_ptr(tunnel->dev->tstats);
+ u64_stats_update_begin(&tstats->syncp);
tstats->rx_packets++;
tstats->rx_bytes += skb->len;
+ u64_stats_update_end(&tstats->syncp);
netif_rx(skb);
@@ -924,7 +926,7 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
if (tunnel->parms.iph.daddr && skb_dst(skb))
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len > mtu) {
+ if (skb->len > mtu && !skb_is_gso(skb)) {
icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
ip_rt_put(rt);
goto tx_error;
@@ -966,8 +968,10 @@ static netdev_tx_t ipip6_tunnel_xmit(struct sk_buff *skb,
tos = INET_ECN_encapsulate(tos, ipv6_get_dsfield(iph6));
skb = iptunnel_handle_offloads(skb, false, SKB_GSO_SIT);
- if (IS_ERR(skb))
+ if (IS_ERR(skb)) {
+ ip_rt_put(rt);
goto out;
+ }
err = iptunnel_xmit(rt, skb, fl4.saddr, fl4.daddr, IPPROTO_IPV6, tos,
ttl, df, !net_eq(tunnel->net, dev_net(dev)));
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 0740f93..f67033b 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -156,7 +156,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- usin->sin6_addr = flowlabel->dst;
fl6_sock_release(flowlabel);
}
}
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bcd5699..089c741 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1140,7 +1140,6 @@ do_udp_sendmsg:
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index d9b437e..bb6e206 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -528,7 +528,6 @@ static int l2tp_ip6_sendmsg(struct kiocb *iocb, struct sock *sk,
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
- daddr = &flowlabel->dst;
}
}
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 7b01b9f..c71b699 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
unsigned long cpu_flags;
size_t copied = 0;
u32 peek_seq = 0;
- u32 *seq;
+ u32 *seq, skb_len;
unsigned long used;
int target; /* Read at least this many bytes */
long timeo;
@@ -812,6 +812,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
continue;
found_ok_skb:
+ skb_len = skb->len;
/* Ok so how much can we use? */
used = skb->len - offset;
if (len < used)
@@ -844,7 +845,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,
}
/* Partial read */
- if (used + offset < skb->len)
+ if (used + offset < skb_len)
continue;
} while (len > 0);
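Editor's note: the af_llc.c change above caches skb->len before the loop hands the skb to routines that may free it, so the partial-read test no longer reads freed memory. The same discipline in a userspace sketch, assuming a hypothetical consume() helper that takes ownership of the buffer and frees it.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
    size_t len;
    unsigned char data[];
};

/* Takes ownership of b and frees it; returns how many bytes it used. */
static size_t consume(struct buf *b, size_t offset, size_t want)
{
    size_t avail = b->len - offset;
    size_t used = avail < want ? avail : want;

    free(b);
    return used;
}

int main(void)
{
    struct buf *b = malloc(sizeof(*b) + 64);
    size_t total_len, used;

    if (!b)
        return 1;
    b->len = 64;
    memset(b->data, 0, 64);

    total_len = b->len;              /* cache before ownership is given away */
    used = consume(b, 0, 16);

    /* Safe: compares the cached length, never touches the freed buffer. */
    if (used < total_len)
        printf("partial read: %zu of %zu bytes\n", used, total_len);
    return 0;
}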
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 95667b0..364ce0c 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -1368,7 +1368,7 @@ static int sta_apply_parameters(struct ieee80211_local *local,
changed |=
ieee80211_mps_set_sta_local_pm(sta,
params->local_pm);
- ieee80211_bss_info_change_notify(sdata, changed);
+ ieee80211_mbss_info_change_notify(sdata, changed);
#endif
}
@@ -2488,8 +2488,7 @@ static int ieee80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
struct ieee80211_local *local = wdev_priv(dev->ieee80211_ptr);
- if (sdata->vif.type != NL80211_IFTYPE_STATION &&
- sdata->vif.type != NL80211_IFTYPE_MESH_POINT)
+ if (sdata->vif.type != NL80211_IFTYPE_STATION)
return -EOPNOTSUPP;
if (!(local->hw.flags & IEEE80211_HW_SUPPORTS_PS))
@@ -3120,9 +3119,17 @@ static int ieee80211_channel_switch(struct wiphy *wiphy, struct net_device *dev,
params->chandef.chan->band)
return -EINVAL;
+ ifmsh->chsw_init = true;
+ if (!ifmsh->pre_value)
+ ifmsh->pre_value = 1;
+ else
+ ifmsh->pre_value++;
+
err = ieee80211_mesh_csa_beacon(sdata, params, true);
- if (err < 0)
+ if (err < 0) {
+ ifmsh->chsw_init = false;
return err;
+ }
break;
#endif
default:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 531be04..27a39de 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -823,6 +823,10 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (err)
return false;
+ /* channel switch is not supported, disconnect */
+ if (!(sdata->local->hw.wiphy->flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
+ goto disconnect;
+
params.count = csa_ie.count;
params.chandef = csa_ie.chandef;
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index 29dc505..4aea4e7 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1228,6 +1228,7 @@ struct ieee80211_csa_ie {
u8 mode;
u8 count;
u8 ttl;
+ u16 pre_value;
};
/* Parsed Information Elements */
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ff101ea..36c3a4c 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1325,7 +1325,6 @@ static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata,
sdata->vif.bss_conf.bssid = NULL;
break;
case NL80211_IFTYPE_AP_VLAN:
- break;
case NL80211_IFTYPE_P2P_DEVICE:
sdata->vif.bss_conf.bssid = sdata->vif.addr;
break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 21d5d44..7d1c3ac 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -940,6 +940,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
result);
+ local->hw.conf.flags = IEEE80211_CONF_IDLE;
+
ieee80211_led_init(local);
rtnl_lock();
@@ -1047,6 +1049,7 @@ void ieee80211_unregister_hw(struct ieee80211_hw *hw)
cancel_work_sync(&local->restart_work);
cancel_work_sync(&local->reconfig_filter);
+ flush_work(&local->sched_scan_stopped_work);
ieee80211_clear_tx_pending(local);
rate_control_deinitialize(local);
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 896fe3b..ba10525 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -943,14 +943,19 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
params.chandef.chan->center_freq);
params.block_tx = csa_ie.mode & WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT;
- if (beacon)
+ if (beacon) {
ifmsh->chsw_ttl = csa_ie.ttl - 1;
- else
- ifmsh->chsw_ttl = 0;
+ if (ifmsh->pre_value >= csa_ie.pre_value)
+ return false;
+ ifmsh->pre_value = csa_ie.pre_value;
+ }
- if (ifmsh->chsw_ttl > 0)
+ if (ifmsh->chsw_ttl < ifmsh->mshcfg.dot11MeshTTL) {
if (ieee80211_mesh_csa_beacon(sdata, &params, false) < 0)
return false;
+ } else {
+ return false;
+ }
sdata->csa_radar_required = params.radar_required;
@@ -1163,7 +1168,6 @@ static int mesh_fwd_csa_frame(struct ieee80211_sub_if_data *sdata,
offset_ttl = (len < 42) ? 7 : 10;
*(pos + offset_ttl) -= 1;
*(pos + offset_ttl + 1) &= ~WLAN_EID_CHAN_SWITCH_PARAM_INITIATOR;
- sdata->u.mesh.chsw_ttl = *(pos + offset_ttl);
memcpy(mgmt_fwd, mgmt, len);
eth_broadcast_addr(mgmt_fwd->da);
@@ -1182,7 +1186,7 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
u16 pre_value;
bool fwd_csa = true;
size_t baselen;
- u8 *pos, ttl;
+ u8 *pos;
if (mgmt->u.action.u.measurement.action_code !=
WLAN_ACTION_SPCT_CHL_SWITCH)
@@ -1193,8 +1197,8 @@ static void mesh_rx_csa_frame(struct ieee80211_sub_if_data *sdata,
u.action.u.chan_switch.variable);
ieee802_11_parse_elems(pos, len - baselen, false, &elems);
- ttl = elems.mesh_chansw_params_ie->mesh_ttl;
- if (!--ttl)
+ ifmsh->chsw_ttl = elems.mesh_chansw_params_ie->mesh_ttl;
+ if (!--ifmsh->chsw_ttl)
fwd_csa = false;
pre_value = le16_to_cpu(elems.mesh_chansw_params_ie->mesh_pre_value);
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index d7504ab..b3a3ce3 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1910,6 +1910,8 @@ static void ieee80211_mgd_probe_ap(struct ieee80211_sub_if_data *sdata,
if (ifmgd->flags & IEEE80211_STA_CONNECTION_POLL)
already = true;
+ ifmgd->flags |= IEEE80211_STA_CONNECTION_POLL;
+
mutex_unlock(&sdata->local->mtx);
if (already)
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 5d60779..4096ff6 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -226,7 +226,7 @@ minstrel_ht_calc_tp(struct minstrel_ht_sta *mi, int group, int rate)
nsecs = 1000 * mi->overhead / MINSTREL_TRUNC(mi->avg_ampdu_len);
nsecs += minstrel_mcs_groups[group].duration[rate];
- tp = 1000000 * ((mr->probability * 1000) / nsecs);
+ tp = 1000000 * ((prob * 1000) / nsecs);
mr->cur_tp = MINSTREL_TRUNC(tp);
}
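Editor's note: the one-line fix above makes the expected-throughput formula use the probability value just computed for this rate (prob) instead of the stale mr->probability field. The sketch below shows the underlying arithmetic in simplified bits-per-second form; the real code keeps probability in Minstrel's own fixed-point units, so the scaling here is purely illustrative.

#include <stdint.h>
#include <stdio.h>

/* Expected throughput for one rate: success probability (parts per
 * thousand, freshly computed for this rate) times the bits a frame
 * carries, divided by the frame's airtime. */
static uint64_t expected_tp_bps(uint32_t prob_permille, uint32_t frame_bits,
                                uint32_t airtime_ns)
{
    return (uint64_t)prob_permille * frame_bits * 1000000000ULL
           / airtime_ns / 1000;
}

int main(void)
{
    /* 90% delivery, 12000-bit frames, 200 us on the air -> 54 Mbit/s */
    printf("%llu bit/s\n",
           (unsigned long long)expected_tp_bps(900, 12000, 200000));
    return 0;
}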
@@ -277,13 +277,15 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
if (!(mg->supported & BIT(i)))
continue;
+ index = MCS_GROUP_RATES * group + i;
+
/* initialize rates selections starting indexes */
if (!mg_rates_valid) {
mg->max_tp_rate = mg->max_tp_rate2 =
mg->max_prob_rate = i;
if (!mi_rates_valid) {
mi->max_tp_rate = mi->max_tp_rate2 =
- mi->max_prob_rate = i;
+ mi->max_prob_rate = index;
mi_rates_valid = true;
}
mg_rates_valid = true;
@@ -291,7 +293,6 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
mr = &mg->rates[i];
mr->retry_updated = false;
- index = MCS_GROUP_RATES * group + i;
minstrel_calc_rate_ewma(mr);
minstrel_ht_calc_tp(mi, group, i);
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index caecef8..2b0debb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -911,7 +911,8 @@ static void ieee80211_rx_reorder_ampdu(struct ieee80211_rx_data *rx,
u16 sc;
u8 tid, ack_policy;
- if (!ieee80211_is_data_qos(hdr->frame_control))
+ if (!ieee80211_is_data_qos(hdr->frame_control) ||
+ is_multicast_ether_addr(hdr->addr1))
goto dont_reorder;
/*
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 5ad66a8..bcc4833 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -1088,6 +1088,6 @@ void ieee80211_sched_scan_stopped(struct ieee80211_hw *hw)
trace_api_sched_scan_stopped(local);
- ieee80211_queue_work(&local->hw, &local->sched_scan_stopped_work);
+ schedule_work(&local->sched_scan_stopped_work);
}
EXPORT_SYMBOL(ieee80211_sched_scan_stopped);
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index a40da20..6ab0090 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -78,6 +78,8 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
if (elems->mesh_chansw_params_ie) {
csa_ie->ttl = elems->mesh_chansw_params_ie->mesh_ttl;
csa_ie->mode = elems->mesh_chansw_params_ie->mesh_flags;
+ csa_ie->pre_value = le16_to_cpu(
+ elems->mesh_chansw_params_ie->mesh_pre_value);
}
new_freq = ieee80211_channel_to_frequency(new_chan_no, new_band);
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 592a181..9f9b9bd 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -2278,17 +2278,15 @@ void ieee80211_dfs_radar_detected_work(struct work_struct *work)
{
struct ieee80211_local *local =
container_of(work, struct ieee80211_local, radar_detected_work);
- struct cfg80211_chan_def chandef;
+ struct cfg80211_chan_def chandef = local->hw.conf.chandef;
ieee80211_dfs_cac_cancel(local);
if (local->use_chanctx)
/* currently not handled */
WARN_ON(1);
- else {
- chandef = local->hw.conf.chandef;
+ else
cfg80211_radar_event(local->hw.wiphy, &chandef, GFP_KERNEL);
- }
}
void ieee80211_radar_detected(struct ieee80211_hw *hw)
@@ -2459,14 +2457,9 @@ int ieee80211_send_action_csa(struct ieee80211_sub_if_data *sdata,
WLAN_EID_CHAN_SWITCH_PARAM_TX_RESTRICT : 0x00;
put_unaligned_le16(WLAN_REASON_MESH_CHAN, pos); /* Reason Cd */
pos += 2;
- if (!ifmsh->pre_value)
- ifmsh->pre_value = 1;
- else
- ifmsh->pre_value++;
pre_value = cpu_to_le16(ifmsh->pre_value);
memcpy(pos, &pre_value, 2); /* Precedence Value */
pos += 2;
- ifmsh->chsw_init = true;
}
ieee80211_tx_skb(sdata, skb);
diff --git a/net/netfilter/ipset/ip_set_hash_netnet.c b/net/netfilter/ipset/ip_set_hash_netnet.c
index 2bc2dec..6226803 100644
--- a/net/netfilter/ipset/ip_set_hash_netnet.c
+++ b/net/netfilter/ipset/ip_set_hash_netnet.c
@@ -59,7 +59,7 @@ hash_netnet4_data_equal(const struct hash_netnet4_elem *ip1,
u32 *multi)
{
return ip1->ipcmp == ip2->ipcmp &&
- ip2->ccmp == ip2->ccmp;
+ ip1->ccmp == ip2->ccmp;
}
static inline int
diff --git a/net/netfilter/ipvs/ip_vs_nfct.c b/net/netfilter/ipvs/ip_vs_nfct.c
index c8beafd..5a355a4 100644
--- a/net/netfilter/ipvs/ip_vs_nfct.c
+++ b/net/netfilter/ipvs/ip_vs_nfct.c
@@ -63,6 +63,7 @@
#include <net/ip_vs.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_expect.h>
+#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -97,6 +98,11 @@ ip_vs_update_conntrack(struct sk_buff *skb, struct ip_vs_conn *cp, int outin)
if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
return;
+ /* Applications may adjust TCP seqs */
+ if (cp->app && nf_ct_protonum(ct) == IPPROTO_TCP &&
+ !nfct_seqadj(ct) && !nfct_seqadj_ext_add(ct))
+ return;
+
/*
* The connection is not yet in the hashtable, so we update it.
* CIP->VIP will remain the same, so leave the tuple in
diff --git a/net/netfilter/nf_conntrack_seqadj.c b/net/netfilter/nf_conntrack_seqadj.c
index 17c1bcb..b2d38da 100644
--- a/net/netfilter/nf_conntrack_seqadj.c
+++ b/net/netfilter/nf_conntrack_seqadj.c
@@ -36,6 +36,11 @@ int nf_ct_seqadj_set(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
if (off == 0)
return 0;
+ if (unlikely(!seqadj)) {
+ WARN(1, "Wrong seqadj usage, missing nfct_seqadj_ext_add()\n");
+ return 0;
+ }
+
set_bit(IPS_SEQ_ADJUST_BIT, &ct->status);
spin_lock_bh(&ct->lock);
diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
index 902fb0a..7a394df 100644
--- a/net/netfilter/nf_conntrack_timestamp.c
+++ b/net/netfilter/nf_conntrack_timestamp.c
@@ -97,7 +97,6 @@ int nf_conntrack_tstamp_pernet_init(struct net *net)
void nf_conntrack_tstamp_pernet_fini(struct net *net)
{
nf_conntrack_tstamp_fini_sysctl(net);
- nf_ct_extend_unregister(&tstamp_extend);
}
int nf_conntrack_tstamp_init(void)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index dcddc49..71a9f49 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -312,6 +312,9 @@ static int nf_tables_table_enable(struct nft_table *table)
int err, i = 0;
list_for_each_entry(chain, &table->chains, list) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
err = nf_register_hook(&nft_base_chain(chain)->ops);
if (err < 0)
goto err;
@@ -321,6 +324,9 @@ static int nf_tables_table_enable(struct nft_table *table)
return 0;
err:
list_for_each_entry(chain, &table->chains, list) {
+ if (!(chain->flags & NFT_BASE_CHAIN))
+ continue;
+
if (i-- <= 0)
break;
@@ -333,8 +339,10 @@ static int nf_tables_table_disable(struct nft_table *table)
{
struct nft_chain *chain;
- list_for_each_entry(chain, &table->chains, list)
- nf_unregister_hook(&nft_base_chain(chain)->ops);
+ list_for_each_entry(chain, &table->chains, list) {
+ if (chain->flags & NFT_BASE_CHAIN)
+ nf_unregister_hook(&nft_base_chain(chain)->ops);
+ }
return 0;
}
@@ -1717,6 +1725,19 @@ nf_tables_delrule_one(struct nft_ctx *ctx, struct nft_rule *rule)
return -ENOENT;
}
+static int nf_table_delrule_by_chain(struct nft_ctx *ctx)
+{
+ struct nft_rule *rule;
+ int err;
+
+ list_for_each_entry(rule, &ctx->chain->rules, list) {
+ err = nf_tables_delrule_one(ctx, rule);
+ if (err < 0)
+ return err;
+ }
+ return 0;
+}
+
static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
const struct nlmsghdr *nlh,
const struct nlattr * const nla[])
@@ -1725,8 +1746,8 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
const struct nft_af_info *afi;
struct net *net = sock_net(skb->sk);
const struct nft_table *table;
- struct nft_chain *chain;
- struct nft_rule *rule, *tmp;
+ struct nft_chain *chain = NULL;
+ struct nft_rule *rule;
int family = nfmsg->nfgen_family, err = 0;
struct nft_ctx ctx;
@@ -1738,22 +1759,29 @@ static int nf_tables_delrule(struct sock *nlsk, struct sk_buff *skb,
if (IS_ERR(table))
return PTR_ERR(table);
- chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
- if (IS_ERR(chain))
- return PTR_ERR(chain);
+ if (nla[NFTA_RULE_CHAIN]) {
+ chain = nf_tables_chain_lookup(table, nla[NFTA_RULE_CHAIN]);
+ if (IS_ERR(chain))
+ return PTR_ERR(chain);
+ }
nft_ctx_init(&ctx, skb, nlh, afi, table, chain, nla);
- if (nla[NFTA_RULE_HANDLE]) {
- rule = nf_tables_rule_lookup(chain, nla[NFTA_RULE_HANDLE]);
- if (IS_ERR(rule))
- return PTR_ERR(rule);
+ if (chain) {
+ if (nla[NFTA_RULE_HANDLE]) {
+ rule = nf_tables_rule_lookup(chain,
+ nla[NFTA_RULE_HANDLE]);
+ if (IS_ERR(rule))
+ return PTR_ERR(rule);
- err = nf_tables_delrule_one(&ctx, rule);
- } else {
- /* Remove all rules in this chain */
- list_for_each_entry_safe(rule, tmp, &chain->rules, list) {
err = nf_tables_delrule_one(&ctx, rule);
+ } else {
+ err = nf_table_delrule_by_chain(&ctx);
+ }
+ } else {
+ list_for_each_entry(chain, &table->chains, list) {
+ ctx.chain = chain;
+ err = nf_table_delrule_by_chain(&ctx);
if (err < 0)
break;
}
@@ -2078,17 +2106,21 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
struct netlink_callback *cb)
{
const struct nft_set *set;
- unsigned int idx = 0, s_idx = cb->args[0];
+ unsigned int idx, s_idx = cb->args[0];
struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
if (cb->args[1])
return skb->len;
list_for_each_entry(table, &ctx->afi->tables, list) {
- if (cur_table && cur_table != table)
- continue;
+ if (cur_table) {
+ if (cur_table != table)
+ continue;
+ cur_table = NULL;
+ }
ctx->table = table;
+ idx = 0;
list_for_each_entry(set, &ctx->table->sets, list) {
if (idx < s_idx)
goto cont;
@@ -2350,7 +2382,9 @@ static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
enum nft_registers dreg;
dreg = nft_type_to_reg(set->dtype);
- return nft_validate_data_load(ctx, dreg, &elem->data, set->dtype);
+ return nft_validate_data_load(ctx, dreg, &elem->data,
+ set->dtype == NFT_DATA_VERDICT ?
+ NFT_DATA_VERDICT : NFT_DATA_VALUE);
}
int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 3c4b69e..a155d19 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -1053,6 +1053,7 @@ static void __net_exit nfnl_log_net_exit(struct net *net)
#ifdef CONFIG_PROC_FS
remove_proc_entry("nfnetlink_log", net->nf.proc_netfilter);
#endif
+ nf_log_unset(net, &nfulnl_logger);
}
static struct pernet_operations nfnl_log_net_ops = {
diff --git a/net/netfilter/nft_exthdr.c b/net/netfilter/nft_exthdr.c
index 8e0bb75..55c939f 100644
--- a/net/netfilter/nft_exthdr.c
+++ b/net/netfilter/nft_exthdr.c
@@ -31,7 +31,7 @@ static void nft_exthdr_eval(const struct nft_expr *expr,
{
struct nft_exthdr *priv = nft_expr_priv(expr);
struct nft_data *dest = &data[priv->dreg];
- unsigned int offset;
+ unsigned int offset = 0;
int err;
err = ipv6_find_hdr(pkt->skb, &offset, priv->type, NULL, NULL);
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9ff035c..a3910fc 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -325,21 +325,24 @@ static void htable_gc(unsigned long htlong)
add_timer(&ht->timer);
}
-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
{
struct hashlimit_net *hashlimit_net = hashlimit_pernet(hinfo->net);
struct proc_dir_entry *parent;
- del_timer_sync(&hinfo->timer);
-
if (hinfo->family == NFPROTO_IPV4)
parent = hashlimit_net->ipt_hashlimit;
else
parent = hashlimit_net->ip6t_hashlimit;
- if(parent != NULL)
+ if (parent != NULL)
remove_proc_entry(hinfo->name, parent);
+}
+static void htable_destroy(struct xt_hashlimit_htable *hinfo)
+{
+ del_timer_sync(&hinfo->timer);
+ htable_remove_proc_entry(hinfo);
htable_selective_cleanup(hinfo, select_all);
kfree(hinfo->name);
vfree(hinfo);
@@ -883,21 +886,15 @@ static int __net_init hashlimit_proc_net_init(struct net *net)
static void __net_exit hashlimit_proc_net_exit(struct net *net)
{
struct xt_hashlimit_htable *hinfo;
- struct proc_dir_entry *pde;
struct hashlimit_net *hashlimit_net = hashlimit_pernet(net);
- /* recent_net_exit() is called before recent_mt_destroy(). Make sure
- * that the parent xt_recent proc entry is is empty before trying to
- * remove it.
+ /* hashlimit_net_exit() is called before hashlimit_mt_destroy().
+ * Make sure that the parent ipt_hashlimit and ip6t_hashlimit proc
+ * entries are empty before trying to remove them.
*/
mutex_lock(&hashlimit_mutex);
- pde = hashlimit_net->ipt_hashlimit;
- if (pde == NULL)
- pde = hashlimit_net->ip6t_hashlimit;
-
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node)
- remove_proc_entry(hinfo->name, pde);
-
+ htable_remove_proc_entry(hinfo);
hashlimit_net->ipt_hashlimit = NULL;
hashlimit_net->ip6t_hashlimit = NULL;
mutex_unlock(&hashlimit_mutex);
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ba2548b..88cfbc1 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -237,6 +237,30 @@ struct packet_skb_cb {
static void __fanout_unlink(struct sock *sk, struct packet_sock *po);
static void __fanout_link(struct sock *sk, struct packet_sock *po);
+static struct net_device *packet_cached_dev_get(struct packet_sock *po)
+{
+ struct net_device *dev;
+
+ rcu_read_lock();
+ dev = rcu_dereference(po->cached_dev);
+ if (likely(dev))
+ dev_hold(dev);
+ rcu_read_unlock();
+
+ return dev;
+}
+
+static void packet_cached_dev_assign(struct packet_sock *po,
+ struct net_device *dev)
+{
+ rcu_assign_pointer(po->cached_dev, dev);
+}
+
+static void packet_cached_dev_reset(struct packet_sock *po)
+{
+ RCU_INIT_POINTER(po->cached_dev, NULL);
+}
+
/* register_prot_hook must be invoked with the po->bind_lock held,
* or from a context in which asynchronous accesses to the packet
* socket is not possible (packet_create()).
@@ -246,12 +270,10 @@ static void register_prot_hook(struct sock *sk)
struct packet_sock *po = pkt_sk(sk);
if (!po->running) {
- if (po->fanout) {
+ if (po->fanout)
__fanout_link(sk, po);
- } else {
+ else
dev_add_pack(&po->prot_hook);
- rcu_assign_pointer(po->cached_dev, po->prot_hook.dev);
- }
sock_hold(sk);
po->running = 1;
@@ -270,12 +292,11 @@ static void __unregister_prot_hook(struct sock *sk, bool sync)
struct packet_sock *po = pkt_sk(sk);
po->running = 0;
- if (po->fanout) {
+
+ if (po->fanout)
__fanout_unlink(sk, po);
- } else {
+ else
__dev_remove_pack(&po->prot_hook);
- RCU_INIT_POINTER(po->cached_dev, NULL);
- }
__sock_put(sk);
@@ -2059,19 +2080,6 @@ static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
return tp_len;
}
-static struct net_device *packet_cached_dev_get(struct packet_sock *po)
-{
- struct net_device *dev;
-
- rcu_read_lock();
- dev = rcu_dereference(po->cached_dev);
- if (dev)
- dev_hold(dev);
- rcu_read_unlock();
-
- return dev;
-}
-
static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
{
struct sk_buff *skb;
@@ -2088,7 +2096,7 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
mutex_lock(&po->pg_vec_lock);
- if (saddr == NULL) {
+ if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@@ -2242,7 +2250,7 @@ static int packet_snd(struct socket *sock,
* Get and verify the address.
*/
- if (saddr == NULL) {
+ if (likely(saddr == NULL)) {
dev = packet_cached_dev_get(po);
proto = po->num;
addr = NULL;
@@ -2451,6 +2459,8 @@ static int packet_release(struct socket *sock)
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, false);
+ packet_cached_dev_reset(po);
+
if (po->prot_hook.dev) {
dev_put(po->prot_hook.dev);
po->prot_hook.dev = NULL;
@@ -2506,14 +2516,17 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 protoc
spin_lock(&po->bind_lock);
unregister_prot_hook(sk, true);
+
po->num = protocol;
po->prot_hook.type = protocol;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
- po->prot_hook.dev = dev;
+ po->prot_hook.dev = dev;
po->ifindex = dev ? dev->ifindex : 0;
+ packet_cached_dev_assign(po, dev);
+
if (protocol == 0)
goto out_unlock;
@@ -2626,7 +2639,8 @@ static int packet_create(struct net *net, struct socket *sock, int protocol,
po = pkt_sk(sk);
sk->sk_family = PF_PACKET;
po->num = proto;
- RCU_INIT_POINTER(po->cached_dev, NULL);
+
+ packet_cached_dev_reset(po);
sk->sk_destruct = packet_sock_destruct;
sk_refcnt_debug_inc(sk);
@@ -3337,6 +3351,7 @@ static int packet_notifier(struct notifier_block *this,
sk->sk_error_report(sk);
}
if (msg == NETDEV_UNREGISTER) {
+ packet_cached_dev_reset(po);
po->ifindex = -1;
if (po->prot_hook.dev)
dev_put(po->prot_hook.dev);
diff --git a/net/rds/ib.c b/net/rds/ib.c
index b4c8b00..ba2dffe 100644
--- a/net/rds/ib.c
+++ b/net/rds/ib.c
@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr)
ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin);
/* due to this, we will claim to support iWARP devices unless we
check node_type. */
- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA)
+ if (ret || !cm_id->device ||
+ cm_id->device->node_type != RDMA_NODE_IB_CA)
ret = -EADDRNOTAVAIL;
rdsdebug("addr %pI4 ret %d node type %d\n",
diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c
index e590949..37be6e2 100644
--- a/net/rds/ib_send.c
+++ b/net/rds/ib_send.c
@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
&& rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
scat = &rm->data.op_sg[sg];
- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off);
- return ret;
+ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length);
+ return sizeof(struct rds_header) + ret;
}
/* FIXME we may overallocate here */
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index 33af772..62ced65 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
if (msg->msg_name) {
struct sockaddr_rose *srose;
+ struct full_sockaddr_rose *full_srose = msg->msg_name;
memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose));
srose = msg->msg_name;
@@ -1260,18 +1261,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,
srose->srose_addr = rose->dest_addr;
srose->srose_call = rose->dest_call;
srose->srose_ndigis = rose->dest_ndigis;
- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) {
- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name;
- for (n = 0 ; n < rose->dest_ndigis ; n++)
- full_srose->srose_digis[n] = rose->dest_digis[n];
- msg->msg_namelen = sizeof(struct full_sockaddr_rose);
- } else {
- if (rose->dest_ndigis >= 1) {
- srose->srose_ndigis = 1;
- srose->srose_digi = rose->dest_digis[0];
- }
- msg->msg_namelen = sizeof(struct sockaddr_rose);
- }
+ for (n = 0 ; n < rose->dest_ndigis ; n++)
+ full_srose->srose_digis[n] = rose->dest_digis[n];
+ msg->msg_namelen = sizeof(struct full_sockaddr_rose);
}
skb_free_datagram(sk, skb);
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index fd70728..69cb848 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -270,6 +270,16 @@ int tcf_register_action(struct tc_action_ops *act)
{
struct tc_action_ops *a, **ap;
+ /* Must supply act, dump, cleanup and init */
+ if (!act->act || !act->dump || !act->cleanup || !act->init)
+ return -EINVAL;
+
+ /* Supply defaults */
+ if (!act->lookup)
+ act->lookup = tcf_hash_search;
+ if (!act->walk)
+ act->walk = tcf_generic_walker;
+
write_lock(&act_mod_lock);
for (ap = &act_base; (a = *ap) != NULL; ap = &a->next) {
if (act->type == a->type || (strcmp(act->kind, a->kind) == 0)) {
@@ -381,7 +391,7 @@ int tcf_action_exec(struct sk_buff *skb, const struct tc_action *act,
}
while ((a = act) != NULL) {
repeat:
- if (a->ops && a->ops->act) {
+ if (a->ops) {
ret = a->ops->act(skb, a, res);
if (TC_MUNGED & skb->tc_verd) {
/* copied already, allow trampling */
@@ -405,7 +415,7 @@ void tcf_action_destroy(struct tc_action *act, int bind)
struct tc_action *a;
for (a = act; a; a = act) {
- if (a->ops && a->ops->cleanup) {
+ if (a->ops) {
if (a->ops->cleanup(a, bind) == ACT_P_DELETED)
module_put(a->ops->owner);
act = act->next;
@@ -424,7 +434,7 @@ tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
{
int err = -EINVAL;
- if (a->ops == NULL || a->ops->dump == NULL)
+ if (a->ops == NULL)
return err;
return a->ops->dump(skb, a, bind, ref);
}
@@ -436,7 +446,7 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
unsigned char *b = skb_tail_pointer(skb);
struct nlattr *nest;
- if (a->ops == NULL || a->ops->dump == NULL)
+ if (a->ops == NULL)
return err;
if (nla_put_string(skb, TCA_KIND, a->ops->kind))
@@ -723,8 +733,6 @@ tcf_action_get_1(struct nlattr *nla, struct nlmsghdr *n, u32 portid)
a->ops = tc_lookup_action(tb[TCA_ACT_KIND]);
if (a->ops == NULL)
goto err_free;
- if (a->ops->lookup == NULL)
- goto err_mod;
err = -ENOENT;
if (a->ops->lookup(a, index) == 0)
goto err_mod;
@@ -1084,12 +1092,6 @@ tc_dump_action(struct sk_buff *skb, struct netlink_callback *cb)
memset(&a, 0, sizeof(struct tc_action));
a.ops = a_o;
- if (a_o->walk == NULL) {
- WARN(1, "tc_dump_action: %s !capable of dumping table\n",
- a_o->kind);
- goto out_module_put;
- }
-
nlh = nlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq,
cb->nlh->nlmsg_type, sizeof(*t), 0);
if (!nlh)
diff --git a/net/sched/act_csum.c b/net/sched/act_csum.c
index 3a4c0ca..11fe1a4 100644
--- a/net/sched/act_csum.c
+++ b/net/sched/act_csum.c
@@ -77,16 +77,16 @@ static int tcf_csum_init(struct net *n, struct nlattr *nla, struct nlattr *est,
&csum_idx_gen, &csum_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_csum(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_csum(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &csum_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &csum_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_csum(pc);
spin_lock_bh(&p->tcf_lock);
p->tcf_action = parm->action;
p->update_flags = parm->update_flags;
@@ -585,9 +585,7 @@ static struct tc_action_ops act_csum_ops = {
.act = tcf_csum,
.dump = tcf_csum_dump,
.cleanup = tcf_csum_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_csum_init,
- .walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Checksum updating actions");
diff --git a/net/sched/act_gact.c b/net/sched/act_gact.c
index fd2b3cf..eb9ba60 100644
--- a/net/sched/act_gact.c
+++ b/net/sched/act_gact.c
@@ -102,10 +102,11 @@ static int tcf_gact_init(struct net *net, struct nlattr *nla,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_hash_release(pc, bind, &gact_hash_info);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_hash_release(pc, bind, &gact_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
gact = to_gact(pc);
@@ -206,9 +207,7 @@ static struct tc_action_ops act_gact_ops = {
.act = tcf_gact,
.dump = tcf_gact_dump,
.cleanup = tcf_gact_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_gact_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 60d88b6..dcbfe8c 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -141,10 +141,12 @@ static int tcf_ipt_init(struct net *net, struct nlattr *nla, struct nlattr *est,
return PTR_ERR(pc);
ret = ACT_P_CREATED;
} else {
- if (!ovr) {
- tcf_ipt_release(to_ipt(pc), bind);
+ if (bind) /* don't override defaults */
+ return 0;
+ tcf_ipt_release(to_ipt(pc), bind);
+
+ if (!ovr)
return -EEXIST;
- }
}
ipt = to_ipt(pc);
@@ -298,9 +300,7 @@ static struct tc_action_ops act_ipt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_ipt_init,
- .walk = tcf_generic_walker
};
static struct tc_action_ops act_xt_ops = {
@@ -312,9 +312,7 @@ static struct tc_action_ops act_xt_ops = {
.act = tcf_ipt,
.dump = tcf_ipt_dump,
.cleanup = tcf_ipt_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_ipt_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-13)");
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index 977c10e..2523781 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -271,9 +271,7 @@ static struct tc_action_ops act_mirred_ops = {
.act = tcf_mirred,
.dump = tcf_mirred_dump,
.cleanup = tcf_mirred_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_mirred_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002)");
diff --git a/net/sched/act_nat.c b/net/sched/act_nat.c
index 876f0ef..7686953 100644
--- a/net/sched/act_nat.c
+++ b/net/sched/act_nat.c
@@ -70,15 +70,15 @@ static int tcf_nat_init(struct net *net, struct nlattr *nla, struct nlattr *est,
&nat_idx_gen, &nat_hash_info);
if (IS_ERR(pc))
return PTR_ERR(pc);
- p = to_tcf_nat(pc);
ret = ACT_P_CREATED;
} else {
- p = to_tcf_nat(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &nat_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &nat_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
+ p = to_tcf_nat(pc);
spin_lock_bh(&p->tcf_lock);
p->old_addr = parm->old_addr;
@@ -308,9 +308,7 @@ static struct tc_action_ops act_nat_ops = {
.act = tcf_nat,
.dump = tcf_nat_dump,
.cleanup = tcf_nat_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_nat_init,
- .walk = tcf_generic_walker
};
MODULE_DESCRIPTION("Stateless NAT actions");
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 7ed78c9..7aa2dcd 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -84,10 +84,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
p = to_pedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &pedit_hash_info);
+ tcf_hash_release(pc, bind, &pedit_hash_info);
+ if (bind)
+ return 0;
+ if (!ovr)
return -EEXIST;
- }
+
if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
keys = kmalloc(ksize, GFP_KERNEL);
if (keys == NULL)
@@ -243,9 +245,7 @@ static struct tc_action_ops act_pedit_ops = {
.act = tcf_pedit,
.dump = tcf_pedit_dump,
.cleanup = tcf_pedit_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_pedit_init,
- .walk = tcf_generic_walker
};
MODULE_AUTHOR("Jamal Hadi Salim(2002-4)");
diff --git a/net/sched/act_police.c b/net/sched/act_police.c
index 272d8e9..ef246d8 100644
--- a/net/sched/act_police.c
+++ b/net/sched/act_police.c
@@ -177,10 +177,12 @@ static int tcf_act_police_locate(struct net *net, struct nlattr *nla,
if (bind) {
police->tcf_bindcnt += 1;
police->tcf_refcnt += 1;
+ return 0;
}
if (ovr)
goto override;
- return ret;
+ /* not replacing */
+ return -EEXIST;
}
}
@@ -407,7 +409,6 @@ static struct tc_action_ops act_police_ops = {
.act = tcf_act_police,
.dump = tcf_act_police_dump,
.cleanup = tcf_act_police_cleanup,
- .lookup = tcf_hash_search,
.init = tcf_act_police_locate,
.walk = tcf_act_police_walker
};
diff --git a/net/sched/act_simple.c b/net/sched/act_simple.c
index 7725eb4..f7b45ab 100644
--- a/net/sched/act_simple.c
+++ b/net/sched/act_simple.c
@@ -142,10 +142,13 @@ static int tcf_simp_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_defact(pc);
- if (!ovr) {
- tcf_simp_release(d, bind);
+
+ if (bind)
+ return 0;
+ tcf_simp_release(d, bind);
+ if (!ovr)
return -EEXIST;
- }
+
reset_policy(d, defdata, parm);
}
@@ -201,7 +204,6 @@ static struct tc_action_ops act_simp_ops = {
.dump = tcf_simp_dump,
.cleanup = tcf_simp_cleanup,
.init = tcf_simp_init,
- .walk = tcf_generic_walker,
};
MODULE_AUTHOR("Jamal Hadi Salim(2005)");
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index cb42211..8fe9d25 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -120,10 +120,11 @@ static int tcf_skbedit_init(struct net *net, struct nlattr *nla,
ret = ACT_P_CREATED;
} else {
d = to_skbedit(pc);
- if (!ovr) {
- tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (bind)
+ return 0;
+ tcf_hash_release(pc, bind, &skbedit_hash_info);
+ if (!ovr)
return -EEXIST;
- }
}
spin_lock_bh(&d->tcf_lock);
@@ -203,7 +204,6 @@ static struct tc_action_ops act_skbedit_ops = {
.dump = tcf_skbedit_dump,
.cleanup = tcf_skbedit_cleanup,
.init = tcf_skbedit_init,
- .walk = tcf_generic_walker,
};
MODULE_AUTHOR("Alexander Duyck, <alexander.h.duyck@intel.com>");
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index 0e1e38b..717b210 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -1477,11 +1477,22 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
sch_tree_lock(sch);
}
+ rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
+
+ ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
+
+ psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
+ psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
+
/* it used to be a nasty bug here, we have to check that node
* is really leaf before changing cl->un.leaf !
*/
if (!cl->level) {
- cl->quantum = hopt->rate.rate / q->rate2quantum;
+ u64 quantum = cl->rate.rate_bytes_ps;
+
+ do_div(quantum, q->rate2quantum);
+ cl->quantum = min_t(u64, quantum, INT_MAX);
+
if (!hopt->quantum && cl->quantum < 1000) {
pr_warning(
"HTB: quantum of class %X is small. Consider r2q change.\n",
@@ -1500,13 +1511,6 @@ static int htb_change_class(struct Qdisc *sch, u32 classid,
cl->prio = TC_HTB_NUMPRIO - 1;
}
- rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
-
- ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
-
- psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
- psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
-
cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
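Editor's note: with 64-bit rates the quantum can no longer be derived from the 32-bit hopt->rate.rate field, so the hunk above divides the precomputed rate_bytes_ps by r2q and clamps the result to INT_MAX. The same computation as a standalone C sketch; the function and variable names are illustrative, not the kernel's.

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Derive an HTB class quantum (bytes dequeued per round) from a 64-bit
 * byte rate and the qdisc's rate-to-quantum divisor, clamped so it still
 * fits the int-sized quantum field. */
static int htb_quantum(uint64_t rate_bytes_ps, unsigned int rate2quantum)
{
    uint64_t quantum = rate_bytes_ps / rate2quantum;

    return quantum > INT_MAX ? INT_MAX : (int)quantum;
}

int main(void)
{
    /* 40 Gbit/s (5e9 bytes/s) with the default r2q of 10 would overflow a
     * 32-bit rate field, but the 64-bit math stays exact: 500000000 bytes. */
    printf("quantum = %d bytes\n", htb_quantum(5000000000ULL, 10));
    return 0;
}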
diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
index a609005..887e672 100644
--- a/net/sched/sch_tbf.c
+++ b/net/sched/sch_tbf.c
@@ -118,6 +118,32 @@ struct tbf_sched_data {
};
+/* Time to Length: convert a time in ns to a length in bytes,
+ * i.e. determine how many bytes can be sent in the given time.
+ */
+static u64 psched_ns_t2l(const struct psched_ratecfg *r,
+ u64 time_in_ns)
+{
+ /* The formula is :
+ * len = (time_in_ns * r->rate_bytes_ps) / NSEC_PER_SEC
+ */
+ u64 len = time_in_ns * r->rate_bytes_ps;
+
+ do_div(len, NSEC_PER_SEC);
+
+ if (unlikely(r->linklayer == TC_LINKLAYER_ATM)) {
+ do_div(len, 53);
+ len = len * 48;
+ }
+
+ if (len > r->overhead)
+ len -= r->overhead;
+ else
+ len = 0;
+
+ return len;
+}
+
/*
* Return length of individual segments of a gso packet,
* including all headers (MAC, IP, TCP/UDP)
@@ -289,10 +315,11 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
struct tbf_sched_data *q = qdisc_priv(sch);
struct nlattr *tb[TCA_TBF_MAX + 1];
struct tc_tbf_qopt *qopt;
- struct qdisc_rate_table *rtab = NULL;
- struct qdisc_rate_table *ptab = NULL;
struct Qdisc *child = NULL;
- int max_size, n;
+ struct psched_ratecfg rate;
+ struct psched_ratecfg peak;
+ u64 max_size;
+ s64 buffer, mtu;
u64 rate64 = 0, prate64 = 0;
err = nla_parse_nested(tb, TCA_TBF_MAX, opt, tbf_policy);
@@ -304,38 +331,13 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
goto done;
qopt = nla_data(tb[TCA_TBF_PARMS]);
- rtab = qdisc_get_rtab(&qopt->rate, tb[TCA_TBF_RTAB]);
- if (rtab == NULL)
- goto done;
-
- if (qopt->peakrate.rate) {
- if (qopt->peakrate.rate > qopt->rate.rate)
- ptab = qdisc_get_rtab(&qopt->peakrate, tb[TCA_TBF_PTAB]);
- if (ptab == NULL)
- goto done;
- }
-
- for (n = 0; n < 256; n++)
- if (rtab->data[n] > qopt->buffer)
- break;
- max_size = (n << qopt->rate.cell_log) - 1;
- if (ptab) {
- int size;
-
- for (n = 0; n < 256; n++)
- if (ptab->data[n] > qopt->mtu)
- break;
- size = (n << qopt->peakrate.cell_log) - 1;
- if (size < max_size)
- max_size = size;
- }
- if (max_size < 0)
- goto done;
+ if (qopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->rate,
+ tb[TCA_TBF_RTAB]));
- if (max_size < psched_mtu(qdisc_dev(sch)))
- pr_warn_ratelimited("sch_tbf: burst %u is lower than device %s mtu (%u) !\n",
- max_size, qdisc_dev(sch)->name,
- psched_mtu(qdisc_dev(sch)));
+ if (qopt->peakrate.linklayer == TC_LINKLAYER_UNAWARE)
+ qdisc_put_rtab(qdisc_get_rtab(&qopt->peakrate,
+ tb[TCA_TBF_PTAB]));
if (q->qdisc != &noop_qdisc) {
err = fifo_set_limit(q->qdisc, qopt->limit);
@@ -349,6 +351,39 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
}
}
+ buffer = min_t(u64, PSCHED_TICKS2NS(qopt->buffer), ~0U);
+ mtu = min_t(u64, PSCHED_TICKS2NS(qopt->mtu), ~0U);
+
+ if (tb[TCA_TBF_RATE64])
+ rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
+ psched_ratecfg_precompute(&rate, &qopt->rate, rate64);
+
+ max_size = min_t(u64, psched_ns_t2l(&rate, buffer), ~0U);
+
+ if (qopt->peakrate.rate) {
+ if (tb[TCA_TBF_PRATE64])
+ prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
+ psched_ratecfg_precompute(&peak, &qopt->peakrate, prate64);
+ if (peak.rate_bytes_ps <= rate.rate_bytes_ps) {
+ pr_warn_ratelimited("sch_tbf: peakrate %llu is lower than or equals to rate %llu !\n",
+ peak.rate_bytes_ps, rate.rate_bytes_ps);
+ err = -EINVAL;
+ goto done;
+ }
+
+ max_size = min_t(u64, max_size, psched_ns_t2l(&peak, mtu));
+ }
+
+ if (max_size < psched_mtu(qdisc_dev(sch)))
+ pr_warn_ratelimited("sch_tbf: burst %llu is lower than device %s mtu (%u) !\n",
+ max_size, qdisc_dev(sch)->name,
+ psched_mtu(qdisc_dev(sch)));
+
+ if (!max_size) {
+ err = -EINVAL;
+ goto done;
+ }
+
sch_tree_lock(sch);
if (child) {
qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen);
@@ -362,13 +397,9 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
q->tokens = q->buffer;
q->ptokens = q->mtu;
- if (tb[TCA_TBF_RATE64])
- rate64 = nla_get_u64(tb[TCA_TBF_RATE64]);
- psched_ratecfg_precompute(&q->rate, &rtab->rate, rate64);
- if (ptab) {
- if (tb[TCA_TBF_PRATE64])
- prate64 = nla_get_u64(tb[TCA_TBF_PRATE64]);
- psched_ratecfg_precompute(&q->peak, &ptab->rate, prate64);
+ memcpy(&q->rate, &rate, sizeof(struct psched_ratecfg));
+ if (qopt->peakrate.rate) {
+ memcpy(&q->peak, &peak, sizeof(struct psched_ratecfg));
q->peak_present = true;
} else {
q->peak_present = false;
@@ -377,10 +408,6 @@ static int tbf_change(struct Qdisc *sch, struct nlattr *opt)
sch_tree_unlock(sch);
err = 0;
done:
- if (rtab)
- qdisc_put_rtab(rtab);
- if (ptab)
- qdisc_put_rtab(ptab);
return err;
}
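Editor's note: the reworked tbf_change() above sizes the burst directly from the configured rate: psched_ns_t2l() converts the buffer/mtu times into byte lengths and max_size becomes the smaller of the two. A userspace sketch of that conversion under the same formula, with an illustrative struct standing in for psched_ratecfg; inputs are assumed small enough that the 64-bit multiplication does not overflow.

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

struct ratecfg {                 /* stand-in for struct psched_ratecfg */
    uint64_t rate_bytes_ps;
    unsigned int overhead;
    int atm_linklayer;
};

/* len = time_in_ns * rate_bytes_ps / NSEC_PER_SEC, with the ATM cell tax
 * (48 payload bytes per 53-byte cell) and per-packet overhead applied. */
static uint64_t ns_to_len(const struct ratecfg *r, uint64_t time_ns)
{
    uint64_t len = time_ns * r->rate_bytes_ps / NSEC_PER_SEC;

    if (r->atm_linklayer)
        len = len / 53 * 48;

    return len > r->overhead ? len - r->overhead : 0;
}

int main(void)
{
    struct ratecfg r = { .rate_bytes_ps = 125000000, .overhead = 0,
                         .atm_linklayer = 0 };

    /* a 1 ms buffer at 1 Gbit/s (125,000,000 bytes/s) allows a 125000-byte burst */
    printf("burst = %llu bytes\n",
           (unsigned long long)ns_to_len(&r, 1000000ULL));
    return 0;
}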
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 68a27f9..31ed008 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -154,8 +154,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->timeouts[SCTP_EVENT_TIMEOUT_HEARTBEAT] = 0;
asoc->timeouts[SCTP_EVENT_TIMEOUT_SACK] = asoc->sackdelay;
- asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] =
- min_t(unsigned long, sp->autoclose, net->sctp.max_autoclose) * HZ;
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE] = sp->autoclose * HZ;
/* Initializes the timers */
for (i = SCTP_EVENT_TIMEOUT_NONE; i < SCTP_NUM_TIMEOUT_TYPES; ++i)
@@ -291,8 +290,6 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->peer.ipv6_address = 1;
INIT_LIST_HEAD(&asoc->asocs);
- asoc->autoclose = sp->autoclose;
-
asoc->default_stream = sp->default_stream;
asoc->default_ppid = sp->default_ppid;
asoc->default_flags = sp->default_flags;
diff --git a/net/sctp/output.c b/net/sctp/output.c
index 0e2644d..0fb140f 100644
--- a/net/sctp/output.c
+++ b/net/sctp/output.c
@@ -581,7 +581,8 @@ int sctp_packet_transmit(struct sctp_packet *packet)
unsigned long timeout;
/* Restart the AUTOCLOSE timer when sending data. */
- if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
+ if (sctp_state(asoc, ESTABLISHED) &&
+ asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index f51ba98..59268f6 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -208,8 +208,6 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
INIT_LIST_HEAD(&q->retransmit);
INIT_LIST_HEAD(&q->sacked);
INIT_LIST_HEAD(&q->abandoned);
-
- q->empty = 1;
}
/* Free the outqueue structure and any related pending chunks.
@@ -332,7 +330,6 @@ int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
SCTP_INC_STATS(net, SCTP_MIB_OUTUNORDERCHUNKS);
else
SCTP_INC_STATS(net, SCTP_MIB_OUTORDERCHUNKS);
- q->empty = 0;
break;
}
} else {
@@ -654,7 +651,6 @@ redo:
if (chunk->fast_retransmit == SCTP_NEED_FRTX)
chunk->fast_retransmit = SCTP_DONT_FRTX;
- q->empty = 0;
q->asoc->stats.rtxchunks++;
break;
}
@@ -1065,8 +1061,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
sctp_transport_reset_timers(transport);
- q->empty = 0;
-
/* Only let one DATA chunk get bundled with a
* COOKIE-ECHO chunk.
*/
@@ -1275,29 +1269,17 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
"advertised peer ack point:0x%x\n", __func__, asoc, ctsn,
asoc->adv_peer_ack_point);
- /* See if all chunks are acked.
- * Make sure the empty queue handler will get run later.
- */
- q->empty = (list_empty(&q->out_chunk_list) &&
- list_empty(&q->retransmit));
- if (!q->empty)
- goto finish;
-
- list_for_each_entry(transport, transport_list, transports) {
- q->empty = q->empty && list_empty(&transport->transmitted);
- if (!q->empty)
- goto finish;
- }
-
- pr_debug("%s: sack queue is empty\n", __func__);
-finish:
- return q->empty;
+ return sctp_outq_is_empty(q);
}
-/* Is the outqueue empty? */
+/* Is the outqueue empty?
+ * The queue is empty when there is no pending data, no in-flight data
+ * and no pending retransmissions.
+ */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
- return q->empty;
+ return q->out_qlen == 0 && q->outstanding_bytes == 0 &&
+ list_empty(&q->retransmit);
}
/********************************************************************
diff --git a/net/sctp/probe.c b/net/sctp/probe.c
index 53c452e..5e68b94 100644
--- a/net/sctp/probe.c
+++ b/net/sctp/probe.c
@@ -38,6 +38,7 @@
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
+MODULE_SOFTDEP("pre: sctp");
MODULE_AUTHOR("Wei Yongjun <yjwei@cn.fujitsu.com>");
MODULE_DESCRIPTION("SCTP snooper");
MODULE_LICENSE("GPL");
@@ -182,6 +183,20 @@ static struct jprobe sctp_recv_probe = {
.entry = jsctp_sf_eat_sack,
};
+static __init int sctp_setup_jprobe(void)
+{
+ int ret = register_jprobe(&sctp_recv_probe);
+
+ if (ret) {
+ if (request_module("sctp"))
+ goto out;
+ ret = register_jprobe(&sctp_recv_probe);
+ }
+
+out:
+ return ret;
+}
+
static __init int sctpprobe_init(void)
{
int ret = -ENOMEM;
@@ -202,7 +217,7 @@ static __init int sctpprobe_init(void)
&sctpprobe_fops))
goto free_kfifo;
- ret = register_jprobe(&sctp_recv_probe);
+ ret = sctp_setup_jprobe();
if (ret)
goto remove_proc;
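sctp_setup_jprobe() above follows a simple shape: try to register, and on failure load the dependency and retry once. A hedged user-space sketch of that try/load/retry flow; register_probe() and load_module() are hypothetical stand-ins for register_jprobe() and request_module(), and the first attempt is made to fail so the retry path runs.

#include <stdio.h>

/* Hypothetical stand-in: the first registration attempt fails. */
static int register_probe(void)
{
	static int attempts;
	return attempts++ == 0 ? -1 : 0;
}

/* Hypothetical stand-in for loading the missing dependency. */
static int load_module(const char *name)
{
	printf("loading module %s\n", name);
	return 0;
}

/* Try once, and on failure load the dependency and retry. */
static int setup_probe(void)
{
	int ret = register_probe();

	if (ret) {
		if (load_module("sctp"))
			return ret;
		ret = register_probe();
	}
	return ret;
}

int main(void)
{
	printf("setup_probe() = %d\n", setup_probe());
	return 0;
}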
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index dfe3f36..a26065b 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -820,7 +820,7 @@ sctp_disposition_t sctp_sf_do_5_1D_ce(struct net *net,
SCTP_INC_STATS(net, SCTP_MIB_PASSIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
- if (new_asoc->autoclose)
+ if (new_asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -908,7 +908,7 @@ sctp_disposition_t sctp_sf_do_5_1E_ca(struct net *net,
SCTP_INC_STATS(net, SCTP_MIB_CURRESTAB);
SCTP_INC_STATS(net, SCTP_MIB_ACTIVEESTABS);
sctp_add_cmd_sf(commands, SCTP_CMD_HB_TIMERS_START, SCTP_NULL());
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_START,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -2970,7 +2970,7 @@ sctp_disposition_t sctp_sf_eat_data_6_2(struct net *net,
if (chunk->chunk_hdr->flags & SCTP_DATA_SACK_IMM)
force = SCTP_FORCE();
- if (asoc->autoclose) {
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
@@ -3878,7 +3878,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(struct net *net,
SCTP_CHUNK(chunk));
/* Count this as receiving DATA. */
- if (asoc->autoclose) {
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE]) {
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
}
@@ -5267,7 +5267,7 @@ sctp_disposition_t sctp_sf_do_9_2_start_shutdown(
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD));
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
@@ -5346,7 +5346,7 @@ sctp_disposition_t sctp_sf_do_9_2_shutdown_ack(
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_RESTART,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
- if (asoc->autoclose)
+ if (asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE])
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_AUTOCLOSE));
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 72046b9..42b709c 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2196,6 +2196,7 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
unsigned int optlen)
{
struct sctp_sock *sp = sctp_sk(sk);
+ struct net *net = sock_net(sk);
/* Applicable to UDP-style socket only */
if (sctp_style(sk, TCP))
@@ -2205,6 +2206,9 @@ static int sctp_setsockopt_autoclose(struct sock *sk, char __user *optval,
if (copy_from_user(&sp->autoclose, optval, optlen))
return -EFAULT;
+ if (sp->autoclose > net->sctp.max_autoclose)
+ sp->autoclose = net->sctp.max_autoclose;
+
return 0;
}
@@ -2811,6 +2815,8 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
{
struct sctp_rtoinfo rtoinfo;
struct sctp_association *asoc;
+ unsigned long rto_min, rto_max;
+ struct sctp_sock *sp = sctp_sk(sk);
if (optlen != sizeof (struct sctp_rtoinfo))
return -EINVAL;
@@ -2824,26 +2830,36 @@ static int sctp_setsockopt_rtoinfo(struct sock *sk, char __user *optval, unsigne
if (!asoc && rtoinfo.srto_assoc_id && sctp_style(sk, UDP))
return -EINVAL;
+ rto_max = rtoinfo.srto_max;
+ rto_min = rtoinfo.srto_min;
+
+ if (rto_max)
+ rto_max = asoc ? msecs_to_jiffies(rto_max) : rto_max;
+ else
+ rto_max = asoc ? asoc->rto_max : sp->rtoinfo.srto_max;
+
+ if (rto_min)
+ rto_min = asoc ? msecs_to_jiffies(rto_min) : rto_min;
+ else
+ rto_min = asoc ? asoc->rto_min : sp->rtoinfo.srto_min;
+
+ if (rto_min > rto_max)
+ return -EINVAL;
+
if (asoc) {
if (rtoinfo.srto_initial != 0)
asoc->rto_initial =
msecs_to_jiffies(rtoinfo.srto_initial);
- if (rtoinfo.srto_max != 0)
- asoc->rto_max = msecs_to_jiffies(rtoinfo.srto_max);
- if (rtoinfo.srto_min != 0)
- asoc->rto_min = msecs_to_jiffies(rtoinfo.srto_min);
+ asoc->rto_max = rto_max;
+ asoc->rto_min = rto_min;
} else {
/* If there is no association or the association-id = 0
* set the values to the endpoint.
*/
- struct sctp_sock *sp = sctp_sk(sk);
-
if (rtoinfo.srto_initial != 0)
sp->rtoinfo.srto_initial = rtoinfo.srto_initial;
- if (rtoinfo.srto_max != 0)
- sp->rtoinfo.srto_max = rtoinfo.srto_max;
- if (rtoinfo.srto_min != 0)
- sp->rtoinfo.srto_min = rtoinfo.srto_min;
+ sp->rtoinfo.srto_max = rto_max;
+ sp->rtoinfo.srto_min = rto_min;
}
return 0;
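The setsockopt changes above pull the RTO validation up front: each of srto_min/srto_max is either the new value (converted to jiffies when an association is given) or the currently stored one, and the pair is rejected when min would exceed max. A minimal sketch of that check in user-space C, skipping the jiffies conversion; clamp_autoclose() mirrors the autoclose cap added earlier in the same file, with max_autoclose as an assumed administrative limit rather than the kernel sysctl itself.

#include <assert.h>
#include <stdbool.h>

/* Pick the new value if non-zero, otherwise keep the current setting,
 * then require min <= max -- the same rule the patch enforces. */
static bool rto_pair_valid(unsigned long new_min, unsigned long new_max,
			   unsigned long cur_min, unsigned long cur_max)
{
	unsigned long min = new_min ? new_min : cur_min;
	unsigned long max = new_max ? new_max : cur_max;

	return min <= max;
}

/* Cap the requested autoclose timeout at an administrative maximum. */
static unsigned int clamp_autoclose(unsigned int req, unsigned int max_autoclose)
{
	return req > max_autoclose ? max_autoclose : req;
}

int main(void)
{
	assert(rto_pair_valid(0, 0, 1000, 60000));      /* keep current pair */
	assert(!rto_pair_valid(70000, 0, 1000, 60000)); /* new min > cur max */
	assert(clamp_autoclose(1u << 31, 8192) == 8192);
	return 0;
}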
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index 6b36561..b0565af 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -56,11 +56,16 @@ extern long sysctl_sctp_mem[3];
extern int sysctl_sctp_rmem[3];
extern int sysctl_sctp_wmem[3];
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
- int write,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
-
loff_t *ppos);
+
static struct ctl_table sctp_table[] = {
{
.procname = "sctp_mem",
@@ -102,17 +107,17 @@ static struct ctl_table sctp_net_table[] = {
.data = &init_net.sctp.rto_min,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_sctp_do_rto_min,
.extra1 = &one,
- .extra2 = &timer_max
+ .extra2 = &init_net.sctp.rto_max
},
{
.procname = "rto_max",
.data = &init_net.sctp.rto_max,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &one,
+ .proc_handler = proc_sctp_do_rto_max,
+ .extra1 = &init_net.sctp.rto_min,
.extra2 = &timer_max
},
{
@@ -294,8 +299,7 @@ static struct ctl_table sctp_net_table[] = {
{ /* sentinel */ }
};
-static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
- int write,
+static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
@@ -342,6 +346,60 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl,
return ret;
}
+static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ int new_value;
+ struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_min;
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write) {
+ if (ret || new_value > max || new_value < min)
+ return -EINVAL;
+ net->sctp.rto_min = new_value;
+ }
+ return ret;
+}
+
+static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ struct net *net = current->nsproxy->net_ns;
+ int new_value;
+ struct ctl_table tbl;
+ unsigned int min = *(unsigned int *) ctl->extra1;
+ unsigned int max = *(unsigned int *) ctl->extra2;
+ int ret;
+
+ memset(&tbl, 0, sizeof(struct ctl_table));
+ tbl.maxlen = sizeof(unsigned int);
+
+ if (write)
+ tbl.data = &new_value;
+ else
+ tbl.data = &net->sctp.rto_max;
+ ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
+ if (write) {
+ if (ret || new_value > max || new_value < min)
+ return -EINVAL;
+ net->sctp.rto_max = new_value;
+ }
+ return ret;
+}
+
int sctp_sysctl_net_register(struct net *net)
{
struct ctl_table *table;
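Both proc_sctp_do_rto_min() and proc_sctp_do_rto_max() follow the same shape: parse the written value into a temporary, compare it against the bounds stored in extra1/extra2, and only then commit it to the live setting. A small sketch of that read-validate-commit pattern; parse_uint() stands in for proc_dointvec(), which is kernel-only, and the bounds here are illustrative.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

static unsigned int rto_min = 1000;   /* live setting, as in net->sctp */
static unsigned int rto_max = 60000;

/* Stand-in for proc_dointvec(): parse the user's string into a temp. */
static int parse_uint(const char *buf, unsigned int *out)
{
	char *end;
	unsigned long v = strtoul(buf, &end, 10);

	if (end == buf || *end != '\0')
		return -EINVAL;
	*out = (unsigned int)v;
	return 0;
}

/* Read-validate-commit: reject values outside [1, rto_max] before
 * touching the live setting, like the new proc handlers do. */
static int set_rto_min(const char *buf)
{
	unsigned int new_value;

	if (parse_uint(buf, &new_value))
		return -EINVAL;
	if (new_value < 1 || new_value > rto_max)
		return -EINVAL;
	rto_min = new_value;
	return 0;
}

int main(void)
{
	printf("set 500:    %d (rto_min=%u)\n", set_rto_min("500"), rto_min);
	printf("set 999999: %d (rto_min=%u)\n", set_rto_min("999999"), rto_min);
	return 0;
}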
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index e332efb..efc46ff 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -573,7 +573,7 @@ void sctp_transport_burst_limited(struct sctp_transport *t)
u32 old_cwnd = t->cwnd;
u32 max_burst_bytes;
- if (t->burst_limited)
+ if (t->burst_limited || asoc->max_burst == 0)
return;
max_burst_bytes = t->flight_size + (asoc->max_burst * asoc->pathmtu);
diff --git a/net/tipc/core.c b/net/tipc/core.c
index fd4eeea..c6d3f75 100644
--- a/net/tipc/core.c
+++ b/net/tipc/core.c
@@ -113,7 +113,6 @@ err:
static void tipc_core_stop(void)
{
tipc_netlink_stop();
- tipc_handler_stop();
tipc_cfg_stop();
tipc_subscr_stop();
tipc_nametbl_stop();
@@ -146,9 +145,10 @@ static int tipc_core_start(void)
res = tipc_subscr_start();
if (!res)
res = tipc_cfg_init();
- if (res)
+ if (res) {
+ tipc_handler_stop();
tipc_core_stop();
-
+ }
return res;
}
@@ -178,6 +178,7 @@ static int __init tipc_init(void)
static void __exit tipc_exit(void)
{
+ tipc_handler_stop();
tipc_core_stop_net();
tipc_core_stop();
pr_info("Deactivated\n");
diff --git a/net/tipc/handler.c b/net/tipc/handler.c
index b36f0fc..e4bc8a2 100644
--- a/net/tipc/handler.c
+++ b/net/tipc/handler.c
@@ -56,12 +56,13 @@ unsigned int tipc_k_signal(Handler routine, unsigned long argument)
{
struct queue_item *item;
+ spin_lock_bh(&qitem_lock);
if (!handler_enabled) {
pr_err("Signal request ignored by handler\n");
+ spin_unlock_bh(&qitem_lock);
return -ENOPROTOOPT;
}
- spin_lock_bh(&qitem_lock);
item = kmem_cache_alloc(tipc_queue_item_cache, GFP_ATOMIC);
if (!item) {
pr_err("Signal queue out of memory\n");
@@ -112,10 +113,14 @@ void tipc_handler_stop(void)
struct list_head *l, *n;
struct queue_item *item;
- if (!handler_enabled)
+ spin_lock_bh(&qitem_lock);
+ if (!handler_enabled) {
+ spin_unlock_bh(&qitem_lock);
return;
-
+ }
handler_enabled = 0;
+ spin_unlock_bh(&qitem_lock);
+
tasklet_kill(&tipc_tasklet);
spin_lock_bh(&qitem_lock);
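The tipc/handler.c change moves the handler_enabled test under qitem_lock so the flag is read and cleared under the same lock that guards the queue. A user-space sketch of that check-under-lock pattern using pthreads; this is illustrative only, since the kernel code uses spin_lock_bh rather than a pthread mutex.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t qitem_lock = PTHREAD_MUTEX_INITIALIZER;
static int handler_enabled = 1;

/* Producer: only enqueue work while the flag is set, checked under
 * the same lock that the shutdown path takes to clear it. */
static int signal_work(void)
{
	pthread_mutex_lock(&qitem_lock);
	if (!handler_enabled) {
		pthread_mutex_unlock(&qitem_lock);
		return -1;            /* request ignored after stop */
	}
	/* ... allocate and queue the work item here ... */
	pthread_mutex_unlock(&qitem_lock);
	return 0;
}

/* Shutdown: clear the flag under the lock, then drain outside it. */
static void handler_stop(void)
{
	pthread_mutex_lock(&qitem_lock);
	handler_enabled = 0;
	pthread_mutex_unlock(&qitem_lock);
	/* ... kill the worker and free any queued items ... */
}

int main(void)
{
	printf("before stop: %d\n", signal_work());
	handler_stop();
	printf("after stop:  %d\n", signal_work());
	return 0;
}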
diff --git a/net/tipc/port.c b/net/tipc/port.c
index c081a76..d43f318 100644
--- a/net/tipc/port.c
+++ b/net/tipc/port.c
@@ -251,18 +251,15 @@ struct tipc_port *tipc_createport(struct sock *sk,
return p_ptr;
}
-int tipc_deleteport(u32 ref)
+int tipc_deleteport(struct tipc_port *p_ptr)
{
- struct tipc_port *p_ptr;
struct sk_buff *buf = NULL;
- tipc_withdraw(ref, 0, NULL);
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
+ tipc_withdraw(p_ptr, 0, NULL);
- tipc_ref_discard(ref);
- tipc_port_unlock(p_ptr);
+ spin_lock_bh(p_ptr->lock);
+ tipc_ref_discard(p_ptr->ref);
+ spin_unlock_bh(p_ptr->lock);
k_cancel_timer(&p_ptr->timer);
if (p_ptr->connected) {
@@ -704,47 +701,36 @@ int tipc_set_portimportance(u32 ref, unsigned int imp)
}
-int tipc_publish(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
u32 key;
- int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
+ if (p_ptr->connected)
return -EINVAL;
+ key = p_ptr->ref + p_ptr->pub_count + 1;
+ if (key == p_ptr->ref)
+ return -EADDRINUSE;
- if (p_ptr->connected)
- goto exit;
- key = ref + p_ptr->pub_count + 1;
- if (key == ref) {
- res = -EADDRINUSE;
- goto exit;
- }
publ = tipc_nametbl_publish(seq->type, seq->lower, seq->upper,
scope, p_ptr->ref, key);
if (publ) {
list_add(&publ->pport_list, &p_ptr->publications);
p_ptr->pub_count++;
p_ptr->published = 1;
- res = 0;
+ return 0;
}
-exit:
- tipc_port_unlock(p_ptr);
- return res;
+ return -EINVAL;
}
-int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
+ struct tipc_name_seq const *seq)
{
- struct tipc_port *p_ptr;
struct publication *publ;
struct publication *tpubl;
int res = -EINVAL;
- p_ptr = tipc_port_lock(ref);
- if (!p_ptr)
- return -EINVAL;
if (!seq) {
list_for_each_entry_safe(publ, tpubl,
&p_ptr->publications, pport_list) {
@@ -771,7 +757,6 @@ int tipc_withdraw(u32 ref, unsigned int scope, struct tipc_name_seq const *seq)
}
if (list_empty(&p_ptr->publications))
p_ptr->published = 0;
- tipc_port_unlock(p_ptr);
return res;
}
diff --git a/net/tipc/port.h b/net/tipc/port.h
index 9122535..34f12bd 100644
--- a/net/tipc/port.h
+++ b/net/tipc/port.h
@@ -116,7 +116,7 @@ int tipc_reject_msg(struct sk_buff *buf, u32 err);
void tipc_acknowledge(u32 port_ref, u32 ack);
-int tipc_deleteport(u32 portref);
+int tipc_deleteport(struct tipc_port *p_ptr);
int tipc_portimportance(u32 portref, unsigned int *importance);
int tipc_set_portimportance(u32 portref, unsigned int importance);
@@ -127,9 +127,9 @@ int tipc_set_portunreliable(u32 portref, unsigned int isunreliable);
int tipc_portunreturnable(u32 portref, unsigned int *isunreturnable);
int tipc_set_portunreturnable(u32 portref, unsigned int isunreturnable);
-int tipc_publish(u32 portref, unsigned int scope,
+int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
-int tipc_withdraw(u32 portref, unsigned int scope,
+int tipc_withdraw(struct tipc_port *p_ptr, unsigned int scope,
struct tipc_name_seq const *name_seq);
int tipc_connect(u32 portref, struct tipc_portid const *port);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 3b61851..e741416 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -354,7 +354,7 @@ static int release(struct socket *sock)
* Delete TIPC port; this ensures no more messages are queued
* (also disconnects an active connection & sends a 'FIN-' to peer)
*/
- res = tipc_deleteport(tport->ref);
+ res = tipc_deleteport(tport);
/* Discard any remaining (connection-based) messages in receive queue */
__skb_queue_purge(&sk->sk_receive_queue);
@@ -386,30 +386,46 @@ static int release(struct socket *sock)
*/
static int bind(struct socket *sock, struct sockaddr *uaddr, int uaddr_len)
{
+ struct sock *sk = sock->sk;
struct sockaddr_tipc *addr = (struct sockaddr_tipc *)uaddr;
- u32 portref = tipc_sk_port(sock->sk)->ref;
+ struct tipc_port *tport = tipc_sk_port(sock->sk);
+ int res = -EINVAL;
- if (unlikely(!uaddr_len))
- return tipc_withdraw(portref, 0, NULL);
+ lock_sock(sk);
+ if (unlikely(!uaddr_len)) {
+ res = tipc_withdraw(tport, 0, NULL);
+ goto exit;
+ }
- if (uaddr_len < sizeof(struct sockaddr_tipc))
- return -EINVAL;
- if (addr->family != AF_TIPC)
- return -EAFNOSUPPORT;
+ if (uaddr_len < sizeof(struct sockaddr_tipc)) {
+ res = -EINVAL;
+ goto exit;
+ }
+ if (addr->family != AF_TIPC) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if (addr->addrtype == TIPC_ADDR_NAME)
addr->addr.nameseq.upper = addr->addr.nameseq.lower;
- else if (addr->addrtype != TIPC_ADDR_NAMESEQ)
- return -EAFNOSUPPORT;
+ else if (addr->addrtype != TIPC_ADDR_NAMESEQ) {
+ res = -EAFNOSUPPORT;
+ goto exit;
+ }
if ((addr->addr.nameseq.type < TIPC_RESERVED_TYPES) &&
(addr->addr.nameseq.type != TIPC_TOP_SRV) &&
- (addr->addr.nameseq.type != TIPC_CFG_SRV))
- return -EACCES;
+ (addr->addr.nameseq.type != TIPC_CFG_SRV)) {
+ res = -EACCES;
+ goto exit;
+ }
- return (addr->scope > 0) ?
- tipc_publish(portref, addr->scope, &addr->addr.nameseq) :
- tipc_withdraw(portref, -addr->scope, &addr->addr.nameseq);
+ res = (addr->scope > 0) ?
+ tipc_publish(tport, addr->scope, &addr->addr.nameseq) :
+ tipc_withdraw(tport, -addr->scope, &addr->addr.nameseq);
+exit:
+ release_sock(sk);
+ return res;
}
/**
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 01625ccc..a427623 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -530,13 +530,17 @@ static int unix_seqpacket_sendmsg(struct kiocb *, struct socket *,
static int unix_seqpacket_recvmsg(struct kiocb *, struct socket *,
struct msghdr *, size_t, int);
-static void unix_set_peek_off(struct sock *sk, int val)
+static int unix_set_peek_off(struct sock *sk, int val)
{
struct unix_sock *u = unix_sk(sk);
- mutex_lock(&u->readlock);
+ if (mutex_lock_interruptible(&u->readlock))
+ return -EINTR;
+
sk->sk_peek_off = val;
mutex_unlock(&u->readlock);
+
+ return 0;
}
@@ -714,7 +718,9 @@ static int unix_autobind(struct socket *sock)
int err;
unsigned int retries = 0;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ return err;
err = 0;
if (u->addr)
@@ -873,7 +879,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
- mutex_lock(&u->readlock);
+ err = mutex_lock_interruptible(&u->readlock);
+ if (err)
+ goto out;
err = -EINVAL;
if (u->addr)
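The af_unix hunks swap mutex_lock() for mutex_lock_interruptible() so a signal can abort the wait instead of leaving the task stuck, which means the callers now have to propagate -EINTR. A rough user-space sketch of that calling convention; acquire_interruptible() is a hypothetical stand-in for the kernel primitive, with the signal case simulated by a flag.

#include <errno.h>
#include <stdio.h>

/* Hypothetical stand-in: returns 0 on success, -EINTR if a signal
 * arrived while waiting for the lock. */
static int acquire_interruptible(int simulate_signal)
{
	return simulate_signal ? -EINTR : 0;
}

static void release(void) { }

static int set_peek_off(int val, int simulate_signal)
{
	int err = acquire_interruptible(simulate_signal);

	if (err)
		return err;           /* propagate -EINTR to the caller */

	/* ... update the peek offset under the lock ... */
	(void)val;
	release();
	return 0;
}

int main(void)
{
	printf("no signal: %d\n", set_peek_off(16, 0));
	printf("signal:    %d\n", set_peek_off(16, 1));
	return 0;
}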
diff --git a/net/wireless/core.c b/net/wireless/core.c
index aff959e..52b865f 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -451,6 +451,15 @@ int wiphy_register(struct wiphy *wiphy)
int i;
u16 ifmodes = wiphy->interface_modes;
+ /* support for 5/10 MHz is broken due to nl80211 API mess - disable */
+ wiphy->flags &= ~WIPHY_FLAG_SUPPORTS_5_10_MHZ;
+
+ /*
+ * There are major locking problems in nl80211/mac80211 for CSA,
+ * disable for all drivers until this has been reworked.
+ */
+ wiphy->flags &= ~WIPHY_FLAG_HAS_CHANNEL_SWITCH;
+
#ifdef CONFIG_PM
if (WARN_ON(wiphy->wowlan &&
(wiphy->wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) &&
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index 9d797df..89737ee 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -262,7 +262,7 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
/* try to find an IBSS channel if none requested ... */
if (!wdev->wext.ibss.chandef.chan) {
- wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
+ struct ieee80211_channel *new_chan = NULL;
for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
struct ieee80211_supported_band *sband;
@@ -278,18 +278,19 @@ int cfg80211_ibss_wext_join(struct cfg80211_registered_device *rdev,
continue;
if (chan->flags & IEEE80211_CHAN_DISABLED)
continue;
- wdev->wext.ibss.chandef.chan = chan;
- wdev->wext.ibss.chandef.center_freq1 =
- chan->center_freq;
+ new_chan = chan;
break;
}
- if (wdev->wext.ibss.chandef.chan)
+ if (new_chan)
break;
}
- if (!wdev->wext.ibss.chandef.chan)
+ if (!new_chan)
return -EINVAL;
+
+ cfg80211_chandef_create(&wdev->wext.ibss.chandef, new_chan,
+ NL80211_CHAN_NO_HT);
}
/* don't join -- SSID is not there */
@@ -363,9 +364,8 @@ int cfg80211_ibss_wext_siwfreq(struct net_device *dev,
return err;
if (chan) {
- wdev->wext.ibss.chandef.chan = chan;
- wdev->wext.ibss.chandef.width = NL80211_CHAN_WIDTH_20_NOHT;
- wdev->wext.ibss.chandef.center_freq1 = freq;
+ cfg80211_chandef_create(&wdev->wext.ibss.chandef, chan,
+ NL80211_CHAN_NO_HT);
wdev->wext.ibss.channel_fixed = true;
} else {
/* cfg80211_ibss_wext_join will pick one if needed */
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index a1eb210..138dc3b 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2687,7 +2687,7 @@ static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info)
hdr = nl80211hdr_put(msg, info->snd_portid, info->snd_seq, 0,
NL80211_CMD_NEW_KEY);
if (!hdr)
- return -ENOBUFS;
+ goto nla_put_failure;
cookie.msg = msg;
cookie.idx = key_idx;
@@ -5349,6 +5349,10 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)
err = -EINVAL;
goto out_free;
}
+
+ if (!wiphy->bands[band])
+ continue;
+
err = ieee80211_get_ratemask(wiphy->bands[band],
nla_data(attr),
nla_len(attr),
@@ -9633,8 +9637,9 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
nla_put(msg, NL80211_ATTR_IE, req->ie_len, req->ie))
goto nla_put_failure;
- if (req->flags)
- nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags);
+ if (req->flags &&
+ nla_put_u32(msg, NL80211_ATTR_SCAN_FLAGS, req->flags))
+ goto nla_put_failure;
return 0;
nla_put_failure:
@@ -11093,6 +11098,8 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
struct nlattr *reasons;
reasons = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
+ if (!reasons)
+ goto free_msg;
if (wakeup->disconnect &&
nla_put_flag(msg, NL80211_WOWLAN_TRIG_DISCONNECT))
@@ -11118,16 +11125,18 @@ void cfg80211_report_wowlan_wakeup(struct wireless_dev *wdev,
wakeup->pattern_idx))
goto free_msg;
- if (wakeup->tcp_match)
- nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH);
+ if (wakeup->tcp_match &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_MATCH))
+ goto free_msg;
- if (wakeup->tcp_connlost)
- nla_put_flag(msg,
- NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST);
+ if (wakeup->tcp_connlost &&
+ nla_put_flag(msg, NL80211_WOWLAN_TRIG_WAKEUP_TCP_CONNLOST))
+ goto free_msg;
- if (wakeup->tcp_nomoretokens)
- nla_put_flag(msg,
- NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS);
+ if (wakeup->tcp_nomoretokens &&
+ nla_put_flag(msg,
+ NL80211_WOWLAN_TRIG_WAKEUP_TCP_NOMORETOKENS))
+ goto free_msg;
if (wakeup->packet) {
u32 pkt_attr = NL80211_WOWLAN_TRIG_WAKEUP_PKT_80211;
@@ -11263,24 +11272,29 @@ void cfg80211_ft_event(struct net_device *netdev,
return;
hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FT_EVENT);
- if (!hdr) {
- nlmsg_free(msg);
- return;
- }
+ if (!hdr)
+ goto out;
- nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
- nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
- nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap);
- if (ft_event->ies)
- nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies);
- if (ft_event->ric_ies)
- nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
- ft_event->ric_ies);
+ if (nla_put_u32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx) ||
+ nla_put_u32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex) ||
+ nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, ft_event->target_ap))
+ goto out;
+
+ if (ft_event->ies &&
+ nla_put(msg, NL80211_ATTR_IE, ft_event->ies_len, ft_event->ies))
+ goto out;
+ if (ft_event->ric_ies &&
+ nla_put(msg, NL80211_ATTR_IE_RIC, ft_event->ric_ies_len,
+ ft_event->ric_ies))
+ goto out;
genlmsg_end(msg, hdr);
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_MLME, GFP_KERNEL);
+ return;
+ out:
+ nlmsg_free(msg);
}
EXPORT_SYMBOL(cfg80211_ft_event);
diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c
index a271c27..722da61 100644
--- a/net/wireless/radiotap.c
+++ b/net/wireless/radiotap.c
@@ -124,6 +124,10 @@ int ieee80211_radiotap_iterator_init(
/* find payload start allowing for extended bitmap(s) */
if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) {
+ if ((unsigned long)iterator->_arg -
+ (unsigned long)iterator->_rtheader + sizeof(uint32_t) >
+ (unsigned long)iterator->_max_length)
+ return -EINVAL;
while (get_unaligned_le32(iterator->_arg) &
(1 << IEEE80211_RADIOTAP_EXT)) {
iterator->_arg += sizeof(uint32_t);
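The radiotap fix adds a bounds check before the iterator dereferences the next 32-bit bitmap word: the read is only legal if _arg plus four bytes still lies inside the header. A standalone sketch of that arithmetic; the field names mirror the iterator but the struct below is simplified, not the real ieee80211_radiotap_iterator.

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

struct it {
	const uint8_t *rtheader;  /* start of the radiotap header */
	const uint8_t *arg;       /* current read position        */
	size_t max_length;        /* total header length in bytes */
};

/* True when a further 4-byte bitmap word can be read without running
 * past the end of the header -- the condition the patch enforces. */
static bool can_read_u32(const struct it *i)
{
	return (size_t)(i->arg - i->rtheader) + sizeof(uint32_t) <=
	       i->max_length;
}

int main(void)
{
	uint8_t hdr[8] = { 0 };
	struct it i = { hdr, hdr + 4, sizeof(hdr) };

	assert(can_read_u32(&i));   /* bytes 4..7 are in bounds */
	i.arg = hdr + 6;
	assert(!can_read_u32(&i));  /* would read past the end  */
	return 0;
}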
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 65f8008..d3c5bd7 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -632,6 +632,16 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
}
#endif
+ if (!bss && (status == WLAN_STATUS_SUCCESS)) {
+ WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
+ bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
+ wdev->ssid, wdev->ssid_len,
+ WLAN_CAPABILITY_ESS,
+ WLAN_CAPABILITY_ESS);
+ if (bss)
+ cfg80211_hold_bss(bss_from_pub(bss));
+ }
+
if (wdev->current_bss) {
cfg80211_unhold_bss(wdev->current_bss);
cfg80211_put_bss(wdev->wiphy, &wdev->current_bss->pub);
@@ -649,16 +659,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
return;
}
- if (!bss) {
- WARN_ON_ONCE(!wiphy_to_dev(wdev->wiphy)->ops->connect);
- bss = cfg80211_get_bss(wdev->wiphy, NULL, bssid,
- wdev->ssid, wdev->ssid_len,
- WLAN_CAPABILITY_ESS,
- WLAN_CAPABILITY_ESS);
- if (WARN_ON(!bss))
- return;
- cfg80211_hold_bss(bss_from_pub(bss));
- }
+ if (WARN_ON(!bss))
+ return;
wdev->current_bss = bss_from_pub(bss);
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 32b10f5..2dcb377 100644
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -82,7 +82,9 @@ kallsyms()
kallsymopt="${kallsymopt} --all-symbols"
fi
- kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ if [ -n "${CONFIG_ARM}" ] && [ -n "${CONFIG_PAGE_OFFSET}" ]; then
+ kallsymopt="${kallsymopt} --page-offset=$CONFIG_PAGE_OFFSET"
+ fi
local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
diff --git a/scripts/sortextable.c b/scripts/sortextable.c
index 5f7a8b6..7941fbd 100644
--- a/scripts/sortextable.c
+++ b/scripts/sortextable.c
@@ -31,6 +31,10 @@
#include <tools/be_byteshift.h>
#include <tools/le_byteshift.h>
+#ifndef EM_ARCOMPACT
+#define EM_ARCOMPACT 93
+#endif
+
#ifndef EM_AARCH64
#define EM_AARCH64 183
#endif
@@ -268,6 +272,7 @@ do_file(char const *const fname)
case EM_S390:
custom_sort = sort_relative_table;
break;
+ case EM_ARCOMPACT:
case EM_ARM:
case EM_AARCH64:
case EM_MIPS:
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
index 7f44c32..8137b27 100644
--- a/security/keys/big_key.c
+++ b/security/keys/big_key.c
@@ -70,7 +70,7 @@ int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
*
* TODO: Encrypt the stored data with a temporary key.
*/
- file = shmem_file_setup("", datalen, 0);
+ file = shmem_kernel_file_setup("", datalen, 0);
if (IS_ERR(file)) {
ret = PTR_ERR(file);
goto err_quota;
diff --git a/security/keys/key.c b/security/keys/key.c
index 55d110f..6e21c11 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -272,7 +272,7 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
/* allocate and initialise the key and its description */
- key = kmem_cache_alloc(key_jar, GFP_KERNEL);
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
@@ -293,18 +293,12 @@ struct key *key_alloc(struct key_type *type, const char *desc,
key->uid = uid;
key->gid = gid;
key->perm = perm;
- key->flags = 0;
- key->expiry = 0;
- key->payload.data = NULL;
- key->security = NULL;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
if (flags & KEY_ALLOC_TRUSTED)
key->flags |= 1 << KEY_FLAG_TRUSTED;
- memset(&key->type_data, 0, sizeof(key->type_data));
-
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
#endif
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 69f0cb7..d46cbc5 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -160,7 +160,7 @@ static u64 mult_64x32_and_fold(u64 x, u32 y)
static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
{
const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
- const unsigned long level_mask = ASSOC_ARRAY_LEVEL_STEP_MASK;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
const char *description = index_key->description;
unsigned long hash, type;
u32 piece;
@@ -194,10 +194,10 @@ static unsigned long hash_key_type_and_desc(const struct keyring_index_key *inde
* ordinary keys by making sure the lowest level segment in the hash is
* zero for keyrings and non-zero otherwise.
*/
- if (index_key->type != &key_type_keyring && (hash & level_mask) == 0)
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
- if (index_key->type == &key_type_keyring && (hash & level_mask) != 0)
- return (hash + (hash << level_shift)) & ~level_mask;
+ if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ return (hash + (hash << level_shift)) & ~fan_mask;
return hash;
}
@@ -279,12 +279,11 @@ static bool keyring_compare_object(const void *object, const void *data)
* Compare the index keys of a pair of objects and determine the bit position
* at which they differ - if they differ.
*/
-static int keyring_diff_objects(const void *_a, const void *_b)
+static int keyring_diff_objects(const void *object, const void *data)
{
- const struct key *key_a = keyring_ptr_to_key(_a);
- const struct key *key_b = keyring_ptr_to_key(_b);
+ const struct key *key_a = keyring_ptr_to_key(object);
const struct keyring_index_key *a = &key_a->index_key;
- const struct keyring_index_key *b = &key_b->index_key;
+ const struct keyring_index_key *b = data;
unsigned long seg_a, seg_b;
int level, i;
@@ -691,8 +690,8 @@ descend_to_node:
smp_read_barrier_depends();
ptr = ACCESS_ONCE(shortcut->next_node);
BUG_ON(!assoc_array_ptr_is_node(ptr));
- node = assoc_array_ptr_to_node(ptr);
}
+ node = assoc_array_ptr_to_node(ptr);
begin_node:
kdebug("begin_node");
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 794c3ca..6625699 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -53,6 +53,7 @@
#include <net/ip.h> /* for local_port_range[] */
#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <linux/uaccess.h>
@@ -95,10 +96,6 @@
#include "audit.h"
#include "avc_ss.h"
-#define SB_TYPE_FMT "%s%s%s"
-#define SB_SUBTYPE(sb) (sb->s_subtype && sb->s_subtype[0])
-#define SB_TYPE_ARGS(sb) sb->s_type->name, SB_SUBTYPE(sb) ? "." : "", SB_SUBTYPE(sb) ? sb->s_subtype : ""
-
extern struct security_operations *security_ops;
/* SECMARK reference count */
@@ -413,8 +410,8 @@ static int sb_finish_set_opts(struct super_block *sb)
the first boot of the SELinux kernel before we have
assigned xattr values to the filesystem. */
if (!root_inode->i_op->getxattr) {
- printk(KERN_WARNING "SELinux: (dev %s, type "SB_TYPE_FMT") has no "
- "xattr support\n", sb->s_id, SB_TYPE_ARGS(sb));
+ printk(KERN_WARNING "SELinux: (dev %s, type %s) has no "
+ "xattr support\n", sb->s_id, sb->s_type->name);
rc = -EOPNOTSUPP;
goto out;
}
@@ -422,22 +419,22 @@ static int sb_finish_set_opts(struct super_block *sb)
if (rc < 0 && rc != -ENODATA) {
if (rc == -EOPNOTSUPP)
printk(KERN_WARNING "SELinux: (dev %s, type "
- SB_TYPE_FMT") has no security xattr handler\n",
- sb->s_id, SB_TYPE_ARGS(sb));
+ "%s) has no security xattr handler\n",
+ sb->s_id, sb->s_type->name);
else
printk(KERN_WARNING "SELinux: (dev %s, type "
- SB_TYPE_FMT") getxattr errno %d\n", sb->s_id,
- SB_TYPE_ARGS(sb), -rc);
+ "%s) getxattr errno %d\n", sb->s_id,
+ sb->s_type->name, -rc);
goto out;
}
}
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- printk(KERN_ERR "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), unknown behavior\n",
- sb->s_id, SB_TYPE_ARGS(sb));
+ printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
+ sb->s_id, sb->s_type->name);
else
- printk(KERN_DEBUG "SELinux: initialized (dev %s, type "SB_TYPE_FMT"), %s\n",
- sb->s_id, SB_TYPE_ARGS(sb),
+ printk(KERN_DEBUG "SELinux: initialized (dev %s, type %s), %s\n",
+ sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
sbsec->flags |= SE_SBINITIALIZED;
@@ -600,6 +597,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
const struct cred *cred = current_cred();
int rc = 0, i;
struct superblock_security_struct *sbsec = sb->s_security;
+ const char *name = sb->s_type->name;
struct inode *inode = sbsec->sb->s_root->d_inode;
struct inode_security_struct *root_isec = inode->i_security;
u32 fscontext_sid = 0, context_sid = 0, rootcontext_sid = 0;
@@ -658,8 +656,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
strlen(mount_options[i]), &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
- "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n",
- mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc);
+ "(%s) failed for (dev %s, type %s) errno=%d\n",
+ mount_options[i], sb->s_id, name, rc);
goto out;
}
switch (flags[i]) {
@@ -806,8 +804,7 @@ out:
out_double_mount:
rc = -EINVAL;
printk(KERN_WARNING "SELinux: mount invalid. Same superblock, different "
- "security settings for (dev %s, type "SB_TYPE_FMT")\n", sb->s_id,
- SB_TYPE_ARGS(sb));
+ "security settings for (dev %s, type %s)\n", sb->s_id, name);
goto out;
}
@@ -2480,8 +2477,8 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
rc = security_context_to_sid(mount_options[i], len, &sid);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
- "(%s) failed for (dev %s, type "SB_TYPE_FMT") errno=%d\n",
- mount_options[i], sb->s_id, SB_TYPE_ARGS(sb), rc);
+ "(%s) failed for (dev %s, type %s) errno=%d\n",
+ mount_options[i], sb->s_id, sb->s_type->name, rc);
goto out_free_opts;
}
rc = -EINVAL;
@@ -2519,8 +2516,8 @@ out_free_secdata:
return rc;
out_bad_option:
printk(KERN_WARNING "SELinux: unable to change security options "
- "during remount (dev %s, type "SB_TYPE_FMT")\n", sb->s_id,
- SB_TYPE_ARGS(sb));
+ "during remount (dev %s, type=%s)\n", sb->s_id,
+ sb->s_type->name);
goto out_free_opts;
}
@@ -3828,7 +3825,7 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
u32 nlbl_sid;
u32 nlbl_type;
- err = selinux_skb_xfrm_sid(skb, &xfrm_sid);
+ err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
if (unlikely(err))
return -EACCES;
err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
@@ -3846,6 +3843,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
return 0;
}
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
+{
+ int err = 0;
+
+ if (skb_sid != SECSID_NULL)
+ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+ else
+ *conn_sid = sk_sid;
+
+ return err;
+}
+
/* socket security operations */
static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@@ -4313,8 +4334,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
- if (err)
+ if (err) {
selinux_netlbl_err(skb, err, 0);
+ return err;
+ }
}
if (secmark_active) {
@@ -4452,7 +4475,7 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
int err;
u16 family = sk->sk_family;
- u32 newsid;
+ u32 connsid;
u32 peersid;
/* handle mapped IPv4 packets arriving via IPv6 sockets */
@@ -4462,16 +4485,11 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err)
return err;
- if (peersid == SECSID_NULL) {
- req->secid = sksec->sid;
- req->peer_secid = SECSID_NULL;
- } else {
- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
- if (err)
- return err;
- req->secid = newsid;
- req->peer_secid = peersid;
- }
+ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+ if (err)
+ return err;
+ req->secid = connsid;
+ req->peer_secid = peersid;
return selinux_netlbl_inet_conn_request(req, family);
}
@@ -4731,6 +4749,7 @@ static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
+ struct sock *sk;
u32 sid;
if (!netlbl_enabled())
@@ -4739,8 +4758,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
- if (skb->sk) {
- struct sk_security_struct *sksec = skb->sk->sk_security;
+ sk = skb->sk;
+ if (sk) {
+ struct sk_security_struct *sksec;
+
+ if (sk->sk_state == TCP_LISTEN)
+ /* if the socket is the listening state then this
+ * packet is a SYN-ACK packet which means it needs to
+ * be labeled based on the connection/request_sock and
+ * not the parent socket. unfortunately, we can't
+ * lookup the request_sock yet as it isn't queued on
+ * the parent socket until after the SYN-ACK is sent.
+ * the "solution" is to simply pass the packet as-is
+ * as any IP option based labeling should be copied
+ * from the initial connection request (in the IP
+ * layer). it is far from ideal, but until we get a
+ * security label in the packet itself this is the
+ * best we can do. */
+ return NF_ACCEPT;
+
+ /* standard practice, label using the parent socket */
+ sksec = sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
@@ -4810,27 +4848,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_ip_postroute_compat(skb, ifindex, family);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ sk = skb->sk;
+
#ifdef CONFIG_XFRM
/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
* packet transformation so allow the packet to pass without any checks
* since we'll have another chance to perform access control checks
* when the packet is on its final way out.
* NOTE: there appear to be some IPv6 multicast cases where skb->dst
- * is NULL, in this case go ahead and apply access control. */
- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
+ * is NULL, in this case go ahead and apply access control.
+ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+ * TCP listening state we cannot wait until the XFRM processing
+ * is done as we will miss out on the SA label if we do;
+ * unfortunately, this means more work, but it is only once per
+ * connection. */
+ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+ !(sk != NULL && sk->sk_state == TCP_LISTEN))
return NF_ACCEPT;
#endif
- secmark_active = selinux_secmark_enabled();
- peerlbl_active = selinux_peerlbl_enabled();
- if (!secmark_active && !peerlbl_active)
- return NF_ACCEPT;
- /* if the packet is being forwarded then get the peer label from the
- * packet itself; otherwise check to see if it is from a local
- * application or the kernel, if from an application get the peer label
- * from the sending socket, otherwise use the kernel's sid */
- sk = skb->sk;
if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+ * to determine which, and if the packet is being forwarded,
+ * query the packet directly to determine the security label. */
if (skb->skb_iif) {
secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4839,7 +4886,45 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
}
+ } else if (sk->sk_state == TCP_LISTEN) {
+ /* Locally generated packet but the associated socket is in the
+ * listening state which means this is a SYN-ACK packet. In
+ * this particular case the correct security label is assigned
+ * to the connection/request_sock but unfortunately we can't
+ * query the request_sock as it isn't queued on the parent
+ * socket until after the SYN-ACK packet is sent; the only
+ * viable choice is to regenerate the label like we do in
+ * selinux_inet_conn_request(). See also selinux_ip_output()
+ * for similar problems. */
+ u32 skb_sid;
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
+ /* At this point, if the returned skb peerlbl is SECSID_NULL
+ * and the packet has been through at least one XFRM
+ * transformation then we must be dealing with the "final"
+ * form of labeled IPsec packet; since we've already applied
+ * all of our access controls on this packet we can safely
+ * pass the packet. */
+ if (skb_sid == SECSID_NULL) {
+ switch (family) {
+ case PF_INET:
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ case PF_INET6:
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ default:
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
} else {
+ /* Locally generated packet, fetch the security label from the
+ * associated socket. */
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
secmark_perm = PACKET__SEND;
@@ -5503,11 +5588,11 @@ static int selinux_setprocattr(struct task_struct *p,
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
- task_lock(p);
+ rcu_read_lock();
tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
- task_unlock(p);
+ rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
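selinux_conn_sid(), introduced earlier in this file's diff, is a small decision: if the packet carried a valid SID, combine the socket's user:role:type with the packet's MLS level; otherwise reuse the socket SID unchanged. A hedged sketch of that copy-or-combine branch; combine_mls() is a hypothetical stand-in for security_sid_mls_copy() that just splices the two values so the effect is visible.

#include <assert.h>
#include <stdint.h>

typedef uint32_t sid_t;
#define SECSID_NULL 0

/* Hypothetical stand-in for security_sid_mls_copy(): splice the socket
 * SID with the packet SID purely for illustration. */
static int combine_mls(sid_t sk_sid, sid_t skb_sid, sid_t *out)
{
	*out = (sk_sid & 0xffff0000u) | (skb_sid & 0x0000ffffu);
	return 0;
}

/* Copy-or-combine: the same branch structure as selinux_conn_sid(). */
static int conn_sid(sid_t sk_sid, sid_t skb_sid, sid_t *conn)
{
	if (skb_sid != SECSID_NULL)
		return combine_mls(sk_sid, skb_sid, conn);
	*conn = sk_sid;
	return 0;
}

int main(void)
{
	sid_t out;

	conn_sid(0x12340000u, SECSID_NULL, &out);
	assert(out == 0x12340000u);        /* no packet SID: copy  */
	conn_sid(0x12340000u, 0x00005678u, &out);
	assert(out == 0x12345678u);        /* combine sk + packet  */
	return 0;
}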
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 0dec76c..48c3cc9 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -39,6 +39,7 @@ int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
static inline void selinux_xfrm_notify_policyload(void)
{
@@ -79,11 +80,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
static inline void selinux_xfrm_notify_policyload(void)
{
}
-#endif
-static inline int selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
- return selinux_xfrm_decode_session(skb, sid, 0);
+ *sid = SECSID_NULL;
+ return 0;
}
+#endif
#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index ee470a0..d106733 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -2334,50 +2334,16 @@ int security_fs_use(struct super_block *sb)
struct ocontext *c;
struct superblock_security_struct *sbsec = sb->s_security;
const char *fstype = sb->s_type->name;
- const char *subtype = (sb->s_subtype && sb->s_subtype[0]) ? sb->s_subtype : NULL;
- struct ocontext *base = NULL;
read_lock(&policy_rwlock);
- for (c = policydb.ocontexts[OCON_FSUSE]; c; c = c->next) {
- char *sub;
- int baselen;
-
- baselen = strlen(fstype);
-
- /* if base does not match, this is not the one */
- if (strncmp(fstype, c->u.name, baselen))
- continue;
-
- /* if there is no subtype, this is the one! */
- if (!subtype)
- break;
-
- /* skip past the base in this entry */
- sub = c->u.name + baselen;
-
- /* entry is only a base. save it. keep looking for subtype */
- if (sub[0] == '\0') {
- base = c;
- continue;
- }
-
- /* entry is not followed by a subtype, so it is not a match */
- if (sub[0] != '.')
- continue;
-
- /* whew, we found a subtype of this fstype */
- sub++; /* move past '.' */
-
- /* exact match of fstype AND subtype */
- if (!strcmp(subtype, sub))
+ c = policydb.ocontexts[OCON_FSUSE];
+ while (c) {
+ if (strcmp(fstype, c->u.name) == 0)
break;
+ c = c->next;
}
- /* in case we had found an fstype match but no subtype match */
- if (!c)
- c = base;
-
if (c) {
sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index a91d205..0462cb3 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -209,19 +209,26 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
NULL) ? 0 : 1);
}
-/*
- * LSM hook implementation that checks and/or returns the xfrm sid for the
- * incoming packet.
- */
-int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
{
- u32 sid_session = SECSID_NULL;
- struct sec_path *sp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
- if (skb == NULL)
- goto out;
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
+
+ return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+ u32 *sid, int ckall)
+{
+ u32 sid_session = SECSID_NULL;
+ struct sec_path *sp = skb->sp;
- sp = skb->sp;
if (sp) {
int i;
@@ -248,6 +255,30 @@ out:
}
/*
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
+ */
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+{
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
+ }
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
+
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
+
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
+
+ return rc;
+}
+
+/*
* LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
@@ -327,19 +358,22 @@ int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
return rc;
ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
- if (!ctx)
- return -ENOMEM;
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
ctx->ctx_doi = XFRM_SC_DOI_LSM;
ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
ctx->ctx_sid = secid;
ctx->ctx_len = str_len;
memcpy(ctx->ctx_str, ctx_str, str_len);
- kfree(ctx_str);
x->security = ctx;
atomic_inc(&selinux_xfrm_refcount);
- return 0;
+out:
+ kfree(ctx_str);
+ return rc;
}
/*
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 6e03b46..a210467 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -1937,6 +1937,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream,
case SNDRV_PCM_STATE_DISCONNECTED:
err = -EBADFD;
goto _endloop;
+ case SNDRV_PCM_STATE_PAUSED:
+ continue;
}
if (!tout) {
snd_printd("%s write error (DMA or IRQ trouble?)\n",
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index c4671d0..c7f6d1c 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -474,6 +474,20 @@ static void invalidate_nid_path(struct hda_codec *codec, int idx)
memset(path, 0, sizeof(*path));
}
+/* return a DAC if paired to the given pin by codec driver */
+static hda_nid_t get_preferred_dac(struct hda_codec *codec, hda_nid_t pin)
+{
+ struct hda_gen_spec *spec = codec->spec;
+ const hda_nid_t *list = spec->preferred_dacs;
+
+ if (!list)
+ return 0;
+ for (; *list; list += 2)
+ if (*list == pin)
+ return list[1];
+ return 0;
+}
+
/* look for an empty DAC slot */
static hda_nid_t look_for_dac(struct hda_codec *codec, hda_nid_t pin,
bool is_digital)
@@ -1192,7 +1206,14 @@ static int try_assign_dacs(struct hda_codec *codec, int num_outs,
continue;
}
- dacs[i] = look_for_dac(codec, pin, false);
+ dacs[i] = get_preferred_dac(codec, pin);
+ if (dacs[i]) {
+ if (is_dac_already_used(codec, dacs[i]))
+ badness += bad->shared_primary;
+ }
+
+ if (!dacs[i])
+ dacs[i] = look_for_dac(codec, pin, false);
if (!dacs[i] && !i) {
/* try to steal the DAC of surrounds for the front */
for (j = 1; j < num_outs; j++) {
@@ -4297,6 +4318,26 @@ static unsigned int snd_hda_gen_path_power_filter(struct hda_codec *codec,
return AC_PWRST_D3;
}
+/* mute all aamix inputs initially; parse up to the first leaves */
+static void mute_all_mixer_nid(struct hda_codec *codec, hda_nid_t mix)
+{
+ int i, nums;
+ const hda_nid_t *conn;
+ bool has_amp;
+
+ nums = snd_hda_get_conn_list(codec, mix, &conn);
+ has_amp = nid_has_mute(codec, mix, HDA_INPUT);
+ for (i = 0; i < nums; i++) {
+ if (has_amp)
+ snd_hda_codec_amp_stereo(codec, mix,
+ HDA_INPUT, i,
+ 0xff, HDA_AMP_MUTE);
+ else if (nid_has_volume(codec, conn[i], HDA_OUTPUT))
+ snd_hda_codec_amp_stereo(codec, conn[i],
+ HDA_OUTPUT, 0,
+ 0xff, HDA_AMP_MUTE);
+ }
+}
/*
* Parse the given BIOS configuration and set up the hda_gen_spec
@@ -4435,6 +4476,10 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
}
}
+ /* mute all aamix input initially */
+ if (spec->mixer_nid)
+ mute_all_mixer_nid(codec, spec->mixer_nid);
+
dig_only:
parse_digital(codec);
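get_preferred_dac() above walks a zero-terminated flat array of (pin, DAC) NID pairs and returns the DAC paired with the requested pin; the patch_analog.c change further down supplies such a table for the AD1986A. A tiny self-contained sketch of the same lookup, using a few pair values taken from that table.

#include <assert.h>

typedef unsigned short nid_t;

/* Flat array of (pin, dac) pairs, terminated by a zero pin. */
static const nid_t preferred_pairs[] = {
	0x1a, 0x03,
	0x1b, 0x03,
	0x1c, 0x04,
	0
};

/* Return the DAC paired with @pin, or 0 if the pin has no entry. */
static nid_t preferred_dac(const nid_t *list, nid_t pin)
{
	if (!list)
		return 0;
	for (; *list; list += 2)
		if (*list == pin)
			return list[1];
	return 0;
}

int main(void)
{
	assert(preferred_dac(preferred_pairs, 0x1b) == 0x03);
	assert(preferred_dac(preferred_pairs, 0x1f) == 0);
	return 0;
}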
diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h
index 7e45cb4..0929a06 100644
--- a/sound/pci/hda/hda_generic.h
+++ b/sound/pci/hda/hda_generic.h
@@ -249,6 +249,9 @@ struct hda_gen_spec {
const struct badness_table *main_out_badness;
const struct badness_table *extra_out_badness;
+ /* preferred pin/DAC pairs; an array of paired NIDs */
+ const hda_nid_t *preferred_dacs;
+
/* loopback mixing mode */
bool aamix_mode;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 27aa140..956871d 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -3433,6 +3433,10 @@ static void check_probe_mask(struct azx *chip, int dev)
* white/black-list for enable_msi
*/
static struct snd_pci_quirk msi_black_list[] = {
+ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */
+ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */
SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */
SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */
SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index cac015b..699262a 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -340,6 +340,14 @@ static int patch_ad1986a(struct hda_codec *codec)
{
int err;
struct ad198x_spec *spec;
+ static hda_nid_t preferred_pairs[] = {
+ 0x1a, 0x03,
+ 0x1b, 0x03,
+ 0x1c, 0x04,
+ 0x1d, 0x05,
+ 0x1e, 0x03,
+ 0
+ };
err = alloc_ad_spec(codec);
if (err < 0)
@@ -360,6 +368,8 @@ static int patch_ad1986a(struct hda_codec *codec)
* So, let's disable the shared stream.
*/
spec->gen.multiout.no_share_stream = 1;
+ /* give fixed DAC/pin pairs */
+ spec->gen.preferred_dacs = preferred_pairs;
/* AD1986A can't manage the dynamic pin on/off smoothly */
spec->gen.auto_mute_via_amp = 1;
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 1f2717f..3fbf288 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -2936,7 +2936,6 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
- SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
SND_PCI_QUIRK(0x103c, 0x360b, "HP G60", CXT5066_HP_LAPTOP),
SND_PCI_QUIRK(0x1043, 0x13f3, "Asus A52J", CXT5066_ASUS),
SND_PCI_QUIRK(0x1043, 0x1643, "Asus K52JU", CXT5066_ASUS),
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index c4a66ef..f281c80 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2337,8 +2337,9 @@ static int simple_playback_build_controls(struct hda_codec *codec)
int err;
per_cvt = get_cvt(spec, 0);
- err = snd_hda_create_spdif_out_ctls(codec, per_cvt->cvt_nid,
- per_cvt->cvt_nid);
+ err = snd_hda_create_dig_out_ctls(codec, per_cvt->cvt_nid,
+ per_cvt->cvt_nid,
+ HDA_PCM_TYPE_HDMI);
if (err < 0)
return err;
return simple_hdmi_build_jack(codec, 0);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index c5ea483..c564694 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -3849,6 +3849,7 @@ enum {
ALC269_FIXUP_ASUS_X101,
ALC271_FIXUP_AMIC_MIC2,
ALC271_FIXUP_HP_GATE_MIC_JACK,
+ ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572,
ALC269_FIXUP_ACER_AC700,
ALC269_FIXUP_LIMIT_INT_MIC_BOOST,
ALC269VB_FIXUP_ASUS_ZENBOOK,
@@ -4111,6 +4112,12 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC271_FIXUP_AMIC_MIC2,
},
+ [ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc269_fixup_limit_int_mic_boost,
+ .chained = true,
+ .chain_id = ALC271_FIXUP_HP_GATE_MIC_JACK,
+ },
[ALC269_FIXUP_ACER_AC700] = {
.type = HDA_FIXUP_PINS,
.v.pins = (const struct hda_pintbl[]) {
@@ -4208,6 +4215,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x0740, "Acer AO725", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK(0x1025, 0x0742, "Acer AO756", ALC271_FIXUP_HP_GATE_MIC_JACK),
SND_PCI_QUIRK_VENDOR(0x1025, "Acer Aspire", ALC271_FIXUP_DMIC),
+ SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05be, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
@@ -4239,12 +4247,16 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0606, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0608, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0609, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0613, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0614, "Dell Inspiron 3135", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_MONO_SPEAKERS),
SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0629, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS),
+ SND_PCI_QUIRK(0x1028, 0x063e, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x063f, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0640, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cc, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x15cd, "Dell X5 Precision", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5034,8 +5046,11 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
SND_PCI_QUIRK(0x1025, 0x038b, "Acer Aspire 8943G", ALC662_FIXUP_ASPIRE),
SND_PCI_QUIRK(0x1028, 0x05d8, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x05db, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0623, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0624, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0625, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x0628, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A_CHMAP),
SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_CHMAP),
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index 8697ced..1ead3c9 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -648,7 +648,7 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
dma_params = ssc_p->dma_params[dir];
- ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
ssc_writel(ssc_p->ssc->regs, IDR, dma_params->mask->ssc_error);
pr_debug("%s enabled SSC_SR=0x%08x\n",
@@ -657,6 +657,33 @@ static int atmel_ssc_prepare(struct snd_pcm_substream *substream,
return 0;
}
+static int atmel_ssc_trigger(struct snd_pcm_substream *substream,
+ int cmd, struct snd_soc_dai *dai)
+{
+ struct atmel_ssc_info *ssc_p = &ssc_info[dai->id];
+ struct atmel_pcm_dma_params *dma_params;
+ int dir;
+
+ if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
+ dir = 0;
+ else
+ dir = 1;
+
+ dma_params = ssc_p->dma_params[dir];
+
+ switch (cmd) {
+ case SNDRV_PCM_TRIGGER_START:
+ case SNDRV_PCM_TRIGGER_RESUME:
+ case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_enable);
+ break;
+ default:
+ ssc_writel(ssc_p->ssc->regs, CR, dma_params->mask->ssc_disable);
+ break;
+ }
+
+ return 0;
+}
#ifdef CONFIG_PM
static int atmel_ssc_suspend(struct snd_soc_dai *cpu_dai)
@@ -731,6 +758,7 @@ static const struct snd_soc_dai_ops atmel_ssc_dai_ops = {
.startup = atmel_ssc_startup,
.shutdown = atmel_ssc_shutdown,
.prepare = atmel_ssc_prepare,
+ .trigger = atmel_ssc_trigger,
.hw_params = atmel_ssc_hw_params,
.set_fmt = atmel_ssc_set_dai_fmt,
.set_clkdiv = atmel_ssc_set_dai_clkdiv,
diff --git a/sound/soc/atmel/sam9x5_wm8731.c b/sound/soc/atmel/sam9x5_wm8731.c
index 1b37228..7d6a905 100644
--- a/sound/soc/atmel/sam9x5_wm8731.c
+++ b/sound/soc/atmel/sam9x5_wm8731.c
@@ -109,7 +109,7 @@ static int sam9x5_wm8731_driver_probe(struct platform_device *pdev)
dai->stream_name = "WM8731 PCM";
dai->codec_dai_name = "wm8731-hifi";
dai->init = sam9x5_wm8731_init;
- dai->dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF
+ dai->dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_NB_NF
| SND_SOC_DAIFMT_CBM_CFM;
ret = snd_soc_of_parse_card_name(card, "atmel,model");
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 99b359e..0ab2dc2 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -1012,7 +1012,7 @@ static const struct snd_soc_dapm_route wm5110_dapm_routes[] = {
{ "AEC Loopback", "HPOUT3L", "OUT3L" },
{ "AEC Loopback", "HPOUT3R", "OUT3R" },
{ "HPOUT3L", NULL, "OUT3L" },
- { "HPOUT3R", NULL, "OUT3L" },
+ { "HPOUT3R", NULL, "OUT3R" },
{ "AEC Loopback", "SPKOUTL", "OUT4L" },
{ "SPKOUTLN", NULL, "OUT4L" },
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 3938fb1..53bbfac 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1444,7 +1444,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_DSP_B:
- aif1 |= WM8904_AIF_LRCLK_INV;
+ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV;
case SND_SOC_DAIFMT_DSP_A:
aif1 |= 0x3;
break;
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 543c5c2..0f17ed3 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2439,7 +2439,20 @@ static void wm8962_configure_bclk(struct snd_soc_codec *codec)
snd_soc_update_bits(codec, WM8962_CLOCKING_4,
WM8962_SYSCLK_RATE_MASK, clocking4);
+ /* DSPCLK_DIV can only be generated correctly after SYSCLK is enabled,
+ * so provisionally enable SYSCLK here and then disable it afterwards
+ * if the current bias_level has not reached SND_SOC_BIAS_ON.
+ */
+ if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+ snd_soc_update_bits(codec, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA_MASK, WM8962_SYSCLK_ENA);
+
dspclk = snd_soc_read(codec, WM8962_CLOCKING1);
+
+ if (codec->dapm.bias_level != SND_SOC_BIAS_ON)
+ snd_soc_update_bits(codec, WM8962_CLOCKING2,
+ WM8962_SYSCLK_ENA_MASK, 0);
+
if (dspclk < 0) {
dev_err(codec->dev, "Failed to read DSPCLK: %d\n", dspclk);
return;
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 46ec0e9..4fbcab6 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1474,13 +1474,17 @@ static int wm_adsp2_ena(struct wm_adsp *dsp)
return ret;
/* Wait for the RAM to start, should be near instantaneous */
- count = 0;
- do {
+ for (count = 0; count < 10; ++count) {
ret = regmap_read(dsp->regmap, dsp->base + ADSP2_STATUS1,
&val);
if (ret != 0)
return ret;
- } while (!(val & ADSP2_RAM_RDY) && ++count < 10);
+
+ if (val & ADSP2_RAM_RDY)
+ break;
+
+ msleep(1);
+ }
if (!(val & ADSP2_RAM_RDY)) {
adsp_err(dsp, "Failed to start DSP RAM\n");
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 61e4885..3fd76bc 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -130,8 +130,6 @@ static int imx_wm8962_set_bias_level(struct snd_soc_card *card,
break;
}
- dapm->bias_level = level;
-
return 0;
}
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 0b18f65..3920a5e 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -473,17 +473,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_I2S_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
@@ -494,17 +494,17 @@ static struct snd_soc_dai_driver kirkwood_i2s_dai_extclk[2] = {
.playback = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_SPDIF_FORMATS,
},
.capture = {
.channels_min = 1,
.channels_max = 2,
- .rates = SNDRV_PCM_RATE_8000_192000 |
- SNDRV_PCM_RATE_CONTINUOUS |
- SNDRV_PCM_RATE_KNOT,
+ .rates = SNDRV_PCM_RATE_CONTINUOUS,
+ .rate_min = 5512,
+ .rate_max = 192000,
.formats = KIRKWOOD_SPDIF_FORMATS,
},
.ops = &kirkwood_i2s_dai_ops,
diff --git a/sound/soc/soc-generic-dmaengine-pcm.c b/sound/soc/soc-generic-dmaengine-pcm.c
index cbc9c96..41949af 100644
--- a/sound/soc/soc-generic-dmaengine-pcm.c
+++ b/sound/soc/soc-generic-dmaengine-pcm.c
@@ -305,6 +305,20 @@ static void dmaengine_pcm_request_chan_of(struct dmaengine_pcm *pcm,
}
}
+static void dmaengine_pcm_release_chan(struct dmaengine_pcm *pcm)
+{
+ unsigned int i;
+
+ for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE;
+ i++) {
+ if (!pcm->chan[i])
+ continue;
+ dma_release_channel(pcm->chan[i]);
+ if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
+ break;
+ }
+}
+
/**
* snd_dmaengine_pcm_register - Register a dmaengine based PCM device
* @dev: The parent device for the PCM device
@@ -315,6 +329,7 @@ int snd_dmaengine_pcm_register(struct device *dev,
const struct snd_dmaengine_pcm_config *config, unsigned int flags)
{
struct dmaengine_pcm *pcm;
+ int ret;
pcm = kzalloc(sizeof(*pcm), GFP_KERNEL);
if (!pcm)
@@ -326,11 +341,20 @@ int snd_dmaengine_pcm_register(struct device *dev,
dmaengine_pcm_request_chan_of(pcm, dev);
if (flags & SND_DMAENGINE_PCM_FLAG_NO_RESIDUE)
- return snd_soc_add_platform(dev, &pcm->platform,
+ ret = snd_soc_add_platform(dev, &pcm->platform,
&dmaengine_no_residue_pcm_platform);
else
- return snd_soc_add_platform(dev, &pcm->platform,
+ ret = snd_soc_add_platform(dev, &pcm->platform,
&dmaengine_pcm_platform);
+ if (ret)
+ goto err_free_dma;
+
+ return 0;
+
+err_free_dma:
+ dmaengine_pcm_release_chan(pcm);
+ kfree(pcm);
+ return ret;
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_register);
@@ -345,7 +369,6 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
{
struct snd_soc_platform *platform;
struct dmaengine_pcm *pcm;
- unsigned int i;
platform = snd_soc_lookup_platform(dev);
if (!platform)
@@ -353,15 +376,8 @@ void snd_dmaengine_pcm_unregister(struct device *dev)
pcm = soc_platform_to_pcm(platform);
- for (i = SNDRV_PCM_STREAM_PLAYBACK; i <= SNDRV_PCM_STREAM_CAPTURE; i++) {
- if (pcm->chan[i]) {
- dma_release_channel(pcm->chan[i]);
- if (pcm->flags & SND_DMAENGINE_PCM_FLAG_HALF_DUPLEX)
- break;
- }
- }
-
snd_soc_remove_platform(platform);
+ dmaengine_pcm_release_chan(pcm);
kfree(pcm);
}
EXPORT_SYMBOL_GPL(snd_dmaengine_pcm_unregister);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 11a90cd..891b9a9 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -600,12 +600,13 @@ static int soc_pcm_hw_free(struct snd_pcm_substream *substream)
struct snd_soc_platform *platform = rtd->platform;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
- struct snd_soc_codec *codec = rtd->codec;
+ bool playback = substream->stream == SNDRV_PCM_STREAM_PLAYBACK;
mutex_lock_nested(&rtd->pcm_mutex, rtd->pcm_subclass);
/* apply codec digital mute */
- if (!codec->active)
+ if ((playback && codec_dai->playback_active == 1) ||
+ (!playback && codec_dai->capture_active == 1))
snd_soc_dai_digital_mute(codec_dai, 1, substream->stream);
/* free any machine hw params */
diff --git a/sound/soc/tegra/tegra20_i2s.c b/sound/soc/tegra/tegra20_i2s.c
index 364bf6a9..8c819f8 100644
--- a/sound/soc/tegra/tegra20_i2s.c
+++ b/sound/soc/tegra/tegra20_i2s.c
@@ -74,7 +74,7 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra20_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -83,10 +83,10 @@ static int tegra20_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- mask = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ mask |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- val = TEGRA20_I2S_CTRL_MASTER_ENABLE;
+ val |= TEGRA20_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
diff --git a/sound/soc/tegra/tegra20_spdif.c b/sound/soc/tegra/tegra20_spdif.c
index 08bc693..8c7c102 100644
--- a/sound/soc/tegra/tegra20_spdif.c
+++ b/sound/soc/tegra/tegra20_spdif.c
@@ -67,15 +67,15 @@ static int tegra20_spdif_hw_params(struct snd_pcm_substream *substream,
{
struct device *dev = dai->dev;
struct tegra20_spdif *spdif = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
int ret, spdifclock;
- mask = TEGRA20_SPDIF_CTRL_PACK |
- TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
+ mask |= TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_MASK;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- val = TEGRA20_SPDIF_CTRL_PACK |
- TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
+ val |= TEGRA20_SPDIF_CTRL_PACK |
+ TEGRA20_SPDIF_CTRL_BIT_MODE_16BIT;
break;
default:
return -EINVAL;
diff --git a/sound/soc/tegra/tegra30_i2s.c b/sound/soc/tegra/tegra30_i2s.c
index 231a785..02247fe 100644
--- a/sound/soc/tegra/tegra30_i2s.c
+++ b/sound/soc/tegra/tegra30_i2s.c
@@ -118,7 +118,7 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
unsigned int fmt)
{
struct tegra30_i2s *i2s = snd_soc_dai_get_drvdata(dai);
- unsigned int mask, val;
+ unsigned int mask = 0, val = 0;
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
@@ -127,10 +127,10 @@ static int tegra30_i2s_set_fmt(struct snd_soc_dai *dai,
return -EINVAL;
}
- mask = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ mask |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
- val = TEGRA30_I2S_CTRL_MASTER_ENABLE;
+ val |= TEGRA30_I2S_CTRL_MASTER_ENABLE;
break;
case SND_SOC_DAIFMT_CBM_CFM:
break;
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 3454262..f4b12c2 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -1603,7 +1603,7 @@ static int snd_microii_controls_create(struct usb_mixer_interface *mixer)
return err;
}
- return err;
+ return 0;
}
int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer)
diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c
index dc4de37..bcf1d2f 100644
--- a/tools/power/cpupower/utils/cpupower-set.c
+++ b/tools/power/cpupower/utils/cpupower-set.c
@@ -18,9 +18,9 @@
#include "helpers/bitmask.h"
static struct option set_opts[] = {
- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'},
- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'},
- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'},
+ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'},
+ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'},
+ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'},
{ },
};
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a0aa84b..4f588bc 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1898,6 +1898,9 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, u32 id)
int r;
struct kvm_vcpu *vcpu, *v;
+ if (id >= KVM_MAX_VCPUS)
+ return -EINVAL;
+
vcpu = kvm_arch_vcpu_create(kvm, id);
if (IS_ERR(vcpu))
return PTR_ERR(vcpu);