-rw-r--r--  .mailmap  4
-rw-r--r--  Documentation/ABI/testing/sysfs-devices-deferred_probe  12
-rw-r--r--  Documentation/devicetree/bindings/i2c/i2c.txt  8
-rw-r--r--  Documentation/devicetree/bindings/mtd/tango-nand.txt  6
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt  10
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,systemport.txt  5
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/dsa.txt  20
-rw-r--r--  Documentation/devicetree/bindings/net/dsa/marvell.txt  93
-rw-r--r--  Documentation/devicetree/bindings/net/mediatek-net.txt  2
-rw-r--r--  Documentation/devicetree/bindings/net/meson-dwmac.txt  16
-rw-r--r--  Documentation/devicetree/bindings/net/phy.txt  5
-rw-r--r--  Documentation/devicetree/bindings/net/ti,dp83867.txt  6
-rw-r--r--  Documentation/devicetree/bindings/net/wireless/ieee80211.txt  24
-rw-r--r--  Documentation/devicetree/bindings/spi/sh-msiof.txt  19
-rw-r--r--  Documentation/driver-api/80211/cfg80211.rst  3
-rw-r--r--  Documentation/filesystems/proc.txt  5
-rw-r--r--  Documentation/networking/dsa/dsa.txt  24
-rw-r--r--  Documentation/networking/ip-sysctl.txt  47
-rw-r--r--  Documentation/networking/regulatory.txt  8
-rw-r--r--  Documentation/networking/vrf.txt  7
-rw-r--r--  Documentation/power/states.txt  4
-rw-r--r--  Documentation/vm/page_frags  42
-rw-r--r--  MAINTAINERS  68
-rw-r--r--  Makefile  4
-rw-r--r--  arch/Kconfig  3
-rw-r--r--  arch/arc/Kconfig  2
-rw-r--r--  arch/arc/include/asm/cache.h  9
-rw-r--r--  arch/arc/include/asm/entry-arcv2.h  2
-rw-r--r--  arch/arc/include/asm/module.h  4
-rw-r--r--  arch/arc/include/asm/ptrace.h  2
-rw-r--r--  arch/arc/include/asm/setup.h  1
-rw-r--r--  arch/arc/kernel/intc-arcv2.c  6
-rw-r--r--  arch/arc/kernel/intc-compact.c  4
-rw-r--r--  arch/arc/kernel/mcip.c  4
-rw-r--r--  arch/arc/kernel/module.c  4
-rw-r--r--  arch/arc/mm/cache.c  155
-rw-r--r--  arch/arc/mm/init.c  5
-rw-r--r--  arch/arm/boot/dts/Makefile  1
-rw-r--r--  arch/arm/boot/dts/am335x-icev2.dts  1
-rw-r--r--  arch/arm/boot/dts/bcm-nsp.dtsi  2
-rw-r--r--  arch/arm/boot/dts/da850-evm.dts  1
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi  1
-rw-r--r--  arch/arm/boot/dts/dra72-evm-revc.dts  2
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi  4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi  4
-rw-r--r--  arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts  11
-rw-r--r--  arch/arm/boot/dts/omap5.dtsi  1
-rw-r--r--  arch/arm/boot/dts/qcom-mdm9615.dtsi  2
-rw-r--r--  arch/arm/boot/dts/sun6i-a31-hummingbird.dts  4
-rw-r--r--  arch/arm/boot/dts/sun6i-a31.dtsi  1
-rw-r--r--  arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts  2
-rw-r--r--  arch/arm/configs/multi_v7_defconfig  7
-rw-r--r--  arch/arm/configs/s3c2410_defconfig  6
-rw-r--r--  arch/arm/include/asm/cputype.h  3
-rw-r--r--  arch/arm/include/asm/ftrace.h  18
-rw-r--r--  arch/arm/include/asm/virt.h  5
-rw-r--r--  arch/arm/include/uapi/asm/types.h (renamed from arch/arm/include/asm/types.h)  6
-rw-r--r--  arch/arm/kernel/hw_breakpoint.c  16
-rw-r--r--  arch/arm/kernel/smp_tlb.c  7
-rw-r--r--  arch/arm/kvm/arm.c  3
-rw-r--r--  arch/arm/mach-omap1/dma.c  16
-rw-r--r--  arch/arm/mach-omap2/pdata-quirks.c  2
-rw-r--r--  arch/arm/mach-ux500/pm.c  4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts  4
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi  2
-rw-r--r--  arch/arm64/boot/dts/exynos/exynos5433.dtsi  2
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts  2
-rw-r--r--  arch/arm64/boot/dts/xilinx/zynqmp.dtsi  6
-rw-r--r--  arch/arm64/include/asm/assembler.h  36
-rw-r--r--  arch/arm64/include/asm/memory.h  2
-rw-r--r--  arch/arm64/include/asm/virt.h  9
-rw-r--r--  arch/arm64/include/uapi/asm/ptrace.h  1
-rw-r--r--  arch/arm64/kernel/entry.S  2
-rw-r--r--  arch/arm64/kernel/ptrace.c  16
-rw-r--r--  arch/arm64/kernel/traps.c  28
-rw-r--r--  arch/arm64/mm/hugetlbpage.c  2
-rw-r--r--  arch/arm64/mm/init.c  2
-rw-r--r--  arch/frv/include/asm/atomic.h  35
-rw-r--r--  arch/m68k/emu/nfeth.c  1
-rw-r--r--  arch/mn10300/include/asm/switch_to.h  2
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash-4k.h  5
-rw-r--r--  arch/powerpc/include/asm/book3s/64/hash.h  4
-rw-r--r--  arch/powerpc/include/asm/hugetlb.h  14
-rw-r--r--  arch/powerpc/include/asm/nohash/pgtable.h  5
-rw-r--r--  arch/powerpc/include/asm/page.h  3
-rw-r--r--  arch/powerpc/include/asm/perf_event_server.h  1
-rw-r--r--  arch/powerpc/include/asm/pgtable-be-types.h  8
-rw-r--r--  arch/powerpc/include/asm/pgtable-types.h  7
-rw-r--r--  arch/powerpc/include/asm/ppc-opcode.h  10
-rw-r--r--  arch/powerpc/kernel/eeh.c  10
-rw-r--r--  arch/powerpc/kernel/ptrace.c  14
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c  4
-rw-r--r--  arch/powerpc/mm/hugetlbpage-hash64.c  5
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c  31
-rw-r--r--  arch/powerpc/mm/init-common.c  13
-rw-r--r--  arch/powerpc/mm/pgtable-book3s64.c  18
-rw-r--r--  arch/powerpc/perf/core-book3s.c  2
-rw-r--r--  arch/powerpc/perf/power9-events-list.h  2
-rw-r--r--  arch/powerpc/perf/power9-pmu.c  2
-rw-r--r--  arch/powerpc/sysdev/xics/icp-opal.c  31
-rw-r--r--  arch/s390/configs/default_defconfig  27
-rw-r--r--  arch/s390/configs/gcov_defconfig  50
-rw-r--r--  arch/s390/configs/performance_defconfig  33
-rw-r--r--  arch/s390/defconfig  5
-rw-r--r--  arch/s390/include/asm/ctl_reg.h  4
-rw-r--r--  arch/s390/kernel/ptrace.c  8
-rw-r--r--  arch/s390/kvm/kvm-s390.c  4
-rw-r--r--  arch/s390/mm/pgtable.c  7
-rw-r--r--  arch/sparc/Kconfig  1
-rw-r--r--  arch/tile/kernel/ptrace.c  2
-rw-r--r--  arch/x86/boot/string.c  1
-rw-r--r--  arch/x86/boot/string.h  9
-rw-r--r--  arch/x86/crypto/aesni-intel_glue.c  3
-rw-r--r--  arch/x86/entry/entry_32.S  30
-rw-r--r--  arch/x86/entry/entry_64.S  11
-rw-r--r--  arch/x86/events/amd/ibs.c  2
-rw-r--r--  arch/x86/events/core.c  4
-rw-r--r--  arch/x86/events/intel/core.c  9
-rw-r--r--  arch/x86/events/intel/cstate.c  2
-rw-r--r--  arch/x86/events/intel/ds.c  6
-rw-r--r--  arch/x86/events/intel/rapl.c  1
-rw-r--r--  arch/x86/events/intel/uncore.c  1
-rw-r--r--  arch/x86/events/intel/uncore_snbep.c  2
-rw-r--r--  arch/x86/include/asm/intel-family.h  2
-rw-r--r--  arch/x86/include/asm/microcode_intel.h  15
-rw-r--r--  arch/x86/include/asm/processor.h  18
-rw-r--r--  arch/x86/include/asm/stacktrace.h  2
-rw-r--r--  arch/x86/include/asm/switch_to.h  10
-rw-r--r--  arch/x86/kernel/apic/io_apic.c  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c  9
-rw-r--r--  arch/x86/kernel/cpu/common.c  2
-rw-r--r--  arch/x86/kernel/cpu/intel.c  11
-rw-r--r--  arch/x86/kernel/cpu/microcode/intel.c  70
-rw-r--r--  arch/x86/kernel/tsc.c  1
-rw-r--r--  arch/x86/kernel/unwind_frame.c  30
-rw-r--r--  arch/x86/kvm/emulate.c  70
-rw-r--r--  arch/x86/kvm/lapic.c  6
-rw-r--r--  arch/x86/kvm/lapic.h  1
-rw-r--r--  arch/x86/kvm/x86.c  6
-rw-r--r--  arch/x86/mm/mpx.c  2
-rw-r--r--  arch/x86/pci/acpi.c  10
-rw-r--r--  arch/x86/platform/efi/efi.c  66
-rw-r--r--  arch/x86/platform/efi/quirks.c  4
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/Makefile  2
-rw-r--r--  arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c (renamed from arch/x86/platform/intel-mid/device_libs/platform_spidev.c)  4
-rw-r--r--  block/blk-lib.c  13
-rw-r--r--  block/blk-mq.c  1
-rw-r--r--  block/blk-zoned.c  4
-rw-r--r--  block/partition-generic.c  14
-rw-r--r--  drivers/acpi/acpica/tbdata.c  9
-rw-r--r--  drivers/acpi/acpica/tbinstal.c  17
-rw-r--r--  drivers/acpi/sleep.c  8
-rw-r--r--  drivers/acpi/video_detect.c  11
-rw-r--r--  drivers/auxdisplay/Kconfig  6
-rw-r--r--  drivers/base/base.h  2
-rw-r--r--  drivers/base/core.c  7
-rw-r--r--  drivers/base/dd.c  13
-rw-r--r--  drivers/base/memory.c  4
-rw-r--r--  drivers/block/nbd.c  12
-rw-r--r--  drivers/block/virtio_blk.c  7
-rw-r--r--  drivers/block/xen-blkfront.c  22
-rw-r--r--  drivers/block/zram/zram_drv.c  19
-rw-r--r--  drivers/char/mem.c  10
-rw-r--r--  drivers/char/ppdev.c  13
-rw-r--r--  drivers/char/virtio_console.c  2
-rw-r--r--  drivers/clk/samsung/clk-exynos5420.c  14
-rw-r--r--  drivers/clocksource/exynos_mct.c  1
-rw-r--r--  drivers/cpufreq/intel_pstate.c  14
-rw-r--r--  drivers/dma/dw/Kconfig  2
-rw-r--r--  drivers/dma/ioat/hw.h  2
-rw-r--r--  drivers/dma/ioat/init.c  15
-rw-r--r--  drivers/dma/omap-dma.c  61
-rw-r--r--  drivers/dma/pl330.c  11
-rw-r--r--  drivers/dma/sh/rcar-dmac.c  8
-rw-r--r--  drivers/dma/stm32-dma.c  17
-rw-r--r--  drivers/dma/ti-dma-crossbar.c  2
-rw-r--r--  drivers/extcon/extcon.c  2
-rw-r--r--  drivers/firmware/efi/fake_mem.c  3
-rw-r--r--  drivers/firmware/efi/libstub/efistub.h  8
-rw-r--r--  drivers/firmware/efi/libstub/fdt.c  87
-rw-r--r--  drivers/firmware/efi/memmap.c  38
-rw-r--r--  drivers/gpio/gpio-mxs.c  2
-rw-r--r--  drivers/gpio/gpiolib.c  20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c  3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c  2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c  7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c  1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c  35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c  24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c  22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c  31
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c  34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c  6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c  84
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c  42
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c  5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c  32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c  10
-rw-r--r--  drivers/gpu/drm/amd/include/amd_shared.h  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c  4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c  24
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c  3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c  2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c  1
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h  1
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c  157
-rw-r--r--  drivers/gpu/drm/ast/ast_post.c  18
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c  7
-rw-r--r--  drivers/gpu/drm/cirrus/Kconfig  9
-rw-r--r--  drivers/gpu/drm/drm_atomic.c  12
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c  4
-rw-r--r--  drivers/gpu/drm/drm_modes.c  7
-rw-r--r--  drivers/gpu/drm/drm_probe_helper.c  12
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_mmu.c  7
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c  15
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c  36
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c  74
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/execlist.c  66
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c  81
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h  1
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c  103
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c  22
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.c  84
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h  4
-rw-r--r--  drivers/gpu/drm/i915/gvt/opregion.c  8
-rw-r--r--  drivers/gpu/drm/i915/gvt/reg.h  3
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c  14
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h  2
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c  162
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c  2
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h  5
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c  34
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c  1
-rw-r--r--  drivers/gpu/drm/i915/i915_vma.c  1
-rw-r--r--  drivers/gpu/drm/i915/intel_crt.c  9
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c  46
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h  2
-rw-r--r--  drivers/gpu/drm/i915/intel_fbdev.c  3
-rw-r--r--  drivers/gpu/drm/i915/intel_lrc.c  10
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c  8
-rw-r--r--  drivers/gpu/drm/meson/meson_plane.c  3
-rw-r--r--  drivers/gpu/drm/meson/meson_venc.c  19
-rw-r--r--  drivers/gpu/drm/meson/meson_venc_cvbs.c  2
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  14
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c  6
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h  4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c  22
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  2
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  18
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c  3
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c  5
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drv.h  2
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fbcon.c  43
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c  7
-rw-r--r--  drivers/gpu/drm/radeon/si.c  79
-rw-r--r--  drivers/gpu/drm/radeon/si_dpm.c  25
-rw-r--r--  drivers/gpu/drm/tilcdc/tilcdc_crtc.c  27
-rw-r--r--  drivers/gpu/drm/vc4/vc4_crtc.c  2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_gem.c  4
-rw-r--r--  drivers/gpu/drm/vc4/vc4_render_cl.c  2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c  2
-rw-r--r--  drivers/hid/hid-core.c  1
-rw-r--r--  drivers/hid/hid-corsair.c  60
-rw-r--r--  drivers/hid/hid-cypress.c  3
-rw-r--r--  drivers/hid/hid-ids.h  3
-rw-r--r--  drivers/hid/i2c-hid/i2c-hid.c  9
-rw-r--r--  drivers/hid/wacom_sys.c  16
-rw-r--r--  drivers/hid/wacom_wac.c  10
-rw-r--r--  drivers/i2c/busses/i2c-piix4.c  22
-rw-r--r--  drivers/i2c/i2c-core.c  21
-rw-r--r--  drivers/i2c/i2c-dev.c  2
-rw-r--r--  drivers/infiniband/core/cma.c  3
-rw-r--r--  drivers/infiniband/core/umem.c  2
-rw-r--r--  drivers/infiniband/hw/cxgb3/iwch_provider.c  11
-rw-r--r--  drivers/infiniband/hw/cxgb4/cm.c  7
-rw-r--r--  drivers/infiniband/hw/cxgb4/cq.c  21
-rw-r--r--  drivers/infiniband/hw/cxgb4/device.c  9
-rw-r--r--  drivers/infiniband/hw/cxgb4/iw_cxgb4.h  24
-rw-r--r--  drivers/infiniband/hw/cxgb4/provider.c  33
-rw-r--r--  drivers/infiniband/hw/cxgb4/qp.c  147
-rw-r--r--  drivers/infiniband/hw/cxgb4/t4.h  2
-rw-r--r--  drivers/infiniband/hw/i40iw/i40iw_verbs.c  11
-rw-r--r--  drivers/infiniband/hw/mlx5/main.c  13
-rw-r--r--  drivers/infiniband/hw/nes/nes_verbs.c  12
-rw-r--r--  drivers/infiniband/hw/qedr/main.c  23
-rw-r--r--  drivers/infiniband/hw/qedr/qedr.h  8
-rw-r--r--  drivers/infiniband/hw/qedr/qedr_cm.c  14
-rw-r--r--  drivers/infiniband/hw/qedr/verbs.c  62
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c  4
-rw-r--r--  drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c  2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_net.c  2
-rw-r--r--  drivers/infiniband/sw/rxe/rxe_qp.c  3
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.c  11
-rw-r--r--  drivers/infiniband/ulp/iser/iscsi_iser.h  2
-rw-r--r--  drivers/infiniband/ulp/iser/iser_verbs.c  13
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c  15
-rw-r--r--  drivers/input/joydev.c  1
-rw-r--r--  drivers/input/joystick/xpad.c  6
-rw-r--r--  drivers/input/misc/adxl34x-i2c.c  4
-rw-r--r--  drivers/input/mouse/alps.h  2
-rw-r--r--  drivers/input/mouse/synaptics_i2c.c  4
-rw-r--r--  drivers/input/rmi4/Kconfig  3
-rw-r--r--  drivers/input/serio/i8042-x86ia64io.h  6
-rw-r--r--  drivers/input/touchscreen/elants_i2c.c  4
-rw-r--r--  drivers/isdn/hardware/eicon/message.c  19
-rw-r--r--  drivers/md/md.h  8
-rw-r--r--  drivers/md/raid0.c  12
-rw-r--r--  drivers/md/raid1.c  275
-rw-r--r--  drivers/md/raid10.c  245
-rw-r--r--  drivers/md/raid5-cache.c  36
-rw-r--r--  drivers/md/raid5.c  7
-rw-r--r--  drivers/media/cec/cec-adap.c  103
-rw-r--r--  drivers/media/dvb-core/dvb_net.c  15
-rw-r--r--  drivers/media/i2c/Kconfig  1
-rw-r--r--  drivers/media/i2c/smiapp/smiapp-core.c  33
-rw-r--r--  drivers/media/i2c/tvp5150.c  56
-rw-r--r--  drivers/media/i2c/tvp5150_reg.h  9
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.c  8
-rw-r--r--  drivers/media/pci/cobalt/cobalt-driver.h  2
-rw-r--r--  drivers/media/usb/dvb-usb/pctv452e.c  133
-rw-r--r--  drivers/memstick/core/memstick.c  2
-rw-r--r--  drivers/misc/mei/bus-fixup.c  3
-rw-r--r--  drivers/misc/mei/debugfs.c  2
-rw-r--r--  drivers/misc/mei/hbm.c  4
-rw-r--r--  drivers/misc/mei/hw.h  6
-rw-r--r--  drivers/misc/mei/mei_dev.h  2
-rw-r--r--  drivers/mmc/core/mmc_ops.c  25
-rw-r--r--  drivers/mmc/host/dw_mmc.c  7
-rw-r--r--  drivers/mmc/host/meson-gx-mmc.c  8
-rw-r--r--  drivers/mmc/host/mxs-mmc.c  6
-rw-r--r--  drivers/mmc/host/sdhci-acpi.c  3
-rw-r--r--  drivers/mtd/nand/Kconfig  3
-rw-r--r--  drivers/mtd/nand/lpc32xx_mlc.c  2
-rw-r--r--  drivers/mtd/nand/tango_nand.c  4
-rw-r--r--  drivers/mtd/nand/xway_nand.c  5
-rw-r--r--  drivers/net/appletalk/ipddp.c  2
-rw-r--r--  drivers/net/can/at91_can.c  2
-rw-r--r--  drivers/net/can/c_can/c_can.c  2
-rw-r--r--  drivers/net/can/c_can/c_can_pci.c  1
-rw-r--r--  drivers/net/can/dev.c  136
-rw-r--r--  drivers/net/can/flexcan.c  2
-rw-r--r--  drivers/net/can/ifi_canfd/ifi_canfd.c  2
-rw-r--r--  drivers/net/can/janz-ican3.c  2
-rw-r--r--  drivers/net/can/m_can/m_can.c  2
-rw-r--r--  drivers/net/can/rcar/rcar_can.c  2
-rw-r--r--  drivers/net/can/rcar/rcar_canfd.c  2
-rw-r--r--  drivers/net/can/softing/softing_cs.c  2
-rw-r--r--  drivers/net/can/ti_hecc.c  16
-rw-r--r--  drivers/net/can/xilinx_can.c  2
-rw-r--r--  drivers/net/dsa/Makefile  3
-rw-r--r--  drivers/net/dsa/b53/b53_common.c  99
-rw-r--r--  drivers/net/dsa/b53/b53_mdio.c  13
-rw-r--r--  drivers/net/dsa/b53/b53_priv.h  11
-rw-r--r--  drivers/net/dsa/b53/b53_regs.h  32
-rw-r--r--  drivers/net/dsa/bcm_sf2.c  192
-rw-r--r--  drivers/net/dsa/bcm_sf2.h  58
-rw-r--r--  drivers/net/dsa/bcm_sf2_cfp.c  613
-rw-r--r--  drivers/net/dsa/bcm_sf2_regs.h  197
-rw-r--r--  drivers/net/dsa/mv88e6xxx/chip.c  461
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.c  111
-rw-r--r--  drivers/net/dsa/mv88e6xxx/global2.h  33
-rw-r--r--  drivers/net/dsa/mv88e6xxx/mv88e6xxx.h  67
-rw-r--r--  drivers/net/dsa/qca8k.c  19
-rw-r--r--  drivers/net/dsa/qca8k.h  1
-rw-r--r--  drivers/net/dummy.c  217
-rw-r--r--  drivers/net/ethernet/3com/typhoon.c  2
-rw-r--r--  drivers/net/ethernet/Kconfig  2
-rw-r--r--  drivers/net/ethernet/Makefile  2
-rw-r--r--  drivers/net/ethernet/adi/bfin_mac.c  2
-rw-r--r--  drivers/net/ethernet/agere/et131x.c  2
-rw-r--r--  drivers/net/ethernet/altera/altera_tse_main.c  2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-common.h  2
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-dev.c  8
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-drv.c  4
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe-pci.c  15
-rw-r--r--  drivers/net/ethernet/amd/xgbe/xgbe.h  2
-rw-r--r--  drivers/net/ethernet/apm/xgene/xgene_enet_main.c  2
-rw-r--r--  drivers/net/ethernet/aquantia/Kconfig  24
-rw-r--r--  drivers/net/ethernet/aquantia/Makefile  5
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/Makefile  42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_cfg.h  77
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_common.h  23
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c  262
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h  19
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw.h  177
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c  68
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h  47
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.c  273
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_main.h  17
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.c  952
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic.h  110
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h  46
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c  343
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h  34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.c  375
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_ring.h  157
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_rss.h  26
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_utils.h  50
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.c  392
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/aq_vec.h  42
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c  905
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h  34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h  155
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c  958
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h  34
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h  207
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c  1394
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h  677
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h  2375
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c  570
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h  210
-rw-r--r--  drivers/net/ethernet/aquantia/atlantic/ver.h  18
-rw-r--r--  drivers/net/ethernet/arc/emac_main.c  2
-rw-r--r--  drivers/net/ethernet/atheros/alx/main.c  13
-rw-r--r--  drivers/net/ethernet/atheros/atl1c/atl1c_main.c  2
-rw-r--r--  drivers/net/ethernet/atheros/atl1e/atl1e_main.c  2
-rw-r--r--  drivers/net/ethernet/atheros/atlx/atl1.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/b44.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bcm63xx_enet.c  8
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.c  359
-rw-r--r--  drivers/net/ethernet/broadcom/bcmsysport.h  80
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c  108
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-bcma.c  6
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac-platform.c  2
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.c  27
-rw-r--r--  drivers/net/ethernet/broadcom/bgmac.h  5
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2.c  4
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c  17
-rw-r--r--  drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c  199
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.c  126
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt.h  17
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c  44
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h  23
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h  261
-rw-r--r--  drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c  5
-rw-r--r--  drivers/net/ethernet/broadcom/sb1250-mac.c  2
-rw-r--r--  drivers/net/ethernet/brocade/bna/bnad.c  2
-rw-r--r--  drivers/net/ethernet/cadence/macb.c  34
-rw-r--r--  drivers/net/ethernet/cadence/macb.h  74
-rw-r--r--  drivers/net/ethernet/calxeda/xgmac.c  2
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_main.c  43
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/lio_vf_main.c  36
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/liquidio_common.h  1
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_device.c  4
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c  5
-rw-r--r--  drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h  4
-rw-r--r--  drivers/net/ethernet/cavium/octeon/octeon_mgmt.c  2
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c  41
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_main.c  5
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.c  19
-rw-r--r--  drivers/net/ethernet/cavium/thunder/nicvf_queues.h  16
-rw-r--r--  drivers/net/ethernet/cavium/thunder/thunder_bgx.c  106
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb/sge.c  2
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb3/sge.c  4
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4.h  1
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c  54
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/sched.c  5
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_hw.c  57
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4_regs.h  25
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h  12
-rw-r--r--  drivers/net/ethernet/chelsio/cxgb4vf/sge.c  2
-rw-r--r--  drivers/net/ethernet/cisco/enic/enic_main.c  4
-rw-r--r--  drivers/net/ethernet/dec/tulip/interrupt.c  6
-rw-r--r--  drivers/net/ethernet/dnet.c  2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_cmds.c  2
-rw-r--r--  drivers/net/ethernet/emulex/benet/be_main.c  20
-rw-r--r--  drivers/net/ethernet/ethoc.c  4
-rw-r--r--  drivers/net/ethernet/ezchip/nps_enet.c  2
-rw-r--r--  drivers/net/ethernet/freescale/dpaa/dpaa_eth.c  2
-rw-r--r--  drivers/net/ethernet/freescale/fec_main.c  2
-rw-r--r--  drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c  2
-rw-r--r--  drivers/net/ethernet/freescale/gianfar.c  6
-rw-r--r--  drivers/net/ethernet/freescale/ucc_geth.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hip04_eth.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hisi_femac.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hix5hd2_gmac.c  2
-rw-r--r--  drivers/net/ethernet/hisilicon/hns/hns_enet.c  1
-rw-r--r--  drivers/net/ethernet/ibm/ibmveth.c  9
-rw-r--r--  drivers/net/ethernet/ibm/ibmvnic.c  2
-rw-r--r--  drivers/net/ethernet/intel/e100.c  2
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c  6
-rw-r--r--  drivers/net/ethernet/intel/igb/igb_main.c  12
-rw-r--r--  drivers/net/ethernet/intel/ixgb/ixgb_main.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_common.c  2
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c  7
-rw-r--r--  drivers/net/ethernet/jme.c  34
-rw-r--r--  drivers/net/ethernet/jme.h  6
-rw-r--r--  drivers/net/ethernet/korina.c  16
-rw-r--r--  drivers/net/ethernet/lantiq_etop.c  21
-rw-r--r--  drivers/net/ethernet/marvell/mv643xx_eth.c  2
-rw-r--r--  drivers/net/ethernet/marvell/mvneta.c  41
-rw-r--r--  drivers/net/ethernet/marvell/mvpp2.c  2
-rw-r--r--  drivers/net/ethernet/marvell/pxa168_eth.c  2
-rw-r--r--  drivers/net/ethernet/marvell/skge.c  63
-rw-r--r--  drivers/net/ethernet/marvell/sky2.c  74
-rw-r--r--  drivers/net/ethernet/marvell/sky2.h  1
-rw-r--r--  drivers/net/ethernet/mediatek/mtk_eth_soc.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/cq.c  38
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_ethtool.c  35
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_netdev.c  15
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_port.h  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_rx.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/en_tx.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/eq.c  23
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/fw.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c  6
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h  11
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/port.c  170
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/resource_tracker.c  64
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en.h  21
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_clock.c  225
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c  45
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs.c  91
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c  4
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_main.c  120
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx.c  66
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_stats.h  40
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/en_tc.c  324
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eq.c  9
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.c  109
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch.h  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c  13
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c  7
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fs_core.c  2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/fw.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/health.c  20
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/main.c  16
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/port.c  72
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/vport.c  17
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/pci_hw.h  8
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/reg.h  41
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.c  112
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum.h  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c  10
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/switchx2.c  1
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/trap.h  1
-rw-r--r--  drivers/net/ethernet/micrel/ks8851.c  7
-rw-r--r--  drivers/net/ethernet/moxa/moxart_ether.c  4
-rw-r--r--  drivers/net/ethernet/myricom/myri10ge/myri10ge.c  2
-rw-r--r--  drivers/net/ethernet/natsemi/natsemi.c  2
-rw-r--r--  drivers/net/ethernet/neterion/s2io.c  4
-rw-r--r--  drivers/net/ethernet/neterion/vxge/vxge-main.c  6
-rw-r--r--  drivers/net/ethernet/netronome/nfp/nfp_net_common.c  15
-rw-r--r--  drivers/net/ethernet/nvidia/forcedeth.c  2
-rw-r--r--  drivers/net/ethernet/nxp/lpc_eth.c  2
-rw-r--r--  drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c  2
-rw-r--r--  drivers/net/ethernet/pasemi/pasemi_mac.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_l2.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.c  88
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_ll2.h  24
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_roce.c  2
-rw-r--r--  drivers/net/ethernet/qlogic/qed/qed_sriov.c  7
-rw-r--r--  drivers/net/ethernet/qlogic/qede/qede_fp.c  6
-rw-r--r--  drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c  10
-rw-r--r--  drivers/net/ethernet/qlogic/qlge/qlge_main.c  2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.c  44
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-mac.h  1
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.c  12
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-phy.h  13
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c  2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c  2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c  2
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.c  183
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac-sgmii.h  25
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.c  31
-rw-r--r--  drivers/net/ethernet/qualcomm/emac/emac.h  8
-rw-r--r--  drivers/net/ethernet/realtek/atp.c  7
-rw-r--r--  drivers/net/ethernet/realtek/r8169.c  2
-rw-r--r--  drivers/net/ethernet/renesas/ravb.h  10
-rw-r--r--  drivers/net/ethernet/renesas/ravb_main.c  166
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.c  99
-rw-r--r--  drivers/net/ethernet/renesas/sh_eth.h  42
-rw-r--r--  drivers/net/ethernet/rocker/rocker_main.c  2
-rw-r--r--  drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c  2
-rw-r--r--  drivers/net/ethernet/sfc/ef10.c  705
-rw-r--r--  drivers/net/ethernet/sfc/ef10_sriov.c  3
-rw-r--r--  drivers/net/ethernet/sfc/efx.c  6
-rw-r--r--  drivers/net/ethernet/sfc/ethtool.c  29
-rw-r--r--  drivers/net/ethernet/sfc/falcon/efx.c  2
-rw-r--r--  drivers/net/ethernet/sfc/farch.c  16
-rw-r--r--  drivers/net/ethernet/sfc/filter.h  41
-rw-r--r--  drivers/net/ethernet/sfc/mcdi.c  19
-rw-r--r--  drivers/net/ethernet/sfc/net_driver.h  14
-rw-r--r--  drivers/net/ethernet/sfc/nic.h  20
-rw-r--r--  drivers/net/ethernet/sfc/siena.c  28
-rw-r--r--  drivers/net/ethernet/sfc/tx.c  2
-rw-r--r--  drivers/net/ethernet/smsc/smc91c92_cs.c  6
-rw-r--r--  drivers/net/ethernet/smsc/smsc9420.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/Kconfig  3
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/Kconfig  15
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c  20
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c  2
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_main.c  21
-rw-r--r--  drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c  3
-rw-r--r--  drivers/net/ethernet/sun/niu.c  2
-rw-r--r--  drivers/net/ethernet/sun/sungem.c  2
-rw-r--r--  drivers/net/ethernet/sun/sunvnet_common.c  2
-rw-r--r--  drivers/net/ethernet/synopsys/Kconfig  27
-rw-r--r--  drivers/net/ethernet/synopsys/Makefile  5
-rw-r--r--  drivers/net/ethernet/synopsys/dwc_eth_qos.c  2996
-rw-r--r--  drivers/net/ethernet/tehuti/tehuti.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpmac.c  2
-rw-r--r--  drivers/net/ethernet/ti/cpsw.c  202
-rw-r--r--  drivers/net/ethernet/ti/davinci_cpdma.c  3
-rw-r--r--  drivers/net/ethernet/ti/davinci_emac.c  2
-rw-r--r--  drivers/net/ethernet/ti/netcp_core.c  6
-rw-r--r--  drivers/net/ethernet/tile/tilegx.c  2
-rw-r--r--  drivers/net/ethernet/tile/tilepro.c  6
-rw-r--r--  drivers/net/ethernet/toshiba/ps3_gelic_net.c  2
-rw-r--r--  drivers/net/ethernet/toshiba/spider_net.c  2
-rw-r--r--  drivers/net/ethernet/toshiba/tc35815.c  2
-rw-r--r--  drivers/net/ethernet/tundra/tsi108_eth.c  2
-rw-r--r--  drivers/net/ethernet/via/via-rhine.c  2
-rw-r--r--  drivers/net/ethernet/via/via-velocity.c  2
-rw-r--r--  drivers/net/ethernet/wiznet/w5100.c  2
-rw-r--r--  drivers/net/ethernet/wiznet/w5300.c  2
-rw-r--r--  drivers/net/ethernet/xilinx/xilinx_emaclite.c  4
-rw-r--r--  drivers/net/fjes/fjes_main.c  2
-rw-r--r--  drivers/net/gtp.c  37
-rw-r--r--  drivers/net/hyperv/hyperv_net.h  216
-rw-r--r--  drivers/net/hyperv/netvsc.c  321
-rw-r--r--  drivers/net/hyperv/netvsc_drv.c  572
-rw-r--r--  drivers/net/hyperv/rndis_filter.c  338
-rw-r--r--  drivers/net/ieee802154/at86rf230.c  4
-rw-r--r--  drivers/net/ieee802154/atusb.c  59
-rw-r--r--  drivers/net/ipvlan/ipvlan_main.c  7
-rw-r--r--  drivers/net/irda/bfin_sir.c  5
-rw-r--r--  drivers/net/irda/sh_sir.c  1
-rw-r--r--  drivers/net/macvlan.c  2
-rw-r--r--  drivers/net/macvtap.c  2
-rw-r--r--  drivers/net/phy/Kconfig  1
-rw-r--r--  drivers/net/phy/bcm63xx.c  21
-rw-r--r--  drivers/net/phy/bcm7xxx.c  36
-rw-r--r--  drivers/net/phy/broadcom.c  42
-rw-r--r--  drivers/net/phy/dp83848.c  3
-rw-r--r--  drivers/net/phy/dp83867.c  8
-rw-r--r--  drivers/net/phy/marvell.c  429
-rw-r--r--  drivers/net/phy/mdio-gpio.c  60
-rw-r--r--  drivers/net/phy/micrel.c  14
-rw-r--r--  drivers/net/phy/phy.c  15
-rw-r--r--  drivers/net/phy/phy_led_triggers.c  9
-rw-r--r--  drivers/net/tun.c  78
-rw-r--r--  drivers/net/usb/cdc_ether.c  10
-rw-r--r--  drivers/net/usb/qmi_wwan.c  7
-rw-r--r--  drivers/net/usb/r8152.c  117
-rw-r--r--  drivers/net/virtio_net.c  37
-rw-r--r--  drivers/net/vmxnet3/vmxnet3_drv.c  4
-rw-r--r--  drivers/net/vrf.c  4
-rw-r--r--  drivers/net/vxlan.c  27
-rw-r--r--  drivers/net/wan/fsl_ucc_hdlc.c  2
-rw-r--r--  drivers/net/wan/hd64572.c  2
-rw-r--r--  drivers/net/wireless/ath/ath10k/htt_rx.c  3
-rw-r--r--  drivers/net/wireless/ath/ath10k/pci.c  2
-rw-r--r--  drivers/net/wireless/ath/wil6210/netdev.c  2
-rw-r--r--  drivers/net/wireless/marvell/mwifiex/cfg80211.c  2
-rw-r--r--  drivers/net/wireless/realtek/rtlwifi/usb.c  1
-rw-r--r--  drivers/net/xen-netback/interface.c  8
-rw-r--r--  drivers/net/xen-netback/xenbus.c  13
-rw-r--r--  drivers/net/xen-netfront.c  4
-rw-r--r--  drivers/nvdimm/namespace_devs.c  23
-rw-r--r--  drivers/nvdimm/pmem.c  4
-rw-r--r--  drivers/nvme/host/core.c  7
-rw-r--r--  drivers/nvme/host/fc.c  11
-rw-r--r--  drivers/nvme/host/nvme.h  8
-rw-r--r--  drivers/nvme/host/pci.c  19
-rw-r--r--  drivers/nvme/host/rdma.c  15
-rw-r--r--  drivers/nvme/target/configfs.c  1
-rw-r--r--  drivers/nvme/target/core.c  15
-rw-r--r--  drivers/nvme/target/fc.c  36
-rw-r--r--  drivers/nvme/target/nvmet.h  1
-rw-r--r--  drivers/nvme/target/rdma.c  17
-rw-r--r--  drivers/pci/host/pci-xgene-msi.c  2
-rw-r--r--  drivers/pci/host/pcie-designware.c  10
-rw-r--r--  drivers/pci/pci-driver.c  6
-rw-r--r--  drivers/pci/probe.c  12
-rw-r--r--  drivers/pinctrl/intel/pinctrl-baytrail.c  39
-rw-r--r--  drivers/pinctrl/intel/pinctrl-broxton.c  2
-rw-r--r--  drivers/pinctrl/intel/pinctrl-intel.c  30
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxbb.c  7
-rw-r--r--  drivers/pinctrl/meson/pinctrl-meson-gxl.c  7
-rw-r--r--  drivers/pinctrl/pinctrl-amd.c  2
-rw-r--r--  drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c  2
-rw-r--r--  drivers/platform/x86/ideapad-laptop.c  1
-rw-r--r--  drivers/platform/x86/intel_mid_powerbtn.c  2
-rw-r--r--  drivers/platform/x86/mlx-platform.c  2
-rw-r--r--  drivers/platform/x86/surface3-wmi.c  6
-rw-r--r--  drivers/remoteproc/remoteproc_core.c  29
-rw-r--r--  drivers/rpmsg/rpmsg_core.c  4
-rw-r--r--  drivers/s390/net/qeth_core.h  5
-rw-r--r--  drivers/s390/net/qeth_core_main.c  135
-rw-r--r--  drivers/s390/net/qeth_core_mpc.h  17
-rw-r--r--  drivers/s390/net/qeth_l2_main.c  189
-rw-r--r--  drivers/s390/net/qeth_l3_main.c  15
-rw-r--r--  drivers/s390/net/qeth_l3_sys.c  33
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c  29
-rw-r--r--  drivers/scsi/bfa/bfad.c  6
-rw-r--r--  drivers/scsi/bfa/bfad_bsg.c  2
-rw-r--r--  drivers/scsi/bfa/bfad_drv.h  2
-rw-r--r--  drivers/scsi/fnic/fnic.h  1
-rw-r--r--  drivers/scsi/fnic/fnic_scsi.c  16
-rw-r--r--  drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c  9
-rw-r--r--  drivers/scsi/lpfc/lpfc_els.c  2
-rw-r--r--  drivers/scsi/lpfc/lpfc_sli.c  7
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_base.h  12
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_scsih.c  40
-rw-r--r--  drivers/scsi/qedi/Kconfig  2
-rw-r--r--  drivers/scsi/qla2xxx/qla_attr.c  21
-rw-r--r--  drivers/scsi/qla2xxx/qla_def.h  6
-rw-r--r--  drivers/scsi/qla2xxx/qla_init.c  4
-rw-r--r--  drivers/scsi/qla2xxx/qla_isr.c  92
-rw-r--r--  drivers/scsi/qla2xxx/qla_mbx.c  37
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.c  5
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx.h  3
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.c  17
-rw-r--r--  drivers/scsi/qla2xxx/qla_nx2.h  17
-rw-r--r--  drivers/scsi/qla2xxx/qla_os.c  18
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.c  57
-rw-r--r--  drivers/scsi/qla2xxx/qla_target.h  22
-rw-r--r--  drivers/scsi/qla2xxx/qla_tmpl.c  24
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.c  4
-rw-r--r--  drivers/scsi/qla2xxx/tcm_qla2xxx.h  1
-rw-r--r--  drivers/scsi/scsi_lib.c  4
-rw-r--r--  drivers/scsi/sd.c  29
-rw-r--r--  drivers/scsi/ses.c  2
-rw-r--r--  drivers/scsi/snic/snic_main.c  3
-rw-r--r--  drivers/soc/ti/wkup_m3_ipc.c  1
-rw-r--r--  drivers/spi/Kconfig  1
-rw-r--r--  drivers/spi/spi-armada-3700.c  11
-rw-r--r--  drivers/spi/spi-axi-spi-engine.c  3
-rw-r--r--  drivers/spi/spi-davinci.c  4
-rw-r--r--  drivers/spi/spi-dw-mid.c  4
-rw-r--r--  drivers/spi/spi-dw.c  5
-rw-r--r--  drivers/spi/spi-pxa2xx.c  1
-rw-r--r--  drivers/spi/spi-sh-msiof.c  4
-rw-r--r--  drivers/staging/ks7010/ks_hostif.c  2
-rw-r--r--  drivers/staging/netlogic/xlr_net.c  1
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c  2
-rw-r--r--  drivers/staging/rtl8192e/rtllib_rx.c  1
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c  4
-rw-r--r--  drivers/staging/unisys/visornic/visornic_main.c  2
-rw-r--r--  drivers/staging/wlan-ng/hfa384x_usb.c  1
-rw-r--r--  drivers/staging/wlan-ng/p80211netdev.c  2
-rw-r--r--  drivers/target/target_core_transport.c  24
-rw-r--r--  drivers/target/target_core_xcopy.c  157
-rw-r--r--  drivers/target/target_core_xcopy.h  7
-rw-r--r--  drivers/thermal/rockchip_thermal.c  153
-rw-r--r--  drivers/thermal/thermal_core.c  10
-rw-r--r--  drivers/tty/serial/8250/8250_core.c  2
-rw-r--r--  drivers/tty/serial/8250/8250_pci.c  12
-rw-r--r--  drivers/tty/serial/8250/8250_port.c  2
-rw-r--r--  drivers/tty/serial/atmel_serial.c  22
-rw-r--r--  drivers/tty/sysrq.c  4
-rw-r--r--  drivers/usb/dwc2/core.h  4
-rw-r--r--  drivers/usb/dwc2/gadget.c  18
-rw-r--r--  drivers/usb/dwc2/hcd.c  7
-rw-r--r--  drivers/usb/dwc2/params.c  10
-rw-r--r--  drivers/usb/dwc3/dwc3-exynos.c  4
-rw-r--r--  drivers/usb/gadget/composite.c  2
-rw-r--r--  drivers/usb/gadget/function/f_fs.c  12
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.c  3
-rw-r--r--  drivers/usb/gadget/udc/atmel_usba_udc.h  1
-rw-r--r--  drivers/usb/host/xhci-plat.c  2
-rw-r--r--  drivers/usb/host/xhci-ring.c  11
-rw-r--r--  drivers/usb/host/xhci.c  13
-rw-r--r--  drivers/usb/musb/musb_debugfs.c  20
-rw-r--r--  drivers/usb/serial/ch341.c  108
-rw-r--r--  drivers/usb/serial/kl5kusb105.c  9
-rw-r--r--  drivers/usb/wusbcore/crypto.c  3
-rw-r--r--  drivers/vfio/vfio_iommu_spapr_tce.c  4
-rw-r--r--  drivers/vfio/vfio_iommu_type1.c  4
-rw-r--r--  drivers/vhost/net.c  23
-rw-r--r--  drivers/vhost/scsi.c  4
-rw-r--r--  drivers/vhost/vhost.c  8
-rw-r--r--  drivers/vhost/vsock.c  13
-rw-r--r--  drivers/video/fbdev/core/fbcmap.c  26
-rw-r--r--  drivers/virtio/virtio_mmio.c  20
-rw-r--r--  drivers/virtio/virtio_ring.c  7
-rw-r--r--  drivers/vme/bridges/vme_ca91cx42.c  2
-rw-r--r--  drivers/xen/platform-pci.c  71
-rw-r--r--  drivers/xen/swiotlb-xen.c  5
-rw-r--r--  fs/Kconfig  1
-rw-r--r--  fs/afs/file.c  2
-rw-r--r--  fs/aio.c  6
-rw-r--r--  fs/binfmt_elf.c  1
-rw-r--r--  fs/block_dev.c  6
-rw-r--r--  fs/btrfs/async-thread.c  15
-rw-r--r--  fs/btrfs/extent-tree.c  8
-rw-r--r--  fs/btrfs/inode.c  39
-rw-r--r--  fs/btrfs/tree-log.c  13
-rw-r--r--  fs/btrfs/uuid-tree.c  4
-rw-r--r--  fs/ceph/addr.c  4
-rw-r--r--  fs/ceph/caps.c  7
-rw-r--r--  fs/ceph/dir.c  5
-rw-r--r--  fs/ceph/inode.c  3
-rw-r--r--  fs/ceph/mds_client.c  14
-rw-r--r--  fs/coredump.c  18
-rw-r--r--  fs/dax.c  53
-rw-r--r--  fs/dcache.c  7
-rw-r--r--  fs/direct-io.c  3
-rw-r--r--  fs/ext2/Kconfig  1
-rw-r--r--  fs/ext4/Kconfig  1
-rw-r--r--  fs/f2fs/segment.c  4
-rw-r--r--  fs/f2fs/super.c  6
-rw-r--r--  fs/fuse/dev.c  3
-rw-r--r--  fs/fuse/dir.c  2
-rw-r--r--  fs/libfs.c  3
-rw-r--r--  fs/namespace.c  64
-rw-r--r--  fs/nfs/nfs4proc.c  29
-rw-r--r--  fs/nfs/nfs4state.c  1
-rw-r--r--  fs/nfsd/nfs4xdr.c  4
-rw-r--r--  fs/ocfs2/dlmglue.c  10
-rw-r--r--  fs/ocfs2/stackglue.c  6
-rw-r--r--  fs/ocfs2/stackglue.h  3
-rw-r--r--  fs/overlayfs/namei.c  27
-rw-r--r--  fs/posix_acl.c  9
-rw-r--r--  fs/proc/base.c  2
-rw-r--r--  fs/proc/proc_sysctl.c  3
-rw-r--r--  fs/romfs/super.c  23
-rw-r--r--  fs/ubifs/Kconfig  2
-rw-r--r--  fs/ubifs/dir.c  58
-rw-r--r--  fs/ubifs/ioctl.c  3
-rw-r--r--  fs/ubifs/journal.c  2
-rw-r--r--  fs/ubifs/tnc.c  25
-rw-r--r--  fs/userfaultfd.c  37
-rw-r--r--  fs/xfs/libxfs/xfs_ag_resv.c  70
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.c  115
-rw-r--r--  fs/xfs/libxfs/xfs_alloc.h  2
-rw-r--r--  fs/xfs/libxfs/xfs_attr.c  6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.c  51
-rw-r--r--  fs/xfs/libxfs/xfs_bmap.h  6
-rw-r--r--  fs/xfs/libxfs/xfs_bmap_btree.c  3
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.c  39
-rw-r--r--  fs/xfs/libxfs/xfs_dir2.h  8
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.c  90
-rw-r--r--  fs/xfs/libxfs/xfs_ialloc_btree.h  3
-rw-r--r--  fs/xfs/libxfs/xfs_inode_buf.c  10
-rw-r--r--  fs/xfs/libxfs/xfs_sb.c  2
-rw-r--r--  fs/xfs/xfs_aops.c  19
-rw-r--r--  fs/xfs/xfs_bmap_util.c  28
-rw-r--r--  fs/xfs/xfs_buf.c  1
-rw-r--r--  fs/xfs/xfs_dquot.c  4
-rw-r--r--  fs/xfs/xfs_inode.c  23
-rw-r--r--  fs/xfs/xfs_iomap.c  2
-rw-r--r--  fs/xfs/xfs_iops.c  50
-rw-r--r--  fs/xfs/xfs_linux.h  6
-rw-r--r--  fs/xfs/xfs_log.c  12
-rw-r--r--  fs/xfs/xfs_mount.h  1
-rw-r--r--  fs/xfs/xfs_qm.c  3
-rw-r--r--  include/drm/drm_atomic.h  2
-rw-r--r--  include/drm/drm_mode_config.h  2
-rw-r--r--  include/kvm/arm_arch_timer.h  1
-rw-r--r--  include/linux/blkdev.h  19
-rw-r--r--  include/linux/bpf.h  11
-rw-r--r--  include/linux/bpf_trace.h  7
-rw-r--r--  include/linux/brcmphy.h  18
-rw-r--r--  include/linux/can/dev.h  8
-rw-r--r--  include/linux/coredump.h  1
-rw-r--r--  include/linux/cpuhotplug.h  2
-rw-r--r--  include/linux/device.h  11
-rw-r--r--  include/linux/efi.h  2
-rw-r--r--  include/linux/etherdevice.h  5
-rw-r--r--  include/linux/filter.h  6
-rw-r--r--  include/linux/gfp.h  22
-rw-r--r--  include/linux/gpio/driver.h  70
-rw-r--r--  include/linux/i2c.h  1
-rw-r--r--  include/linux/ieee80211.h  2
-rw-r--r--  include/linux/if_bridge.h  1
-rw-r--r--  include/linux/if_frad.h  2
-rw-r--r--  include/linux/ipv6.h  1
-rw-r--r--  include/linux/jump_label_ratelimit.h  5
-rw-r--r--  include/linux/kernel.h  4
-rw-r--r--  include/linux/mdio.h  2
-rw-r--r--  include/linux/memcontrol.h  26
-rw-r--r--  include/linux/memory_hotplug.h  4
-rw-r--r--  include/linux/micrel_phy.h  2
-rw-r--r--  include/linux/mlx4/device.h  8
-rw-r--r--  include/linux/mlx5/device.h  79
-rw-r--r--  include/linux/mlx5/driver.h  14
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h  215
-rw-r--r--  include/linux/mlx5/vport.h  1
-rw-r--r--  include/linux/mm.h  4
-rw-r--r--  include/linux/mm_inline.h  2
-rw-r--r--  include/linux/mmzone.h  6
-rw-r--r--  include/linux/mroute.h  57
-rw-r--r--  include/linux/mroute6.h  2
-rw-r--r--  include/linux/netdevice.h  46
-rw-r--r--  include/linux/nmi.h  1
-rw-r--r--  include/linux/pci.h  2
-rw-r--r--  include/linux/perf_event.h  1
-rw-r--r--  include/linux/phy.h  9
-rw-r--r--  include/linux/phy_led_triggers.h  4
-rw-r--r--  include/linux/rcupdate.h  4
-rw-r--r--  include/linux/remoteproc.h  4
-rw-r--r--  include/linux/sched.h  10
-rw-r--r--  include/linux/sctp.h  27
-rw-r--r--  include/linux/skbuff.h  2
-rw-r--r--  include/linux/slab.h  4
-rw-r--r--  include/linux/sunrpc/svc_xprt.h  1
-rw-r--r--  include/linux/suspend.h  2
-rw-r--r--  include/linux/swap.h  3
-rw-r--r--  include/linux/tcp.h  18
-rw-r--r--  include/linux/timerfd.h  20
-rw-r--r--  include/linux/trace_events.h  3
-rw-r--r--  include/linux/virtio_net.h  6
-rw-r--r--  include/net/act_api.h  1
-rw-r--r--  include/net/addrconf.h  4
-rw-r--r--  include/net/cfg80211.h  135
-rw-r--r--  include/net/checksum.h  2
-rw-r--r--  include/net/dsa.h  79
-rw-r--r--  include/net/dst_ops.h  9
-rw-r--r--  include/net/flow_dissector.h  19
-rw-r--r--  include/net/if_inet6.h  1
-rw-r--r--  include/net/inet6_connection_sock.h  5
-rw-r--r--  include/net/inet_common.h  2
-rw-r--r--  include/net/inet_connection_sock.h  10
-rw-r--r--  include/net/inet_frag.h  8
-rw-r--r--  include/net/inet_hashtables.h  15
-rw-r--r--  include/net/inet_sock.h  6
-rw-r--r--  include/net/ip.h  10
-rw-r--r--  include/net/ipv6.h  2
-rw-r--r--  include/net/iw_handler.h  66
-rw-r--r--  include/net/lwtunnel.h  19
-rw-r--r--  include/net/mac80211.h  6
-rw-r--r--  include/net/netfilter/nf_tables.h  6
-rw-r--r--  include/net/netfilter/nft_fib.h  6
-rw-r--r--  include/net/netns/ipv4.h  5
-rw-r--r--  include/net/netns/sctp.h  3
-rw-r--r--  include/net/pkt_cls.h  8
-rw-r--r--  include/net/psample.h  36
-rw-r--r--  include/net/rtnetlink.h  4
-rw-r--r--  include/net/sctp/constants.h  4
-rw-r--r--  include/net/sctp/sctp.h  8
-rw-r--r--  include/net/sctp/sm.h  8
-rw-r--r--  include/net/sctp/structs.h  21
-rw-r--r--  include/net/sock.h  5
-rw-r--r--  include/net/tc_act/tc_sample.h  50
-rw-r--r--  include/net/tcp.h  43
-rw-r--r--  include/net/udp.h  1
-rw-r--r--  include/net/xfrm.h  4
-rw-r--r--  include/rdma/ib_verbs.h  14
-rw-r--r--  include/scsi/libfc.h  6
-rw-r--r--  include/sound/hdmi-codec.h  8
-rw-r--r--  include/sound/soc.h  3
-rw-r--r--  include/target/target_core_base.h  4
-rw-r--r--  include/trace/events/bpf.h  347
-rw-r--r--  include/trace/events/btrfs.h  146
-rw-r--r--  include/trace/events/mmflags.h  3
-rw-r--r--  include/trace/events/xdp.h  53
-rw-r--r--  include/trace/trace_events.h  8
-rw-r--r--  include/uapi/linux/Kbuild  7
-rw-r--r--  include/uapi/linux/batman_adv.h  2
-rw-r--r--  include/uapi/linux/bpf.h  23
-rw-r--r--  include/uapi/linux/can/netlink.h  7
-rw-r--r--  include/uapi/linux/cec-funcs.h  10
-rw-r--r--  include/uapi/linux/if_link.h  2
-rw-r--r--  include/uapi/linux/igmp.h  4
-rw-r--r--  include/uapi/linux/ipv6.h  1
-rw-r--r--  include/uapi/linux/mpls.h  30
-rw-r--r--  include/uapi/linux/netfilter/nf_log.h  2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h  4
-rw-r--r--  include/uapi/linux/nl80211.h  68
-rw-r--r--  include/uapi/linux/pkt_cls.h  16
-rw-r--r--  include/uapi/linux/psample.h  35
-rw-r--r--  include/uapi/linux/sctp.h  18
-rw-r--r--  include/uapi/linux/seg6.h  2
-rw-r--r--  include/uapi/linux/seg6_hmac.h  1
-rw-r--r--  include/uapi/linux/seg6_iptunnel.h  4
-rw-r--r--  include/uapi/linux/tc_act/Kbuild  1
-rw-r--r--  include/uapi/linux/tc_act/tc_bpf.h  2
-rw-r--r--  include/uapi/linux/tc_act/tc_sample.h  26
-rw-r--r--  include/uapi/linux/tcp.h  3
-rw-r--r--  include/uapi/linux/timerfd.h  36
-rw-r--r--  include/uapi/linux/tipc.h  6
-rw-r--r--  include/uapi/rdma/Kbuild  1
-rw-r--r--  include/uapi/rdma/cxgb3-abi.h  2
-rw-r--r--  include/uapi/rdma/mlx5-abi.h  14
-rw-r--r--  init/Kconfig  4
-rw-r--r--  ipc/sem.c  2
-rw-r--r--  kernel/bpf/Makefile  2
-rw-r--r--  kernel/bpf/arraymap.c  20
-rw-r--r--  kernel/bpf/core.c  23
-rw-r--r--  kernel/bpf/hashtab.c  24
-rw-r--r--  kernel/bpf/inode.c  17
-rw-r--r--  kernel/bpf/lpm_trie.c  503
-rw-r--r--  kernel/bpf/stackmap.c  20
-rw-r--r--  kernel/bpf/syscall.c  53
-rw-r--r--  kernel/bpf/verifier.c  77
-rw-r--r--  kernel/capability.c  1
-rw-r--r--  kernel/cpu.c  24
-rw-r--r--  kernel/events/core.c  175
-rw-r--r--  kernel/jump_label.c  7
-rw-r--r--  kernel/memremap.c  4
-rw-r--r--  kernel/module.c  2
-rw-r--r--  kernel/panic.c  4
-rw-r--r--  kernel/pid_namespace.c  10
-rw-r--r--  kernel/power/suspend.c  4
-rw-r--r--  kernel/rcu/rcu.h  1
-rw-r--r--  kernel/rcu/tiny.c  4
-rw-r--r--  kernel/rcu/tiny_plugin.h  9
-rw-r--r--  kernel/rcu/tree.c  33
-rw-r--r--  kernel/rcu/tree_exp.h  52
-rw-r--r--  kernel/rcu/tree_plugin.h  2
-rw-r--r--  kernel/rcu/update.c  38
-rw-r--r--  kernel/signal.c  4
-rw-r--r--  kernel/sysctl.c  1
-rw-r--r--  kernel/time/tick-sched.c  9
-rw-r--r--  kernel/time/tick-sched.h  2
-rw-r--r--  kernel/trace/bpf_trace.c  56
-rw-r--r--  kernel/trace/trace_output.c  7
-rw-r--r--  kernel/ucount.c  14
-rw-r--r--  kernel/watchdog.c  9
-rw-r--r--  kernel/watchdog_hld.c  3
-rw-r--r--  lib/Kconfig.debug  2
-rw-r--r--  lib/ioremap.c  1
-rw-r--r--  lib/iov_iter.c  54
-rw-r--r--  lib/radix-tree.c  2
-rw-r--r--  lib/swiotlb.c  6
-rw-r--r--  mm/filemap.c  2
-rw-r--r--  mm/huge_memory.c  27
-rw-r--r--  mm/hugetlb.c  37
-rw-r--r--  mm/khugepaged.c  26
-rw-r--r--  mm/memcontrol.c  22
-rw-r--r--  mm/memory.c  41
-rw-r--r--  mm/memory_hotplug.c  28
-rw-r--r--  mm/mempolicy.c  2
-rw-r--r--  mm/page_alloc.c  118
-rw-r--r--  mm/slab.c  8
-rw-r--r--  mm/slub.c  23
-rw-r--r--  mm/swapfile.c  20
-rw-r--r--  mm/vmscan.c  27
-rw-r--r--  net/6lowpan/nhc.c  8
-rw-r--r--  net/Kconfig  5
-rw-r--r--  net/Makefile  1
-rw-r--r--  net/ax25/ax25_subr.c  2
-rw-r--r--  net/batman-adv/Makefile  2
-rw-r--r--  net/batman-adv/bat_algo.c  2
-rw-r--r--  net/batman-adv/bat_algo.h  2
-rw-r--r--  net/batman-adv/bat_iv_ogm.c  2
-rw-r--r--  net/batman-adv/bat_iv_ogm.h  2
-rw-r--r--  net/batman-adv/bat_v.c  2
-rw-r--r--  net/batman-adv/bat_v.h  2
-rw-r--r--  net/batman-adv/bat_v_elp.c  2
-rw-r--r--  net/batman-adv/bat_v_elp.h  2
-rw-r--r--  net/batman-adv/bat_v_ogm.c  2
-rw-r--r--  net/batman-adv/bat_v_ogm.h  2
-rw-r--r--  net/batman-adv/bitarray.c  2
-rw-r--r--  net/batman-adv/bitarray.h  2
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.c  3
-rw-r--r--  net/batman-adv/bridge_loop_avoidance.h  20
-rw-r--r--  net/batman-adv/debugfs.c  4
-rw-r--r--  net/batman-adv/debugfs.h  2
-rw-r--r--  net/batman-adv/distributed-arp-table.c  3
-rw-r--r--  net/batman-adv/distributed-arp-table.h  2
-rw-r--r--  net/batman-adv/fragmentation.c  12
-rw-r--r--  net/batman-adv/fragmentation.h  2
-rw-r--r--  net/batman-adv/gateway_client.c  2
-rw-r--r--  net/batman-adv/gateway_client.h  2
-rw-r--r--  net/batman-adv/gateway_common.c  2
-rw-r--r--  net/batman-adv/gateway_common.h  2
-rw-r--r--  net/batman-adv/hard-interface.c  2
-rw-r--r--  net/batman-adv/hard-interface.h  2
-rw-r--r--  net/batman-adv/hash.c  2
-rw-r--r--  net/batman-adv/hash.h  2
-rw-r--r--  net/batman-adv/icmp_socket.c  2
-rw-r--r--  net/batman-adv/icmp_socket.h  2
-rw-r--r--  net/batman-adv/log.c  2
-rw-r--r--  net/batman-adv/log.h  2
-rw-r--r--  net/batman-adv/main.c  2
-rw-r--r--  net/batman-adv/main.h  4
-rw-r--r--  net/batman-adv/multicast.c  2
-rw-r--r--  net/batman-adv/multicast.h  2
-rw-r--r--  net/batman-adv/netlink.c  2
-rw-r--r--  net/batman-adv/netlink.h  2
-rw-r--r--  net/batman-adv/network-coding.c  2
-rw-r--r--  net/batman-adv/network-coding.h  2
-rw-r--r--  net/batman-adv/originator.c  2
-rw-r--r--  net/batman-adv/originator.h  2
-rw-r--r--  net/batman-adv/packet.h  2
-rw-r--r--  net/batman-adv/routing.c  11
-rw-r--r--  net/batman-adv/routing.h  2
-rw-r--r--  net/batman-adv/send.c  6
-rw-r--r--  net/batman-adv/send.h  2
-rw-r--r--  net/batman-adv/soft-interface.c  7
-rw-r--r--  net/batman-adv/soft-interface.h  2
-rw-r--r--  net/batman-adv/sysfs.c  2
-rw-r--r--  net/batman-adv/sysfs.h  2
-rw-r--r--  net/batman-adv/tp_meter.c  4
-rw-r--r--  net/batman-adv/tp_meter.h  2
-rw-r--r--  net/batman-adv/translation-table.c  4
-rw-r--r--  net/batman-adv/translation-table.h  2
-rw-r--r--  net/batman-adv/tvlv.c  2
-rw-r--r--  net/batman-adv/tvlv.h  2
-rw-r--r--  net/batman-adv/types.h  2
-rw-r--r--  net/bridge/br_forward.c  39
-rw-r--r--  net/bridge/br_mdb.c  2
-rw-r--r--  net/bridge/br_multicast.c  92
-rw-r--r--  net/bridge/br_netlink.c  38
-rw-r--r--  net/bridge/br_private.h  3
-rw-r--r--  net/bridge/br_sysfs_if.c  2
-rw-r--r--  net/caif/chnl_net.c  1
-rw-r--r--  net/ceph/crypto.c  2
-rw-r--r--  net/core/dev.c  48
-rw-r--r--  net/core/ethtool.c  41
-rw-r--r--  net/core/filter.c  207
-rw-r--r--  net/core/flow_dissector.c  61
-rw-r--r--  net/core/lwt_bpf.c  5
-rw-r--r--  net/core/lwtunnel.c  68
-rw-r--r--  net/core/rtnetlink.c  60
-rw-r--r--  net/core/secure_seq.c  4
-rw-r--r--  net/core/skbuff.c  22
-rw-r--r--  net/core/sock.c  6
-rw-r--r--  net/dccp/ipv4.c  1
-rw-r--r--  net/dccp/ipv6.c  6
-rw-r--r--  net/dsa/Kconfig  11
-rw-r--r--  net/dsa/Makefile  1
-rw-r--r--  net/dsa/dsa.c  49
-rw-r--r--  net/dsa/dsa2.c  155
-rw-r--r--  net/dsa/dsa_priv.h  25
-rw-r--r--  net/dsa/hwmon.c  147
-rw-r--r--  net/dsa/slave.c  367
-rw-r--r--  net/dsa/tag_brcm.c  11
-rw-r--r--  net/dsa/tag_dsa.c  10
-rw-r--r--  net/dsa/tag_edsa.c  10
-rw-r--r--  net/dsa/tag_qca.c  4
-rw-r--r--  net/dsa/tag_trailer.c  6
-rw-r--r--  net/ethernet/eth.c  28
-rw-r--r--  net/ipv4/af_inet.c  40
-rw-r--r--  net/ipv4/ah4.c  3
-rw-r--r--  net/ipv4/esp4.c  332
-rw-r--r--  net/ipv4/fib_frontend.c  8
-rw-r--r--  net/ipv4/fib_semantics.c  47
-rw-r--r--  net/ipv4/inet_connection_sock.c  278
-rw-r--r--  net/ipv4/inet_diag.c  2
-rw-r--r--  net/ipv4/inet_hashtables.c  19
-rw-r--r--  net/ipv4/ip_output.c  1
-rw-r--r--  net/ipv4/ip_tunnel_core.c  6
-rw-r--r--  net/ipv4/ipmr.c  255
-rw-r--r--  net/ipv4/netfilter/ipt_CLUSTERIP.c  7
-rw-r--r--  net/ipv4/netfilter/ipt_rpfilter.c  8
-rw-r--r--  net/ipv4/netfilter/nf_reject_ipv4.c  2
-rw-r--r--  net/ipv4/netfilter/nft_fib_ipv4.c  15
-rw-r--r--  net/ipv4/ping.c  6
-rw-r--r--  net/ipv4/proc.c  2
-rw-r--r--  net/ipv4/route.c  2
-rw-r--r--  net/ipv4/sysctl_net_ipv4.c  70
-rw-r--r--  net/ipv4/tcp.c  54
-rw-r--r--  net/ipv4/tcp_fastopen.c  57
-rw-r--r--  net/ipv4/tcp_input.c  255
-rw-r--r--  net/ipv4/tcp_ipv4.c  15
-rw-r--r--  net/ipv4/tcp_metrics.c  2
-rw-r--r--  net/ipv4/tcp_minisocks.c  1
-rw-r--r--  net/ipv4/tcp_output.c  109
-rw-r--r--  net/ipv4/tcp_recovery.c  148
-rw-r--r--  net/ipv4/tcp_timer.c  4
-rw-r--r--  net/ipv4/udp.c  85
-rw-r--r--  net/ipv4/xfrm4_state.c  8
-rw-r--r--  net/ipv6/addrconf.c  113
-rw-r--r--  net/ipv6/af_inet6.c  3
-rw-r--r--  net/ipv6/ah6.c  3
-rw-r--r--  net/ipv6/esp6.c  318
-rw-r--r--  net/ipv6/ila/ila_lwt.c  3
-rw-r--r--  net/ipv6/inet6_connection_sock.c  42
-rw-r--r--  net/ipv6/inet6_hashtables.c  46
-rw-r--r--  net/ipv6/ip6_fib.c  3
-rw-r--r--  net/ipv6/ip6_gre.c  11
-rw-r--r--  net/ipv6/ip6_offload.c  1
-rw-r--r--  net/ipv6/ip6_output.c  4
-rw-r--r--  net/ipv6/ip6_tunnel.c  40
-rw-r--r--  net/ipv6/ip6_vti.c  4
-rw-r--r--  net/ipv6/ip6mr.c  9
-rw-r--r--  net/ipv6/mcast.c  51
-rw-r--r--  net/ipv6/netfilter/ip6t_rpfilter.c  8
-rw-r--r--  net/ipv6/netfilter/nf_reject_ipv6.c  3
-rw-r--r--  net/ipv6/netfilter/nft_fib_ipv6.c  13
-rw-r--r--  net/ipv6/route.c  86
-rw-r--r--  net/ipv6/seg6.c  2
-rw-r--r--  net/ipv6/seg6_hmac.c  48
-rw-r--r--  net/ipv6/seg6_iptunnel.c  7
-rw-r--r--  net/ipv6/tcp_ipv6.c  14
-rw-r--r--  net/ipv6/udp.c  29
-rw-r--r--  net/iucv/af_iucv.c  25
-rw-r--r--  net/mac80211/cfg.c  12
-rw-r--r--  net/mac80211/chan.c  7
-rw-r--r--  net/mac80211/debugfs.c  27
-rw-r--r--  net/mac80211/debugfs_netdev.c  3
-rw-r--r--  net/mac80211/ieee80211_i.h  6
-rw-r--r--  net/mac80211/iface.c  21
-rw-r--r--  net/mac80211/main.c  13
-rw-r--r--  net/mac80211/mesh.c  7
-rw-r--r--  net/mac80211/mesh.h  2
-rw-r--r--  net/mac80211/mesh_plink.c  14
-rw-r--r--  net/mac80211/mesh_sync.c  27
-rw-r--r--  net/mac80211/mlme.c  4
-rw-r--r--  net/mac80211/rc80211_minstrel.c  21
-rw-r--r--  net/mac80211/rc80211_minstrel.h  33
-rw-r--r--  net/mac80211/rc80211_minstrel_debugfs.c  24
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.c  68
-rw-r--r--  net/mac80211/rc80211_minstrel_ht.h  6
-rw-r--r--  net/mac80211/rc80211_minstrel_ht_debugfs.c  32
-rw-r--r--  net/mac80211/rx.c  42
-rw-r--r--  net/mac80211/scan.c  8
-rw-r--r--  net/mac80211/sta_info.c  22
-rw-r--r--  net/mac80211/status.c  8
-rw-r--r--  net/mac80211/tx.c  149
-rw-r--r--  net/mac80211/vht.c  8
-rw-r--r--  net/mac80211/wep.c  3
-rw-r--r--  net/mac80211/wpa.c  3
-rw-r--r--  net/mpls/af_mpls.c  227
-rw-r--r--  net/mpls/internal.h  58
-rw-r--r--  net/mpls/mpls_iptunnel.c  14
-rw-r--r--  net/netfilter/Kconfig  2
-rw-r--r--  net/netfilter/ipvs/ip_vs_ctl.c  7
-rw-r--r--  net/netfilter/nf_conntrack_core.c  44
-rw-r--r--  net/netfilter/nf_log.c  1
-rw-r--r--  net/netfilter/nf_tables_api.c  67
-rw-r--r--  net/netfilter/nft_dynset.c  3
-rw-r--r--  net/netfilter/nft_log.c  3
-rw-r--r--  net/netfilter/nft_lookup.c  3
-rw-r--r--  net/netfilter/nft_objref.c  6
-rw-r--r--  net/netfilter/nft_set_hash.c  2
-rw-r--r--  net/netfilter/nft_set_rbtree.c  2
-rw-r--r--  net/netlink/af_netlink.c  6
-rw-r--r--  net/openvswitch/actions.c  42
-rw-r--r--  net/openvswitch/conntrack.c  6
-rw-r--r--  net/packet/af_packet.c  4
-rw-r--r--  net/psample/Kconfig  15
-rw-r--r--  net/psample/Makefile  5
-rw-r--r--  net/psample/psample.c  301
-rw-r--r--  net/qrtr/qrtr.c  4
-rw-r--r--  net/rfkill/core.c  100
-rw-r--r--  net/sched/Kconfig  12
-rw-r--r--  net/sched/Makefile  1
-rw-r--r--  net/sched/act_api.c  50
-rw-r--r--  net/sched/act_bpf.c  5
-rw-r--r--  net/sched/act_sample.c  274
-rw-r--r--  net/sched/cls_bpf.c  4
-rw-r--r--  net/sched/cls_flower.c  91
-rw-r--r--  net/sched/sch_fq_codel.c  6
-rw-r--r--  net/sctp/associola.c  12
-rw-r--r--  net/sctp/debug.c  5
-rw-r--r--  net/sctp/endpointola.c  1
-rw-r--r--  net/sctp/ipv6.c  3
-rw-r--r--  net/sctp/offload.c  2
-rw-r--r--  net/sctp/outqueue.c  35
-rw-r--r--  net/sctp/primitive.c  3
-rw-r--r--  net/sctp/protocol.c  3
-rw-r--r--  net/sctp/sm_make_chunk.c  138
-rw-r--r--  net/sctp/sm_sideeffect.c  32
-rw-r--r--  net/sctp/sm_statefuns.c  41
-rw-r--r--  net/sctp/sm_statetable.c  40
-rw-r--r--  net/sctp/socket.c  130
-rw-r--r--  net/sctp/stream.c  79
-rw-r--r--  net/sctp/transport.c  17
-rw-r--r--  net/smc/smc_clc.c  10
-rw-r--r--  net/smc/smc_core.c  5
-rw-r--r--  net/smc/smc_ib.h  4
-rw-r--r--  net/socket.c  2
-rw-r--r--  net/sunrpc/auth_gss/svcauth_gss.c  2
-rw-r--r--  net/sunrpc/svc_xprt.c  10
-rw-r--r--  net/sunrpc/xprtrdma/svc_rdma_recvfrom.c  2
-rw-r--r--  net/tipc/bcast.c  200
-rw-r--r--  net/tipc/bcast.h  33
-rw-r--r--  net/tipc/bearer.c  15
-rw-r--r--  net/tipc/bearer.h  8
-rw-r--r--  net/tipc/discover.c  4
-rw-r--r--  net/tipc/link.c  14
-rw-r--r--  net/tipc/msg.c  33
-rw-r--r--  net/tipc/msg.h  11
-rw-r--r--  net/tipc/name_distr.c  2
-rw-r--r--  net/tipc/name_table.c  38
-rw-r--r--  net/tipc/name_table.h  9
-rw-r--r--  net/tipc/node.c  36
-rw-r--r--  net/tipc/node.h  4
-rw-r--r--  net/tipc/server.c  48
-rw-r--r--  net/tipc/socket.c  64
-rw-r--r--  net/tipc/subscr.c  124
-rw-r--r--  net/tipc/subscr.h  1
-rw-r--r--  net/tipc/udp_media.c  8
-rw-r--r--  net/unix/af_unix.c  27
-rw-r--r--  net/wireless/Makefile  1
-rw-r--r--  net/wireless/core.c  3
-rw-r--r--  net/wireless/core.h  8
-rw-r--r--  net/wireless/mlme.c  29
-rw-r--r--  net/wireless/nl80211.c  152
-rw-r--r--  net/wireless/nl80211.h  10
-rw-r--r--  net/wireless/of.c  138
-rw-r--r--  net/wireless/reg.c  27
-rw-r--r--  net/wireless/scan.c  9
-rw-r--r--  net/wireless/sme.c  72
-rw-r--r--  net/wireless/sysfs.c  6
-rw-r--r--  net/wireless/util.c  32
-rw-r--r--  net/wireless/wext-core.c  67
-rw-r--r--  net/wireless/wext-sme.c  23
-rw-r--r--  net/xfrm/xfrm_input.c  12
-rw-r--r--  net/xfrm/xfrm_output.c  8
-rw-r--r--  net/xfrm/xfrm_policy.c  2
-rw-r--r--  net/xfrm/xfrm_state.c  86
-rw-r--r--  samples/bpf/map_perf_test_kern.c  30
-rw-r--r--  samples/bpf/map_perf_test_user.c  49
-rw-r--r--  samples/bpf/sock_example.h  2
-rw-r--r--  samples/bpf/tc_l2_redirect_kern.c  1
-rw-r--r--  samples/bpf/trace_output_user.c  1
-rw-r--r--  samples/bpf/xdp_tx_iptunnel_kern.c  1
-rw-r--r--  samples/vfio-mdev/mtty.c  23
-rw-r--r--  security/selinux/hooks.c  3
-rw-r--r--  sound/soc/codecs/nau8825.c  9
-rw-r--r--  sound/soc/codecs/nau8825.h  7
-rw-r--r--  sound/soc/codecs/rt5645.c  3
-rw-r--r--  sound/soc/codecs/tlv320aic3x.c  13
-rw-r--r--  sound/soc/codecs/wm_adsp.c  25
-rw-r--r--  sound/soc/dwc/designware_i2s.c  25
-rw-r--r--  sound/soc/fsl/fsl_ssi.c  74
-rw-r--r--  sound/soc/intel/boards/bytcr_rt5640.c  18
-rw-r--r--  sound/soc/intel/skylake/skl-pcm.c  3
-rw-r--r--  sound/soc/intel/skylake/skl-sst.c  3
-rw-r--r--  sound/soc/sh/rcar/core.c  4
-rw-r--r--  sound/soc/soc-core.c  10
-rw-r--r--  sound/soc/soc-pcm.c  4
-rw-r--r--  sound/soc/soc-topology.c  3
-rw-r--r--  sound/usb/quirks.c  1
-rw-r--r--  tools/lib/subcmd/parse-options.c  3
-rw-r--r--  tools/lib/subcmd/parse-options.h  5
-rw-r--r--  tools/lib/traceevent/event-parse.c  34
-rw-r--r--  tools/lib/traceevent/event-parse.h  1
-rw-r--r--  tools/lib/traceevent/plugin_sched_switch.c  4
-rw-r--r--  tools/perf/Documentation/perf-record.txt  4
-rw-r--r--  tools/perf/Makefile.perf  4
-rw-r--r--  tools/perf/builtin-kmem.c  1
-rw-r--r--  tools/perf/builtin-record.c  4
-rw-r--r--  tools/perf/builtin-sched.c  17
-rw-r--r--  tools/perf/util/probe-event.c  166
-rw-r--r--  tools/perf/util/probe-finder.c  15
-rw-r--r--  tools/perf/util/probe-finder.h  3
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-perl.c  1
-rw-r--r--  tools/perf/util/scripting-engines/trace-event-python.c  1
-rw-r--r--  tools/perf/util/symbol-elf.c  6
-rw-r--r--  tools/testing/selftests/Makefile  2
-rw-r--r--  tools/testing/selftests/bpf/.gitignore  1
-rw-r--r--  tools/testing/selftests/bpf/Makefile  4
-rwxr-xr-x  tools/testing/selftests/bpf/test_kmod.sh  2
-rw-r--r--  tools/testing/selftests/bpf/test_lpm_map.c  358
-rw-r--r--  tools/testing/selftests/bpf/test_lru_map.c  53
-rw-r--r--  tools/testing/selftests/bpf/test_tag.c  202
-rw-r--r--  tools/testing/selftests/bpf/test_verifier.c  524
-rw-r--r--  tools/testing/selftests/net/psock_lib.h  39
-rwxr-xr-x  tools/testing/selftests/net/run_netsocktests  2
-rw-r--r--  tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c  2
-rw-r--r--  tools/testing/selftests/x86/protection_keys.c  2
-rw-r--r--  tools/virtio/ringtest/main.h  12
-rwxr-xr-x  tools/virtio/ringtest/run-on-all.sh  5
-rw-r--r--  virt/kvm/arm/arch_timer.c  26
-rw-r--r--  virt/kvm/arm/hyp/timer-sr.c  33
-rw-r--r--  virt/kvm/arm/vgic/vgic-init.c  18
-rw-r--r--  virt/kvm/arm/vgic/vgic-v2.c  2
-rw-r--r--  virt/kvm/arm/vgic/vgic-v3.c  2
-rw-r--r--  virt/lib/irqbypass.c  4
1369 files changed, 35232 insertions, 12841 deletions
diff --git a/.mailmap b/.mailmap
index 02d2614..67dc22f 100644
--- a/.mailmap
+++ b/.mailmap
@@ -137,6 +137,7 @@ Ricardo Ribalda Delgado <ricardo.ribalda@gmail.com>
Rudolf Marek <R.Marek@sh.cvut.cz>
Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
+Sarangdhar Joshi <spjoshi@codeaurora.org>
Sam Ravnborg <sam@mars.ravnborg.org>
Santosh Shilimkar <ssantosh@kernel.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
@@ -150,10 +151,13 @@ Shuah Khan <shuah@kernel.org> <shuah.kh@samsung.com>
Simon Kelley <simon@thekelleys.org.uk>
Stéphane Witzmann <stephane.witzmann@ubpmes.univ-bpclermont.fr>
Stephen Hemminger <shemminger@osdl.org>
+Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
+Subhash Jadavani <subhashj@codeaurora.org>
Sudeep Holla <sudeep.holla@arm.com> Sudeep KarkadaNagesha <sudeep.karkadanagesha@arm.com>
Sumit Semwal <sumit.semwal@ti.com>
Tejun Heo <htejun@gmail.com>
Thomas Graf <tgraf@suug.ch>
+Thomas Pedersen <twp@codeaurora.org>
Tony Luck <tony.luck@intel.com>
Tsuneo Yoshioka <Tsuneo.Yoshioka@f-secure.com>
Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
diff --git a/Documentation/ABI/testing/sysfs-devices-deferred_probe b/Documentation/ABI/testing/sysfs-devices-deferred_probe
deleted file mode 100644
index 58553d7..0000000
--- a/Documentation/ABI/testing/sysfs-devices-deferred_probe
+++ /dev/null
@@ -1,12 +0,0 @@
-What: /sys/devices/.../deferred_probe
-Date: August 2016
-Contact: Ben Hutchings <ben.hutchings@codethink.co.uk>
-Description:
- The /sys/devices/.../deferred_probe attribute is
- present for all devices. If a driver detects during
- probing a device that a related device is not yet
- ready, it may defer probing of the first device. The
- kernel will retry probing the first device after any
- other device is successfully probed. This attribute
- reads as 1 if probing of this device is currently
- deferred, or 0 otherwise.
diff --git a/Documentation/devicetree/bindings/i2c/i2c.txt b/Documentation/devicetree/bindings/i2c/i2c.txt
index 5fa691e..cee9d50 100644
--- a/Documentation/devicetree/bindings/i2c/i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/i2c.txt
@@ -62,6 +62,9 @@ wants to support one of the below features, it should adapt the bindings below.
"irq" and "wakeup" names are recognized by I2C core, other names are
left to individual drivers.
+- host-notify
+	device uses the SMBus Host Notify protocol instead of an interrupt line.
+
- multi-master
states that there is another master active on this bus. The OS can use
this information to adapt power management to keep the arbitration awake
@@ -81,6 +84,11 @@ Binding may contain optional "interrupts" property, describing interrupts
used by the device. I2C core will assign "irq" interrupt (or the very first
interrupt if not using interrupt names) as primary interrupt for the slave.
+Alternatively, devices supporting SMBus Host Notify, and connected to
+adapters that support this feature, may use the "host-notify" property. I2C
+core will create a virtual interrupt for Host Notify and assign it as
+primary interrupt for the slave.
+
Also, if device is marked as a wakeup source, I2C core will set up "wakeup"
interrupt for the device. If "wakeup" interrupt name is not present in the
binding, then primary interrupt will be used as wakeup interrupt.
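
On the driver side nothing Host-Notify-specific is needed: a client simply
requests whatever primary interrupt the core assigned. A minimal sketch in C
(the foo_* names are placeholders, not from this patch):

	#include <linux/i2c.h>
	#include <linux/interrupt.h>

	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		/* query the device over I2C to find the interrupt cause */
		return IRQ_HANDLED;
	}

	static int foo_probe(struct i2c_client *client,
			     const struct i2c_device_id *id)
	{
		/* With "host-notify", client->irq is the virtual Host
		 * Notify interrupt the I2C core assigned as primary. */
		if (client->irq <= 0)
			return -EINVAL;

		return devm_request_threaded_irq(&client->dev, client->irq,
						 NULL, foo_irq_handler,
						 IRQF_ONESHOT, "foo", client);
	}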
diff --git a/Documentation/devicetree/bindings/mtd/tango-nand.txt b/Documentation/devicetree/bindings/mtd/tango-nand.txt
index ad5a02f..cd1bf2a 100644
--- a/Documentation/devicetree/bindings/mtd/tango-nand.txt
+++ b/Documentation/devicetree/bindings/mtd/tango-nand.txt
@@ -5,7 +5,7 @@ Required properties:
- compatible: "sigma,smp8758-nand"
- reg: address/size of nfc_reg, nfc_mem, and pbus_reg
- dmas: reference to the DMA channel used by the controller
-- dma-names: "nfc_sbox"
+- dma-names: "rxtx"
- clocks: reference to the system clock
- #address-cells: <1>
- #size-cells: <0>
@@ -17,9 +17,9 @@ Example:
nandc: nand-controller@2c000 {
compatible = "sigma,smp8758-nand";
- reg = <0x2c000 0x30 0x2d000 0x800 0x20000 0x1000>;
+ reg = <0x2c000 0x30>, <0x2d000 0x800>, <0x20000 0x1000>;
dmas = <&dma0 3>;
- dma-names = "nfc_sbox";
+ dma-names = "rxtx";
clocks = <&clkgen SYS_CLK>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index fb40891..9a734d8 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -2,7 +2,7 @@
Required properties:
-- compatible: should be "brcm,bcm7445-switch-v4.0"
+- compatible: should be "brcm,bcm7445-switch-v4.0" or "brcm,bcm7278-switch-v4.0"
- reg: addresses and length of the register sets for the device, must be 6
pairs of register addresses and lengths
- interrupts: interrupts for the devices, must be two interrupts
@@ -41,6 +41,13 @@ Optional properties:
Admission Control Block supports reporting the number of packets in-flight in a
switch queue
+Port subnodes:
+
+Optional properties:
+
+- brcm,use-bcm-hdr: boolean property, if present, indicates that the switch
+ port has Broadcom tags enabled (per-packet metadata)
+
Example:
switch_top@f0b00000 {
@@ -114,6 +121,7 @@ switch_top@f0b00000 {
port@0 {
label = "gphy";
reg = <0>;
+ brcm,use-bcm-hdr;
};
...
};
diff --git a/Documentation/devicetree/bindings/net/brcm,systemport.txt b/Documentation/devicetree/bindings/net/brcm,systemport.txt
index 877da34..83f29e0 100644
--- a/Documentation/devicetree/bindings/net/brcm,systemport.txt
+++ b/Documentation/devicetree/bindings/net/brcm,systemport.txt
@@ -1,7 +1,10 @@
* Broadcom BCM7xxx Ethernet Systemport Controller (SYSTEMPORT)
Required properties:
-- compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
+- compatible: should be one of:
+ "brcm,systemport-v1.00"
+ "brcm,systemportlite-v1.00" or
+ "brcm,systemport"
- reg: address and length of the register set for the device.
- interrupts: interrupts for the device, first cell must be for the rx
interrupts, and the second cell should be for the transmit queues. An
diff --git a/Documentation/devicetree/bindings/net/dsa/dsa.txt b/Documentation/devicetree/bindings/net/dsa/dsa.txt
index a4a570f..cfe8f64 100644
--- a/Documentation/devicetree/bindings/net/dsa/dsa.txt
+++ b/Documentation/devicetree/bindings/net/dsa/dsa.txt
@@ -34,13 +34,9 @@ Required properties:
Each port children node must have the following mandatory properties:
- reg : Describes the port address in the switch
-- label : Describes the label associated with this port, which
- will become the netdev name. Special labels are
- "cpu" to indicate a CPU port and "dsa" to
- indicate an uplink/downlink port between switches in
- the cluster.
-A port labelled "dsa" has the following mandatory property:
+An uplink/downlink port between switches in the cluster has the following
+mandatory property:
- link : Should be a list of phandles to other switch's DSA
port. This port is used as the outgoing port
@@ -48,12 +44,17 @@ A port labelled "dsa" has the following mandatory property:
information must be given, not just the one hop
routes to neighbouring switches.
-A port labelled "cpu" has the following mandatory property:
+A CPU port has the following mandatory property:
- ethernet : Should be a phandle to a valid Ethernet device node.
This host device is what the switch port is
connected to.
+A user port has the following optional property:
+
+- label : Describes the label associated with this port, which
+ will become the netdev name.
+
Port child nodes may also contain the following optional standardised
properties, described in binding documents:
@@ -107,7 +108,6 @@ linked into one DSA cluster.
switch0port5: port@5 {
reg = <5>;
- label = "dsa";
phy-mode = "rgmii-txid";
link = <&switch1port6
&switch2port9>;
@@ -119,7 +119,6 @@ linked into one DSA cluster.
port@6 {
reg = <6>;
- label = "cpu";
ethernet = <&fec1>;
fixed-link {
speed = <100>;
@@ -165,7 +164,6 @@ linked into one DSA cluster.
switch1port5: port@5 {
reg = <5>;
- label = "dsa";
link = <&switch2port9>;
phy-mode = "rgmii-txid";
fixed-link {
@@ -176,7 +174,6 @@ linked into one DSA cluster.
switch1port6: port@6 {
reg = <6>;
- label = "dsa";
phy-mode = "rgmii-txid";
link = <&switch0port5>;
fixed-link {
@@ -255,7 +252,6 @@ linked into one DSA cluster.
switch2port9: port@9 {
reg = <9>;
- label = "dsa";
phy-mode = "rgmii-txid";
link = <&switch1port5
&switch0port5>;
diff --git a/Documentation/devicetree/bindings/net/dsa/marvell.txt b/Documentation/devicetree/bindings/net/dsa/marvell.txt
index b3dd6b4..7ef9dbb 100644
--- a/Documentation/devicetree/bindings/net/dsa/marvell.txt
+++ b/Documentation/devicetree/bindings/net/dsa/marvell.txt
@@ -14,9 +14,9 @@ The properties described here are those specific to Marvell devices.
Additional required and optional properties can be found in dsa.txt.
Required properties:
-- compatible : Should be one of "marvell,mv88e6085" or
- "marvell,mv88e6190"
-- reg : Address on the MII bus for the switch.
+- compatible : Should be one of "marvell,mv88e6085" or
+ "marvell,mv88e6190"
+- reg : Address on the MII bus for the switch.
Optional properties:
@@ -26,30 +26,67 @@ Optional properties:
- interrupt-controller : Indicates the switch is itself an interrupt
controller. This is used for the PHY interrupts.
#interrupt-cells = <2> : Controller uses two cells, number and flag
-- mdio : container of PHY and devices on the switches MDIO
- bus
+- mdio			: Container of PHY and devices on the switch's MDIO
+ bus.
+- mdio? : Container of PHYs and devices on the external MDIO
+			  bus. The node must contain a compatible string of
+ "marvell,mv88e6xxx-mdio-external"
+
Example:
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- interrupt-parent = <&gpio0>;
- interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
- interrupt-controller;
- #interrupt-cells = <2>;
-
- switch0: switch@0 {
- compatible = "marvell,mv88e6085";
- reg = <0>;
- reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
- };
- mdio {
- #address-cells = <1>;
- #size-cells = <0>;
- switch1phy0: switch1phy0@0 {
- reg = <0>;
- interrupt-parent = <&switch0>;
- interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
- };
- };
- };
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ switch0: switch@0 {
+ compatible = "marvell,mv88e6085";
+ reg = <0>;
+ reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+ };
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+ };
+
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ interrupt-parent = <&gpio0>;
+ interrupts = <27 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ switch0: switch@0 {
+ compatible = "marvell,mv88e6390";
+ reg = <0>;
+ reset-gpios = <&gpio5 1 GPIO_ACTIVE_LOW>;
+ };
+ mdio {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy0: switch1phy0@0 {
+ reg = <0>;
+ interrupt-parent = <&switch0>;
+ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ };
+ };
+
+ mdio1 {
+ compatible = "marvell,mv88e6xxx-mdio-external";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ switch1phy9: switch1phy0@9 {
+ reg = <9>;
+ };
+ };
+ };
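
A hedged sketch of how a driver could tell the two buses apart using that
compatible string (foo_setup_mdio_buses and np are illustrative names, not
the actual mv88e6xxx code):

	#include <linux/of.h>

	static void foo_setup_mdio_buses(struct device_node *np)
	{
		struct device_node *child;

		for_each_available_child_of_node(np, child) {
			if (of_device_is_compatible(child,
					"marvell,mv88e6xxx-mdio-external")) {
				/* register child as the external MDIO bus */
			} else {
				/* default: the switch's internal MDIO bus */
			}
		}
	}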
diff --git a/Documentation/devicetree/bindings/net/mediatek-net.txt b/Documentation/devicetree/bindings/net/mediatek-net.txt
index c010faf..c7194e8 100644
--- a/Documentation/devicetree/bindings/net/mediatek-net.txt
+++ b/Documentation/devicetree/bindings/net/mediatek-net.txt
@@ -7,7 +7,7 @@ have dual GMAC each represented by a child node..
* Ethernet controller node
Required properties:
-- compatible: Should be "mediatek,mt7623-eth"
+- compatible: Should be "mediatek,mt2701-eth"
- reg: Address and length of the register set for the device
- interrupts: Should contain the three frame engines interrupts in numeric
order. These are fe_int0, fe_int1 and fe_int2.
diff --git a/Documentation/devicetree/bindings/net/meson-dwmac.txt b/Documentation/devicetree/bindings/net/meson-dwmac.txt
index 89e62dd..0703ad3 100644
--- a/Documentation/devicetree/bindings/net/meson-dwmac.txt
+++ b/Documentation/devicetree/bindings/net/meson-dwmac.txt
@@ -25,6 +25,22 @@ Required properties on Meson8b and newer:
- "clkin0" - first parent clock of the internal mux
- "clkin1" - second parent clock of the internal mux
+Optional properties on Meson8b and newer:
+- amlogic,tx-delay-ns: The internal RGMII TX clock delay (provided
+ by this driver) in nanoseconds. Allowed values
+ are: 0ns, 2ns, 4ns, 6ns.
+ When phy-mode is set to "rgmii" then the TX
+ delay should be explicitly configured. When
+			not configured, a fallback of 2ns is used.
+ When the phy-mode is set to either "rgmii-id"
+ or "rgmii-txid" the TX clock delay is already
+ provided by the PHY. In that case this
+			property should be set to 0ns (which disables
+			the TX clock delay in the MAC, so that the
+			delay is not applied by both the PHY and the
+			MAC).
+ Any configuration is ignored when the phy-mode
+ is set to "rmii".
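
A sketch of how a MAC driver might honour this property (illustrative names,
not the actual meson8b code), applying the documented 2ns fallback:

	#include <linux/of.h>

	static int foo_get_tx_delay(struct device_node *np)
	{
		u32 tx_delay_ns = 2;	/* documented fallback when absent */

		of_property_read_u32(np, "amlogic,tx-delay-ns", &tx_delay_ns);
		if (tx_delay_ns > 6 || tx_delay_ns % 2)
			return -EINVAL;	/* only 0, 2, 4 and 6 ns are valid */

		return tx_delay_ns;
	}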
Example for Meson6:
diff --git a/Documentation/devicetree/bindings/net/phy.txt b/Documentation/devicetree/bindings/net/phy.txt
index ff1bc4b..fb5056b 100644
--- a/Documentation/devicetree/bindings/net/phy.txt
+++ b/Documentation/devicetree/bindings/net/phy.txt
@@ -19,8 +19,9 @@ Optional Properties:
specifications. If neither of these are specified, the default is to
assume clause 22.
- If the phy's identifier is known then the list may contain an entry
- of the form: "ethernet-phy-idAAAA.BBBB" where
+ If the PHY reports an incorrect ID (or none at all) then the
+ "compatible" list may contain an entry with the correct PHY ID in the
+ form: "ethernet-phy-idAAAA.BBBB" where
AAAA - The value of the 16 bit Phy Identifier 1 register as
4 hex digits. This is the chip vendor OUI bits 3:18
BBBB - The value of the 16 bit Phy Identifier 2 register as
diff --git a/Documentation/devicetree/bindings/net/ti,dp83867.txt b/Documentation/devicetree/bindings/net/ti,dp83867.txt
index 85bf945..afe9630 100644
--- a/Documentation/devicetree/bindings/net/ti,dp83867.txt
+++ b/Documentation/devicetree/bindings/net/ti,dp83867.txt
@@ -3,9 +3,11 @@
Required properties:
- reg - The ID number for the phy, usually a small integer
- ti,rx-internal-delay - RGMII Receive Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_RXID
- ti,tx-internal-delay - RGMII Transmit Clock Delay - see dt-bindings/net/ti-dp83867.h
- for applicable values
+ for applicable values. Required only if interface type is
+ PHY_INTERFACE_MODE_RGMII_ID or PHY_INTERFACE_MODE_RGMII_TXID
 - ti,fifo-depth	- Transmit FIFO depth - see dt-bindings/net/ti-dp83867.h
for applicable values
diff --git a/Documentation/devicetree/bindings/net/wireless/ieee80211.txt b/Documentation/devicetree/bindings/net/wireless/ieee80211.txt
new file mode 100644
index 0000000..f6442b1
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/wireless/ieee80211.txt
@@ -0,0 +1,24 @@
+Common IEEE 802.11 properties
+
+This provides documentation of common properties that are valid for all wireless
+devices.
+
+Optional properties:
+ - ieee80211-freq-limit : list of supported frequency ranges in kHz. This can
+	be used for devices that, in a given configuration, support fewer
+	channels than usual. A chipset may support a wide wireless band but
+	still be limited to part of it because of the antennas or power
+	amplifier in use. An example is a tri-band wireless router with two
+	identical chipsets used for two different 5 GHz subbands; using them
+	outside those subbands may not work or may noticeably degrade
+	performance.
+
+Example:
+
+pcie@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ wifi@0,0 {
+ reg = <0x0000 0 0 0 0>;
+ ieee80211-freq-limit = <2402000 2482000>,
+ <5170000 5250000>;
+ };
+};
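
On the driver side this property is consumed via wiphy_read_of_freq_limits(),
whose kernel-doc is referenced in cfg80211.rst below. A minimal sketch of the
expected call order (foo_cfg80211_ops and struct foo_priv are placeholders):

	#include <net/cfg80211.h>

	static int foo_register_wiphy(void)
	{
		struct wiphy *wiphy = wiphy_new(&foo_cfg80211_ops,
						sizeof(struct foo_priv));

		if (!wiphy)
			return -ENOMEM;

		/* drop channels outside the ieee80211-freq-limit ranges */
		wiphy_read_of_freq_limits(wiphy);

		return wiphy_register(wiphy);
	}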
diff --git a/Documentation/devicetree/bindings/spi/sh-msiof.txt b/Documentation/devicetree/bindings/spi/sh-msiof.txt
index da6614c..dc97506 100644
--- a/Documentation/devicetree/bindings/spi/sh-msiof.txt
+++ b/Documentation/devicetree/bindings/spi/sh-msiof.txt
@@ -1,17 +1,23 @@
Renesas MSIOF spi controller
Required properties:
-- compatible : "renesas,msiof-<soctype>" for SoCs,
- "renesas,sh-msiof" for SuperH, or
- "renesas,sh-mobile-msiof" for SH Mobile series.
- Examples with soctypes are:
- "renesas,msiof-r8a7790" (R-Car H2)
+- compatible : "renesas,msiof-r8a7790" (R-Car H2)
"renesas,msiof-r8a7791" (R-Car M2-W)
"renesas,msiof-r8a7792" (R-Car V2H)
"renesas,msiof-r8a7793" (R-Car M2-N)
"renesas,msiof-r8a7794" (R-Car E2)
"renesas,msiof-r8a7796" (R-Car M3-W)
"renesas,msiof-sh73a0" (SH-Mobile AG5)
+		 "renesas,sh-mobile-msiof" (generic SH-Mobile compatible device)
+ "renesas,rcar-gen2-msiof" (generic R-Car Gen2 compatible device)
+ "renesas,rcar-gen3-msiof" (generic R-Car Gen3 compatible device)
+ "renesas,sh-msiof" (deprecated)
+
+ When compatible with the generic version, nodes
+ must list the SoC-specific version corresponding
+			 to the platform first, followed by the generic
+ version.
+
- reg : A list of offsets and lengths of the register sets for
the device.
If only one register set is present, it is to be used
@@ -61,7 +67,8 @@ Documentation/devicetree/bindings/pinctrl/renesas,*.
Example:
msiof0: spi@e6e20000 {
- compatible = "renesas,msiof-r8a7791";
+ compatible = "renesas,msiof-r8a7791",
+ "renesas,rcar-gen2-msiof";
reg = <0 0xe6e20000 0 0x0064>;
interrupts = <0 156 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp0_clks R8A7791_CLK_MSIOF0>;
diff --git a/Documentation/driver-api/80211/cfg80211.rst b/Documentation/driver-api/80211/cfg80211.rst
index b1e149e..eca534a 100644
--- a/Documentation/driver-api/80211/cfg80211.rst
+++ b/Documentation/driver-api/80211/cfg80211.rst
@@ -45,6 +45,9 @@ Device registration
:functions: wiphy_new
.. kernel-doc:: include/net/cfg80211.h
+ :functions: wiphy_read_of_freq_limits
+
+.. kernel-doc:: include/net/cfg80211.h
:functions: wiphy_register
.. kernel-doc:: include/net/cfg80211.h
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 72624a1..c94b467 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -212,10 +212,11 @@ asynchronous manner and the value may not be very precise. To see a precise
snapshot of a moment, you can see /proc/<pid>/smaps file and scan page table.
It's slow but very precise.
-Table 1-2: Contents of the status files (as of 4.1)
+Table 1-2: Contents of the status files (as of 4.8)
..............................................................................
Field Content
Name filename of the executable
+ Umask file mode creation mask
State state (R is running, S is sleeping, D is sleeping
in an uninterruptible wait, Z is zombie,
T is traced or stopped)
@@ -226,7 +227,6 @@ Table 1-2: Contents of the status files (as of 4.1)
TracerPid PID of process tracing this process (0 if not)
Uid Real, effective, saved set, and file system UIDs
Gid Real, effective, saved set, and file system GIDs
- Umask file mode creation mask
FDSize number of file descriptor slots currently allocated
Groups supplementary group list
NStgid descendant namespace thread group ID hierarchy
@@ -236,6 +236,7 @@ Table 1-2: Contents of the status files (as of 4.1)
VmPeak peak virtual memory size
VmSize total program size
VmLck locked memory size
+ VmPin pinned memory size
VmHWM peak resident set size ("high water mark")
VmRSS size of memory portions. It contains the three
following parts (VmRSS = RssAnon + RssFile + RssShmem)
diff --git a/Documentation/networking/dsa/dsa.txt b/Documentation/networking/dsa/dsa.txt
index 63912ef3..b8b4075 100644
--- a/Documentation/networking/dsa/dsa.txt
+++ b/Documentation/networking/dsa/dsa.txt
@@ -295,7 +295,6 @@ DSA currently leverages the following subsystems:
- MDIO/PHY library: drivers/net/phy/phy.c, mdio_bus.c
- Switchdev: net/switchdev/*
- Device Tree for various of_* functions
-- HWMON: drivers/hwmon/*
MDIO/PHY library
----------------
@@ -349,12 +348,6 @@ Documentation/devicetree/bindings/net/dsa/dsa.txt. PHY/MDIO library helper
functions such as of_get_phy_mode(), of_phy_connect() are also used to query
per-port PHY specific details: interface connection, MDIO bus location etc..
-HWMON
------
-
-Some switch drivers feature internal temperature sensors which are exposed as
-regular HWMON devices in /sys/class/hwmon/.
-
Driver development
==================
@@ -495,23 +488,6 @@ Power management
BR_STATE_DISABLED and propagating changes to the hardware if this port is
disabled while being a bridge member
-Hardware monitoring
--------------------
-
-These callbacks are only available if CONFIG_NET_DSA_HWMON is enabled:
-
-- get_temp: this function queries the given switch for its temperature
-
-- get_temp_limit: this function returns the switch current maximum temperature
- limit
-
-- set_temp_limit: this function configures the maximum temperature limit allowed
-
-- get_temp_alarm: this function returns the critical temperature threshold
- returning an alarm notification
-
-See Documentation/hwmon/sysfs-interface for details.
-
Bridge layer
------------
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 7dd65c9..fc73eeb 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -246,21 +246,12 @@ tcp_dsack - BOOLEAN
Allows TCP to send "duplicate" SACKs.
tcp_early_retrans - INTEGER
- Enable Early Retransmit (ER), per RFC 5827. ER lowers the threshold
- for triggering fast retransmit when the amount of outstanding data is
- small and when no previously unsent data can be transmitted (such
- that limited transmit could be used). Also controls the use of
- Tail loss probe (TLP) that converts RTOs occurring due to tail
- losses into fast recovery (draft-dukkipati-tcpm-tcp-loss-probe-01).
+ Tail loss probe (TLP) converts RTOs occurring due to tail
+ losses into fast recovery (draft-ietf-tcpm-rack). Note that
+	TLP requires RACK to function properly (see tcp_recovery below).
Possible values:
- 0 disables ER
- 1 enables ER
- 2 enables ER but delays fast recovery and fast retransmit
- by a fourth of RTT. This mitigates connection falsely
- recovers when network has a small degree of reordering
- (less than 3 packets).
- 3 enables delayed ER and TLP.
- 4 enables TLP only.
+ 0 disables TLP
+ 3 or 4 enables TLP
Default: 3
tcp_ecn - INTEGER
@@ -712,18 +703,6 @@ tcp_thin_linear_timeouts - BOOLEAN
Documentation/networking/tcp-thin.txt
Default: 0
-tcp_thin_dupack - BOOLEAN
- Enable dynamic triggering of retransmissions after one dupACK
- for thin streams. If set, a check is performed upon reception
- of a dupACK to determine if the stream is thin (less than 4
- packets in flight). As long as the stream is found to be thin,
- data is retransmitted on the first received dupACK. This
- improves retransmission latency for non-aggressive thin
- streams, often found to be time-dependent.
- For more information on thin streams, see
- Documentation/networking/tcp-thin.txt
- Default: 0
-
tcp_limit_output_bytes - INTEGER
Controls TCP Small Queue limit per tcp socket.
TCP bulk sender tends to increase packets in flight until it
@@ -742,6 +721,13 @@ tcp_challenge_ack_limit - INTEGER
UDP variables:
+udp_l3mdev_accept - BOOLEAN
+ Enabling this option allows a "global" bound socket to work
+ across L3 master domains (e.g., VRFs) with packets capable of
+ being received regardless of the L3 domain in which they
+ originated. Only valid when the kernel was compiled with
+ CONFIG_NET_L3_MASTER_DEV.
+
udp_mem - vector of 3 INTEGERs: min, pressure, max
Number of pages allowed for queueing by all UDP sockets.
@@ -843,6 +829,15 @@ ip_local_reserved_ports - list of comma separated ranges
Default: Empty
+ip_unprivileged_port_start - INTEGER
+ This is a per-namespace sysctl. It defines the first
+ unprivileged port in the network namespace. Privileged ports
+ require root or CAP_NET_BIND_SERVICE in order to bind to them.
+ To disable all privileged ports, set this to 0. It may not
+ overlap with the ip_local_reserved_ports range.
+
+ Default: 1024
+
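+	For illustration (a self-contained userspace sketch, not part of
+	this patch): with the default of 1024, an unprivileged process
+	binding below the threshold gets EACCES:

	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <stdio.h>
	#include <sys/socket.h>

	int main(void)
	{
		struct sockaddr_in addr = {
			.sin_family = AF_INET,
			.sin_port = htons(80),	/* below 1024 */
			.sin_addr.s_addr = htonl(INADDR_ANY),
		};
		int fd = socket(AF_INET, SOCK_STREAM, 0);

		if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0)
			perror("bind"); /* EACCES without CAP_NET_BIND_SERVICE */
		return 0;
	}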
ip_nonlocal_bind - BOOLEAN
If set, allows processes to bind() to non-local IP addresses,
which can be quite useful - but may break some applications.
diff --git a/Documentation/networking/regulatory.txt b/Documentation/networking/regulatory.txt
index 356f791..7818b5f 100644
--- a/Documentation/networking/regulatory.txt
+++ b/Documentation/networking/regulatory.txt
@@ -156,12 +156,12 @@ struct ieee80211_regdomain mydriver_jp_regdom = {
//.alpha2 = "99", /* If I have no alpha2 to map it to */
.reg_rules = {
/* IEEE 802.11b/g, channels 1..14 */
- REG_RULE(2412-20, 2484+20, 40, 6, 20, 0),
+ REG_RULE(2412-10, 2484+10, 40, 6, 20, 0),
/* IEEE 802.11a, channels 34..48 */
- REG_RULE(5170-20, 5240+20, 40, 6, 20,
+ REG_RULE(5170-10, 5240+10, 40, 6, 20,
NL80211_RRF_NO_IR),
/* IEEE 802.11a, channels 52..64 */
- REG_RULE(5260-20, 5320+20, 40, 6, 20,
+ REG_RULE(5260-10, 5320+10, 40, 6, 20,
NL80211_RRF_NO_IR|
NL80211_RRF_DFS),
}
@@ -205,7 +205,7 @@ the data in regdb.c as an alternative to using CRDA.
The file net/wireless/db.txt should be kept up-to-date with the db.txt
file available in the git repository here:
- git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless-regdb.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/sforshee/wireless-regdb.git
Again, most users in most situations should be using the CRDA package
provided with their distribution, and in most other situations users
diff --git a/Documentation/networking/vrf.txt b/Documentation/networking/vrf.txt
index 755dab8..3918dae 100644
--- a/Documentation/networking/vrf.txt
+++ b/Documentation/networking/vrf.txt
@@ -98,10 +98,11 @@ VRF device:
or to specify the output device using cmsg and IP_PKTINFO.
-TCP services running in the default VRF context (ie., not bound to any VRF
-device) can work across all VRF domains by enabling the tcp_l3mdev_accept
-sysctl option:
+TCP & UDP services running in the default VRF context (i.e., not bound
+to any VRF device) can work across all VRF domains by enabling the
+tcp_l3mdev_accept and udp_l3mdev_accept sysctl options:
sysctl -w net.ipv4.tcp_l3mdev_accept=1
+ sysctl -w net.ipv4.udp_l3mdev_accept=1
netfilter rules on the VRF device can be used to limit access to services
running in the default VRF context as well.
diff --git a/Documentation/power/states.txt b/Documentation/power/states.txt
index 8a39ce4..008ecb5 100644
--- a/Documentation/power/states.txt
+++ b/Documentation/power/states.txt
@@ -35,9 +35,7 @@ only one way to cause the system to go into the Suspend-To-RAM state (write
The default suspend mode (ie. the one to be used without writing anything into
/sys/power/mem_sleep) is either "deep" (if Suspend-To-RAM is supported) or
"s2idle", but it can be overridden by the value of the "mem_sleep_default"
-parameter in the kernel command line. On some ACPI-based systems, depending on
-the information in the FADT, the default may be "s2idle" even if Suspend-To-RAM
-is supported.
+parameter in the kernel command line.
The properties of all of the sleep states are described below.
diff --git a/Documentation/vm/page_frags b/Documentation/vm/page_frags
new file mode 100644
index 0000000..a671456
--- /dev/null
+++ b/Documentation/vm/page_frags
@@ -0,0 +1,42 @@
+Page fragments
+--------------
+
+A page fragment is an arbitrary-length, arbitrary-offset area of memory
+which resides within a compound page of order 0 or higher. Multiple
+fragments within that page are individually refcounted, using the page's
+reference counter.
+
+The page_frag functions, page_frag_alloc and page_frag_free, provide a
+simple allocation framework for page fragments. This is used by the
+network stack and network device drivers to provide a backing region of
+memory, used either as the sk_buff->head or in the "frags" portion of
+skb_shared_info.
+
+In order to make use of the page fragment APIs a backing page fragment
+cache is needed. This provides a central point for the fragment allocation
+and allows multiple calls to make use of a cached page. The advantage to
+doing this is that multiple calls to get_page can be avoided, which can be
+expensive at allocation time. However, due to the nature of
+this caching it is required that any calls to the cache be protected by
+either a per-cpu limitation, or a per-cpu limitation and forcing interrupts
+to be disabled when executing the fragment allocation.
+
+The network stack uses two separate caches per CPU to handle fragment
+allocation. The netdev_alloc_cache is used by callers making use of the
+__netdev_alloc_frag and __netdev_alloc_skb calls. The napi_alloc_cache is
+used by callers of the __napi_alloc_frag and __napi_alloc_skb calls. The
+main difference between these two calls is the context in which they may be
+called. The "netdev" prefixed functions are usable in any context as these
+functions will disable interrupts, while the "napi" prefixed functions are
+only usable within the softirq context.
+
+Many network device drivers use a similar methodology for allocating page
+fragments, but the page fragments are cached at the ring or descriptor
+level. In order to enable these cases it is necessary to provide a generic
+way of tearing down a page cache. For this reason __page_frag_cache_drain
+was implemented. It allows for freeing multiple references from a single
+page via a single call. The advantage to doing this is that it allows for
+cleaning up the multiple references that were added to a page in order to
+avoid calling get_page per allocation.
+
+Alexander Duyck, Nov 29, 2016.
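
A minimal sketch of the allocation pattern described above, assuming the
caller provides the per-cpu protection the text requires (the foo_* names
are illustrative):

	#include <linux/gfp.h>
	#include <linux/percpu-defs.h>

	static DEFINE_PER_CPU(struct page_frag_cache, foo_frag_cache);

	static void *foo_alloc_frag(unsigned int size)
	{
		struct page_frag_cache *nc = this_cpu_ptr(&foo_frag_cache);

		/* caller must keep us on this CPU, e.g. softirq context */
		return page_frag_alloc(nc, size, GFP_ATOMIC);
	}

	static void foo_free_frag(void *buf)
	{
		page_frag_free(buf);	/* drops one page reference */
	}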
diff --git a/MAINTAINERS b/MAINTAINERS
index 0b5c80e..300d2ec 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -81,7 +81,6 @@ Descriptions of section entries:
Q: Patchwork web based patch tracking system site
T: SCM tree type and location.
Type is one of: git, hg, quilt, stgit, topgit
- B: Bug tracking system location.
S: Status, one of the following:
Supported: Someone is actually paid to look after this.
Maintained: Someone actually looks after it.
@@ -977,6 +976,7 @@ M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
W: http://www.armlinux.org.uk/
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git
F: arch/arm/
ARM SUB-ARCHITECTURES
@@ -1154,6 +1154,7 @@ ARM/CLKDEV SUPPORT
M: Russell King <linux@armlinux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git clkdev
F: arch/arm/include/asm/clkdev.h
F: drivers/clk/clkdev.c
@@ -1689,6 +1690,7 @@ M: Krzysztof Kozlowski <krzk@kernel.org>
R: Javier Martinez Canillas <javier@osg.samsung.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
L: linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
+Q: https://patchwork.kernel.org/project/linux-samsung-soc/list/
S: Maintained
F: arch/arm/boot/dts/s3c*
F: arch/arm/boot/dts/s5p*
@@ -2194,14 +2196,6 @@ L: alsa-devel@alsa-project.org (moderated for non-subscribers)
S: Supported
F: sound/soc/atmel
-ATMEL DMA DRIVER
-M: Nicolas Ferre <nicolas.ferre@atmel.com>
-L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S: Supported
-F: drivers/dma/at_hdmac.c
-F: drivers/dma/at_hdmac_regs.h
-F: include/linux/platform_data/dma-atmel.h
-
ATMEL XDMA DRIVER
M: Ludovic Desroches <ludovic.desroches@atmel.com>
L: linux-arm-kernel@lists.infradead.org
@@ -3579,7 +3573,7 @@ F: drivers/infiniband/hw/cxgb3/
F: include/uapi/rdma/cxgb3-abi.h
CXGB4 ETHERNET DRIVER (CXGB4)
-M: Hariprasad S <hariprasad@chelsio.com>
+M: Ganesh Goudar <ganeshgr@chelsio.com>
L: netdev@vger.kernel.org
W: http://www.chelsio.com
S: Supported
@@ -4112,18 +4106,24 @@ F: drivers/gpu/drm/bridge/
DRM DRIVER FOR BOCHS VIRTUAL GPU
M: Gerd Hoffmann <kraxel@redhat.com>
-S: Odd Fixes
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
F: drivers/gpu/drm/bochs/
DRM DRIVER FOR QEMU'S CIRRUS DEVICE
M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Obsolete
+W: https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
F: drivers/gpu/drm/cirrus/
RADEON and AMDGPU DRM DRIVERS
M: Alex Deucher <alexander.deucher@amd.com>
M: Christian König <christian.koenig@amd.com>
-L: dri-devel@lists.freedesktop.org
+L: amd-gfx@lists.freedesktop.org
T: git git://people.freedesktop.org/~agd5f/linux
S: Supported
F: drivers/gpu/drm/radeon/
@@ -4159,7 +4159,7 @@ F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
@@ -4310,7 +4310,10 @@ F: Documentation/devicetree/bindings/display/renesas,du.txt
DRM DRIVER FOR QXL VIRTUAL GPU
M: Dave Airlie <airlied@redhat.com>
-S: Odd Fixes
+M: Gerd Hoffmann <kraxel@redhat.com>
+L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
+S: Maintained
F: drivers/gpu/drm/qxl/
F: include/uapi/drm/qxl_drm.h
@@ -7712,8 +7715,10 @@ F: drivers/net/dsa/mv88e6xxx/
F: Documentation/devicetree/bindings/net/dsa/marvell.txt
MARVELL ARMADA DRM SUPPORT
-M: Russell King <rmk+kernel@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
S: Maintained
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-devel
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-armada-fixes
F: drivers/gpu/drm/armada/
F: include/uapi/drm/armada_drm.h
F: Documentation/devicetree/bindings/display/armada/
@@ -8185,6 +8190,15 @@ S: Maintained
F: drivers/tty/serial/atmel_serial.c
F: include/linux/atmel_serial.h
+MICROCHIP / ATMEL DMA DRIVER
+M: Ludovic Desroches <ludovic.desroches@microchip.com>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: dmaengine@vger.kernel.org
+S: Supported
+F: drivers/dma/at_hdmac.c
+F: drivers/dma/at_hdmac_regs.h
+F: include/linux/platform_data/dma-atmel.h
+
MICROCHIP / ATMEL ISC DRIVER
M: Songjun Wu <songjun.wu@microchip.com>
L: linux-media@vger.kernel.org
@@ -8909,8 +8923,10 @@ S: Supported
F: drivers/nfc/nxp-nci
NXP TDA998X DRM DRIVER
-M: Russell King <rmk+kernel@armlinux.org.uk>
+M: Russell King <linux@armlinux.org.uk>
S: Supported
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-devel
+T: git git://git.armlinux.org.uk/~rmk/linux-arm.git drm-tda998x-fixes
F: drivers/gpu/drm/i2c/tda998x_drv.c
F: include/drm/i2c/tda998x.h
@@ -9957,6 +9973,13 @@ L: linuxppc-dev@lists.ozlabs.org
S: Maintained
F: drivers/block/ps3vram.c
+PSAMPLE PACKET SAMPLING SUPPORT
+M: Yotam Gigi <yotamg@mellanox.com>
+S: Maintained
+F: net/psample
+F: include/net/psample.h
+F: include/uapi/linux/psample.h
+
PSTORE FILESYSTEM
M: Anton Vorontsov <anton@enomsg.org>
M: Colin Cross <ccross@android.com>
@@ -10865,13 +10888,6 @@ F: include/linux/dma/dw.h
F: include/linux/platform_data/dma-dw.h
F: drivers/dma/dw/
-SYNOPSYS DESIGNWARE ETHERNET QOS 4.10a driver
-M: Lars Persson <lars.persson@axis.com>
-L: netdev@vger.kernel.org
-S: Supported
-F: Documentation/devicetree/bindings/net/snps,dwc-qos-ethernet.txt
-F: drivers/net/ethernet/synopsys/dwc_eth_qos.c
-
SYNOPSYS DESIGNWARE I2C DRIVER
M: Jarkko Nikula <jarkko.nikula@linux.intel.com>
R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
@@ -13105,6 +13121,7 @@ M: David Airlie <airlied@linux.ie>
M: Gerd Hoffmann <kraxel@redhat.com>
L: dri-devel@lists.freedesktop.org
L: virtualization@lists.linux-foundation.org
+T: git git://git.kraxel.org/linux drm-qemu
S: Maintained
F: drivers/gpu/drm/virtio/
F: include/uapi/linux/virtio_gpu.h
@@ -13456,6 +13473,7 @@ F: arch/x86/
X86 PLATFORM DRIVERS
M: Darren Hart <dvhart@infradead.org>
+M: Andy Shevchenko <andy@infradead.org>
L: platform-driver-x86@vger.kernel.org
T: git git://git.infradead.org/users/dvhart/linux-platform-drivers-x86.git
S: Maintained
@@ -13627,6 +13645,7 @@ F: drivers/net/hamradio/z8530.h
ZBUD COMPRESSED PAGE ALLOCATOR
M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: mm/zbud.c
@@ -13682,6 +13701,7 @@ F: Documentation/vm/zsmalloc.txt
ZSWAP COMPRESSED SWAP CACHING
M: Seth Jennings <sjenning@redhat.com>
+M: Dan Streetman <ddstreet@ieee.org>
L: linux-mm@kvack.org
S: Maintained
F: mm/zswap.c
diff --git a/Makefile b/Makefile
index 5f1a847..0988400 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
VERSION = 4
PATCHLEVEL = 10
SUBLEVEL = 0
-EXTRAVERSION = -rc3
-NAME = Roaring Lionus
+EXTRAVERSION = -rc5
+NAME = Anniversary Edition
# *DOCUMENTATION*
# To see a list of typical targets execute "make help"
diff --git a/arch/Kconfig b/arch/Kconfig
index 99839c2..bd04eac 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -781,4 +781,7 @@ config VMAP_STACK
the stack to map directly to the KASAN shadow map using a formula
that is incorrect if the stack is in vmalloc space.
+config ARCH_WANT_RELAX_ORDER
+ bool
+
source "kernel/gcov/Kconfig"
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index c75d290..283099c 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -29,7 +29,7 @@ config ARC
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_MEMBLOCK
- select HAVE_MOD_ARCH_SPECIFIC if ARC_DW2_UNWIND
+ select HAVE_MOD_ARCH_SPECIFIC
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HANDLE_DOMAIN_IRQ
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3410ff..5008021 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -67,7 +67,7 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_IC_PTAG_HI 0x1F
/* Bit val in IC_CTRL */
-#define IC_CTRL_CACHE_DISABLE 0x1
+#define IC_CTRL_DIS 0x1
/* Data cache related Auxiliary registers */
#define ARC_REG_DC_BCR 0x72 /* Build Config reg */
@@ -80,8 +80,9 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_DC_PTAG_HI 0x5F
/* Bit val in DC_CTRL */
-#define DC_CTRL_INV_MODE_FLUSH 0x40
-#define DC_CTRL_FLUSH_STATUS 0x100
+#define DC_CTRL_DIS 0x001
+#define DC_CTRL_INV_MODE_FLUSH 0x040
+#define DC_CTRL_FLUSH_STATUS 0x100
/*System-level cache (L2 cache) related Auxiliary registers */
#define ARC_REG_SLC_CFG 0x901
@@ -92,8 +93,8 @@ extern unsigned long perip_base, perip_end;
#define ARC_REG_SLC_RGN_END 0x916
/* Bit val in SLC_CONTROL */
+#define SLC_CTRL_DIS 0x001
#define SLC_CTRL_IM 0x040
-#define SLC_CTRL_DISABLE 0x001
#define SLC_CTRL_BUSY 0x100
#define SLC_CTRL_RGN_OP_INV 0x200
diff --git a/arch/arc/include/asm/entry-arcv2.h b/arch/arc/include/asm/entry-arcv2.h
index b5ff87e..aee1a77 100644
--- a/arch/arc/include/asm/entry-arcv2.h
+++ b/arch/arc/include/asm/entry-arcv2.h
@@ -16,6 +16,7 @@
;
; Now manually save: r12, sp, fp, gp, r25
+ PUSH r30
PUSH r12
; Saving pt_regs->sp correctly requires some extra work due to the way
@@ -72,6 +73,7 @@
POPAX AUX_USER_SP
1:
POP r12
+ POP r30
.endm
diff --git a/arch/arc/include/asm/module.h b/arch/arc/include/asm/module.h
index 6e91d8b..567590e 100644
--- a/arch/arc/include/asm/module.h
+++ b/arch/arc/include/asm/module.h
@@ -14,13 +14,13 @@
#include <asm-generic/module.h>
-#ifdef CONFIG_ARC_DW2_UNWIND
struct mod_arch_specific {
+#ifdef CONFIG_ARC_DW2_UNWIND
void *unw_info;
int unw_sec_idx;
+#endif
const char *secstr;
};
-#endif
#define MODULE_PROC_FAMILY "ARC700"
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 69095da..47111d5 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -84,7 +84,7 @@ struct pt_regs {
unsigned long fp;
unsigned long sp; /* user/kernel sp depending on where we came from */
- unsigned long r12;
+ unsigned long r12, r30;
/*------- Below list auto saved by h/w -----------*/
unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index cb954cd..c568a9d 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -31,6 +31,7 @@ extern int root_mountflags, end_mem;
void setup_processor(void);
void __init setup_arch_memory(void);
+long __init arc_get_mem_sz(void);
/* Helpers used in arc_*_mumbojumbo routines */
#define IS_AVAIL1(v, s) ((v) ? s : "")
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 994dca7..ecef0fb 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -77,20 +77,20 @@ void arc_init_IRQ(void)
static void arcv2_irq_mask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 0);
}
static void arcv2_irq_unmask(struct irq_data *data)
{
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_ENABLE, 1);
}
void arcv2_irq_enable(struct irq_data *data)
{
/* set default priority */
- write_aux_reg(AUX_IRQ_SELECT, data->irq);
+ write_aux_reg(AUX_IRQ_SELECT, data->hwirq);
write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
/*
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index ce9deb9..8c1fd5c 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -57,7 +57,7 @@ static void arc_irq_mask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb &= ~(1 << data->irq);
+ ienb &= ~(1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
@@ -66,7 +66,7 @@ static void arc_irq_unmask(struct irq_data *data)
unsigned int ienb;
ienb = read_aux_reg(AUX_IENABLE);
- ienb |= (1 << data->irq);
+ ienb |= (1 << data->hwirq);
write_aux_reg(AUX_IENABLE, ienb);
}
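
The intc-arcv2.c and intc-compact.c hunks above all fix the same mix-up;
for reference, a comment-only sketch of the two irq_data fields involved
(not part of the patch):

	/*
	 * struct irq_data carries both numbers:
	 *   data->irq   - Linux virtual IRQ number (virq)
	 *   data->hwirq - controller-local hardware line
	 * Mask/unmask register writes must use hwirq; virq == hwirq only
	 * when the domain mapping happens to be identity.
	 */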
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index 560c4af..9274f8a 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -10,6 +10,7 @@
#include <linux/smp.h>
#include <linux/irq.h>
+#include <linux/irqchip/chained_irq.h>
#include <linux/spinlock.h>
#include <soc/arc/mcip.h>
#include <asm/irqflags-arcv2.h>
@@ -221,10 +222,13 @@ static irq_hw_number_t idu_first_hwirq;
static void idu_cascade_isr(struct irq_desc *desc)
{
struct irq_domain *idu_domain = irq_desc_get_handler_data(desc);
+ struct irq_chip *core_chip = irq_desc_get_chip(desc);
irq_hw_number_t core_hwirq = irqd_to_hwirq(irq_desc_get_irq_data(desc));
irq_hw_number_t idu_hwirq = core_hwirq - idu_first_hwirq;
+ chained_irq_enter(core_chip, desc);
generic_handle_irq(irq_find_mapping(idu_domain, idu_hwirq));
+ chained_irq_exit(core_chip, desc);
}
static int idu_irq_map(struct irq_domain *d, unsigned int virq, irq_hw_number_t hwirq)
diff --git a/arch/arc/kernel/module.c b/arch/arc/kernel/module.c
index 42e964d..3d99a60 100644
--- a/arch/arc/kernel/module.c
+++ b/arch/arc/kernel/module.c
@@ -32,8 +32,8 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
#ifdef CONFIG_ARC_DW2_UNWIND
mod->arch.unw_sec_idx = 0;
mod->arch.unw_info = NULL;
- mod->arch.secstr = secstr;
#endif
+ mod->arch.secstr = secstr;
return 0;
}
@@ -113,8 +113,10 @@ int apply_relocate_add(Elf32_Shdr *sechdrs,
}
+#ifdef CONFIG_ARC_DW2_UNWIND
if (strcmp(module->arch.secstr+sechdrs[tgtsec].sh_name, ".eh_frame") == 0)
module->arch.unw_sec_idx = tgtsec;
+#endif
return 0;
diff --git a/arch/arc/mm/cache.c b/arch/arc/mm/cache.c
index ec86ac0..d408fa2 100644
--- a/arch/arc/mm/cache.c
+++ b/arch/arc/mm/cache.c
@@ -23,7 +23,7 @@
static int l2_line_sz;
static int ioc_exists;
-int slc_enable = 1, ioc_enable = 0;
+int slc_enable = 1, ioc_enable = 1;
unsigned long perip_base = ARC_UNCACHED_ADDR_SPACE; /* legacy value for boot */
unsigned long perip_end = 0xFFFFFFFF; /* legacy value */
@@ -271,7 +271,11 @@ void __cache_line_loop_v2(phys_addr_t paddr, unsigned long vaddr,
/*
* For ARC700 MMUv3 I-cache and D-cache flushes
- * Also reused for HS38 aliasing I-cache configuration
+ * - ARC700 programming model requires paddr and vaddr be passed in separate
+ * AUX registers (*_IV*L and *_PTAG respectively) irrespective of whether the
+ * caches actually alias or not.
+ * - For HS38, only the aliasing I-cache configuration uses the PTAG reg
+ * (non aliasing I-cache version doesn't; while D-cache can't possibly alias)
*/
static inline
void __cache_line_loop_v3(phys_addr_t paddr, unsigned long vaddr,
@@ -458,6 +462,21 @@ static inline void __dc_entire_op(const int op)
__after_dc_op(op);
}
+static inline void __dc_disable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ __dc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | DC_CTRL_DIS);
+}
+
+static void __dc_enable(void)
+{
+ const int r = ARC_REG_DC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~DC_CTRL_DIS);
+}
+
/* For kernel mappings cache operation: index is same as paddr */
#define __dc_line_op_k(p, sz, op) __dc_line_op(p, p, sz, op)
@@ -483,6 +502,8 @@ static inline void __dc_line_op(phys_addr_t paddr, unsigned long vaddr,
#else
#define __dc_entire_op(op)
+#define __dc_disable()
+#define __dc_enable()
#define __dc_line_op(paddr, vaddr, sz, op)
#define __dc_line_op_k(paddr, sz, op)
@@ -597,6 +618,40 @@ noinline void slc_op(phys_addr_t paddr, unsigned long sz, const int op)
#endif
}
+static noinline void slc_entire_op(const int op)
+{
+ unsigned int ctrl, r = ARC_REG_SLC_CTRL;
+
+ ctrl = read_aux_reg(r);
+
+ if (!(op & OP_FLUSH)) /* i.e. OP_INV */
+ ctrl &= ~SLC_CTRL_IM; /* clear IM: Disable flush before Inv */
+ else
+ ctrl |= SLC_CTRL_IM;
+
+ write_aux_reg(r, ctrl);
+
+ write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
+
+ /* Important to wait for flush to complete */
+ while (read_aux_reg(r) & SLC_CTRL_BUSY);
+}
+
+static inline void arc_slc_disable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ slc_entire_op(OP_FLUSH_N_INV);
+ write_aux_reg(r, read_aux_reg(r) | SLC_CTRL_DIS);
+}
+
+static inline void arc_slc_enable(void)
+{
+ const int r = ARC_REG_SLC_CTRL;
+
+ write_aux_reg(r, read_aux_reg(r) & ~SLC_CTRL_DIS);
+}
+
/***********************************************************
* Exported APIs
*/
@@ -923,21 +978,54 @@ SYSCALL_DEFINE3(cacheflush, uint32_t, start, uint32_t, sz, uint32_t, flags)
return 0;
}
-void arc_cache_init(void)
+/*
+ * IO-Coherency (IOC) setup rules:
+ *
+ * 1. Needs to be at system level, so only once by Master core
+ * Non-Masters need not be accessing caches at that time
+ * - They are either HALT_ON_RESET and kick started much later or
+ * - if run on reset, need to ensure that arc_platform_smp_wait_to_boot()
+ * doesn't perturb caches or coherency unit
+ *
+ * 2. caches (L1 and SLC) need to be purged (flush+inv) before setting up IOC,
+ * otherwise any straggler data might behave strangely post IOC enabling
+ *
+ * 3. All Caches need to be disabled when setting up IOC to elide any in-flight
+ * Coherency transactions
+ */
+noinline void __init arc_ioc_setup(void)
{
- unsigned int __maybe_unused cpu = smp_processor_id();
- char str[256];
+ unsigned int ap_sz;
- printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+ /* Flush + invalidate + disable L1 dcache */
+ __dc_disable();
+
+ /* Flush + invalidate SLC */
+ if (read_aux_reg(ARC_REG_SLC_BCR))
+ slc_entire_op(OP_FLUSH_N_INV);
+
+	/* IOC Aperture start: TBD: handle non-default CONFIG_LINUX_LINK_BASE */
+ write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
/*
- * Only master CPU needs to execute rest of function:
- * - Assume SMP so all cores will have same cache config so
- * any geomtry checks will be same for all
- * - IOC setup / dma callbacks only need to be setup once
+ * IOC Aperture size:
+ * decoded as 2 ^ (SIZE + 2) KB: so setting 0x11 implies 512M
+ * TBD: fix for PGU + 1GB of low mem
+ * TBD: fix for PAE
*/
- if (cpu)
- return;
+ ap_sz = order_base_2(arc_get_mem_sz()/1024) - 2;
+ write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, ap_sz);
+
+ write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
+ write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
+
+ /* Re-enable L1 dcache */
+ __dc_enable();
+}
+
+void __init arc_cache_init_master(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
if (IS_ENABLED(CONFIG_ARC_HAS_ICACHE)) {
struct cpuinfo_arc_cache *ic = &cpuinfo_arc700[cpu].icache;
@@ -985,30 +1073,14 @@ void arc_cache_init(void)
}
}
- if (is_isa_arcv2() && l2_line_sz && !slc_enable) {
-
- /* IM set : flush before invalidate */
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_IM);
+ /* Note that SLC disable not formally supported till HS 3.0 */
+ if (is_isa_arcv2() && l2_line_sz && !slc_enable)
+ arc_slc_disable();
- write_aux_reg(ARC_REG_SLC_INVALIDATE, 1);
-
- /* Important to wait for flush to complete */
- while (read_aux_reg(ARC_REG_SLC_CTRL) & SLC_CTRL_BUSY);
- write_aux_reg(ARC_REG_SLC_CTRL,
- read_aux_reg(ARC_REG_SLC_CTRL) | SLC_CTRL_DISABLE);
- }
+ if (is_isa_arcv2() && ioc_enable)
+ arc_ioc_setup();
if (is_isa_arcv2() && ioc_enable) {
- /* IO coherency base - 0x8z */
- write_aux_reg(ARC_REG_IO_COH_AP0_BASE, 0x80000);
- /* IO coherency aperture size - 512Mb: 0x8z-0xAz */
- write_aux_reg(ARC_REG_IO_COH_AP0_SIZE, 0x11);
- /* Enable partial writes */
- write_aux_reg(ARC_REG_IO_COH_PARTIAL, 1);
- /* Enable IO coherency */
- write_aux_reg(ARC_REG_IO_COH_ENABLE, 1);
-
__dma_cache_wback_inv = __dma_cache_wback_inv_ioc;
__dma_cache_inv = __dma_cache_inv_ioc;
__dma_cache_wback = __dma_cache_wback_ioc;
@@ -1022,3 +1094,20 @@ void arc_cache_init(void)
__dma_cache_wback = __dma_cache_wback_l1;
}
}
+
+void __ref arc_cache_init(void)
+{
+ unsigned int __maybe_unused cpu = smp_processor_id();
+ char str[256];
+
+ printk(arc_cache_mumbojumbo(0, str, sizeof(str)));
+
+ /*
+ * Only master CPU needs to execute rest of function:
+ * - Assume SMP so all cores will have same cache config so
+ *   any geometry checks will be same for all
+ * - IOC setup / dma callbacks only need to be setup once
+ */
+ if (!cpu)
+ arc_cache_init_master();
+}
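
A quick worked check of the aperture encoding in arc_ioc_setup() above,
assuming 512 MB of low memory (this matches the 0x11 the old hardcoded
setup used):

	/*
	 * arc_get_mem_sz()     = 512 MB = 524288 KB
	 * order_base_2(524288) = 19
	 * ap_sz                = 19 - 2 = 17 = 0x11
	 * decoded size         = 2 ^ (0x11 + 2) KB = 2^19 KB = 512 MB
	 */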
diff --git a/arch/arc/mm/init.c b/arch/arc/mm/init.c
index 399e2f2..8c9415e 100644
--- a/arch/arc/mm/init.c
+++ b/arch/arc/mm/init.c
@@ -40,6 +40,11 @@ struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
#endif
+long __init arc_get_mem_sz(void)
+{
+ return low_mem_sz;
+}
+
/* User can over-ride above with "mem=nnn[KkMm]" in cmdline */
static int __init setup_mem_sz(char *str)
{
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 7327250..f10fe85 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -846,6 +846,7 @@ dtb-$(CONFIG_MACH_SUN8I) += \
sun8i-a83t-allwinner-h8homlet-v2.dtb \
sun8i-a83t-cubietruck-plus.dtb \
sun8i-h3-bananapi-m2-plus.dtb \
+ sun8i-h3-nanopi-m1.dtb \
sun8i-h3-nanopi-neo.dtb \
sun8i-h3-orangepi-2.dtb \
sun8i-h3-orangepi-lite.dtb \
diff --git a/arch/arm/boot/dts/am335x-icev2.dts b/arch/arm/boot/dts/am335x-icev2.dts
index 1463df3..8ed46f9 100644
--- a/arch/arm/boot/dts/am335x-icev2.dts
+++ b/arch/arm/boot/dts/am335x-icev2.dts
@@ -170,7 +170,6 @@
AM33XX_IOPAD(0x8fc, PIN_INPUT_PULLUP | MUX_MODE0) /* (G16) mmc0_dat0.mmc0_dat0 */
AM33XX_IOPAD(0x900, PIN_INPUT_PULLUP | MUX_MODE0) /* (G17) mmc0_clk.mmc0_clk */
AM33XX_IOPAD(0x904, PIN_INPUT_PULLUP | MUX_MODE0) /* (G18) mmc0_cmd.mmc0_cmd */
- AM33XX_IOPAD(0x960, PIN_INPUT_PULLUP | MUX_MODE5) /* (C15) spi0_cs1.mmc0_sdcd */
>;
};
diff --git a/arch/arm/boot/dts/bcm-nsp.dtsi b/arch/arm/boot/dts/bcm-nsp.dtsi
index b6142bd..15f07f9 100644
--- a/arch/arm/boot/dts/bcm-nsp.dtsi
+++ b/arch/arm/boot/dts/bcm-nsp.dtsi
@@ -160,7 +160,7 @@
axi {
compatible = "simple-bus";
- ranges = <0x00000000 0x18000000 0x0011c40a>;
+ ranges = <0x00000000 0x18000000 0x0011c40c>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/da850-evm.dts b/arch/arm/boot/dts/da850-evm.dts
index 41de15f..78492a0 100644
--- a/arch/arm/boot/dts/da850-evm.dts
+++ b/arch/arm/boot/dts/da850-evm.dts
@@ -99,6 +99,7 @@
#size-cells = <1>;
compatible = "m25p64";
spi-max-frequency = <30000000>;
+ m25p,fast-read;
reg = <0>;
partition@0 {
label = "U-Boot-SPL";
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 3a8579c..3e1f750 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1378,6 +1378,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
rtc: rtc@48838000 {
diff --git a/arch/arm/boot/dts/dra72-evm-revc.dts b/arch/arm/boot/dts/dra72-evm-revc.dts
index c3d939c..3f808a4 100644
--- a/arch/arm/boot/dts/dra72-evm-revc.dts
+++ b/arch/arm/boot/dts/dra72-evm-revc.dts
@@ -75,6 +75,6 @@
ti,rx-internal-delay = <DP83867_RGMIIDCTL_2_25_NS>;
ti,tx-internal-delay = <DP83867_RGMIIDCTL_250_PS>;
ti,fifo-depth = <DP83867_PHYCR_FIFO_DEPTH_8_B_NIB>;
- ti,min-output-imepdance;
+ ti,min-output-impedance;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
index 34887a1..47ba972 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_max.dtsi
@@ -319,8 +319,6 @@
compatible = "fsl,imx6q-nitrogen6_max-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_max-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -402,6 +400,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
index d80f21a..31d4cc6 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6_som2.dtsi
@@ -250,8 +250,6 @@
compatible = "fsl,imx6q-nitrogen6_som2-sgtl5000",
"fsl,imx-audio-sgtl5000";
model = "imx6q-nitrogen6_som2-sgtl5000";
- pinctrl-names = "default";
- pinctrl-0 = <&pinctrl_sgtl5000>;
ssi-controller = <&ssi1>;
audio-codec = <&codec>;
audio-routing =
@@ -320,6 +318,8 @@
codec: sgtl5000@0a {
compatible = "fsl,sgtl5000";
+ pinctrl-names = "default";
+ pinctrl-0 = <&pinctrl_sgtl5000>;
reg = <0x0a>;
clocks = <&clks IMX6QDL_CLK_CKO>;
VDDA-supply = <&reg_2p5v>;
diff --git a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
index da85984..38faa90 100644
--- a/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
+++ b/arch/arm/boot/dts/logicpd-som-lv-37xx-devkit.dts
@@ -158,7 +158,7 @@
&mmc1 {
interrupts-extended = <&intc 83 &omap3_pmx_core 0x11a>;
pinctrl-names = "default";
- pinctrl-0 = <&mmc1_pins &mmc1_cd>;
+ pinctrl-0 = <&mmc1_pins>;
wp-gpios = <&gpio4 30 GPIO_ACTIVE_HIGH>; /* gpio_126 */
cd-gpios = <&gpio4 14 IRQ_TYPE_LEVEL_LOW>; /* gpio_110 */
vmmc-supply = <&vmmc1>;
@@ -193,7 +193,8 @@
OMAP3_CORE1_IOPAD(0x214a, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat1.sdmmc1_dat1 */
OMAP3_CORE1_IOPAD(0x214c, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat2.sdmmc1_dat2 */
OMAP3_CORE1_IOPAD(0x214e, PIN_INPUT | MUX_MODE0) /* sdmmc1_dat3.sdmmc1_dat3 */
- OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 sdmmc1_wp*/
+ OMAP3_CORE1_IOPAD(0x2132, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_strobe.gpio_126 */
+ OMAP3_CORE1_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
>;
};
@@ -242,12 +243,6 @@
OMAP3_WKUP_IOPAD(0x2a16, PIN_OUTPUT | PIN_OFF_OUTPUT_LOW | MUX_MODE4) /* sys_boot6.gpio_8 */
>;
};
-
- mmc1_cd: pinmux_mmc1_cd {
- pinctrl-single,pins = <
- OMAP3_WKUP_IOPAD(0x212c, PIN_INPUT_PULLUP | MUX_MODE4) /* cam_d11.gpio_110 */
- >;
- };
};
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 7cd92ba..0844737 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -988,6 +988,7 @@
phy-names = "sata-phy";
clocks = <&sata_ref_clk>;
ti,hwmods = "sata";
+ ports-implemented = <0x1>;
};
dss: dss@58000000 {
diff --git a/arch/arm/boot/dts/qcom-mdm9615.dtsi b/arch/arm/boot/dts/qcom-mdm9615.dtsi
index 5ae4ec5..c852b69 100644
--- a/arch/arm/boot/dts/qcom-mdm9615.dtsi
+++ b/arch/arm/boot/dts/qcom-mdm9615.dtsi
@@ -357,7 +357,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
index 735914f..7cae328 100644
--- a/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
+++ b/arch/arm/boot/dts/sun6i-a31-hummingbird.dts
@@ -140,6 +140,10 @@
cpu-supply = <&reg_dcdc3>;
};
+&de {
+ status = "okay";
+};
+
&ehci0 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 2b26175..e78faaf 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -234,6 +234,7 @@
de: display-engine {
compatible = "allwinner,sun6i-a31-display-engine";
allwinner,pipelines = <&fe0>;
+ status = "disabled";
};
soc@01c00000 {
diff --git a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
index 5ea4915..10d3074 100644
--- a/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
+++ b/arch/arm/boot/dts/sun7i-a20-olinuxino-lime2-emmc.dts
@@ -56,7 +56,7 @@
};
&pio {
- mmc2_pins_nrst: mmc2@0 {
+ mmc2_pins_nrst: mmc2-rst-pin {
allwinner,pins = "PC16";
allwinner,function = "gpio_out";
allwinner,drive = <SUN4I_PINCTRL_10_MA>;
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index b01a438..b416abc 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -253,7 +253,8 @@ CONFIG_R8169=y
CONFIG_SH_ETH=y
CONFIG_SMSC911X=y
CONFIG_STMMAC_ETH=y
-CONFIG_SYNOPSYS_DWC_ETH_QOS=y
+CONFIG_STMMAC_PLATFORM=y
+CONFIG_DWMAC_DWC_QOS_ETH=y
CONFIG_TI_CPSW=y
CONFIG_XILINX_EMACLITE=y
CONFIG_AT803X_PHY=y
@@ -471,7 +472,7 @@ CONFIG_MESON_WATCHDOG=y
CONFIG_DW_WATCHDOG=y
CONFIG_DIGICOLOR_WATCHDOG=y
CONFIG_BCM2835_WDT=y
-CONFIG_BCM47XX_WATCHDOG=y
+CONFIG_BCM47XX_WDT=y
CONFIG_BCM7038_WDT=m
CONFIG_BCM_KONA_WDT=y
CONFIG_MFD_ACT8945A=y
@@ -893,7 +894,7 @@ CONFIG_BCM2835_MBOX=y
CONFIG_RASPBERRYPI_FIRMWARE=y
CONFIG_EFI_VARS=m
CONFIG_EFI_CAPSULE_LOADER=m
-CONFIG_CONFIG_BCM47XX_NVRAM=y
+CONFIG_BCM47XX_NVRAM=y
CONFIG_BCM47XX_SPROM=y
CONFIG_EXT4_FS=y
CONFIG_AUTOFS4_FS=y
diff --git a/arch/arm/configs/s3c2410_defconfig b/arch/arm/configs/s3c2410_defconfig
index 4364040..1e6c48d 100644
--- a/arch/arm/configs/s3c2410_defconfig
+++ b/arch/arm/configs/s3c2410_defconfig
@@ -86,9 +86,9 @@ CONFIG_IPV6_TUNNEL=m
CONFIG_NETFILTER=y
CONFIG_NF_CONNTRACK=m
CONFIG_NF_CONNTRACK_EVENTS=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_SCTP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
+CONFIG_NF_CT_PROTO_DCCP=y
+CONFIG_NF_CT_PROTO_SCTP=y
+CONFIG_NF_CT_PROTO_UDPLITE=y
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
diff --git a/arch/arm/include/asm/cputype.h b/arch/arm/include/asm/cputype.h
index 522b5fe..b62eaeb 100644
--- a/arch/arm/include/asm/cputype.h
+++ b/arch/arm/include/asm/cputype.h
@@ -94,6 +94,9 @@
#define ARM_CPU_XSCALE_ARCH_V2 0x4000
#define ARM_CPU_XSCALE_ARCH_V3 0x6000
+/* Qualcomm implemented cores */
+#define ARM_CPU_PART_SCORPION 0x510002d0
+
extern unsigned int processor_id;
#ifdef CONFIG_CPU_CP15
diff --git a/arch/arm/include/asm/ftrace.h b/arch/arm/include/asm/ftrace.h
index bfe2a2f..22b7311 100644
--- a/arch/arm/include/asm/ftrace.h
+++ b/arch/arm/include/asm/ftrace.h
@@ -54,6 +54,24 @@ static inline void *return_address(unsigned int level)
#define ftrace_return_address(n) return_address(n)
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ if (!strcmp(sym, "sys_mmap2"))
+ sym = "sys_mmap_pgoff";
+ else if (!strcmp(sym, "sys_statfs64_wrapper"))
+ sym = "sys_statfs64";
+ else if (!strcmp(sym, "sys_fstatfs64_wrapper"))
+ sym = "sys_fstatfs64";
+ else if (!strcmp(sym, "sys_arm_fadvise64_64"))
+ sym = "sys_fadvise64_64";
+
+ /* Ignore case since sym may start with "SyS" instead of "sys" */
+ return !strcasecmp(sym, name);
+}
+
#endif /* ifndef __ASSEMBLY__ */
#endif /* _ASM_ARM_FTRACE */
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index a2e75b8..6dae195 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -80,6 +80,11 @@ static inline bool is_kernel_in_hyp_mode(void)
return false;
}
+static inline bool has_vhe(void)
+{
+ return false;
+}
+
/* The section containing the hypervisor idmap text */
extern char __hyp_idmap_text_start[];
extern char __hyp_idmap_text_end[];
diff --git a/arch/arm/include/asm/types.h b/arch/arm/include/uapi/asm/types.h
index a53cdb8..9435a42 100644
--- a/arch/arm/include/asm/types.h
+++ b/arch/arm/include/uapi/asm/types.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_TYPES_H
-#define _ASM_TYPES_H
+#ifndef _UAPI_ASM_TYPES_H
+#define _UAPI_ASM_TYPES_H
#include <asm-generic/int-ll64.h>
@@ -37,4 +37,4 @@
#define __UINTPTR_TYPE__ unsigned long
#endif
-#endif /* _ASM_TYPES_H */
+#endif /* _UAPI_ASM_TYPES_H */
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index 188180b..be3b3fb 100644
--- a/arch/arm/kernel/hw_breakpoint.c
+++ b/arch/arm/kernel/hw_breakpoint.c
@@ -1063,6 +1063,22 @@ static int __init arch_hw_breakpoint_init(void)
return 0;
}
+ /*
+ * Scorpion CPUs (at least those in APQ8060) seem to set DBGPRSR.SPD
+ * whenever a WFI is issued, even if the core is not powered down, in
+ * violation of the architecture. When DBGPRSR.SPD is set, accesses to
+ * breakpoint and watchpoint registers are treated as undefined, so
+ * this results in boot time and runtime failures when these are
+ * accessed and we unexpectedly take a trap.
+ *
+ * It's not clear if/how this can be worked around, so we blacklist
+ * Scorpion CPUs to avoid these issues.
+ */
+ if (read_cpuid_part() == ARM_CPU_PART_SCORPION) {
+ pr_info("Scorpion CPU detected. Hardware breakpoints and watchpoints disabled\n");
+ return 0;
+ }
+
has_ossr = core_has_os_save_restore();
/* Determine how many BRPs/WRPs are available. */
diff --git a/arch/arm/kernel/smp_tlb.c b/arch/arm/kernel/smp_tlb.c
index 22313cb..9af0701 100644
--- a/arch/arm/kernel/smp_tlb.c
+++ b/arch/arm/kernel/smp_tlb.c
@@ -9,6 +9,7 @@
*/
#include <linux/preempt.h>
#include <linux/smp.h>
+#include <linux/uaccess.h>
#include <asm/smp_plat.h>
#include <asm/tlbflush.h>
@@ -40,8 +41,11 @@ static inline void ipi_flush_tlb_mm(void *arg)
static inline void ipi_flush_tlb_page(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_page(ta->ta_vma, ta->ta_start);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_page(void *arg)
@@ -54,8 +58,11 @@ static inline void ipi_flush_tlb_kernel_page(void *arg)
static inline void ipi_flush_tlb_range(void *arg)
{
struct tlb_args *ta = (struct tlb_args *)arg;
+ unsigned int __ua_flags = uaccess_save_and_enable();
local_flush_tlb_range(ta->ta_vma, ta->ta_start, ta->ta_end);
+
+ uaccess_restore(__ua_flags);
}
static inline void ipi_flush_tlb_kernel_range(void *arg)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 1167678..9d74464 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -1099,6 +1099,9 @@ static void cpu_init_hyp_mode(void *dummy)
__cpu_init_hyp_mode(pgd_ptr, hyp_stack_ptr, vector_ptr);
__cpu_init_stage2();
+ if (is_kernel_in_hyp_mode())
+ kvm_timer_init_vhe();
+
kvm_arm_init_debug();
}
diff --git a/arch/arm/mach-omap1/dma.c b/arch/arm/mach-omap1/dma.c
index f6ba589..c821c1d 100644
--- a/arch/arm/mach-omap1/dma.c
+++ b/arch/arm/mach-omap1/dma.c
@@ -32,7 +32,6 @@
#include "soc.h"
#define OMAP1_DMA_BASE (0xfffed800)
-#define OMAP1_LOGICAL_DMA_CH_COUNT 17
static u32 enable_1510_mode;
@@ -348,8 +347,6 @@ static int __init omap1_system_dma_init(void)
goto exit_iounmap;
}
- d->lch_count = OMAP1_LOGICAL_DMA_CH_COUNT;
-
/* Valid attributes for omap1 plus processors */
if (cpu_is_omap15xx())
d->dev_caps = ENABLE_1510_MODE;
@@ -366,13 +363,14 @@ static int __init omap1_system_dma_init(void)
d->dev_caps |= CLEAR_CSR_ON_READ;
d->dev_caps |= IS_WORD_16;
- if (cpu_is_omap15xx())
- d->chan_count = 9;
- else if (cpu_is_omap16xx() || cpu_is_omap7xx()) {
- if (!(d->dev_caps & ENABLE_1510_MODE))
- d->chan_count = 16;
+ /* available logical channels */
+ if (cpu_is_omap15xx()) {
+ d->lch_count = 9;
+ } else {
+ if (d->dev_caps & ENABLE_1510_MODE)
+ d->lch_count = 9;
else
- d->chan_count = 9;
+ d->lch_count = 16;
}
p = dma_plat_info;
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index 477910a..70c0047 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -161,7 +161,7 @@ static struct ti_st_plat_data wilink7_pdata = {
.nshutdown_gpio = 162,
.dev_name = "/dev/ttyO1",
.flow_cntrl = 1,
- .baud_rate = 300000,
+ .baud_rate = 3000000,
};
static struct platform_device wl128x_device = {
diff --git a/arch/arm/mach-ux500/pm.c b/arch/arm/mach-ux500/pm.c
index 8538910..a970e7f 100644
--- a/arch/arm/mach-ux500/pm.c
+++ b/arch/arm/mach-ux500/pm.c
@@ -134,8 +134,8 @@ bool prcmu_pending_irq(void)
*/
bool prcmu_is_cpu_in_wfi(int cpu)
{
- return readl(PRCM_ARM_WFI_STANDBY) & cpu ? PRCM_ARM_WFI_STANDBY_WFI1 :
- PRCM_ARM_WFI_STANDBY_WFI0;
+ return readl(PRCM_ARM_WFI_STANDBY) &
+ (cpu ? PRCM_ARM_WFI_STANDBY_WFI1 : PRCM_ARM_WFI_STANDBY_WFI0);
}
/*
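
A note on the ux500 fix above: in C, '&' binds tighter than '?:', so the old expression parsed as (readl(...) & cpu) ? WFI1 : WFI0, testing the wrong value and then returning a non-zero flag constant either way. A minimal standalone sketch of the pitfall (the constants are illustrative, not the real PRCM bit values):

        #include <stdio.h>

        #define WFI0 0x08
        #define WFI1 0x10

        static int in_wfi_buggy(unsigned int reg, int cpu)
        {
                return reg & cpu ? WFI1 : WFI0;   /* parses as (reg & cpu) ? ... */
        }

        static int in_wfi_fixed(unsigned int reg, int cpu)
        {
                return reg & (cpu ? WFI1 : WFI0); /* test the per-CPU bit */
        }

        int main(void)
        {
                unsigned int reg = 0; /* neither CPU in WFI */
                printf("buggy cpu0: %d\n", in_wfi_buggy(reg, 0)); /* 8: wrongly truthy */
                printf("fixed cpu0: %d\n", in_wfi_fixed(reg, 0)); /* 0: correct */
                return 0;
        }
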
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
index 238fbea..5d28e1c 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb-odroidc2.dts
@@ -137,6 +137,10 @@
};
};
+&scpi_clocks {
+ status = "disabled";
+};
+
&uart_AO {
status = "okay";
pinctrl-0 = <&uart_ao_a_pins>;
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
index 596240c..b353073 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
+++ b/arch/arm64/boot/dts/amlogic/meson-gxbb.dtsi
@@ -55,7 +55,7 @@
mboxes = <&mailbox 1 &mailbox 2>;
shmem = <&cpu_scp_lpri &cpu_scp_hpri>;
- clocks {
+ scpi_clocks: clocks {
compatible = "arm,scpi-clocks";
scpi_dvfs: scpi_clocks@0 {
diff --git a/arch/arm64/boot/dts/exynos/exynos5433.dtsi b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
index 64226d5..135890c 100644
--- a/arch/arm64/boot/dts/exynos/exynos5433.dtsi
+++ b/arch/arm64/boot/dts/exynos/exynos5433.dtsi
@@ -1367,7 +1367,7 @@
};
amba {
- compatible = "arm,amba-bus";
+ compatible = "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
index 3580896..ef1b9e5 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-ep108.dts
@@ -27,7 +27,7 @@
stdout-path = "serial0:115200n8";
};
- memory {
+ memory@0 {
device_type = "memory";
reg = <0x0 0x0 0x0 0x40000000>;
};
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
index 68a90833..54dc283 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
+++ b/arch/arm64/boot/dts/xilinx/zynqmp.dtsi
@@ -72,7 +72,7 @@
<1 10 0xf08>;
};
- amba_apu {
+ amba_apu: amba_apu@0 {
compatible = "simple-bus";
#address-cells = <2>;
#size-cells = <1>;
@@ -175,7 +175,7 @@
};
i2c0: i2c@ff020000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 17 4>;
@@ -185,7 +185,7 @@
};
i2c1: i2c@ff030000 {
- compatible = "cdns,i2c-r1p10";
+ compatible = "cdns,i2c-r1p14", "cdns,i2c-r1p10";
status = "disabled";
interrupt-parent = <&gic>;
interrupts = <0 18 4>;
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 446f6c4..3a43011 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -164,22 +164,25 @@ lr .req x30 // link register
/*
* Pseudo-ops for PC-relative adr/ldr/str <reg>, <symbol> where
- * <symbol> is within the range +/- 4 GB of the PC.
+ * <symbol> is within the range +/- 4 GB of the PC when running
+ * in core kernel context. In module context, a movz/movk sequence
+ * is used, since modules may be loaded far away from the kernel
+ * when KASLR is in effect.
*/
/*
* @dst: destination register (64 bit wide)
* @sym: name of the symbol
- * @tmp: optional scratch register to be used if <dst> == sp, which
- * is not allowed in an adrp instruction
*/
- .macro adr_l, dst, sym, tmp=
- .ifb \tmp
+ .macro adr_l, dst, sym
+#ifndef MODULE
adrp \dst, \sym
add \dst, \dst, :lo12:\sym
- .else
- adrp \tmp, \sym
- add \dst, \tmp, :lo12:\sym
- .endif
+#else
+ movz \dst, #:abs_g3:\sym
+ movk \dst, #:abs_g2_nc:\sym
+ movk \dst, #:abs_g1_nc:\sym
+ movk \dst, #:abs_g0_nc:\sym
+#endif
.endm
/*
@@ -190,6 +193,7 @@ lr .req x30 // link register
* the address
*/
.macro ldr_l, dst, sym, tmp=
+#ifndef MODULE
.ifb \tmp
adrp \dst, \sym
ldr \dst, [\dst, :lo12:\sym]
@@ -197,6 +201,15 @@ lr .req x30 // link register
adrp \tmp, \sym
ldr \dst, [\tmp, :lo12:\sym]
.endif
+#else
+ .ifb \tmp
+ adr_l \dst, \sym
+ ldr \dst, [\dst]
+ .else
+ adr_l \tmp, \sym
+ ldr \dst, [\tmp]
+ .endif
+#endif
.endm
/*
@@ -206,8 +219,13 @@ lr .req x30 // link register
* while <src> needs to be preserved.
*/
.macro str_l, src, sym, tmp
+#ifndef MODULE
adrp \tmp, \sym
str \src, [\tmp, :lo12:\sym]
+#else
+ adr_l \tmp, \sym
+ str \src, [\tmp]
+#endif
.endm
/*
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index bfe6328..90c39a6 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -222,7 +222,7 @@ static inline void *phys_to_virt(phys_addr_t x)
#define _virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#else
#define __virt_to_pgoff(kaddr) (((u64)(kaddr) & ~PAGE_OFFSET) / PAGE_SIZE * sizeof(struct page))
-#define __page_to_voff(page) (((u64)(page) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
+#define __page_to_voff(kaddr) (((u64)(kaddr) & ~VMEMMAP_START) * PAGE_SIZE / sizeof(struct page))
#define page_to_virt(page) ((void *)((__page_to_voff(page)) | PAGE_OFFSET))
#define virt_to_page(vaddr) ((struct page *)((__virt_to_pgoff(vaddr)) | VMEMMAP_START))
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index fea1073..439f6b5 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -47,6 +47,7 @@
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/sysreg.h>
+#include <asm/cpufeature.h>
/*
* __boot_cpu_mode records what mode CPUs were booted in.
@@ -80,6 +81,14 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
+static inline bool has_vhe(void)
+{
+ if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_ARM64_VHE
extern void verify_cpu_run_el(void);
#else
diff --git a/arch/arm64/include/uapi/asm/ptrace.h b/arch/arm64/include/uapi/asm/ptrace.h
index b5c3933..d1ff83d 100644
--- a/arch/arm64/include/uapi/asm/ptrace.h
+++ b/arch/arm64/include/uapi/asm/ptrace.h
@@ -77,6 +77,7 @@ struct user_fpsimd_state {
__uint128_t vregs[32];
__u32 fpsr;
__u32 fpcr;
+ __u32 __reserved[2];
};
struct user_hwdebug_state {
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index 923841f..43512d4 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -683,7 +683,7 @@ el0_inv:
mov x0, sp
mov x1, #BAD_SYNC
mov x2, x25
- bl bad_mode
+ bl bad_el0_sync
b ret_to_user
ENDPROC(el0_sync)
diff --git a/arch/arm64/kernel/ptrace.c b/arch/arm64/kernel/ptrace.c
index fc35e06..a22161c 100644
--- a/arch/arm64/kernel/ptrace.c
+++ b/arch/arm64/kernel/ptrace.c
@@ -551,6 +551,8 @@ static int hw_break_set(struct task_struct *target,
/* (address, ctrl) registers */
limit = regset->n * regset->size;
while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
offset, offset + PTRACE_HBP_ADDR_SZ);
if (ret)
@@ -560,6 +562,8 @@ static int hw_break_set(struct task_struct *target,
return ret;
offset += PTRACE_HBP_ADDR_SZ;
+ if (!count)
+ break;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
offset, offset + PTRACE_HBP_CTRL_SZ);
if (ret)
@@ -596,7 +600,7 @@ static int gpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_pt_regs newregs;
+ struct user_pt_regs newregs = task_pt_regs(target)->user_regs;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
if (ret)
@@ -626,7 +630,8 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct user_fpsimd_state newstate;
+ struct user_fpsimd_state newstate =
+ target->thread.fpsimd_state.user_fpsimd;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newstate, 0, -1);
if (ret)
@@ -650,7 +655,7 @@ static int tls_set(struct task_struct *target, const struct user_regset *regset,
const void *kbuf, const void __user *ubuf)
{
int ret;
- unsigned long tls;
+ unsigned long tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
@@ -676,7 +681,8 @@ static int system_call_set(struct task_struct *target,
unsigned int pos, unsigned int count,
const void *kbuf, const void __user *ubuf)
{
- int syscallno, ret;
+ int syscallno = task_pt_regs(target)->syscallno;
+ int ret;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &syscallno, 0, -1);
if (ret)
@@ -948,7 +954,7 @@ static int compat_tls_set(struct task_struct *target,
const void __user *ubuf)
{
int ret;
- compat_ulong_t tls;
+ compat_ulong_t tls = target->thread.tp_value;
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &tls, 0, -1);
if (ret)
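
A recurring pattern in the ptrace fixes above (gpr_set, fpr_set, tls_set, system_call_set, and the s390/tile counterparts further down) is seeding the local buffer with the task's current state before user_regset_copyin(), so that a short write from userspace only overwrites the prefix it supplies instead of committing uninitialized stack bytes for the tail. A minimal model of the idea, with illustrative types and helper names:

        #include <string.h>

        struct regs { unsigned long r[4]; };

        /* Commit only the bytes the caller supplied; preserve the rest. */
        static void regset_set(struct regs *task, const void *ubuf, size_t count)
        {
                struct regs newregs = *task;   /* seed with current state */
                if (count > sizeof(newregs))
                        count = sizeof(newregs);
                memcpy(&newregs, ubuf, count); /* partial update from user */
                *task = newregs;               /* tail registers stay intact */
        }

        int main(void)
        {
                struct regs t = { { 1, 2, 3, 4 } };
                unsigned long update = 9;
                regset_set(&t, &update, sizeof(update));
                /* t.r is now { 9, 2, 3, 4 }: unwritten registers preserved */
                return 0;
        }
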
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index 5b830be..659b2e6 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -604,17 +604,34 @@ const char *esr_get_class_string(u32 esr)
}
/*
- * bad_mode handles the impossible case in the exception vector.
+ * bad_mode handles the impossible case in the exception vector. This is always
+ * fatal.
*/
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
- siginfo_t info;
- void __user *pc = (void __user *)instruction_pointer(regs);
console_verbose();
pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
handler[reason], smp_processor_id(), esr,
esr_get_class_string(esr));
+
+ die("Oops - bad mode", regs, 0);
+ local_irq_disable();
+ panic("bad mode");
+}
+
+/*
+ * bad_el0_sync handles unexpected, but potentially recoverable synchronous
+ * exceptions taken from EL0. Unlike bad_mode, this returns.
+ */
+asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
+{
+ siginfo_t info;
+ void __user *pc = (void __user *)instruction_pointer(regs);
+ console_verbose();
+
+ pr_crit("Bad EL0 synchronous exception detected on CPU%d, code 0x%08x -- %s\n",
+ smp_processor_id(), esr, esr_get_class_string(esr));
__show_regs(regs);
info.si_signo = SIGILL;
@@ -622,7 +639,10 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
info.si_code = ILL_ILLOPC;
info.si_addr = pc;
- arm64_notify_die("Oops - bad mode", regs, &info, 0);
+ current->thread.fault_address = 0;
+ current->thread.fault_code = 0;
+
+ force_sig_info(info.si_signo, &info, current);
}
void __pte_error(const char *file, int line, unsigned long val)
diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c
index 964b754..e25584d 100644
--- a/arch/arm64/mm/hugetlbpage.c
+++ b/arch/arm64/mm/hugetlbpage.c
@@ -239,7 +239,7 @@ int huge_ptep_set_access_flags(struct vm_area_struct *vma,
ncontig = find_num_contig(vma->vm_mm, addr, cpte,
*cpte, &pgsize);
for (i = 0; i < ncontig; ++i, ++cpte, addr += pgsize) {
- changed = ptep_set_access_flags(vma, addr, cpte,
+ changed |= ptep_set_access_flags(vma, addr, cpte,
pfn_pte(pfn,
hugeprot),
dirty);
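
The one-character hugetlbpage.c change above is the classic accumulate-versus-overwrite fix: with plain '=', only the last iteration's result survives, so a pte that did change access flags earlier in the contiguous range could be reported as unchanged. Sketched generically (the helper is illustrative):

        static int update_one(int i) { return i == 0; } /* only entry 0 changes */

        static int update_all(int n)
        {
                int changed = 0, i;

                for (i = 0; i < n; i++)
                        changed |= update_one(i); /* '=' would return 0 for n > 1 */
                return changed;
        }
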
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 716d122..380ebe7 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -404,6 +404,8 @@ void __init mem_init(void)
if (swiotlb_force == SWIOTLB_FORCE ||
max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
+ else
+ swiotlb_force = SWIOTLB_NO_FORCE;
set_max_mapnr(pfn_to_page(max_pfn) - mem_map);
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index 1c2a5e2..e93c949 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -139,7 +139,7 @@ static inline void atomic64_dec(atomic64_t *v)
#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
#define atomic64_inc_and_test(v) (atomic64_inc_return((v)) == 0)
-
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define atomic_cmpxchg(v, old, new) (cmpxchg(&(v)->counter, old, new))
#define atomic_xchg(v, new) (xchg(&(v)->counter, new))
@@ -161,6 +161,39 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
return c;
}
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
+{
+ long long c, old;
+
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == u))
+ break;
+ old = atomic64_cmpxchg(v, c, c + i);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != u;
+}
+
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long c, old, dec;
+
+ c = atomic64_read(v);
+ for (;;) {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ old = atomic64_cmpxchg((v), c, dec);
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return dec;
+}
+
#define ATOMIC_OP(op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
{ \
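
The frv additions above follow the standard compare-and-swap retry loop: read the value, compute the update, and retry if another CPU raced in between. The same shape in portable C11 atomics, as a sketch rather than the kernel's API:

        #include <stdatomic.h>
        #include <stdbool.h>

        static bool add_unless(atomic_long *v, long a, long u)
        {
                long c = atomic_load(v);

                while (c != u) {
                        /* On failure, c is reloaded with the current value. */
                        if (atomic_compare_exchange_weak(v, &c, c + a))
                                return true;
                }
                return false; /* value was u; nothing added */
        }
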
diff --git a/arch/m68k/emu/nfeth.c b/arch/m68k/emu/nfeth.c
index fc4be02..e45ce42 100644
--- a/arch/m68k/emu/nfeth.c
+++ b/arch/m68k/emu/nfeth.c
@@ -124,7 +124,6 @@ static inline void recv_packet(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pktlen;
diff --git a/arch/mn10300/include/asm/switch_to.h b/arch/mn10300/include/asm/switch_to.h
index 393d311..67e333a 100644
--- a/arch/mn10300/include/asm/switch_to.h
+++ b/arch/mn10300/include/asm/switch_to.h
@@ -16,7 +16,7 @@
struct task_struct;
struct thread_struct;
-#if !defined(CONFIG_LAZY_SAVE_FPU)
+#if defined(CONFIG_FPU) && !defined(CONFIG_LAZY_SAVE_FPU)
struct fpu_state_struct;
extern asmlinkage void fpu_save(struct fpu_state_struct *);
#define switch_fpu(prev, next) \
diff --git a/arch/powerpc/include/asm/book3s/64/hash-4k.h b/arch/powerpc/include/asm/book3s/64/hash-4k.h
index 1c64bc6..0c4e470 100644
--- a/arch/powerpc/include/asm/book3s/64/hash-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/hash-4k.h
@@ -36,12 +36,13 @@
#ifdef CONFIG_HUGETLB_PAGE
static inline int hash__hugepd_ok(hugepd_t hpd)
{
+ unsigned long hpdval = hpd_val(hpd);
/*
* if it is not a pte and have hugepd shift mask
* set, then it is a hugepd directory pointer
*/
- if (!(hpd.pd & _PAGE_PTE) &&
- ((hpd.pd & HUGEPD_SHIFT_MASK) != 0))
+ if (!(hpdval & _PAGE_PTE) &&
+ ((hpdval & HUGEPD_SHIFT_MASK) != 0))
return true;
return false;
}
diff --git a/arch/powerpc/include/asm/book3s/64/hash.h b/arch/powerpc/include/asm/book3s/64/hash.h
index f61cad3..4c935f75 100644
--- a/arch/powerpc/include/asm/book3s/64/hash.h
+++ b/arch/powerpc/include/asm/book3s/64/hash.h
@@ -201,6 +201,10 @@ extern int __meminit hash__vmemmap_create_mapping(unsigned long start,
unsigned long phys);
extern void hash__vmemmap_remove_mapping(unsigned long start,
unsigned long page_size);
+
+int hash__create_section_mapping(unsigned long start, unsigned long end);
+int hash__remove_section_mapping(unsigned long start, unsigned long end);
+
#endif /* !__ASSEMBLY__ */
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_BOOK3S_64_HASH_H */
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index ede2151..7f4025a 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -21,12 +21,12 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
* We have only four bits to encode, MMU page size
*/
BUILD_BUG_ON((MMU_PAGE_COUNT - 1) > 0xf);
- return __va(hpd.pd & HUGEPD_ADDR_MASK);
+ return __va(hpd_val(hpd) & HUGEPD_ADDR_MASK);
}
static inline unsigned int hugepd_mmu_psize(hugepd_t hpd)
{
- return (hpd.pd & HUGEPD_SHIFT_MASK) >> 2;
+ return (hpd_val(hpd) & HUGEPD_SHIFT_MASK) >> 2;
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
@@ -52,18 +52,20 @@ static inline pte_t *hugepd_page(hugepd_t hpd)
{
BUG_ON(!hugepd_ok(hpd));
#ifdef CONFIG_PPC_8xx
- return (pte_t *)__va(hpd.pd & ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
+ return (pte_t *)__va(hpd_val(hpd) &
+ ~(_PMD_PAGE_MASK | _PMD_PRESENT_MASK));
#else
- return (pte_t *)((hpd.pd & ~HUGEPD_SHIFT_MASK) | PD_HUGE);
+ return (pte_t *)((hpd_val(hpd) &
+ ~HUGEPD_SHIFT_MASK) | PD_HUGE);
#endif
}
static inline unsigned int hugepd_shift(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & _PMD_PAGE_MASK) >> 1) + 17;
+ return ((hpd_val(hpd) & _PMD_PAGE_MASK) >> 1) + 17;
#else
- return hpd.pd & HUGEPD_SHIFT_MASK;
+ return hpd_val(hpd) & HUGEPD_SHIFT_MASK;
#endif
}
diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h
index 1728497..0cd8a38 100644
--- a/arch/powerpc/include/asm/nohash/pgtable.h
+++ b/arch/powerpc/include/asm/nohash/pgtable.h
@@ -227,9 +227,10 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
static inline int hugepd_ok(hugepd_t hpd)
{
#ifdef CONFIG_PPC_8xx
- return ((hpd.pd & 0x4) != 0);
+ return ((hpd_val(hpd) & 0x4) != 0);
#else
- return (hpd.pd > 0);
+ /* We clear the top bit to indicate hugepd */
+ return ((hpd_val(hpd) & PD_HUGE) == 0);
#endif
}
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 56398e7..47120bf 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -294,15 +294,12 @@ extern long long virt_phys_offset;
#include <asm/pgtable-types.h>
#endif
-typedef struct { signed long pd; } hugepd_t;
#ifndef CONFIG_HUGETLB_PAGE
#define is_hugepd(pdep) (0)
#define pgd_huge(pgd) (0)
#endif /* CONFIG_HUGETLB_PAGE */
-#define __hugepd(x) ((hugepd_t) { (x) })
-
struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index e157489..ae0a230 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -65,6 +65,7 @@ struct power_pmu {
#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
+#define PPMU_NO_SIAR 0x00000100 /* Do not use SIAR */
/*
* Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/pgtable-be-types.h b/arch/powerpc/include/asm/pgtable-be-types.h
index 49c0a5a..9c0f5db 100644
--- a/arch/powerpc/include/asm/pgtable-be-types.h
+++ b/arch/powerpc/include/asm/pgtable-be-types.h
@@ -104,4 +104,12 @@ static inline bool pmd_xchg(pmd_t *pmdp, pmd_t old, pmd_t new)
return pmd_raw(old) == prev;
}
+typedef struct { __be64 pdbe; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { cpu_to_be64(x) })
+
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return be64_to_cpu(x.pdbe);
+}
+
#endif /* _ASM_POWERPC_PGTABLE_BE_TYPES_H */
diff --git a/arch/powerpc/include/asm/pgtable-types.h b/arch/powerpc/include/asm/pgtable-types.h
index e7f4f3e..8bd3b13 100644
--- a/arch/powerpc/include/asm/pgtable-types.h
+++ b/arch/powerpc/include/asm/pgtable-types.h
@@ -66,4 +66,11 @@ static inline bool pte_xchg(pte_t *ptep, pte_t old, pte_t new)
}
#endif
+typedef struct { unsigned long pd; } hugepd_t;
+#define __hugepd(x) ((hugepd_t) { (x) })
+static inline unsigned long hpd_val(hugepd_t x)
+{
+ return x.pd;
+}
+
#endif /* _ASM_POWERPC_PGTABLE_TYPES_H */
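
The two hpd_val() definitions above (big-endian in pgtable-be-types.h, native in pgtable-types.h) replace direct hpd.pd accesses throughout the hugepd code, so callers become byte-order agnostic. The pattern in isolation, with illustrative names:

        #include <stdint.h>

        typedef struct { uint64_t pdbe; } hugepd_be;    /* stored big-endian */
        typedef struct { unsigned long pd; } hugepd_na; /* stored native */

        static inline unsigned long hpd_val_be(hugepd_be x)
        {
                return __builtin_bswap64(x.pdbe); /* on a little-endian host */
        }

        static inline unsigned long hpd_val_native(hugepd_na x)
        {
                return x.pd;
        }
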
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index c56ea8c..c4ced1d 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -157,7 +157,7 @@
#define PPC_INST_MCRXR 0x7c000400
#define PPC_INST_MCRXR_MASK 0xfc0007fe
#define PPC_INST_MFSPR_PVR 0x7c1f42a6
-#define PPC_INST_MFSPR_PVR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_PVR_MASK 0xfc1ffffe
#define PPC_INST_MFTMR 0x7c0002dc
#define PPC_INST_MSGSND 0x7c00019c
#define PPC_INST_MSGCLR 0x7c0001dc
@@ -174,13 +174,13 @@
#define PPC_INST_RFDI 0x4c00004e
#define PPC_INST_RFMCI 0x4c00004c
#define PPC_INST_MFSPR_DSCR 0x7c1102a6
-#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR 0x7c1103a6
-#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_MASK 0xfc1ffffe
#define PPC_INST_MFSPR_DSCR_USER 0x7c0302a6
-#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MFSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MTSPR_DSCR_USER 0x7c0303a6
-#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1fffff
+#define PPC_INST_MTSPR_DSCR_USER_MASK 0xfc1ffffe
#define PPC_INST_MFVSRD 0x7c000066
#define PPC_INST_MTVSRD 0x7c000166
#define PPC_INST_SLBFEE 0x7c0007a7
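
The mask changes above clear the least-significant bit, which is a reserved field in these mfspr/mtspr encodings, so instructions that happen to set it still match during emulation. In miniature:

        #include <stdio.h>

        int main(void)
        {
                unsigned int insn     = 0x7c1f42a7; /* mfspr PVR, reserved bit 0 set */
                unsigned int pattern  = 0x7c1f42a6; /* PPC_INST_MFSPR_PVR */
                unsigned int old_mask = 0xfc1fffff;
                unsigned int new_mask = 0xfc1ffffe;

                printf("%d\n", (insn & old_mask) == pattern); /* 0: not recognized */
                printf("%d\n", (insn & new_mask) == pattern); /* 1: recognized */
                return 0;
        }
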
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 8180bfd..9de7f79 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -298,9 +298,17 @@ void eeh_slot_error_detail(struct eeh_pe *pe, int severity)
*
* For pHyp, we have to enable IO for log retrieval. Otherwise,
* 0xFF's is always returned from PCI config space.
+ *
+ * When the @severity is EEH_LOG_PERM, the PE is going to be
+ * removed. Prior to that, the drivers for devices included in
+ * the PE will be closed. The drivers rely on a working IO path
+ * to bring the devices to a quiet state. Otherwise, PCI traffic
+ * from those devices after they are removed is likely to cause
+ * another unexpected EEH error.
*/
if (!(pe->type & EEH_PE_PHB)) {
- if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG))
+ if (eeh_has_flag(EEH_ENABLE_IO_FOR_LOG) ||
+ severity == EEH_LOG_PERM)
eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);
/*
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
index e4744ff..925a4ef 100644
--- a/arch/powerpc/kernel/ptrace.c
+++ b/arch/powerpc/kernel/ptrace.c
@@ -463,6 +463,10 @@ static int fpr_set(struct task_struct *target, const struct user_regset *regset,
flush_fp_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.TS_FPR(i);
+ buf[32] = target->thread.fp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -672,6 +676,9 @@ static int vsr_set(struct task_struct *target, const struct user_regset *regset,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
@@ -1019,6 +1026,10 @@ static int tm_cfpr_set(struct task_struct *target,
flush_fp_to_thread(target);
flush_altivec_to_thread(target);
+ for (i = 0; i < 32; i++)
+ buf[i] = target->thread.TS_CKFPR(i);
+ buf[32] = target->thread.ckfp_state.fpscr;
+
/* copy to local buffer then write that out */
i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
if (i)
@@ -1283,6 +1294,9 @@ static int tm_cvsx_set(struct task_struct *target,
flush_altivec_to_thread(target);
flush_vsx_to_thread(target);
+ for (i = 0; i < 32 ; i++)
+ buf[i] = target->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
+
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
buf, 0, 32 * sizeof(double));
if (!ret)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 8033493..67e19a0 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -747,7 +747,7 @@ static unsigned long __init htab_get_table_size(void)
}
#ifdef CONFIG_MEMORY_HOTPLUG
-int create_section_mapping(unsigned long start, unsigned long end)
+int hash__create_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_bolt_mapping(start, end, __pa(start),
pgprot_val(PAGE_KERNEL), mmu_linear_psize,
@@ -761,7 +761,7 @@ int create_section_mapping(unsigned long start, unsigned long end)
return rc;
}
-int remove_section_mapping(unsigned long start, unsigned long end)
+int hash__remove_section_mapping(unsigned long start, unsigned long end)
{
int rc = htab_remove_mapping(start, end, mmu_linear_psize,
mmu_kernel_ssize);
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index d5026f3..37b5f91 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -125,11 +125,14 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
int hugepd_ok(hugepd_t hpd)
{
bool is_hugepd;
+ unsigned long hpdval;
+
+ hpdval = hpd_val(hpd);
/*
* We should not find this format in page directory, warn otherwise.
*/
- is_hugepd = (((hpd.pd & 0x3) == 0x0) && ((hpd.pd & HUGEPD_SHIFT_MASK) != 0));
+ is_hugepd = (((hpdval & 0x3) == 0x0) && ((hpdval & HUGEPD_SHIFT_MASK) != 0));
WARN(is_hugepd, "Found wrong page directory format\n");
return 0;
}
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 289df38..8c3389c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,7 +53,7 @@ static u64 gpage_freearray[MAX_NUMBER_GPAGES];
static unsigned nr_gpages;
#endif
-#define hugepd_none(hpd) ((hpd).pd == 0)
+#define hugepd_none(hpd) (hpd_val(hpd) == 0)
pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
@@ -103,24 +103,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
for (i = 0; i < num_hugepd; i++, hpdp++) {
if (unlikely(!hugepd_none(*hpdp)))
break;
- else
+ else {
#ifdef CONFIG_PPC_BOOK3S_64
- hpdp->pd = __pa(new) |
- (shift_to_mmu_psize(pshift) << 2);
+ *hpdp = __hugepd(__pa(new) |
+ (shift_to_mmu_psize(pshift) << 2));
#elif defined(CONFIG_PPC_8xx)
- hpdp->pd = __pa(new) |
- (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
- _PMD_PAGE_512K) |
- _PMD_PRESENT;
+ *hpdp = __hugepd(__pa(new) |
+ (pshift == PAGE_SHIFT_8M ? _PMD_PAGE_8M :
+ _PMD_PAGE_512K) | _PMD_PRESENT);
#else
/* We use the old format for PPC_FSL_BOOK3E */
- hpdp->pd = ((unsigned long)new & ~PD_HUGE) | pshift;
+ *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift);
#endif
+ }
}
/* If we bailed from the for loop early, an error occurred, clean up */
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
kmem_cache_free(cachep, new);
}
spin_unlock(&mm->page_table_lock);
@@ -454,7 +454,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
return;
for (i = 0; i < num_hugepd; i++, hpdp++)
- hpdp->pd = 0;
+ *hpdp = __hugepd(0);
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
@@ -810,12 +810,8 @@ static int __init hugetlbpage_init(void)
* if we have pdshift and shift value same, we don't
* use pgt cache for hugepd.
*/
- if (pdshift > shift) {
+ if (pdshift > shift)
pgtable_cache_add(pdshift - shift, NULL);
- if (!PGT_CACHE(pdshift - shift))
- panic("hugetlbpage_init(): could not create "
- "pgtable cache for %d bit pagesize\n", shift);
- }
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_8xx)
else if (!hugepte_cache) {
/*
@@ -852,9 +848,6 @@ static int __init hugetlbpage_init(void)
else if (mmu_psize_defs[MMU_PAGE_2M].shift)
HPAGE_SHIFT = mmu_psize_defs[MMU_PAGE_2M].shift;
#endif
- else
- panic("%s: Unable to set default huge page size\n", __func__);
-
return 0;
}
diff --git a/arch/powerpc/mm/init-common.c b/arch/powerpc/mm/init-common.c
index a175cd8..f2108c4 100644
--- a/arch/powerpc/mm/init-common.c
+++ b/arch/powerpc/mm/init-common.c
@@ -78,8 +78,12 @@ void pgtable_cache_add(unsigned shift, void (*ctor)(void *))
align = max_t(unsigned long, align, minalign);
name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift);
new = kmem_cache_create(name, table_size, align, 0, ctor);
+ if (!new)
+ panic("Could not allocate pgtable cache for order %d", shift);
+
kfree(name);
pgtable_cache[shift - 1] = new;
+
pr_debug("Allocated pgtable cache for order %d\n", shift);
}
@@ -88,7 +92,7 @@ void pgtable_cache_init(void)
{
pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor);
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
+ if (PMD_CACHE_INDEX && !PGT_CACHE(PMD_CACHE_INDEX))
pgtable_cache_add(PMD_CACHE_INDEX, pmd_ctor);
/*
* In all current configs, when the PUD index exists it's the
@@ -97,11 +101,4 @@ void pgtable_cache_init(void)
*/
if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
pgtable_cache_add(PUD_INDEX_SIZE, pud_ctor);
-
- if (!PGT_CACHE(PGD_INDEX_SIZE))
- panic("Couldn't allocate pgd cache");
- if (PMD_INDEX_SIZE && !PGT_CACHE(PMD_INDEX_SIZE))
- panic("Couldn't allocate pmd pgtable caches");
- if (PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE))
- panic("Couldn't allocate pud pgtable caches");
}
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index ebf9782..653ff6c 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -126,3 +126,21 @@ void mmu_cleanup_all(void)
else if (mmu_hash_ops.hpte_clear_all)
mmu_hash_ops.hpte_clear_all();
}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int create_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__create_section_mapping(start, end);
+}
+
+int remove_section_mapping(unsigned long start, unsigned long end)
+{
+ if (radix_enabled())
+ return -ENODEV;
+
+ return hash__remove_section_mapping(start, end);
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index fd3e403..270eb9b 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -295,6 +295,8 @@ static inline void perf_read_regs(struct pt_regs *regs)
*/
if (TRAP(regs) != 0xf00)
use_siar = 0;
+ else if ((ppmu->flags & PPMU_NO_SIAR))
+ use_siar = 0;
else if (marked)
use_siar = 1;
else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
diff --git a/arch/powerpc/perf/power9-events-list.h b/arch/powerpc/perf/power9-events-list.h
index 6447dc1..929b56d 100644
--- a/arch/powerpc/perf/power9-events-list.h
+++ b/arch/powerpc/perf/power9-events-list.h
@@ -16,7 +16,7 @@ EVENT(PM_CYC, 0x0001e)
EVENT(PM_ICT_NOSLOT_CYC, 0x100f8)
EVENT(PM_CMPLU_STALL, 0x1e054)
EVENT(PM_INST_CMPL, 0x00002)
-EVENT(PM_BRU_CMPL, 0x40060)
+EVENT(PM_BRU_CMPL, 0x10012)
EVENT(PM_BR_MPRED_CMPL, 0x400f6)
/* All L1 D cache load references counted at finish, gated by reject */
diff --git a/arch/powerpc/perf/power9-pmu.c b/arch/powerpc/perf/power9-pmu.c
index 346010e..7332634 100644
--- a/arch/powerpc/perf/power9-pmu.c
+++ b/arch/powerpc/perf/power9-pmu.c
@@ -384,7 +384,7 @@ static struct power_pmu power9_isa207_pmu = {
.bhrb_filter_map = power9_bhrb_filter_map,
.get_constraint = isa207_get_constraint,
.disable_pmc = isa207_disable_pmc,
- .flags = PPMU_HAS_SIER | PPMU_ARCH_207S,
+ .flags = PPMU_NO_SIAR | PPMU_ARCH_207S,
.n_generic = ARRAY_SIZE(power9_generic_events),
.generic_events = power9_generic_events,
.cache_events = &power9_cache_events,
diff --git a/arch/powerpc/sysdev/xics/icp-opal.c b/arch/powerpc/sysdev/xics/icp-opal.c
index d38e86f..60c5765 100644
--- a/arch/powerpc/sysdev/xics/icp-opal.c
+++ b/arch/powerpc/sysdev/xics/icp-opal.c
@@ -20,6 +20,7 @@
#include <asm/xics.h>
#include <asm/io.h>
#include <asm/opal.h>
+#include <asm/kvm_ppc.h>
static void icp_opal_teardown_cpu(void)
{
@@ -39,7 +40,26 @@ static void icp_opal_flush_ipi(void)
* Should we be flagging idle loop instead?
* Or creating some task to be scheduled?
*/
- opal_int_eoi((0x00 << 24) | XICS_IPI);
+ if (opal_int_eoi((0x00 << 24) | XICS_IPI) > 0)
+ force_external_irq_replay();
+}
+
+static unsigned int icp_opal_get_xirr(void)
+{
+ unsigned int kvm_xirr;
+ __be32 hw_xirr;
+ int64_t rc;
+
+ /* Handle an interrupt latched by KVM first */
+ kvm_xirr = kvmppc_get_xics_latch();
+ if (kvm_xirr)
+ return kvm_xirr;
+
+ /* Then ask OPAL */
+ rc = opal_int_get_xirr(&hw_xirr, false);
+ if (rc < 0)
+ return 0;
+ return be32_to_cpu(hw_xirr);
}
static unsigned int icp_opal_get_irq(void)
@@ -47,12 +67,8 @@ static unsigned int icp_opal_get_irq(void)
unsigned int xirr;
unsigned int vec;
unsigned int irq;
- int64_t rc;
- rc = opal_int_get_xirr(&xirr, false);
- if (rc < 0)
- return 0;
- xirr = be32_to_cpu(xirr);
+ xirr = icp_opal_get_xirr();
vec = xirr & 0x00ffffff;
if (vec == XICS_IRQ_SPURIOUS)
return 0;
@@ -67,7 +83,8 @@ static unsigned int icp_opal_get_irq(void)
xics_mask_unknown_vec(vec);
/* We might learn about it later, so EOI it */
- opal_int_eoi(xirr);
+ if (opal_int_eoi(xirr) > 0)
+ force_external_irq_replay();
return 0;
}
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index e659daf..e0097536 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -69,7 +69,7 @@ CONFIG_CMA=y
CONFIG_CMA_DEBUG=y
CONFIG_CMA_DEBUGFS=y
CONFIG_MEM_SOFT_DIRTY=y
-CONFIG_ZPOOL=m
+CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
CONFIG_ZSMALLOC_STAT=y
@@ -141,8 +141,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -159,13 +157,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -219,7 +216,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -258,7 +254,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -436,7 +431,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -480,6 +474,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -592,14 +587,12 @@ CONFIG_LOCK_STAT=y
CONFIG_DEBUG_LOCKDEP=y
CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
-CONFIG_DEBUG_LIST=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
CONFIG_FAILSLAB=y
@@ -618,6 +611,7 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
@@ -630,6 +624,7 @@ CONFIG_TEST_STRING_HELPERS=y
CONFIG_TEST_KSTRTOX=y
CONFIG_DMA_API_DEBUG=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
@@ -640,16 +635,18 @@ CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
CONFIG_IMA=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_RSA=m
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
-# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
CONFIG_CRYPTO_CRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -673,11 +670,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
-CONFIG_CRYPTO_LZO=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 95ceac5..f05d2d6 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -12,6 +12,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_NUMA_BALANCING=y
+# CONFIG_NUMA_BALANCING_DEFAULT_ENABLED is not set
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
@@ -54,8 +55,9 @@ CONFIG_SOLARIS_X86_PARTITION=y
CONFIG_UNIXWARE_DISKLABEL=y
CONFIG_CFQ_GROUP_IOSCHED=y
CONFIG_DEFAULT_DEADLINE=y
+CONFIG_LIVEPATCH=y
CONFIG_TUNE_ZEC12=y
-CONFIG_NR_CPUS=256
+CONFIG_NR_CPUS=512
CONFIG_NUMA=y
CONFIG_HZ_100=y
CONFIG_MEMORY_HOTPLUG=y
@@ -65,6 +67,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +139,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +155,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +214,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +252,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +428,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -460,6 +457,7 @@ CONFIG_HW_RANDOM_VIRTIO=m
CONFIG_RAW_DRIVER=m
CONFIG_HANGCHECK_TIMER=m
CONFIG_TN3270_FS=y
+# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
CONFIG_WATCHDOG_NOWAYOUT=y
CONFIG_SOFT_WATCHDOG=m
@@ -473,6 +471,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -495,6 +494,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -551,25 +551,27 @@ CONFIG_FRAME_WARN=1024
CONFIG_UNUSED_SYMBOLS=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_PANIC_ON_OOPS=y
CONFIG_TIMER_STATS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
-CONFIG_NOTIFIER_ERROR_INJECTION=m
-CONFIG_CPU_NOTIFIER_ERROR_INJECT=m
-CONFIG_PM_NOTIFIER_ERROR_INJECT=m
CONFIG_LATENCYTOP=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-# CONFIG_KPROBE_EVENT is not set
+CONFIG_UPROBE_EVENT=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
-CONFIG_RBTREE_TEST=m
-CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -577,18 +579,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -598,6 +607,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -612,10 +622,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -624,9 +637,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index bc7b176..2cf8734 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -65,6 +65,7 @@ CONFIG_TRANSPARENT_HUGEPAGE=y
CONFIG_CLEANCACHE=y
CONFIG_FRONTSWAP=y
CONFIG_CMA=y
+CONFIG_MEM_SOFT_DIRTY=y
CONFIG_ZSWAP=y
CONFIG_ZBUD=m
CONFIG_ZSMALLOC=m
@@ -136,8 +137,6 @@ CONFIG_NF_CONNTRACK_SECMARK=y
CONFIG_NF_CONNTRACK_EVENTS=y
CONFIG_NF_CONNTRACK_TIMEOUT=y
CONFIG_NF_CONNTRACK_TIMESTAMP=y
-CONFIG_NF_CT_PROTO_DCCP=m
-CONFIG_NF_CT_PROTO_UDPLITE=m
CONFIG_NF_CONNTRACK_AMANDA=m
CONFIG_NF_CONNTRACK_FTP=m
CONFIG_NF_CONNTRACK_H323=m
@@ -154,13 +153,12 @@ CONFIG_NF_TABLES=m
CONFIG_NFT_EXTHDR=m
CONFIG_NFT_META=m
CONFIG_NFT_CT=m
-CONFIG_NFT_RBTREE=m
-CONFIG_NFT_HASH=m
CONFIG_NFT_COUNTER=m
CONFIG_NFT_LOG=m
CONFIG_NFT_LIMIT=m
CONFIG_NFT_NAT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_HASH=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_AUDIT=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
@@ -214,7 +212,6 @@ CONFIG_NETFILTER_XT_MATCH_QUOTA=m
CONFIG_NETFILTER_XT_MATCH_RATEEST=m
CONFIG_NETFILTER_XT_MATCH_REALM=m
CONFIG_NETFILTER_XT_MATCH_RECENT=m
-CONFIG_NETFILTER_XT_MATCH_SOCKET=m
CONFIG_NETFILTER_XT_MATCH_STATE=m
CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
CONFIG_NETFILTER_XT_MATCH_STRING=m
@@ -253,7 +250,6 @@ CONFIG_IP_VS_NQ=m
CONFIG_IP_VS_FTP=m
CONFIG_IP_VS_PE_SIP=m
CONFIG_NF_CONNTRACK_IPV4=m
-# CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
CONFIG_NF_TABLES_IPV4=m
CONFIG_NFT_CHAIN_ROUTE_IPV4=m
CONFIG_NF_TABLES_ARP=m
@@ -430,7 +426,6 @@ CONFIG_EQUALIZER=m
CONFIG_IFB=m
CONFIG_MACVLAN=m
CONFIG_MACVTAP=m
-CONFIG_IPVLAN=m
CONFIG_VXLAN=m
CONFIG_TUN=m
CONFIG_VETH=m
@@ -474,6 +469,7 @@ CONFIG_VIRTIO_BALLOON=m
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
CONFIG_JBD2_DEBUG=y
CONFIG_JFS_FS=m
CONFIG_JFS_POSIX_ACL=y
@@ -496,6 +492,7 @@ CONFIG_AUTOFS4_FS=m
CONFIG_FUSE_FS=y
CONFIG_CUSE=m
CONFIG_OVERLAY_FS=m
+CONFIG_OVERLAY_FS_REDIRECT_DIR=y
CONFIG_FSCACHE=m
CONFIG_CACHEFILES=m
CONFIG_ISO9660_FS=y
@@ -563,12 +560,16 @@ CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
CONFIG_UPROBE_EVENT=y
CONFIG_FUNCTION_PROFILER=y
+CONFIG_HIST_TRIGGERS=y
CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_S390_PTDUMP=y
+CONFIG_PERSISTENT_KEYRINGS=y
+CONFIG_BIG_KEYS=y
CONFIG_ENCRYPTED_KEYS=m
CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
@@ -576,18 +577,25 @@ CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SELINUX_BOOTPARAM=y
CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE=0
CONFIG_SECURITY_SELINUX_DISABLE=y
+CONFIG_INTEGRITY_SIGNATURE=y
+CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_IMA=y
+CONFIG_IMA_WRITE_POLICY=y
CONFIG_IMA_APPRAISE=y
+CONFIG_CRYPTO_DH=m
+CONFIG_CRYPTO_ECDH=m
CONFIG_CRYPTO_USER=m
# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set
+CONFIG_CRYPTO_PCRYPT=m
CONFIG_CRYPTO_CRYPTD=m
+CONFIG_CRYPTO_MCRYPTD=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
-CONFIG_CRYPTO_CTS=m
+CONFIG_CRYPTO_CHACHA20POLY1305=m
CONFIG_CRYPTO_LRW=m
CONFIG_CRYPTO_PCBC=m
-CONFIG_CRYPTO_XTS=m
+CONFIG_CRYPTO_KEYWRAP=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
@@ -597,6 +605,7 @@ CONFIG_CRYPTO_RMD160=m
CONFIG_CRYPTO_RMD256=m
CONFIG_CRYPTO_RMD320=m
CONFIG_CRYPTO_SHA512=m
+CONFIG_CRYPTO_SHA3=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
@@ -611,10 +620,13 @@ CONFIG_CRYPTO_SEED=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
+CONFIG_CRYPTO_842=m
CONFIG_CRYPTO_LZ4=m
CONFIG_CRYPTO_LZ4HC=m
CONFIG_CRYPTO_USER_API_HASH=m
CONFIG_CRYPTO_USER_API_SKCIPHER=m
+CONFIG_CRYPTO_USER_API_RNG=m
+CONFIG_CRYPTO_USER_API_AEAD=m
CONFIG_ZCRYPT=m
CONFIG_CRYPTO_SHA1_S390=m
CONFIG_CRYPTO_SHA256_S390=m
@@ -623,9 +635,6 @@ CONFIG_CRYPTO_DES_S390=m
CONFIG_CRYPTO_AES_S390=m
CONFIG_CRYPTO_GHASH_S390=m
CONFIG_CRYPTO_CRC32_S390=y
-CONFIG_ASYMMETRIC_KEY_TYPE=y
-CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
-CONFIG_X509_CERTIFICATE_PARSER=m
CONFIG_CRC7=m
CONFIG_CRC8=m
CONFIG_CORDIC=m
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index 2d40ef0..d00e368 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -38,7 +38,6 @@ CONFIG_JUMP_LABEL=y
CONFIG_STATIC_KEYS_SELFTEST=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
CONFIG_PARTITION_ADVANCED=y
CONFIG_IBM_PARTITION=y
@@ -130,8 +129,11 @@ CONFIG_DUMMY=m
CONFIG_EQUALIZER=m
CONFIG_TUN=m
CONFIG_VIRTIO_NET=y
+# CONFIG_NET_VENDOR_ALACRITECH is not set
+# CONFIG_NET_VENDOR_SOLARFLARE is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
+CONFIG_DEVKMEM=y
CONFIG_RAW_DRIVER=m
CONFIG_VIRTIO_BALLOON=y
CONFIG_EXT4_FS=y
@@ -183,7 +185,6 @@ CONFIG_TRACE_ENUM_MAP_FILE=y
CONFIG_KPROBES_SANITY_TEST=y
CONFIG_S390_PTDUMP=y
CONFIG_CRYPTO_CRYPTD=m
-CONFIG_CRYPTO_AUTHENC=m
CONFIG_CRYPTO_TEST=m
CONFIG_CRYPTO_CCM=m
CONFIG_CRYPTO_GCM=m
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index d7697ab..8e136b88 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -15,7 +15,9 @@
BUILD_BUG_ON(sizeof(addrtype) != (high - low + 1) * sizeof(long));\
asm volatile( \
" lctlg %1,%2,%0\n" \
- : : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high));\
+ : \
+ : "Q" (*(addrtype *)(&array)), "i" (low), "i" (high) \
+ : "memory"); \
}
#define __ctl_store(array, low, high) { \
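
The s390 change above adds a "memory" clobber to the lctlg asm, which in GNU C makes the statement a compiler-level memory barrier: stores into the control-register array can no longer be sunk past the asm, and cached values are reloaded after it. The general effect, in a generic sketch:

        static int flag;

        void demo(void)
        {
                flag = 1;
                /* "memory" acts as a compiler barrier: the store above must be
                 * emitted before the asm, and flag is reloaded afterwards. */
                asm volatile("" : : : "memory");
                if (flag)
                        flag = 2;
        }
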
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 7447ba5..12020b5 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -963,6 +963,11 @@ static int s390_fpregs_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ if (MACHINE_HAS_VX)
+ convert_vx_to_fp(fprs, target->thread.fpu.vxrs);
+ else
+ memcpy(&fprs, target->thread.fpu.fprs, sizeof(fprs));
+
/* If setting FPC, must validate it first. */
if (count > 0 && pos < offsetof(s390_fp_regs, fprs)) {
u32 ufpc[2] = { target->thread.fpu.fpc, 0 };
@@ -1067,6 +1072,9 @@ static int s390_vxrs_low_set(struct task_struct *target,
if (target == current)
save_fpu_regs();
+ for (i = 0; i < __NUM_VXRS_LOW; i++)
+ vxrs[i] = *((__u64 *)(target->thread.fpu.vxrs + i) + 1);
+
rc = user_regset_copyin(&pos, &count, &kbuf, &ubuf, vxrs, 0, -1);
if (rc == 0)
for (i = 0; i < __NUM_VXRS_LOW; i++)
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index bec71e9..6484a25 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -916,7 +916,7 @@ static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
S390_ARCH_FAC_LIST_SIZE_BYTE);
memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
ret = -EFAULT;
kfree(mach);
@@ -1437,7 +1437,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
/* Populate the facility mask initially. */
memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
- S390_ARCH_FAC_LIST_SIZE_BYTE);
+ sizeof(S390_lowcore.stfle_fac_list));
for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
if (i < kvm_s390_fac_list_mask_size())
kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 7a1897c..d56ef26 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -202,7 +202,7 @@ static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
return pgste;
}
-static inline void ptep_xchg_commit(struct mm_struct *mm,
+static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
unsigned long addr, pte_t *ptep,
pgste_t pgste, pte_t old, pte_t new)
{
@@ -220,6 +220,7 @@ static inline void ptep_xchg_commit(struct mm_struct *mm,
} else {
*ptep = new;
}
+ return old;
}
pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
@@ -231,7 +232,7 @@ pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_direct(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
@@ -246,7 +247,7 @@ pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
preempt_disable();
pgste = ptep_xchg_start(mm, addr, ptep);
old = ptep_flush_lazy(mm, addr, ptep);
- ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
+ old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
preempt_enable();
return old;
}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index cf4034c..68ac5c7 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -44,6 +44,7 @@ config SPARC
select CPU_NO_EFFICIENT_FFS
select HAVE_ARCH_HARDENED_USERCOPY
select PROVE_LOCKING_SMALL if PROVE_LOCKING
+ select ARCH_WANT_RELAX_ORDER
config SPARC32
def_bool !64BIT
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index d89b701..e279572 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -111,7 +111,7 @@ static int tile_gpr_set(struct task_struct *target,
const void *kbuf, const void __user *ubuf)
{
int ret;
- struct pt_regs regs;
+ struct pt_regs regs = *task_pt_regs(target);
ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &regs, 0,
sizeof(regs));
diff --git a/arch/x86/boot/string.c b/arch/x86/boot/string.c
index cc3bd58..9e240fc 100644
--- a/arch/x86/boot/string.c
+++ b/arch/x86/boot/string.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include "ctype.h"
+#include "string.h"
int memcmp(const void *s1, const void *s2, size_t len)
{
diff --git a/arch/x86/boot/string.h b/arch/x86/boot/string.h
index 725e820..113588d 100644
--- a/arch/x86/boot/string.h
+++ b/arch/x86/boot/string.h
@@ -18,4 +18,13 @@ int memcmp(const void *s1, const void *s2, size_t len);
#define memset(d,c,l) __builtin_memset(d,c,l)
#define memcmp __builtin_memcmp
+extern int strcmp(const char *str1, const char *str2);
+extern int strncmp(const char *cs, const char *ct, size_t count);
+extern size_t strlen(const char *s);
+extern char *strstr(const char *s1, const char *s2);
+extern size_t strnlen(const char *s, size_t maxlen);
+extern unsigned int atou(const char *s);
+extern unsigned long long simple_strtoull(const char *cp, char **endp,
+ unsigned int base);
+
#endif /* BOOT_STRING_H */
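The decompression stub is freestanding, so arch/x86/boot carries its own copies of these helpers; declaring them here gives callers real prototypes instead of implicit declarations, and since string.c now includes this header (previous hunk), the definitions are checked against the prototypes too. A hypothetical caller in the stub might look like this (sketch; names and values assumed):

    /* Parse a hex value from a command-line fragment (illustrative): */
    const char *val = "0x3f8";
    char *end;
    unsigned long long port = simple_strtoull(val, &end, 16);

    if (end == val)
            port = 0;       /* no digits parsed */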
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index 31c34ee..6ef688a 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1020,7 +1020,8 @@ struct {
const char *basename;
struct simd_skcipher_alg *simd;
} aesni_simd_skciphers2[] = {
-#if IS_ENABLED(CONFIG_CRYPTO_PCBC)
+#if (defined(MODULE) && IS_ENABLED(CONFIG_CRYPTO_PCBC)) || \
+ IS_BUILTIN(CONFIG_CRYPTO_PCBC)
{
.algname = "pcbc(aes)",
.drvname = "pcbc-aes-aesni",
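The old IS_ENABLED() test is true for both =y and =m, so a built-in aesni could try to register "pcbc(aes)" even when the pcbc template exists only as a not-yet-loaded module; the new condition admits only the reachable combinations. For reference, the kconfig helpers behave like this (condensed from include/linux/kconfig.h, not a verbatim copy):

    /* CONFIG_FOO=y defines CONFIG_FOO to 1;
     * CONFIG_FOO=m defines CONFIG_FOO_MODULE to 1. */
    #define IS_BUILTIN(option)  __is_defined(option)            /* =y only */
    #define IS_MODULE(option)   __is_defined(option##_MODULE)   /* =m only */
    #define IS_ENABLED(option)  (IS_BUILTIN(option) || IS_MODULE(option))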
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 701d29f..57f7ec3 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -255,23 +255,6 @@ ENTRY(__switch_to_asm)
END(__switch_to_asm)
/*
- * The unwinder expects the last frame on the stack to always be at the same
- * offset from the end of the page, which allows it to validate the stack.
- * Calling schedule_tail() directly would break that convention because its an
- * Calling schedule_tail() directly would break that convention because it's an
- * wrapper creates a proper "end of stack" frame header before the call.
- */
-ENTRY(schedule_tail_wrapper)
- FRAME_BEGIN
-
- pushl %eax
- call schedule_tail
- popl %eax
-
- FRAME_END
- ret
-ENDPROC(schedule_tail_wrapper)
-/*
* A newly forked process directly context switches into this address.
*
* eax: prev task we switched from
@@ -279,15 +262,24 @@ ENDPROC(schedule_tail_wrapper)
* edi: kernel thread arg
*/
ENTRY(ret_from_fork)
- call schedule_tail_wrapper
+ FRAME_BEGIN /* help unwinder find end of stack */
+
+ /*
+ * schedule_tail() is asmlinkage so we have to put its 'prev' argument
+ * on the stack.
+ */
+ pushl %eax
+ call schedule_tail
+ popl %eax
testl %ebx, %ebx
jnz 1f /* kernel threads are uncommon */
2:
/* When we fork, we trace the syscall return in the child, too. */
- movl %esp, %eax
+ leal FRAME_OFFSET(%esp), %eax
call syscall_return_slowpath
+ FRAME_END
jmp restore_all
/* kernel thread */
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 5b21970..044d18e 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -36,6 +36,7 @@
#include <asm/smap.h>
#include <asm/pgtable_types.h>
#include <asm/export.h>
+#include <asm/frame.h>
#include <linux/err.h>
.code64
@@ -408,17 +409,19 @@ END(__switch_to_asm)
* r12: kernel thread arg
*/
ENTRY(ret_from_fork)
+ FRAME_BEGIN /* help unwinder find end of stack */
movq %rax, %rdi
- call schedule_tail /* rdi: 'prev' task parameter */
+ call schedule_tail /* rdi: 'prev' task parameter */
- testq %rbx, %rbx /* from kernel_thread? */
- jnz 1f /* kernel threads are uncommon */
+ testq %rbx, %rbx /* from kernel_thread? */
+ jnz 1f /* kernel threads are uncommon */
2:
- movq %rsp, %rdi
+ leaq FRAME_OFFSET(%rsp),%rdi /* pt_regs pointer */
call syscall_return_slowpath /* returns with IRQs disabled */
TRACE_IRQS_ON /* user mode is traced as IRQS on */
SWAPGS
+ FRAME_END
jmp restore_regs_and_iret
1:
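FRAME_BEGIN/FRAME_END come from the newly included asm/frame.h. With CONFIG_FRAME_POINTER=y they expand to roughly the sequence below, giving ret_from_fork the two-word frame header the frame-pointer unwinder expects, and FRAME_OFFSET compensates for the extra saved register when taking the address of the pt_regs area (approximate sketch, not the literal header):

    /* 64-bit, CONFIG_FRAME_POINTER=y (approximate expansion): */
    #define FRAME_BEGIN   pushq %rbp; movq %rsp, %rbp
    #define FRAME_END     popq  %rbp
    #define FRAME_OFFSET  8                 /* skip the saved %rbp */
    /* With frame pointers disabled, all three collapse to nothing/0. */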
diff --git a/arch/x86/events/amd/ibs.c b/arch/x86/events/amd/ibs.c
index 05612a2..496e603 100644
--- a/arch/x86/events/amd/ibs.c
+++ b/arch/x86/events/amd/ibs.c
@@ -1010,7 +1010,7 @@ static __init int amd_ibs_init(void)
* all online cpus.
*/
cpuhp_setup_state(CPUHP_AP_PERF_X86_AMD_IBS_STARTING,
- "perf/x86/amd/ibs:STARTING",
+ "perf/x86/amd/ibs:starting",
x86_pmu_amd_ibs_starting_cpu,
x86_pmu_amd_ibs_dying_cpu);
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 019c588..1635c0c 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -505,6 +505,10 @@ int x86_pmu_hw_config(struct perf_event *event)
if (event->attr.precise_ip > precise)
return -EOPNOTSUPP;
+
+		/* There's no sense in having PEBS for non-sampling events: */
+ if (!is_sampling_event(event))
+ return -EINVAL;
}
/*
* check that PEBS LBR correction does not conflict with
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 8613826..eb1484c 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3176,13 +3176,16 @@ static void intel_pmu_cpu_starting(int cpu)
if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
for_each_cpu(i, topology_sibling_cpumask(cpu)) {
+ struct cpu_hw_events *sibling;
struct intel_excl_cntrs *c;
- c = per_cpu(cpu_hw_events, i).excl_cntrs;
+ sibling = &per_cpu(cpu_hw_events, i);
+ c = sibling->excl_cntrs;
if (c && c->core_id == core_id) {
cpuc->kfree_on_online[1] = cpuc->excl_cntrs;
cpuc->excl_cntrs = c;
- cpuc->excl_thread_id = 1;
+ if (!sibling->excl_thread_id)
+ cpuc->excl_thread_id = 1;
break;
}
}
@@ -3987,7 +3990,7 @@ __init int intel_pmu_init(void)
x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
}
- x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+ x86_pmu.intel_ctrl = (1ULL << x86_pmu.num_counters) - 1;
if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index fec8a461..1076c9a 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -434,6 +434,7 @@ static struct pmu cstate_core_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static struct pmu cstate_pkg_pmu = {
@@ -447,6 +448,7 @@ static struct pmu cstate_pkg_pmu = {
.stop = cstate_pmu_event_stop,
.read = cstate_pmu_event_update,
.capabilities = PERF_PMU_CAP_NO_INTERRUPT,
+ .module = THIS_MODULE,
};
static const struct cstate_model nhm_cstates __initconst = {
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index be20239..9dfeeec 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1389,9 +1389,13 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
continue;
/* log dropped samples number */
- if (error[bit])
+ if (error[bit]) {
perf_log_lost_samples(event, error[bit]);
+ if (perf_event_account_interrupt(event))
+ x86_pmu_stop(event, 0);
+ }
+
if (counts[bit]) {
__intel_pmu_pebs_event(event, iregs, base,
top, bit, counts[bit]);
diff --git a/arch/x86/events/intel/rapl.c b/arch/x86/events/intel/rapl.c
index bd34124..17c3564 100644
--- a/arch/x86/events/intel/rapl.c
+++ b/arch/x86/events/intel/rapl.c
@@ -697,6 +697,7 @@ static int __init init_rapl_pmus(void)
rapl_pmus->pmu.start = rapl_pmu_event_start;
rapl_pmus->pmu.stop = rapl_pmu_event_stop;
rapl_pmus->pmu.read = rapl_pmu_event_read;
+ rapl_pmus->pmu.module = THIS_MODULE;
return 0;
}
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 97c246f..8c4ccdc 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -733,6 +733,7 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
.start = uncore_pmu_event_start,
.stop = uncore_pmu_event_stop,
.read = uncore_pmu_event_read,
+ .module = THIS_MODULE,
};
} else {
pmu->pmu = *pmu->type->pmu;
diff --git a/arch/x86/events/intel/uncore_snbep.c b/arch/x86/events/intel/uncore_snbep.c
index e6832be..dae2fed 100644
--- a/arch/x86/events/intel/uncore_snbep.c
+++ b/arch/x86/events/intel/uncore_snbep.c
@@ -2686,7 +2686,7 @@ static struct intel_uncore_type *hswep_msr_uncores[] = {
void hswep_uncore_cpu_init(void)
{
- int pkg = topology_phys_to_logical_pkg(0);
+ int pkg = boot_cpu_data.logical_proc_id;
if (hswep_uncore_cbox.num_boxes > boot_cpu_data.x86_max_cores)
hswep_uncore_cbox.num_boxes = boot_cpu_data.x86_max_cores;
diff --git a/arch/x86/include/asm/intel-family.h b/arch/x86/include/asm/intel-family.h
index 34a46dc..8167fdb 100644
--- a/arch/x86/include/asm/intel-family.h
+++ b/arch/x86/include/asm/intel-family.h
@@ -57,7 +57,7 @@
#define INTEL_FAM6_ATOM_SILVERMONT2 0x4D /* Avaton/Rangely */
#define INTEL_FAM6_ATOM_AIRMONT 0x4C /* CherryTrail / Braswell */
#define INTEL_FAM6_ATOM_MERRIFIELD 0x4A /* Tangier */
-#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Annidale */
+#define INTEL_FAM6_ATOM_MOOREFIELD 0x5A /* Anniedale */
#define INTEL_FAM6_ATOM_GOLDMONT 0x5C
#define INTEL_FAM6_ATOM_DENVERTON 0x5F /* Goldmont Microserver */
diff --git a/arch/x86/include/asm/microcode_intel.h b/arch/x86/include/asm/microcode_intel.h
index 195becc..e793fc9 100644
--- a/arch/x86/include/asm/microcode_intel.h
+++ b/arch/x86/include/asm/microcode_intel.h
@@ -52,6 +52,21 @@ struct extended_sigtable {
#define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
+static inline u32 intel_get_microcode_revision(void)
+{
+ u32 rev, dummy;
+
+ native_wrmsrl(MSR_IA32_UCODE_REV, 0);
+
+ /* As documented in the SDM: Do a CPUID 1 here */
+ native_cpuid_eax(1);
+
+ /* get the current revision from MSR 0x8B */
+ native_rdmsr(MSR_IA32_UCODE_REV, dummy, rev);
+
+ return rev;
+}
+
#ifdef CONFIG_MICROCODE_INTEL
extern void __init load_ucode_intel_bsp(void);
extern void load_ucode_intel_ap(void);
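This helper centralizes the SDM-mandated sequence for reading the microcode revision: zero IA32_BIOS_SIGN_ID, execute CPUID leaf 1 (which latches the update ID into the MSR), then read MSR 0x8B back. The duplicated open-coded copies in cpu/intel.c and microcode/intel.c below collapse to calls of this shape (pattern taken from the later hunks):

    u32 rev = intel_get_microcode_revision();

    if (rev != mc->hdr.rev)
            return -1;      /* the update did not take effect */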
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index eaf1005..1be64da 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -219,6 +219,24 @@ static inline void native_cpuid(unsigned int *eax, unsigned int *ebx,
: "memory");
}
+#define native_cpuid_reg(reg) \
+static inline unsigned int native_cpuid_##reg(unsigned int op) \
+{ \
+ unsigned int eax = op, ebx, ecx = 0, edx; \
+ \
+ native_cpuid(&eax, &ebx, &ecx, &edx); \
+ \
+ return reg; \
+}
+
+/*
+ * Native CPUID functions returning a single datum.
+ */
+native_cpuid_reg(eax)
+native_cpuid_reg(ebx)
+native_cpuid_reg(ecx)
+native_cpuid_reg(edx)
+
static inline void load_cr3(pgd_t *pgdir)
{
write_cr3(__pa(pgdir));
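native_cpuid_reg() stamps out one single-register wrapper per invocation; native_cpuid_reg(eax), for example, expands to the function below. These are the non-paravirt counterparts of cpuid_eax() and friends, usable in very early code such as intel_get_microcode_revision() above:

    static inline unsigned int native_cpuid_eax(unsigned int op)
    {
            unsigned int eax = op, ebx, ecx = 0, edx;

            native_cpuid(&eax, &ebx, &ecx, &edx);

            return eax;
    }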
diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
index a3269c8..2e41c50 100644
--- a/arch/x86/include/asm/stacktrace.h
+++ b/arch/x86/include/asm/stacktrace.h
@@ -58,7 +58,7 @@ get_frame_pointer(struct task_struct *task, struct pt_regs *regs)
if (task == current)
return __builtin_frame_address(0);
- return (unsigned long *)((struct inactive_task_frame *)task->thread.sp)->bp;
+ return &((struct inactive_task_frame *)task->thread.sp)->bp;
}
#else
static inline unsigned long *
diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
index 5cb436a..fcc5cd3 100644
--- a/arch/x86/include/asm/switch_to.h
+++ b/arch/x86/include/asm/switch_to.h
@@ -36,7 +36,10 @@ static inline void prepare_switch_to(struct task_struct *prev,
asmlinkage void ret_from_fork(void);
-/* data that is pointed to by thread.sp */
+/*
+ * This is the structure pointed to by thread.sp for an inactive task. The
+ * order of the fields must match the code in __switch_to_asm().
+ */
struct inactive_task_frame {
#ifdef CONFIG_X86_64
unsigned long r15;
@@ -48,6 +51,11 @@ struct inactive_task_frame {
unsigned long di;
#endif
unsigned long bx;
+
+ /*
+ * These two fields must be together. They form a stack frame header,
+ * needed by get_frame_pointer().
+ */
unsigned long bp;
unsigned long ret_addr;
};
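The bp/ret_addr pair at the bottom of the struct mimics the frame header that a CALL followed by a push of the frame pointer leaves on the stack, which is exactly why get_frame_pointer() in the stacktrace.h hunk above can hand the unwinder &frame->bp for a sleeping task (illustrative sketch):

    /* Sketch: how an inactive task's saved stack is interpreted */
    struct inactive_task_frame *frame = (void *)task->thread.sp;

    unsigned long *fp = &frame->bp;      /* frame pointer for the unwinder */
    unsigned long pc = frame->ret_addr;  /* where the task resumes */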
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 945e512..1e35dd0 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1875,6 +1875,7 @@ static struct irq_chip ioapic_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
@@ -1886,6 +1887,7 @@ static struct irq_chip ioapic_ir_chip __read_mostly = {
.irq_ack = irq_chip_ack_parent,
.irq_eoi = ioapic_ir_ack_level,
.irq_set_affinity = ioapic_set_affinity,
+ .irq_retrigger = irq_chip_retrigger_hierarchy,
.flags = IRQCHIP_SKIP_SET_WAKE,
};
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 71cae73..1d31672 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -309,15 +309,8 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
/* get information required for multi-node processors */
if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
- u32 eax, ebx, ecx, edx;
- cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- node_id = ecx & 7;
-
- /* get compute unit information */
- smp_num_siblings = ((ebx >> 8) & 3) + 1;
- c->x86_max_cores /= smp_num_siblings;
- c->cpu_core_id = ebx & 0xff;
+ node_id = cpuid_ecx(0x8000001e) & 7;
/*
* We may have multiple LLCs if L3 caches exist, so check if we
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index dc1697c..9bab7a8 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1221,7 +1221,7 @@ static __init int setup_disablecpuid(char *arg)
{
int bit;
- if (get_option(&arg, &bit) && bit < NCAPINTS*32)
+ if (get_option(&arg, &bit) && bit >= 0 && bit < NCAPINTS * 32)
setup_clear_cpu_cap(bit);
else
return 0;
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index fcd484d..203f860 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -14,6 +14,7 @@
#include <asm/bugs.h>
#include <asm/cpu.h>
#include <asm/intel-family.h>
+#include <asm/microcode_intel.h>
#ifdef CONFIG_X86_64
#include <linux/topology.h>
@@ -78,14 +79,8 @@ static void early_init_intel(struct cpuinfo_x86 *c)
(c->x86 == 0x6 && c->x86_model >= 0x0e))
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
- if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) {
- unsigned lower_word;
-
- wrmsr(MSR_IA32_UCODE_REV, 0, 0);
- /* Required by the SDM */
- sync_core();
- rdmsr(MSR_IA32_UCODE_REV, lower_word, c->microcode);
- }
+ if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64))
+ c->microcode = intel_get_microcode_revision();
/*
* Atom erratum AAE44/AAF40/AAG38/AAH41:
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index b624b54..3f329b7 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -150,7 +150,7 @@ static struct ucode_patch *__alloc_microcode_buf(void *data, unsigned int size)
{
struct ucode_patch *p;
- p = kzalloc(size, GFP_KERNEL);
+ p = kzalloc(sizeof(struct ucode_patch), GFP_KERNEL);
if (!p)
return ERR_PTR(-ENOMEM);
@@ -368,26 +368,6 @@ next:
return patch;
}
-static void cpuid_1(void)
-{
- /*
- * According to the Intel SDM, Volume 3, 9.11.7:
- *
- * CPUID returns a value in a model specific register in
- * addition to its usual register return values. The
- * semantics of CPUID cause it to deposit an update ID value
- * in the 64-bit model-specific register at address 08BH
- * (IA32_BIOS_SIGN_ID). If no update is present in the
- * processor, the value in the MSR remains unmodified.
- *
- * Use native_cpuid -- this code runs very early and we don't
- * want to mess with paravirt.
- */
- unsigned int eax = 1, ebx, ecx = 0, edx;
-
- native_cpuid(&eax, &ebx, &ecx, &edx);
-}
-
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
unsigned int val[2];
@@ -410,15 +390,8 @@ static int collect_cpu_info_early(struct ucode_cpu_info *uci)
native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
csig.pf = 1 << ((val[1] >> 18) & 7);
}
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
-
- csig.rev = val[1];
+ csig.rev = intel_get_microcode_revision();
uci->cpu_sig = csig;
uci->valid = 1;
@@ -602,7 +575,7 @@ static inline void print_ucode(struct ucode_cpu_info *uci)
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
struct microcode_intel *mc;
- unsigned int val[2];
+ u32 rev;
mc = uci->mc;
if (!mc)
@@ -610,21 +583,16 @@ static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
/* write microcode via MSR 0x79 */
native_wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- native_wrmsrl(MSR_IA32_UCODE_REV, 0);
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
-
- /* get the current revision from MSR 0x8B */
- native_rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
- if (val[1] != mc->hdr.rev)
+ rev = intel_get_microcode_revision();
+ if (rev != mc->hdr.rev)
return -1;
#ifdef CONFIG_X86_64
/* Flush global tlb. This is precaution. */
flush_tlb_early();
#endif
- uci->cpu_sig.rev = val[1];
+ uci->cpu_sig.rev = rev;
if (early)
print_ucode(uci);
@@ -804,8 +772,8 @@ static int apply_microcode_intel(int cpu)
struct microcode_intel *mc;
struct ucode_cpu_info *uci;
struct cpuinfo_x86 *c;
- unsigned int val[2];
static int prev_rev;
+ u32 rev;
/* We should bind the task to the CPU */
if (WARN_ON(raw_smp_processor_id() != cpu))
@@ -822,33 +790,28 @@ static int apply_microcode_intel(int cpu)
/* write microcode via MSR 0x79 */
wrmsrl(MSR_IA32_UCODE_WRITE, (unsigned long)mc->bits);
- wrmsrl(MSR_IA32_UCODE_REV, 0);
-
- /* As documented in the SDM: Do a CPUID 1 here */
- cpuid_1();
- /* get the current revision from MSR 0x8B */
- rdmsr(MSR_IA32_UCODE_REV, val[0], val[1]);
+ rev = intel_get_microcode_revision();
- if (val[1] != mc->hdr.rev) {
+ if (rev != mc->hdr.rev) {
pr_err("CPU%d update to revision 0x%x failed\n",
cpu, mc->hdr.rev);
return -1;
}
- if (val[1] != prev_rev) {
+ if (rev != prev_rev) {
pr_info("updated to revision 0x%x, date = %04x-%02x-%02x\n",
- val[1],
+ rev,
mc->hdr.date & 0xffff,
mc->hdr.date >> 24,
(mc->hdr.date >> 16) & 0xff);
- prev_rev = val[1];
+ prev_rev = rev;
}
c = &cpu_data(cpu);
- uci->cpu_sig.rev = val[1];
- c->microcode = val[1];
+ uci->cpu_sig.rev = rev;
+ c->microcode = rev;
return 0;
}
@@ -860,7 +823,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
int new_rev = uci->cpu_sig.rev;
unsigned int leftover = size;
- unsigned int curr_mc_size = 0;
+ unsigned int curr_mc_size = 0, new_mc_size = 0;
unsigned int csig, cpf;
while (leftover) {
@@ -901,6 +864,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
vfree(new_mc);
new_rev = mc_header.rev;
new_mc = mc;
+ new_mc_size = mc_size;
mc = NULL; /* trigger new vmalloc */
}
@@ -926,7 +890,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
* permanent memory. So it will be loaded early when a CPU is hot added
* or resumes.
*/
- save_mc_for_early(new_mc, curr_mc_size);
+ save_mc_for_early(new_mc, new_mc_size);
pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
cpu, new_rev, uci->cpu_sig.rev);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index be3a49e..e41af59 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -694,6 +694,7 @@ unsigned long native_calibrate_tsc(void)
crystal_khz = 24000; /* 24.0 MHz */
break;
case INTEL_FAM6_SKYLAKE_X:
+ case INTEL_FAM6_ATOM_DENVERTON:
crystal_khz = 25000; /* 25.0 MHz */
break;
case INTEL_FAM6_ATOM_GOLDMONT:
diff --git a/arch/x86/kernel/unwind_frame.c b/arch/x86/kernel/unwind_frame.c
index 4443e499..23d1556 100644
--- a/arch/x86/kernel/unwind_frame.c
+++ b/arch/x86/kernel/unwind_frame.c
@@ -6,6 +6,21 @@
#define FRAME_HEADER_SIZE (sizeof(long) * 2)
+/*
+ * This disables KASAN checking when reading a value from another task's stack,
+ * since the other task could be running on another CPU and could have poisoned
+ * the stack in the meantime.
+ */
+#define READ_ONCE_TASK_STACK(task, x) \
+({ \
+ unsigned long val; \
+ if (task == current) \
+ val = READ_ONCE(x); \
+ else \
+ val = READ_ONCE_NOCHECK(x); \
+ val; \
+})
+
static void unwind_dump(struct unwind_state *state, unsigned long *sp)
{
static bool dumped_before = false;
@@ -48,7 +63,8 @@ unsigned long unwind_get_return_address(struct unwind_state *state)
if (state->regs && user_mode(state->regs))
return 0;
- addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, *addr_p,
+ addr = READ_ONCE_TASK_STACK(state->task, *addr_p);
+ addr = ftrace_graph_ret_addr(state->task, &state->graph_idx, addr,
addr_p);
return __kernel_text_address(addr) ? addr : 0;
@@ -162,7 +178,7 @@ bool unwind_next_frame(struct unwind_state *state)
if (state->regs)
next_bp = (unsigned long *)state->regs->bp;
else
- next_bp = (unsigned long *)*state->bp;
+ next_bp = (unsigned long *)READ_ONCE_TASK_STACK(state->task,*state->bp);
/* is the next frame pointer an encoded pointer to pt_regs? */
regs = decode_frame_pointer(next_bp);
@@ -207,6 +223,16 @@ bool unwind_next_frame(struct unwind_state *state)
return true;
bad_address:
+ /*
+ * When unwinding a non-current task, the task might actually be
+ * running on another CPU, in which case it could be modifying its
+ * stack while we're reading it. This is generally not a problem and
+ * can be ignored as long as the caller understands that unwinding
+ * another task will not always succeed.
+ */
+ if (state->task != current)
+ goto the_end;
+
if (state->regs) {
printk_deferred_once(KERN_WARNING
"WARNING: kernel stack regs at %p in %s:%d has bad 'bp' value %p\n",
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 56628a4..cedbba0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
}
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+ struct segmented_address addr,
+ void *data,
+ unsigned int size)
+{
+ int rc;
+ ulong linear;
+
+ rc = linearize(ctxt, addr, size, true, &linear);
+ if (rc != X86EMUL_CONTINUE)
+ return rc;
+ return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
/*
* Prefetch the remaining bytes of the instruction without crossing page
* boundary if they are not in fetch_cache yet.
@@ -1571,7 +1585,6 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
&ctxt->exception);
}
-/* Does not support long mode */
static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg, u8 cpl,
enum x86_transfer_type transfer,
@@ -1608,20 +1621,34 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
rpl = selector & 3;
- /* NULL selector is not valid for TR, CS and SS (except for long mode) */
- if ((seg == VCPU_SREG_CS
- || (seg == VCPU_SREG_SS
- && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
- || seg == VCPU_SREG_TR)
- && null_selector)
- goto exception;
-
/* TR should be in GDT only */
if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
goto exception;
- if (null_selector) /* for NULL selector skip all following checks */
+ /* NULL selector is not valid for TR, CS and (except for long mode) SS */
+ if (null_selector) {
+ if (seg == VCPU_SREG_CS || seg == VCPU_SREG_TR)
+ goto exception;
+
+ if (seg == VCPU_SREG_SS) {
+ if (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl)
+ goto exception;
+
+ /*
+ * ctxt->ops->set_segment expects the CPL to be in
+ * SS.DPL, so fake an expand-up 32-bit data segment.
+ */
+ seg_desc.type = 3;
+ seg_desc.p = 1;
+ seg_desc.s = 1;
+ seg_desc.dpl = cpl;
+ seg_desc.d = 1;
+ seg_desc.g = 1;
+ }
+
+ /* Skip all following checks */
goto load;
+ }
ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
if (ret != X86EMUL_CONTINUE)
@@ -1737,6 +1764,21 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
u16 selector, int seg)
{
u8 cpl = ctxt->ops->cpl(ctxt);
+
+ /*
+ * None of MOV, POP and LSS can load a NULL selector in CPL=3, but
+ * they can load it at CPL<3 (Intel's manual says only LSS can,
+ * but it's wrong).
+ *
+ * However, the Intel manual says that putting IST=1/DPL=3 in
+ * an interrupt gate will result in SS=3 (the AMD manual instead
+ * says it doesn't), so allow SS=3 in __load_segment_descriptor
+ * and only forbid it here.
+ */
+ if (seg == VCPU_SREG_SS && selector == 3 &&
+ ctxt->mode == X86EMUL_MODE_PROT64)
+ return emulate_exception(ctxt, GP_VECTOR, 0, true);
+
return __load_segment_descriptor(ctxt, selector, seg, cpl,
X86_TRANSFER_NONE, NULL);
}
@@ -3685,8 +3727,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
}
/* Disable writeback. */
ctxt->dst.type = OP_NONE;
- return segmented_write(ctxt, ctxt->dst.addr.mem,
- &desc_ptr, 2 + ctxt->op_bytes);
+ return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+ &desc_ptr, 2 + ctxt->op_bytes);
}
static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3974,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
else
size = offsetof(struct fxregs_state, xmm_space[0]);
- return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+ return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
}
static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +4016,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
if (rc != X86EMUL_CONTINUE)
return rc;
- rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+ rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
if (rc != X86EMUL_CONTINUE)
return rc;
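The emulator has two store paths: segmented_write() goes through the full emulated-write machinery (MMIO dispatch, write interception), while the ops->write_std callback, like read_std, performs a plain linear-memory access with ordinary permission checks. Stores produced by SGDT/SIDT and FXSAVE target normal memory, so they switch to the new _std variant, matching the existing use of segmented_read_std(); the call pattern from em_sgdt/em_sidt above:

    /* Disable writeback; then store straight to guest memory. */
    ctxt->dst.type = OP_NONE;
    return segmented_write_std(ctxt, ctxt->dst.addr.mem,
                               &desc_ptr, 2 + ctxt->op_bytes);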
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 5fe290c..2f6ef51 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2426,3 +2426,9 @@ void kvm_lapic_init(void)
jump_label_rate_limit(&apic_hw_disabled, HZ);
jump_label_rate_limit(&apic_sw_disabled, HZ);
}
+
+void kvm_lapic_exit(void)
+{
+ static_key_deferred_flush(&apic_hw_disabled);
+ static_key_deferred_flush(&apic_sw_disabled);
+}
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index e0c8023..ff8039d 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -110,6 +110,7 @@ static inline bool kvm_hv_vapic_assist_page_enabled(struct kvm_vcpu *vcpu)
int kvm_lapic_enable_pv_eoi(struct kvm_vcpu *vcpu, u64 data);
void kvm_lapic_init(void);
+void kvm_lapic_exit(void);
#define VEC_POS(v) ((v) & (32 - 1))
#define REG_POS(v) (((v) >> 5) << 4)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2f22810..d153be8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3342,6 +3342,8 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
switch (cap->cap) {
case KVM_CAP_HYPERV_SYNIC:
+ if (!irqchip_in_kernel(vcpu->kvm))
+ return -EINVAL;
return kvm_hv_activate_synic(vcpu);
default:
return -EINVAL;
@@ -6045,6 +6047,7 @@ out:
void kvm_arch_exit(void)
{
+ kvm_lapic_exit();
perf_unregister_guest_info_callbacks(&kvm_guest_cbs);
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC))
@@ -6168,7 +6171,8 @@ static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt)
kvm_x86_ops->patch_hypercall(vcpu, instruction);
- return emulator_write_emulated(ctxt, rip, instruction, 3, NULL);
+ return emulator_write_emulated(ctxt, rip, instruction, 3,
+ &ctxt->exception);
}
static int dm_request_for_irq_injection(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 324e571..af59f80 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -293,7 +293,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
* We were not able to extract an address from the instruction,
* probably because there was something invalid in it.
*/
- if (info->si_addr == (void *)-1) {
+ if (info->si_addr == (void __user *)-1) {
err = -EINVAL;
goto err_out;
}
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 3cd6983..3961103 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -114,6 +114,16 @@ static const struct dmi_system_id pci_crs_quirks[] __initconst = {
DMI_MATCH(DMI_BIOS_VERSION, "6JET85WW (1.43 )"),
},
},
+ /* https://bugzilla.kernel.org/show_bug.cgi?id=42606 */
+ {
+ .callback = set_nouse_crs,
+ .ident = "Supermicro X8DTH",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Supermicro"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "X8DTH-i/6/iF/6F"),
+ DMI_MATCH(DMI_BIOS_VERSION, "2.0a"),
+ },
+ },
/* https://bugzilla.kernel.org/show_bug.cgi?id=15362 */
{
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 936a488..274dfc4 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -210,6 +210,70 @@ int __init efi_memblock_x86_reserve_range(void)
return 0;
}
+#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+ u64 end_hi = 0;
+ char buf[64];
+
+ if (md->num_pages == 0) {
+ end = 0;
+ } else if (md->num_pages > EFI_PAGES_MAX ||
+ EFI_PAGES_MAX - md->num_pages <
+ (md->phys_addr >> EFI_PAGE_SHIFT)) {
+ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+ >> OVERFLOW_ADDR_SHIFT;
+
+ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+ end_hi += 1;
+ } else {
+ return true;
+ }
+
+ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+ if (end_hi) {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end_hi, end);
+ } else {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end);
+ }
+ return false;
+}
+
+static void __init efi_clean_memmap(void)
+{
+ efi_memory_desc_t *out = efi.memmap.map;
+ const efi_memory_desc_t *in = out;
+ const efi_memory_desc_t *end = efi.memmap.map_end;
+ int i, n_removal;
+
+ for (i = n_removal = 0; in < end; i++) {
+ if (efi_memmap_entry_valid(in, i)) {
+ if (out != in)
+ memcpy(out, in, efi.memmap.desc_size);
+ out = (void *)out + efi.memmap.desc_size;
+ } else {
+ n_removal++;
+ }
+ in = (void *)in + efi.memmap.desc_size;
+ }
+
+ if (n_removal > 0) {
+ u64 size = efi.memmap.nr_map - n_removal;
+
+ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+ efi_memmap_install(efi.memmap.phys_map, size);
+ }
+}
+
void __init efi_print_memmap(void)
{
efi_memory_desc_t *md;
@@ -472,6 +536,8 @@ void __init efi_init(void)
}
}
+ efi_clean_memmap();
+
if (efi_enabled(EFI_DBG))
efi_print_memmap();
}
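efi_memmap_entry_valid() rejects firmware descriptors whose page count pushes the end address past 2^64; the end_hi arithmetic only reconstructs the overflowed high bits for the warning text. A standalone demonstration of the wraparound being caught (user-space sketch; the descriptor values are invented):

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12

    int main(void)
    {
            uint64_t phys_addr = 0xfffff00000000000ULL;  /* near the top */
            uint64_t num_pages = 0x0000100000000000ULL;  /* absurdly large */
            uint64_t end = (num_pages << EFI_PAGE_SHIFT) + phys_addr - 1;

            /* end wrapped past 2^64 and now lies below phys_addr: */
            printf("start=0x%016llx end=0x%016llx\n",
                   (unsigned long long)phys_addr, (unsigned long long)end);
            return 0;
    }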
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
index 10aca63..30031d5 100644
--- a/arch/x86/platform/efi/quirks.c
+++ b/arch/x86/platform/efi/quirks.c
@@ -214,7 +214,7 @@ void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Could not allocate boot services memmap\n");
return;
@@ -355,7 +355,7 @@ void __init efi_free_boot_services(void)
}
new_size = efi.memmap.desc_size * num_entries;
- new_phys = memblock_alloc(new_size, 0);
+ new_phys = efi_memmap_alloc(num_entries);
if (!new_phys) {
pr_err("Failed to allocate new EFI memmap\n");
return;
diff --git a/arch/x86/platform/intel-mid/device_libs/Makefile b/arch/x86/platform/intel-mid/device_libs/Makefile
index 61b5ed2..90e4f2a 100644
--- a/arch/x86/platform/intel-mid/device_libs/Makefile
+++ b/arch/x86/platform/intel-mid/device_libs/Makefile
@@ -15,7 +15,7 @@ obj-$(subst m,y,$(CONFIG_INTEL_MID_POWER_BUTTON)) += platform_msic_power_btn.o
obj-$(subst m,y,$(CONFIG_GPIO_INTEL_PMIC)) += platform_pmic_gpio.o
obj-$(subst m,y,$(CONFIG_INTEL_MFLD_THERMAL)) += platform_msic_thermal.o
# SPI Devices
-obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_spidev.o
+obj-$(subst m,y,$(CONFIG_SPI_SPIDEV)) += platform_mrfld_spidev.o
# I2C Devices
obj-$(subst m,y,$(CONFIG_SENSORS_EMC1403)) += platform_emc1403.o
obj-$(subst m,y,$(CONFIG_SENSORS_LIS3LV02D)) += platform_lis331.o
diff --git a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
index 30c601b..27186ad 100644
--- a/arch/x86/platform/intel-mid/device_libs/platform_spidev.c
+++ b/arch/x86/platform/intel-mid/device_libs/platform_mrfld_spidev.c
@@ -11,6 +11,7 @@
* of the License.
*/
+#include <linux/err.h>
#include <linux/init.h>
#include <linux/sfi.h>
#include <linux/spi/pxa2xx_spi.h>
@@ -34,6 +35,9 @@ static void __init *spidev_platform_data(void *info)
{
struct spi_board_info *spi_info = info;
+ if (intel_mid_identify_cpu() != INTEL_MID_CPU_CHIP_TANGIER)
+ return ERR_PTR(-ENODEV);
+
spi_info->mode = SPI_MODE_0;
spi_info->controller_data = &spidev_spi_chip;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index ed89c8f..f8c82a9 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -301,13 +301,6 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
if ((sector | nr_sects) & bs_mask)
return -EINVAL;
- if (discard) {
- ret = __blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
- BLKDEV_DISCARD_ZERO, biop);
- if (ret == 0 || (ret && ret != -EOPNOTSUPP))
- goto out;
- }
-
ret = __blkdev_issue_write_zeroes(bdev, sector, nr_sects, gfp_mask,
biop);
if (ret == 0 || (ret && ret != -EOPNOTSUPP))
@@ -370,6 +363,12 @@ int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
struct bio *bio = NULL;
struct blk_plug plug;
+ if (discard) {
+ if (!blkdev_issue_discard(bdev, sector, nr_sects, gfp_mask,
+ BLKDEV_DISCARD_ZERO))
+ return 0;
+ }
+
blk_start_plug(&plug);
ret = __blkdev_issue_zeroout(bdev, sector, nr_sects, gfp_mask,
&bio, discard);
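The discard attempt moves out of the bio-chaining helper __blkdev_issue_zeroout() and into the blocking wrapper, which can simply return once a BLKDEV_DISCARD_ZERO discard has succeeded and otherwise falls through to explicit zeroing. Caller-visible shape afterwards (sketch):

    /* Try a zeroing discard first, then fall back to writing zeroes: */
    int ret = blkdev_issue_zeroout(bdev, sector, nr_sects,
                                   GFP_KERNEL, true /* discard */);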
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a8e67a1..c3400b5 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -912,7 +912,6 @@ bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *hctx, struct list_head *list)
static void blk_mq_process_rq_list(struct blk_mq_hw_ctx *hctx)
{
LIST_HEAD(rq_list);
- LIST_HEAD(driver_list);
if (unlikely(blk_mq_hctx_stopped(hctx)))
return;
diff --git a/block/blk-zoned.c b/block/blk-zoned.c
index 472211f..3bd15d8 100644
--- a/block/blk-zoned.c
+++ b/block/blk-zoned.c
@@ -16,7 +16,7 @@
static inline sector_t blk_zone_start(struct request_queue *q,
sector_t sector)
{
- sector_t zone_mask = blk_queue_zone_size(q) - 1;
+ sector_t zone_mask = blk_queue_zone_sectors(q) - 1;
return sector & ~zone_mask;
}
@@ -222,7 +222,7 @@ int blkdev_reset_zones(struct block_device *bdev,
return -EINVAL;
/* Check alignment (handle eventual smaller last zone) */
- zone_sectors = blk_queue_zone_size(q);
+ zone_sectors = blk_queue_zone_sectors(q);
if (sector & (zone_sectors - 1))
return -EINVAL;
diff --git a/block/partition-generic.c b/block/partition-generic.c
index d7beb6b..7afb990 100644
--- a/block/partition-generic.c
+++ b/block/partition-generic.c
@@ -434,7 +434,7 @@ static bool part_zone_aligned(struct gendisk *disk,
struct block_device *bdev,
sector_t from, sector_t size)
{
- unsigned int zone_size = bdev_zone_size(bdev);
+ unsigned int zone_sectors = bdev_zone_sectors(bdev);
/*
* If this function is called, then the disk is a zoned block device
@@ -446,7 +446,7 @@ static bool part_zone_aligned(struct gendisk *disk,
* regular block devices (no zone operation) and their zone size will
* be reported as 0. Allow this case.
*/
- if (!zone_size)
+ if (!zone_sectors)
return true;
/*
@@ -455,24 +455,24 @@ static bool part_zone_aligned(struct gendisk *disk,
 * use it. Check the zone size too: it should be a power-of-2 number
* of sectors.
*/
- if (WARN_ON_ONCE(!is_power_of_2(zone_size))) {
+ if (WARN_ON_ONCE(!is_power_of_2(zone_sectors))) {
u32 rem;
- div_u64_rem(from, zone_size, &rem);
+ div_u64_rem(from, zone_sectors, &rem);
if (rem)
return false;
if ((from + size) < get_capacity(disk)) {
- div_u64_rem(size, zone_size, &rem);
+ div_u64_rem(size, zone_sectors, &rem);
if (rem)
return false;
}
} else {
- if (from & (zone_size - 1))
+ if (from & (zone_sectors - 1))
return false;
if ((from + size) < get_capacity(disk) &&
- (size & (zone_size - 1)))
+ (size & (zone_sectors - 1)))
return false;
}
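Both branches check the same alignment; the mask form works only when the zone size is a power of two (the expected case, per the WARN_ON_ONCE), which is why the odd-size path falls back to div_u64_rem(). A standalone illustration of the equivalence (user-space sketch):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t zone_sectors = 524288;  /* 256 MiB of 512-byte sectors */
            uint64_t from;

            for (from = 0; from < 4 * zone_sectors; from += 12345) {
                    /* mask trick == remainder, for power-of-2 sizes only */
                    assert((from & (zone_sectors - 1)) == from % zone_sectors);
            }
            return 0;
    }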
diff --git a/drivers/acpi/acpica/tbdata.c b/drivers/acpi/acpica/tbdata.c
index 82b0b57..b0399e8 100644
--- a/drivers/acpi/acpica/tbdata.c
+++ b/drivers/acpi/acpica/tbdata.c
@@ -852,23 +852,18 @@ acpi_tb_install_and_load_table(acpi_physical_address address,
ACPI_FUNCTION_TRACE(tb_install_and_load_table);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-
/* Install the table and load it into the namespace */
status = acpi_tb_install_standard_table(address, flags, TRUE,
override, &i);
if (ACPI_FAILURE(status)) {
- goto unlock_and_exit;
+ goto exit;
}
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
status = acpi_tb_load_table(i, acpi_gbl_root_node);
- (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
-unlock_and_exit:
+exit:
*table_index = i;
- (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
return_ACPI_STATUS(status);
}
diff --git a/drivers/acpi/acpica/tbinstal.c b/drivers/acpi/acpica/tbinstal.c
index 5fdf251..01e1b3d 100644
--- a/drivers/acpi/acpica/tbinstal.c
+++ b/drivers/acpi/acpica/tbinstal.c
@@ -217,6 +217,10 @@ acpi_tb_install_standard_table(acpi_physical_address address,
goto release_and_exit;
}
+ /* Acquire the table lock */
+
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
if (reload) {
/*
* Validate the incoming table signature.
@@ -244,7 +248,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
new_table_desc.signature.integer));
status = AE_BAD_SIGNATURE;
- goto release_and_exit;
+ goto unlock_and_exit;
}
/* Check if table is already registered */
@@ -279,7 +283,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Table is still loaded, this is an error */
status = AE_ALREADY_EXISTS;
- goto release_and_exit;
+ goto unlock_and_exit;
} else {
/*
* Table was unloaded, allow it to be reloaded.
@@ -290,6 +294,7 @@ acpi_tb_install_standard_table(acpi_physical_address address,
* indicate the re-installation.
*/
acpi_tb_uninstall_table(&new_table_desc);
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
*table_index = i;
return_ACPI_STATUS(AE_OK);
}
@@ -303,11 +308,19 @@ acpi_tb_install_standard_table(acpi_physical_address address,
/* Invoke table handler if present */
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
if (acpi_gbl_table_handler) {
(void)acpi_gbl_table_handler(ACPI_TABLE_EVENT_INSTALL,
new_table_desc.pointer,
acpi_gbl_table_handler_context);
}
+ (void)acpi_ut_acquire_mutex(ACPI_MTX_TABLES);
+
+unlock_and_exit:
+
+ /* Release the table lock */
+
+ (void)acpi_ut_release_mutex(ACPI_MTX_TABLES);
release_and_exit:
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 9b6cebe..54abb26 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -674,14 +674,6 @@ static void acpi_sleep_suspend_setup(void)
if (acpi_sleep_state_supported(i))
sleep_states[i] = 1;
- /*
- * Use suspend-to-idle by default if ACPI_FADT_LOW_POWER_S0 is set and
- * the default suspend mode was not selected from the command line.
- */
- if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0 &&
- mem_sleep_default > PM_SUSPEND_MEM)
- mem_sleep_default = PM_SUSPEND_FREEZE;
-
suspend_set_ops(old_suspend_ordering ?
&acpi_suspend_ops_old : &acpi_suspend_ops);
freeze_set_ops(&acpi_freeze_ops);
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 02ded25..7f48156 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -305,17 +305,6 @@ static const struct dmi_system_id video_detect_dmi_table[] = {
DMI_MATCH(DMI_PRODUCT_NAME, "Dell System XPS L702X"),
},
},
- {
- /* https://bugzilla.redhat.com/show_bug.cgi?id=1204476 */
- /* https://bugs.launchpad.net/ubuntu/+source/linux-lts-trusty/+bug/1416940 */
- .callback = video_detect_force_native,
- .ident = "HP Pavilion dv6",
- .matches = {
- DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
- DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv6 Notebook PC"),
- },
- },
-
{ },
};
diff --git a/drivers/auxdisplay/Kconfig b/drivers/auxdisplay/Kconfig
index 4ef4c5c..8a8e403 100644
--- a/drivers/auxdisplay/Kconfig
+++ b/drivers/auxdisplay/Kconfig
@@ -132,9 +132,9 @@ config HT16K33
tristate "Holtek Ht16K33 LED controller with keyscan"
depends on FB && OF && I2C && INPUT
select FB_SYS_FOPS
- select FB_CFB_FILLRECT
- select FB_CFB_COPYAREA
- select FB_CFB_IMAGEBLIT
+ select FB_SYS_FILLRECT
+ select FB_SYS_COPYAREA
+ select FB_SYS_IMAGEBLIT
select INPUT_MATRIXKMAP
select FB_BACKLIGHT
help
diff --git a/drivers/base/base.h b/drivers/base/base.h
index ada9dce..e19b100 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -141,8 +141,6 @@ extern void device_unblock_probing(void);
extern struct kset *devices_kset;
extern void devices_kset_move_last(struct device *dev);
-extern struct device_attribute dev_attr_deferred_probe;
-
#if defined(CONFIG_MODULES) && defined(CONFIG_SYSFS)
extern void module_add_driver(struct module *mod, struct device_driver *drv);
extern void module_remove_driver(struct device_driver *drv);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 020ea7f..8c25e68 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1060,14 +1060,8 @@ static int device_add_attrs(struct device *dev)
goto err_remove_dev_groups;
}
- error = device_create_file(dev, &dev_attr_deferred_probe);
- if (error)
- goto err_remove_online;
-
return 0;
- err_remove_online:
- device_remove_file(dev, &dev_attr_online);
err_remove_dev_groups:
device_remove_groups(dev, dev->groups);
err_remove_type_groups:
@@ -1085,7 +1079,6 @@ static void device_remove_attrs(struct device *dev)
struct class *class = dev->class;
const struct device_type *type = dev->type;
- device_remove_file(dev, &dev_attr_deferred_probe);
device_remove_file(dev, &dev_attr_online);
device_remove_groups(dev, dev->groups);
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index a8b258e..a1fbf55 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -53,19 +53,6 @@ static LIST_HEAD(deferred_probe_pending_list);
static LIST_HEAD(deferred_probe_active_list);
static atomic_t deferred_trigger_count = ATOMIC_INIT(0);
-static ssize_t deferred_probe_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- bool value;
-
- mutex_lock(&deferred_probe_mutex);
- value = !list_empty(&dev->p->deferred_probe);
- mutex_unlock(&deferred_probe_mutex);
-
- return sprintf(buf, "%d\n", value);
-}
-DEVICE_ATTR_RO(deferred_probe);
-
/*
 * In some cases, like suspend to RAM or hibernation, it might be reasonable
* to prohibit probing of devices as it could be unsafe.
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 8ab8ea1..dacb6a8 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -408,14 +408,14 @@ static ssize_t show_valid_zones(struct device *dev,
sprintf(buf, "%s", zone->name);
/* MMOP_ONLINE_KERNEL */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL);
+ zone_can_shift(start_pfn, nr_pages, ZONE_NORMAL, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
}
/* MMOP_ONLINE_MOVABLE */
- zone_shift = zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE);
+ zone_can_shift(start_pfn, nr_pages, ZONE_MOVABLE, &zone_shift);
if (zone_shift) {
strcat(buf, " ");
strcat(buf, (zone + zone_shift)->name);
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 38c576f..9fd06ee 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,7 +271,7 @@ static inline int sock_send_bvec(struct nbd_device *nbd, int index,
static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
{
struct request *req = blk_mq_rq_from_pdu(cmd);
- int result, flags;
+ int result;
struct nbd_request request;
unsigned long size = blk_rq_bytes(req);
struct bio *bio;
@@ -310,7 +310,6 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
if (type != NBD_CMD_WRITE)
return 0;
- flags = 0;
bio = req->bio;
while (bio) {
struct bio *next = bio->bi_next;
@@ -319,9 +318,8 @@ static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd, int index)
bio_for_each_segment(bvec, bio, iter) {
bool is_last = !next && bio_iter_last(bvec, iter);
+ int flags = is_last ? 0 : MSG_MORE;
- if (is_last)
- flags = MSG_MORE;
dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n",
cmd, bvec.bv_len);
result = sock_send_bvec(nbd, index, &bvec, flags);
@@ -1042,6 +1040,7 @@ static int __init nbd_init(void)
return -ENOMEM;
for (i = 0; i < nbds_max; i++) {
+ struct request_queue *q;
struct gendisk *disk = alloc_disk(1 << part_shift);
if (!disk)
goto out;
@@ -1067,12 +1066,13 @@ static int __init nbd_init(void)
* every gendisk to have its very own request_queue struct.
* These structs are big so we dynamically allocate them.
*/
- disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set);
- if (!disk->queue) {
+ q = blk_mq_init_queue(&nbd_dev[i].tag_set);
+ if (IS_ERR(q)) {
blk_mq_free_tag_set(&nbd_dev[i].tag_set);
put_disk(disk);
goto out;
}
+ disk->queue = q;
/*
* Tell the block layer that we are not a rotational device
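MSG_MORE hints to TCP that more data follows immediately, letting it coalesce the per-bvec sends into full segments; the old test was inverted, setting the flag on the last segment, while the rewrite computes it per bio_vec so that only the final segment of the request omits it. The general shape of the technique (user-space sketch; error handling elided):

    #include <sys/socket.h>
    #include <sys/uio.h>

    static void send_all(int fd, const struct iovec *bufs, int n)
    {
            for (int i = 0; i < n; i++) {
                    int flags = (i == n - 1) ? 0 : MSG_MORE;

                    send(fd, bufs[i].iov_base, bufs[i].iov_len, flags);
            }
    }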
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index 5545a67..10332c2 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -56,6 +56,7 @@ struct virtblk_req {
struct virtio_blk_outhdr out_hdr;
struct virtio_scsi_inhdr in_hdr;
u8 status;
+ u8 sense[SCSI_SENSE_BUFFERSIZE];
struct scatterlist sg[];
};
@@ -102,7 +103,8 @@ static int __virtblk_add_req(struct virtqueue *vq,
}
if (type == cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_SCSI_CMD)) {
- sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ memcpy(vbr->sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE);
+ sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
sgs[num_out + num_in++] = &sense;
sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
sgs[num_out + num_in++] = &inhdr;
@@ -628,11 +630,12 @@ static int virtblk_probe(struct virtio_device *vdev)
if (err)
goto out_put_disk;
- q = vblk->disk->queue = blk_mq_init_queue(&vblk->tag_set);
+ q = blk_mq_init_queue(&vblk->tag_set);
if (IS_ERR(q)) {
err = -ENOMEM;
goto out_free_tags;
}
+ vblk->disk->queue = q;
q->queuedata = vblk;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index b2bdfa8..265f1a7 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -197,13 +197,13 @@ struct blkfront_info
/* Number of pages per ring buffer. */
unsigned int nr_ring_pages;
struct request_queue *rq;
- unsigned int feature_flush;
- unsigned int feature_fua;
+ unsigned int feature_flush:1;
+ unsigned int feature_fua:1;
unsigned int feature_discard:1;
unsigned int feature_secdiscard:1;
+ unsigned int feature_persistent:1;
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned int feature_persistent:1;
/* Number of 4KB segments handled */
unsigned int max_indirect_segments;
int is_ready;
@@ -2223,7 +2223,7 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo)
}
else
grants = info->max_indirect_segments;
- psegs = grants / GRANTS_PER_PSEG;
+ psegs = DIV_ROUND_UP(grants, GRANTS_PER_PSEG);
err = fill_grant_buffer(rinfo,
(grants + INDIRECT_GREFS(grants)) * BLK_RING_SIZE(info));
@@ -2323,13 +2323,16 @@ static void blkfront_gather_backend_features(struct blkfront_info *info)
blkfront_setup_discard(info);
info->feature_persistent =
- xenbus_read_unsigned(info->xbdev->otherend,
- "feature-persistent", 0);
+ !!xenbus_read_unsigned(info->xbdev->otherend,
+ "feature-persistent", 0);
indirect_segments = xenbus_read_unsigned(info->xbdev->otherend,
"feature-max-indirect-segments", 0);
- info->max_indirect_segments = min(indirect_segments,
- xen_blkif_max_segments);
+ if (indirect_segments > xen_blkif_max_segments)
+ indirect_segments = xen_blkif_max_segments;
+ if (indirect_segments <= BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ indirect_segments = 0;
+ info->max_indirect_segments = indirect_segments;
}
/*
@@ -2652,6 +2655,9 @@ static int __init xlblk_init(void)
if (!xen_domain())
return -ENODEV;
+ if (xen_blkif_max_segments < BLKIF_MAX_SEGMENTS_PER_REQUEST)
+ xen_blkif_max_segments = BLKIF_MAX_SEGMENTS_PER_REQUEST;
+
if (xen_blkif_max_ring_order > XENBUS_MAX_RING_GRANT_ORDER) {
pr_info("Invalid max_ring_order (%d), will use default max: %d.\n",
xen_blkif_max_ring_order, XENBUS_MAX_RING_GRANT_ORDER);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 15f58ab..e5ab7d9 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -25,6 +25,7 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
+#include <linux/backing-dev.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/err.h>
@@ -112,6 +113,14 @@ static inline bool is_partial_io(struct bio_vec *bvec)
return bvec->bv_len != PAGE_SIZE;
}
+static void zram_revalidate_disk(struct zram *zram)
+{
+ revalidate_disk(zram->disk);
+	/* revalidate_disk() resets BDI_CAP_STABLE_WRITES, so set it again */
+ zram->disk->queue->backing_dev_info.capabilities |=
+ BDI_CAP_STABLE_WRITES;
+}
+
/*
* Check if request is within bounds and aligned on zram logical blocks.
*/
@@ -1095,15 +1104,9 @@ static ssize_t disksize_store(struct device *dev,
zram->comp = comp;
zram->disksize = disksize;
set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
+ zram_revalidate_disk(zram);
up_write(&zram->init_lock);
- /*
- * Revalidate disk out of the init_lock to avoid lockdep splat.
- * It's okay because disk's capacity is protected by init_lock
- * so that revalidate_disk always sees up-to-date capacity.
- */
- revalidate_disk(zram->disk);
-
return len;
out_destroy_comp:
@@ -1149,7 +1152,7 @@ static ssize_t reset_store(struct device *dev,
/* Make sure all the pending I/O are finished */
fsync_bdev(bdev);
zram_reset_device(zram);
- revalidate_disk(zram->disk);
+ zram_revalidate_disk(zram);
bdput(bdev);
mutex_lock(&bdev->bd_mutex);
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 5bb1985..6d9cc2d 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -381,9 +381,6 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
read = 0;
if (p < (unsigned long) high_memory) {
low_count = count;
@@ -412,6 +409,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
* by the kernel or data corruption may occur
*/
kbuf = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(kbuf))
+ return -ENXIO;
if (copy_to_user(buf, kbuf, sz))
return -EFAULT;
@@ -482,6 +481,8 @@ static ssize_t do_write_kmem(unsigned long p, const char __user *buf,
* corruption may occur.
*/
ptr = xlate_dev_kmem_ptr((void *)p);
+ if (!virt_addr_valid(ptr))
+ return -ENXIO;
copied = copy_from_user(ptr, buf, sz);
if (copied) {
@@ -512,9 +513,6 @@ static ssize_t write_kmem(struct file *file, const char __user *buf,
char *kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
int err = 0;
- if (!pfn_valid(PFN_DOWN(p)))
- return -EIO;
-
if (p < (unsigned long) high_memory) {
unsigned long to_write = min_t(unsigned long, count,
(unsigned long)high_memory - p);
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c
index 02819e0..87885d1 100644
--- a/drivers/char/ppdev.c
+++ b/drivers/char/ppdev.c
@@ -290,6 +290,7 @@ static int register_device(int minor, struct pp_struct *pp)
struct pardevice *pdev = NULL;
char *name;
struct pardev_cb ppdev_cb;
+ int rc = 0;
name = kasprintf(GFP_KERNEL, CHRDEV "%x", minor);
if (name == NULL)
@@ -298,8 +299,8 @@ static int register_device(int minor, struct pp_struct *pp)
port = parport_find_number(minor);
if (!port) {
pr_warn("%s: no associated port!\n", name);
- kfree(name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
memset(&ppdev_cb, 0, sizeof(ppdev_cb));
@@ -308,16 +309,18 @@ static int register_device(int minor, struct pp_struct *pp)
ppdev_cb.private = pp;
pdev = parport_register_dev_model(port, name, &ppdev_cb, minor);
parport_put_port(port);
- kfree(name);
if (!pdev) {
pr_warn("%s: failed to register device!\n", name);
- return -ENXIO;
+ rc = -ENXIO;
+ goto err;
}
pp->pdev = pdev;
dev_dbg(&pdev->dev, "registered pardevice\n");
- return 0;
+err:
+ kfree(name);
+ return rc;
}
static enum ieee1284_phase init_phase(int mode)
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index 8b00e79..17857be 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1862,7 +1862,7 @@ static void config_work_handler(struct work_struct *work)
{
struct ports_device *portdev;
- portdev = container_of(work, struct ports_device, control_work);
+ portdev = container_of(work, struct ports_device, config_work);
if (!use_multiport(portdev)) {
struct virtio_device *vdev;
struct port *port;
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 8c8b495..cdc092a 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -586,7 +586,7 @@ static const struct samsung_gate_clock exynos5800_gate_clks[] __initconst = {
GATE(CLK_ACLK550_CAM, "aclk550_cam", "mout_user_aclk550_cam",
GATE_BUS_TOP, 24, 0, 0),
GATE(CLK_ACLK432_SCALER, "aclk432_scaler", "mout_user_aclk432_scaler",
- GATE_BUS_TOP, 27, 0, 0),
+ GATE_BUS_TOP, 27, CLK_IS_CRITICAL, 0),
};
static const struct samsung_mux_clock exynos5420_mux_clks[] __initconst = {
@@ -956,20 +956,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(CLK_SMMU_G2D, "smmu_g2d", "aclk333_g2d", GATE_IP_G2D, 7, 0, 0),
GATE(0, "aclk200_fsys", "mout_user_aclk200_fsys",
- GATE_BUS_FSYS0, 9, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_FSYS0, 9, CLK_IS_CRITICAL, 0),
GATE(0, "aclk200_fsys2", "mout_user_aclk200_fsys2",
GATE_BUS_FSYS0, 10, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_g2d", "mout_user_aclk333_g2d",
GATE_BUS_TOP, 0, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk266_g2d", "mout_user_aclk266_g2d",
- GATE_BUS_TOP, 1, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 1, CLK_IS_CRITICAL, 0),
GATE(0, "aclk300_jpeg", "mout_user_aclk300_jpeg",
GATE_BUS_TOP, 4, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp0", "mout_user_aclk333_432_isp0",
GATE_BUS_TOP, 5, 0, 0),
GATE(0, "aclk300_gscl", "mout_user_aclk300_gscl",
- GATE_BUS_TOP, 6, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 6, CLK_IS_CRITICAL, 0),
GATE(0, "aclk333_432_gscl", "mout_user_aclk333_432_gscl",
GATE_BUS_TOP, 7, CLK_IGNORE_UNUSED, 0),
GATE(0, "aclk333_432_isp", "mout_user_aclk333_432_isp",
@@ -983,20 +983,20 @@ static const struct samsung_gate_clock exynos5x_gate_clks[] __initconst = {
GATE(0, "aclk166", "mout_user_aclk166",
GATE_BUS_TOP, 14, CLK_IGNORE_UNUSED, 0),
GATE(CLK_ACLK333, "aclk333", "mout_user_aclk333",
- GATE_BUS_TOP, 15, CLK_IGNORE_UNUSED, 0),
+ GATE_BUS_TOP, 15, CLK_IS_CRITICAL, 0),
GATE(0, "aclk400_isp", "mout_user_aclk400_isp",
GATE_BUS_TOP, 16, 0, 0),
GATE(0, "aclk400_mscl", "mout_user_aclk400_mscl",
GATE_BUS_TOP, 17, 0, 0),
GATE(0, "aclk200_disp1", "mout_user_aclk200_disp1",
- GATE_BUS_TOP, 18, 0, 0),
+ GATE_BUS_TOP, 18, CLK_IS_CRITICAL, 0),
GATE(CLK_SCLK_MPHY_IXTAL24, "sclk_mphy_ixtal24", "mphy_refclk_ixtal24",
GATE_BUS_TOP, 28, 0, 0),
GATE(CLK_SCLK_HSIC_12M, "sclk_hsic_12m", "ff_hsic_12m",
GATE_BUS_TOP, 29, 0, 0),
GATE(0, "aclk300_disp1", "mout_user_aclk300_disp1",
- SRC_MASK_TOP2, 24, 0, 0),
+ SRC_MASK_TOP2, 24, CLK_IS_CRITICAL, 0),
GATE(CLK_MAU_EPLL, "mau_epll", "mout_mau_epll_clk",
SRC_MASK_TOP7, 20, 0, 0),
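
The flag changes above hinge on the difference between the two clk flags: CLK_IGNORE_UNUSED only exempts a clock from the clk_disable_unused() late-init sweep, while CLK_IS_CRITICAL makes the core take an enable reference at registration so the clock can never be gated. A sketch using the file's own GATE() macro (the clock names here are hypothetical):

    /* merely skipped by clk_disable_unused(); may still be gated later */
    GATE(0, "aclk_example_a", "mout_parent", GATE_BUS_TOP, 2, CLK_IGNORE_UNUSED, 0),
    /* enabled by the core at registration and kept on for good */
    GATE(0, "aclk_example_b", "mout_parent", GATE_BUS_TOP, 3, CLK_IS_CRITICAL, 0),
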
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 4da1dc2..670ff0f 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -495,6 +495,7 @@ static int exynos4_mct_dying_cpu(unsigned int cpu)
if (mct_int_type == MCT_INT_SPI) {
if (evt->irq != -1)
disable_irq_nosync(evt->irq);
+ exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
} else {
disable_percpu_irq(mct_irqs[MCT_L0_IRQ]);
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index f91c257..a54d65a 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -2005,7 +2005,8 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
limits = &performance_limits;
perf_limits = limits;
}
- if (policy->max >= policy->cpuinfo.max_freq) {
+ if (policy->max >= policy->cpuinfo.max_freq &&
+ !limits->no_turbo) {
pr_debug("set performance\n");
intel_pstate_set_performance_limits(perf_limits);
goto out;
@@ -2047,6 +2048,17 @@ static int intel_pstate_verify_policy(struct cpufreq_policy *policy)
policy->policy != CPUFREQ_POLICY_PERFORMANCE)
return -EINVAL;
+ /* When per-CPU limits are used, sysfs limits are not used */
+ if (!per_cpu_limits) {
+ unsigned int max_freq, min_freq;
+
+ max_freq = policy->cpuinfo.max_freq *
+ limits->max_sysfs_pct / 100;
+ min_freq = policy->cpuinfo.max_freq *
+ limits->min_sysfs_pct / 100;
+ cpufreq_verify_within_limits(policy, min_freq, max_freq);
+ }
+
return 0;
}
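
The added block makes verify honor the global sysfs percentage limits whenever per-CPU limits are off. The arithmetic is a straight percentage of cpuinfo.max_freq; a worked sketch with made-up numbers:

    /* assume cpuinfo.max_freq = 3600000 kHz, max_sysfs_pct = 50, min_sysfs_pct = 20 */
    unsigned int max_freq = 3600000 * 50 / 100; /* 1800000 kHz */
    unsigned int min_freq = 3600000 * 20 / 100; /*  720000 kHz */
    /* policy->min/max are then clamped into [720000, 1800000] */
    cpufreq_verify_within_limits(policy, min_freq, max_freq);
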
diff --git a/drivers/dma/dw/Kconfig b/drivers/dma/dw/Kconfig
index e00c9b0..5a37b9f 100644
--- a/drivers/dma/dw/Kconfig
+++ b/drivers/dma/dw/Kconfig
@@ -24,5 +24,5 @@ config DW_DMAC_PCI
select DW_DMAC_CORE
help
Support the Synopsys DesignWare AHB DMA controller on the
- platfroms that enumerate it as a PCI device. For example,
+ platforms that enumerate it as a PCI device. For example,
Intel Medfield has integrated this GPDMA controller.
diff --git a/drivers/dma/ioat/hw.h b/drivers/dma/ioat/hw.h
index 8e67895..abcc51b 100644
--- a/drivers/dma/ioat/hw.h
+++ b/drivers/dma/ioat/hw.h
@@ -64,6 +64,8 @@
#define PCI_DEVICE_ID_INTEL_IOAT_BDX8 0x6f2e
#define PCI_DEVICE_ID_INTEL_IOAT_BDX9 0x6f2f
+#define PCI_DEVICE_ID_INTEL_IOAT_SKX 0x2021
+
#define IOAT_VER_1_2 0x12 /* Version 1.2 */
#define IOAT_VER_2_0 0x20 /* Version 2.0 */
#define IOAT_VER_3_0 0x30 /* Version 3.0 */
diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c
index 90eddd9..cc5259b 100644
--- a/drivers/dma/ioat/init.c
+++ b/drivers/dma/ioat/init.c
@@ -106,6 +106,8 @@ static struct pci_device_id ioat_pci_tbl[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX8) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BDX9) },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_SKX) },
+
/* I/OAT v3.3 platforms */
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD0) },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_IOAT_BWD1) },
@@ -243,10 +245,15 @@ static bool is_bdx_ioat(struct pci_dev *pdev)
}
}
+static inline bool is_skx_ioat(struct pci_dev *pdev)
+{
+ return pdev->device == PCI_DEVICE_ID_INTEL_IOAT_SKX;

+}
+
static bool is_xeon_cb32(struct pci_dev *pdev)
{
return is_jf_ioat(pdev) || is_snb_ioat(pdev) || is_ivb_ioat(pdev) ||
- is_hsw_ioat(pdev) || is_bdx_ioat(pdev);
+ is_hsw_ioat(pdev) || is_bdx_ioat(pdev) || is_skx_ioat(pdev);
}
bool is_bwd_ioat(struct pci_dev *pdev)
@@ -693,7 +700,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
/* doing 2 32bit writes to mmio since 1 64b write doesn't work */
ioat_chan->completion =
dma_pool_zalloc(ioat_chan->ioat_dma->completion_pool,
- GFP_KERNEL, &ioat_chan->completion_dma);
+ GFP_NOWAIT, &ioat_chan->completion_dma);
if (!ioat_chan->completion)
return -ENOMEM;
@@ -703,7 +710,7 @@ static int ioat_alloc_chan_resources(struct dma_chan *c)
ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH);
order = IOAT_MAX_ORDER;
- ring = ioat_alloc_ring(c, order, GFP_KERNEL);
+ ring = ioat_alloc_ring(c, order, GFP_NOWAIT);
if (!ring)
return -ENOMEM;
@@ -1357,6 +1364,8 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
device->version = readb(device->reg_base + IOAT_VER_OFFSET);
if (device->version >= IOAT_VER_3_0) {
+ if (is_skx_ioat(pdev))
+ device->version = IOAT_VER_3_2;
err = ioat3_dma_probe(device, ioat_dca_enabled);
if (device->version >= IOAT_VER_3_3)
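
Two independent things happen in this hunk set: Skylake Xeon (SKX) IOAT is enumerated and pinned to the 3.2 register model, and the channel-resource allocations switch to GFP_NOWAIT, presumably because this path can run where sleeping in the allocator is unsafe. A sketch of the non-sleeping allocation shape (pool and handle names are hypothetical):

    void *buf = dma_pool_zalloc(pool, GFP_NOWAIT, &dma_handle);
    if (!buf)
        return -ENOMEM; /* fail fast instead of sleeping for memory */
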
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index ac68666..daf479c 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -938,21 +938,14 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_DST_AMODE_POSTINC;
if (port_window) {
d->ccr |= CCR_SRC_AMODE_DBLIDX;
- d->ei = 1;
- /*
- * One frame covers the port_window and by configure
- * the source frame index to be -1 * (port_window - 1)
- * we instruct the sDMA that after a frame is processed
- * it should move back to the start of the window.
- */
- d->fi = -(port_window_bytes - 1);
if (port_window_bytes >= 64)
- d->csdp = CSDP_SRC_BURST_64 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_64;
else if (port_window_bytes >= 32)
- d->csdp = CSDP_SRC_BURST_32 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_32;
else if (port_window_bytes >= 16)
- d->csdp = CSDP_SRC_BURST_16 | CSDP_SRC_PACKED;
+ d->csdp |= CSDP_SRC_BURST_16;
+
} else {
d->ccr |= CCR_SRC_AMODE_CONSTANT;
}
@@ -962,13 +955,21 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
d->ccr |= CCR_SRC_AMODE_POSTINC;
if (port_window) {
d->ccr |= CCR_DST_AMODE_DBLIDX;
+ d->ei = 1;
+ /*
+ * One frame covers the port_window and by configuring
+ * the destination frame index to be -1 * (port_window - 1)
+ * we instruct the sDMA that after a frame is processed
+ * it should move back to the start of the window.
+ */
+ d->fi = -(port_window_bytes - 1);
if (port_window_bytes >= 64)
- d->csdp = CSDP_DST_BURST_64 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_64;
else if (port_window_bytes >= 32)
- d->csdp = CSDP_DST_BURST_32 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_32;
else if (port_window_bytes >= 16)
- d->csdp = CSDP_DST_BURST_16 | CSDP_DST_PACKED;
+ d->csdp |= CSDP_DST_BURST_16;
} else {
d->ccr |= CCR_DST_AMODE_CONSTANT;
}
@@ -1017,7 +1018,7 @@ static struct dma_async_tx_descriptor *omap_dma_prep_slave_sg(
osg->addr = sg_dma_address(sgent);
osg->en = en;
osg->fn = sg_dma_len(sgent) / frame_bytes;
- if (port_window && dir == DMA_MEM_TO_DEV) {
+ if (port_window && dir == DMA_DEV_TO_MEM) {
osg->ei = 1;
/*
* One frame covers the port_window and by configuring
@@ -1452,6 +1453,7 @@ static int omap_dma_probe(struct platform_device *pdev)
struct omap_dmadev *od;
struct resource *res;
int rc, i, irq;
+ u32 lch_count;
od = devm_kzalloc(&pdev->dev, sizeof(*od), GFP_KERNEL);
if (!od)
@@ -1494,20 +1496,31 @@ static int omap_dma_probe(struct platform_device *pdev)
spin_lock_init(&od->lock);
spin_lock_init(&od->irq_lock);
- if (!pdev->dev.of_node) {
- od->dma_requests = od->plat->dma_attr->lch_count;
- if (unlikely(!od->dma_requests))
- od->dma_requests = OMAP_SDMA_REQUESTS;
- } else if (of_property_read_u32(pdev->dev.of_node, "dma-requests",
- &od->dma_requests)) {
+ /* Number of DMA requests */
+ od->dma_requests = OMAP_SDMA_REQUESTS;
+ if (pdev->dev.of_node && of_property_read_u32(pdev->dev.of_node,
+ "dma-requests",
+ &od->dma_requests)) {
dev_info(&pdev->dev,
"Missing dma-requests property, using %u.\n",
OMAP_SDMA_REQUESTS);
- od->dma_requests = OMAP_SDMA_REQUESTS;
}
- od->lch_map = devm_kcalloc(&pdev->dev, od->dma_requests,
- sizeof(*od->lch_map), GFP_KERNEL);
+ /* Number of available logical channels */
+ if (!pdev->dev.of_node) {
+ lch_count = od->plat->dma_attr->lch_count;
+ if (unlikely(!lch_count))
+ lch_count = OMAP_SDMA_CHANNELS;
+ } else if (of_property_read_u32(pdev->dev.of_node, "dma-channels",
+ &lch_count)) {
+ dev_info(&pdev->dev,
+ "Missing dma-channels property, using %u.\n",
+ OMAP_SDMA_CHANNELS);
+ lch_count = OMAP_SDMA_CHANNELS;
+ }
+
+ od->lch_map = devm_kcalloc(&pdev->dev, lch_count, sizeof(*od->lch_map),
+ GFP_KERNEL);
if (!od->lch_map)
return -ENOMEM;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 87fd015..740bbb9 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -448,6 +448,9 @@ struct dma_pl330_chan {
/* for cyclic capability */
bool cyclic;
+
+ /* for runtime pm tracking */
+ bool active;
};
struct pl330_dmac {
@@ -2033,6 +2036,7 @@ static void pl330_tasklet(unsigned long data)
_stop(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
power_down = true;
+ pch->active = false;
} else {
/* Make sure the PL330 Channel thread is active */
spin_lock(&pch->thread->dmac->lock);
@@ -2052,6 +2056,7 @@ static void pl330_tasklet(unsigned long data)
desc->status = PREP;
list_move_tail(&desc->node, &pch->work_list);
if (power_down) {
+ pch->active = true;
spin_lock(&pch->thread->dmac->lock);
_start(pch->thread);
spin_unlock(&pch->thread->dmac->lock);
@@ -2166,6 +2171,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
unsigned long flags;
struct pl330_dmac *pl330 = pch->dmac;
LIST_HEAD(list);
+ bool power_down = false;
pm_runtime_get_sync(pl330->ddma.dev);
spin_lock_irqsave(&pch->lock, flags);
@@ -2176,6 +2182,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
pch->thread->req[0].desc = NULL;
pch->thread->req[1].desc = NULL;
pch->thread->req_running = -1;
+ power_down = pch->active;
+ pch->active = false;
/* Mark all desc done */
list_for_each_entry(desc, &pch->submitted_list, node) {
@@ -2193,6 +2201,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
spin_unlock_irqrestore(&pch->lock, flags);
pm_runtime_mark_last_busy(pl330->ddma.dev);
+ if (power_down)
+ pm_runtime_put_autosuspend(pl330->ddma.dev);
pm_runtime_put_autosuspend(pl330->ddma.dev);
return 0;
@@ -2357,6 +2367,7 @@ static void pl330_issue_pending(struct dma_chan *chan)
* updated on work_list emptiness status.
*/
WARN_ON(list_empty(&pch->submitted_list));
+ pch->active = true;
pm_runtime_get_sync(pch->dmac->ddma.dev);
}
list_splice_tail_init(&pch->submitted_list, &pch->work_list);
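
The new 'active' flag keeps the runtime-PM reference count balanced: issue_pending() takes a reference when it starts an idle channel, the tasklet clears the flag when the channel powers down on its own, and terminate_all() must now drop that outstanding reference in addition to its own. A sketch of the pairing (dev stands for pl330->ddma.dev):

    /* issue_pending(): starting an idle channel */
    pch->active = true;
    pm_runtime_get_sync(dev);           /* reference #1: held while active */

    /* terminate_all(): tearing the channel down */
    bool power_down;

    pm_runtime_get_sync(dev);           /* reference #2: for our own work */
    power_down = pch->active;           /* was reference #1 still held? */
    pch->active = false;
    /* ... stop thread, flush descriptors ... */
    if (power_down)
        pm_runtime_put_autosuspend(dev);    /* drop reference #1 */
    pm_runtime_put_autosuspend(dev);        /* drop reference #2 */
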
diff --git a/drivers/dma/sh/rcar-dmac.c b/drivers/dma/sh/rcar-dmac.c
index 2e441d0..4c357d4 100644
--- a/drivers/dma/sh/rcar-dmac.c
+++ b/drivers/dma/sh/rcar-dmac.c
@@ -986,6 +986,7 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
{
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
struct rcar_dmac *dmac = to_rcar_dmac(chan->device);
+ struct rcar_dmac_chan_map *map = &rchan->map;
struct rcar_dmac_desc_page *page, *_page;
struct rcar_dmac_desc *desc;
LIST_HEAD(list);
@@ -1019,6 +1020,13 @@ static void rcar_dmac_free_chan_resources(struct dma_chan *chan)
free_page((unsigned long)page);
}
+ /* Remove slave mapping if present. */
+ if (map->slave.xfer_size) {
+ dma_unmap_resource(chan->device->dev, map->addr,
+ map->slave.xfer_size, map->dir, 0);
+ map->slave.xfer_size = 0;
+ }
+
pm_runtime_put(chan->device->dev);
}
diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
index 3688d08..3056ce7 100644
--- a/drivers/dma/stm32-dma.c
+++ b/drivers/dma/stm32-dma.c
@@ -880,7 +880,7 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
struct virt_dma_desc *vdesc;
enum dma_status status;
unsigned long flags;
- u32 residue;
+ u32 residue = 0;
status = dma_cookie_status(c, cookie, state);
if ((status == DMA_COMPLETE) || (!state))
@@ -888,16 +888,12 @@ static enum dma_status stm32_dma_tx_status(struct dma_chan *c,
spin_lock_irqsave(&chan->vchan.lock, flags);
vdesc = vchan_find_desc(&chan->vchan, cookie);
- if (cookie == chan->desc->vdesc.tx.cookie) {
+ if (chan->desc && cookie == chan->desc->vdesc.tx.cookie)
residue = stm32_dma_desc_residue(chan, chan->desc,
chan->next_sg);
- } else if (vdesc) {
+ else if (vdesc)
residue = stm32_dma_desc_residue(chan,
to_stm32_dma_desc(vdesc), 0);
- } else {
- residue = 0;
- }
-
dma_set_residue(state, residue);
spin_unlock_irqrestore(&chan->vchan.lock, flags);
@@ -972,21 +968,18 @@ static struct dma_chan *stm32_dma_of_xlate(struct of_phandle_args *dma_spec,
struct stm32_dma_chan *chan;
struct dma_chan *c;
- if (dma_spec->args_count < 3)
+ if (dma_spec->args_count < 4)
return NULL;
cfg.channel_id = dma_spec->args[0];
cfg.request_line = dma_spec->args[1];
cfg.stream_config = dma_spec->args[2];
- cfg.threshold = 0;
+ cfg.threshold = dma_spec->args[3];
if ((cfg.channel_id >= STM32_DMA_MAX_CHANNELS) || (cfg.request_line >=
STM32_DMA_MAX_REQUEST_ID))
return NULL;
- if (dma_spec->args_count > 3)
- cfg.threshold = dma_spec->args[3];
-
chan = &dmadev->chan[cfg.channel_id];
c = dma_get_slave_channel(&chan->vchan.chan);
diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c
index 3f24aeb..2403475 100644
--- a/drivers/dma/ti-dma-crossbar.c
+++ b/drivers/dma/ti-dma-crossbar.c
@@ -149,6 +149,7 @@ static int ti_am335x_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_am335x_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
@@ -339,6 +340,7 @@ static int ti_dra7_xbar_probe(struct platform_device *pdev)
match = of_match_node(ti_dra7_master_match, dma_node);
if (!match) {
dev_err(&pdev->dev, "DMA master is not supported\n");
+ of_node_put(dma_node);
return -EINVAL;
}
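
Both probe paths above leaked a device-node reference on the error return: of_parse_phandle() and friends take a reference that of_node_put() must drop on every exit. A minimal sketch of the rule (property and table names follow the driver but should be treated as assumptions):

    struct device_node *dma_node;

    dma_node = of_parse_phandle(node, "dma-masters", 0);    /* takes a reference */
    if (!dma_node)
        return -ENODEV;
    if (!of_match_node(master_match, dma_node)) {
        of_node_put(dma_node);  /* drop it on the error path too */
        return -EINVAL;
    }
    /* ... */
    of_node_put(dma_node);      /* and on the normal path */
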
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 7829846..7c1e3a7 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -453,7 +453,7 @@ int extcon_sync(struct extcon_dev *edev, unsigned int id)
dev_err(&edev->dev, "out of memory in extcon_set_state\n");
kobject_uevent(&edev->dev.kobj, KOBJ_CHANGE);
- return 0;
+ return -ENOMEM;
}
length = name_show(&edev->dev, NULL, prop_buf);
diff --git a/drivers/firmware/efi/fake_mem.c b/drivers/firmware/efi/fake_mem.c
index 520a40e..6c7d60c 100644
--- a/drivers/firmware/efi/fake_mem.c
+++ b/drivers/firmware/efi/fake_mem.c
@@ -71,8 +71,7 @@ void __init efi_fake_memmap(void)
}
/* allocate memory for new EFI memmap */
- new_memmap_phy = memblock_alloc(efi.memmap.desc_size * new_nr_map,
- PAGE_SIZE);
+ new_memmap_phy = efi_memmap_alloc(new_nr_map);
if (!new_memmap_phy)
return;
diff --git a/drivers/firmware/efi/libstub/efistub.h b/drivers/firmware/efi/libstub/efistub.h
index b98824e..0e2a96b 100644
--- a/drivers/firmware/efi/libstub/efistub.h
+++ b/drivers/firmware/efi/libstub/efistub.h
@@ -39,14 +39,6 @@ efi_status_t efi_file_close(void *handle);
unsigned long get_dram_base(efi_system_table_t *sys_table_arg);
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver);
-
efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
void *handle,
unsigned long *new_fdt_addr,
diff --git a/drivers/firmware/efi/libstub/fdt.c b/drivers/firmware/efi/libstub/fdt.c
index a6a9311..921dfa0 100644
--- a/drivers/firmware/efi/libstub/fdt.c
+++ b/drivers/firmware/efi/libstub/fdt.c
@@ -16,13 +16,10 @@
#include "efistub.h"
-efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
- unsigned long orig_fdt_size,
- void *fdt, int new_fdt_size, char *cmdline_ptr,
- u64 initrd_addr, u64 initrd_size,
- efi_memory_desc_t *memory_map,
- unsigned long map_size, unsigned long desc_size,
- u32 desc_ver)
+static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
+ unsigned long orig_fdt_size,
+ void *fdt, int new_fdt_size, char *cmdline_ptr,
+ u64 initrd_addr, u64 initrd_size)
{
int node, num_rsv;
int status;
@@ -101,25 +98,23 @@ efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
if (status)
goto fdt_set_fail;
- fdt_val64 = cpu_to_fdt64((u64)(unsigned long)memory_map);
+ fdt_val64 = U64_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-start",
&fdt_val64, sizeof(fdt_val64));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(map_size);
+ fdt_val32 = U32_MAX; /* placeholder */
status = fdt_setprop(fdt, node, "linux,uefi-mmap-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_size);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-size",
&fdt_val32, sizeof(fdt_val32));
if (status)
goto fdt_set_fail;
- fdt_val32 = cpu_to_fdt32(desc_ver);
status = fdt_setprop(fdt, node, "linux,uefi-mmap-desc-ver",
&fdt_val32, sizeof(fdt_val32));
if (status)
@@ -148,6 +143,43 @@ fdt_set_fail:
return EFI_LOAD_ERROR;
}
+static efi_status_t update_fdt_memmap(void *fdt, struct efi_boot_memmap *map)
+{
+ int node = fdt_path_offset(fdt, "/chosen");
+ u64 fdt_val64;
+ u32 fdt_val32;
+ int err;
+
+ if (node < 0)
+ return EFI_LOAD_ERROR;
+
+ fdt_val64 = cpu_to_fdt64((unsigned long)*map->map);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
+ &fdt_val64, sizeof(fdt_val64));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->map_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_size);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-size",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ fdt_val32 = cpu_to_fdt32(*map->desc_ver);
+ err = fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-desc-ver",
+ &fdt_val32, sizeof(fdt_val32));
+ if (err)
+ return EFI_LOAD_ERROR;
+
+ return EFI_SUCCESS;
+}
+
#ifndef EFI_FDT_ALIGN
#define EFI_FDT_ALIGN EFI_PAGE_SIZE
#endif
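
The split above exists because the memory map is only final after exit_boot_services(): update_fdt() writes fixed-size placeholder values while allocations are still possible, and update_fdt_memmap() later rewrites them with fdt_setprop_inplace(), which never resizes the blob and so cannot fail for lack of space at a point where no allocation is allowed. The pattern in isolation (map_start is a stand-in for the real map address):

    /* boot-services phase: reserve the property with a dummy of final size */
    fdt_val64 = U64_MAX;
    fdt_setprop(fdt, node, "linux,uefi-mmap-start", &fdt_val64, sizeof(fdt_val64));

    /* after ExitBootServices(): overwrite in place, same length, no realloc */
    fdt_val64 = cpu_to_fdt64(map_start);
    fdt_setprop_inplace(fdt, node, "linux,uefi-mmap-start",
                        &fdt_val64, sizeof(fdt_val64));
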
@@ -243,20 +275,10 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
goto fail;
}
- /*
- * Now that we have done our final memory allocation (and free)
- * we can get the memory map key needed for
- * exit_boot_services().
- */
- status = efi_get_memory_map(sys_table, &map);
- if (status != EFI_SUCCESS)
- goto fail_free_new_fdt;
-
status = update_fdt(sys_table,
(void *)fdt_addr, fdt_size,
(void *)*new_fdt_addr, new_fdt_size,
- cmdline_ptr, initrd_addr, initrd_size,
- memory_map, map_size, desc_size, desc_ver);
+ cmdline_ptr, initrd_addr, initrd_size);
/* Succeeding the first time is the expected case. */
if (status == EFI_SUCCESS)
@@ -266,20 +288,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
/*
* We need to allocate more space for the new
* device tree, so free existing buffer that is
- * too small. Also free memory map, as we will need
- * to get new one that reflects the free/alloc we do
- * on the device tree buffer.
+ * too small.
*/
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
- sys_table->boottime->free_pool(memory_map);
new_fdt_size += EFI_PAGE_SIZE;
} else {
pr_efi_err(sys_table, "Unable to construct new device tree.\n");
- goto fail_free_mmap;
+ goto fail_free_new_fdt;
}
}
- sys_table->boottime->free_pool(memory_map);
priv.runtime_map = runtime_map;
priv.runtime_entry_count = &runtime_entry_count;
status = efi_exit_boot_services(sys_table, handle, &map, &priv,
@@ -288,6 +306,16 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
if (status == EFI_SUCCESS) {
efi_set_virtual_address_map_t *svam;
+ status = update_fdt_memmap((void *)*new_fdt_addr, &map);
+ if (status != EFI_SUCCESS) {
+ /*
+ * The kernel won't get far without the memory map, but
+ * may still be able to print something meaningful so
+ * return success here.
+ */
+ return EFI_SUCCESS;
+ }
+
/* Install the new virtual address map */
svam = sys_table->runtime->set_virtual_address_map;
status = svam(runtime_entry_count * desc_size, desc_size,
@@ -319,9 +347,6 @@ efi_status_t allocate_new_fdt_and_exit_boot(efi_system_table_t *sys_table,
pr_efi_err(sys_table, "Exit boot services failed.\n");
-fail_free_mmap:
- sys_table->boottime->free_pool(memory_map);
-
fail_free_new_fdt:
efi_free(sys_table, new_fdt_size, *new_fdt_addr);
diff --git a/drivers/firmware/efi/memmap.c b/drivers/firmware/efi/memmap.c
index f03ddec..7868644 100644
--- a/drivers/firmware/efi/memmap.c
+++ b/drivers/firmware/efi/memmap.c
@@ -9,6 +9,44 @@
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+
+static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
+{
+ return memblock_alloc(size, 0);
+}
+
+static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
+{
+ unsigned int order = get_order(size);
+ struct page *p = alloc_pages(GFP_KERNEL, order);
+
+ if (!p)
+ return 0;
+
+ return PFN_PHYS(page_to_pfn(p));
+}
+
+/**
+ * efi_memmap_alloc - Allocate memory for the EFI memory map
+ * @num_entries: Number of entries in the allocated map.
+ *
+ * Depending on whether mm_init() has already been invoked or not,
+ * either memblock or "normal" page allocation is used.
+ *
+ * Returns the physical address of the allocated memory map on
+ * success, zero on failure.
+ */
+phys_addr_t __init efi_memmap_alloc(unsigned int num_entries)
+{
+ unsigned long size = num_entries * efi.memmap.desc_size;
+
+ if (slab_is_available())
+ return __efi_memmap_alloc_late(size);
+
+ return __efi_memmap_alloc_early(size);
+}
/**
* __efi_memmap_init - Common code for mapping the EFI memory map
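
slab_is_available() is the switch that lets efi_memmap_alloc() be called both before and after mm_init(): early callers get memblock memory, late callers get pages from the buddy allocator, and both paths return a physical address (zero on failure), matching this tree's memblock_alloc() signature. A condensed sketch of the same switch (boot_phase_alloc() is a hypothetical name):

    phys_addr_t boot_phase_alloc(unsigned long size)
    {
        if (slab_is_available()) {  /* normal allocators are up */
            struct page *p = alloc_pages(GFP_KERNEL, get_order(size));

            return p ? PFN_PHYS(page_to_pfn(p)) : 0;
        }
        return memblock_alloc(size, 0); /* early boot: memblock only */
    }
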
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index 1e8fde8..2292742 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -205,7 +205,7 @@ static int mxs_gpio_set_wake_irq(struct irq_data *d, unsigned int enable)
return 0;
}
-static int __init mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
+static int mxs_gpio_init_gc(struct mxs_gpio_port *port, int irq_base)
{
struct irq_chip_generic *gc;
struct irq_chip_type *ct;
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index f4c26c7..a07ae9e 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1317,12 +1317,12 @@ void gpiochip_remove(struct gpio_chip *chip)
/* FIXME: should the legacy sysfs handling be moved to gpio_device? */
gpiochip_sysfs_unregister(gdev);
+ gpiochip_free_hogs(chip);
/* Numb the device, cancelling all outstanding operations */
gdev->chip = NULL;
gpiochip_irqchip_remove(chip);
acpi_gpiochip_remove(chip);
gpiochip_remove_pin_ranges(chip);
- gpiochip_free_hogs(chip);
of_gpiochip_remove(chip);
/*
* We accept no more calls into the driver from this point, so
@@ -1723,7 +1723,7 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
}
/**
- * _gpiochip_irqchip_add() - adds an irqchip to a gpiochip
+ * gpiochip_irqchip_add_key() - adds an irqchip to a gpiochip
* @gpiochip: the gpiochip to add the irqchip to
* @irqchip: the irqchip to add to the gpiochip
* @first_irq: if not dynamically assigned, the base (first) IRQ to
@@ -1749,13 +1749,13 @@ static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip)
* the pins on the gpiochip can generate a unique IRQ. Everything else
* needs to be open coded.
*/
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int first_irq,
- irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key)
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key)
{
struct device_node *of_node;
bool irq_base_set = false;
@@ -1840,7 +1840,7 @@ int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
return 0;
}
-EXPORT_SYMBOL_GPL(_gpiochip_irqchip_add);
+EXPORT_SYMBOL_GPL(gpiochip_irqchip_add_key);
#else /* CONFIG_GPIOLIB_IRQCHIP */
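
The rename reflects how the symbol is reached: drivers keep calling gpiochip_irqchip_add(), and a per-call-site wrapper supplies the lockdep class key. A hedged sketch of what such a wrapper plausibly looks like (the wrapper itself is not part of this diff):

    #define gpiochip_irqchip_add(gc, irqchip, first_irq, handler, type)    \
    ({                                                                     \
        static struct lock_class_key key;   /* one key per call site */   \
        gpiochip_irqchip_add_key(gc, irqchip, first_irq, handler, type,   \
                                 false, &key);                            \
    })
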
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index 9ada56c..4c851fde 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -840,6 +840,9 @@ static int amdgpu_cgs_get_firmware_info(struct cgs_device *cgs_device,
else if (type == CGS_UCODE_ID_SMU_SK)
strcpy(fw_name, "amdgpu/polaris10_smc_sk.bin");
break;
+ case CHIP_POLARIS12:
+ strcpy(fw_name, "amdgpu/polaris12_smc.bin");
+ break;
default:
DRM_ERROR("SMC firmware not supported\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 29d6d84..41e41f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
}
break;
}
+
+ if (!(*out_ring && (*out_ring)->adev)) {
+ DRM_ERROR("Ring %d is not initialized on IP %d\n",
+ ring, ip_type);
+ return -EINVAL;
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 60bd4afe..fe3bb94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -73,6 +73,7 @@ static const char *amdgpu_asic_name[] = {
"STONEY",
"POLARIS10",
"POLARIS11",
+ "POLARIS12",
"LAST",
};
@@ -1277,6 +1278,7 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
case CHIP_FIJI:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8cb937b..2534ada 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -418,6 +418,13 @@ static const struct pci_device_id pciidlist[] = {
{0x1002, 0x67CA, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CC, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
{0x1002, 0x67CF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS10},
+ /* Polaris12 */
+ {0x1002, 0x6980, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6986, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x6987, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
+ {0x1002, 0x699F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_POLARIS12},
{0, 0, 0}
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
index fc592c2..95a568d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_powerplay.c
@@ -98,6 +98,7 @@ static int amdgpu_pp_early_init(void *handle)
switch (adev->asic_type) {
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_TONGA:
case CHIP_FIJI:
case CHIP_TOPAZ:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a81dfae..1d564be 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -65,6 +65,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_uvd.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_uvd.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_uvd.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_uvd.bin"
/**
* amdgpu_uvd_cs_ctx - Command submission parser context
@@ -98,6 +99,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
@@ -149,6 +151,9 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 69b66b9..8fec802 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -52,6 +52,7 @@
#define FIRMWARE_STONEY "amdgpu/stoney_vce.bin"
#define FIRMWARE_POLARIS10 "amdgpu/polaris10_vce.bin"
#define FIRMWARE_POLARIS11 "amdgpu/polaris11_vce.bin"
+#define FIRMWARE_POLARIS12 "amdgpu/polaris12_vce.bin"
#ifdef CONFIG_DRM_AMDGPU_CIK
MODULE_FIRMWARE(FIRMWARE_BONAIRE);
@@ -66,6 +67,7 @@ MODULE_FIRMWARE(FIRMWARE_FIJI);
MODULE_FIRMWARE(FIRMWARE_STONEY);
MODULE_FIRMWARE(FIRMWARE_POLARIS10);
MODULE_FIRMWARE(FIRMWARE_POLARIS11);
+MODULE_FIRMWARE(FIRMWARE_POLARIS12);
static void amdgpu_vce_idle_work_handler(struct work_struct *work);
@@ -121,6 +123,9 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
case CHIP_POLARIS11:
fw_name = FIRMWARE_POLARIS11;
break;
+ case CHIP_POLARIS12:
+ fw_name = FIRMWARE_POLARIS12;
+ break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 9999dc7..ccb5e02 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2512,6 +2512,8 @@ static int dce_v10_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2537,7 +2539,6 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2578,7 +2579,9 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2587,16 +2590,10 @@ static int dce_v10_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v10_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v10_0_show_cursor(crtc);
@@ -2620,7 +2617,6 @@ unpin:
static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v10_0_lock_cursor(crtc, true);
@@ -2628,10 +2624,6 @@ static void dce_v10_0_cursor_reset(struct drm_crtc *crtc)
dce_v10_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v10_0_show_cursor(crtc);
dce_v10_0_lock_cursor(crtc, false);
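
The restructuring above (repeated for dce_v11_0, dce_v6_0 and dce_v8_0 below) moves the CUR_SIZE write into the locked move path, so the size registers are reprogrammed on every cursor move rather than only when the dimensions change; the now-unused adev locals go away with the removed WREG32 calls. The register packs both dimensions, minus one, into a single word:

    /* bits 31..16 = width - 1, bits 15..0 = height - 1 */
    WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
           ((amdgpu_crtc->cursor_width - 1) << 16) |
            (amdgpu_crtc->cursor_height - 1));
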
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index b3d62b9..a7af5b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -167,6 +167,7 @@ static void dce_v11_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(stoney_golden_settings_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
polaris11_golden_settings_a11,
(const u32)ARRAY_SIZE(polaris11_golden_settings_a11));
@@ -608,6 +609,7 @@ static int dce_v11_0_get_num_crtc (struct amdgpu_device *adev)
num_crtc = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
num_crtc = 5;
break;
default:
@@ -1589,6 +1591,7 @@ static int dce_v11_0_audio_init(struct amdgpu_device *adev)
adev->mode_info.audio.num_pins = 8;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.audio.num_pins = 6;
break;
default:
@@ -2388,7 +2391,8 @@ static u32 dce_v11_0_pick_pll(struct drm_crtc *crtc)
int pll;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
struct amdgpu_encoder_atom_dig *dig = amdgpu_encoder->enc_priv;
@@ -2528,6 +2532,8 @@ static int dce_v11_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2553,7 +2559,6 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2594,7 +2599,9 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2603,16 +2610,10 @@ static int dce_v11_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v11_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v11_0_show_cursor(crtc);
@@ -2636,7 +2637,6 @@ unpin:
static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v11_0_lock_cursor(crtc, true);
@@ -2644,10 +2644,6 @@ static void dce_v11_0_cursor_reset(struct drm_crtc *crtc)
dce_v11_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v11_0_show_cursor(crtc);
dce_v11_0_lock_cursor(crtc, false);
@@ -2822,7 +2818,8 @@ static int dce_v11_0_crtc_mode_set(struct drm_crtc *crtc,
return -EINVAL;
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
struct amdgpu_encoder *amdgpu_encoder =
to_amdgpu_encoder(amdgpu_crtc->encoder);
int encoder_mode =
@@ -2992,6 +2989,7 @@ static int dce_v11_0_early_init(void *handle)
adev->mode_info.num_dig = 6;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
adev->mode_info.num_hpd = 5;
adev->mode_info.num_dig = 5;
break;
@@ -3101,7 +3099,8 @@ static int dce_v11_0_hw_init(void *handle)
amdgpu_atombios_crtc_powergate_init(adev);
amdgpu_atombios_encoder_init_dig(adev);
if ((adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11)) {
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
amdgpu_atombios_crtc_set_dce_clock(adev, adev->clock.default_dispclk,
DCE_CLOCK_TYPE_DISPCLK, ATOM_GCK_DFS);
amdgpu_atombios_crtc_set_dce_clock(adev, 0,
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index b4e4ec6..39df6a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -1859,6 +1859,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
struct amdgpu_device *adev = crtc->dev->dev_private;
int xorigin = 0, yorigin = 0;
+ int w = amdgpu_crtc->cursor_width;
+
amdgpu_crtc->cursor_x = x;
amdgpu_crtc->cursor_y = y;
@@ -1878,6 +1880,8 @@ static int dce_v6_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((w - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -1903,7 +1907,6 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -1944,7 +1947,9 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -1953,16 +1958,10 @@ static int dce_v6_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v6_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v6_0_show_cursor(crtc);
@@ -1986,7 +1985,6 @@ unpin:
static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v6_0_lock_cursor(crtc, true);
@@ -1994,10 +1992,6 @@ static void dce_v6_0_cursor_reset(struct drm_crtc *crtc)
dce_v6_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v6_0_show_cursor(crtc);
dce_v6_0_lock_cursor(crtc, false);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index 584abe8..28102bb 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2363,6 +2363,8 @@ static int dce_v8_0_cursor_move_locked(struct drm_crtc *crtc,
WREG32(mmCUR_POSITION + amdgpu_crtc->crtc_offset, (x << 16) | y);
WREG32(mmCUR_HOT_SPOT + amdgpu_crtc->crtc_offset, (xorigin << 16) | yorigin);
+ WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
+ ((amdgpu_crtc->cursor_width - 1) << 16) | (amdgpu_crtc->cursor_height - 1));
return 0;
}
@@ -2388,7 +2390,6 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
int32_t hot_y)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
struct drm_gem_object *obj;
struct amdgpu_bo *aobj;
int ret;
@@ -2429,7 +2430,9 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_lock_cursor(crtc, true);
- if (hot_x != amdgpu_crtc->cursor_hot_x ||
+ if (width != amdgpu_crtc->cursor_width ||
+ height != amdgpu_crtc->cursor_height ||
+ hot_x != amdgpu_crtc->cursor_hot_x ||
hot_y != amdgpu_crtc->cursor_hot_y) {
int x, y;
@@ -2438,16 +2441,10 @@ static int dce_v8_0_crtc_cursor_set2(struct drm_crtc *crtc,
dce_v8_0_cursor_move_locked(crtc, x, y);
- amdgpu_crtc->cursor_hot_x = hot_x;
- amdgpu_crtc->cursor_hot_y = hot_y;
- }
-
- if (width != amdgpu_crtc->cursor_width ||
- height != amdgpu_crtc->cursor_height) {
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (width - 1) << 16 | (height - 1));
amdgpu_crtc->cursor_width = width;
amdgpu_crtc->cursor_height = height;
+ amdgpu_crtc->cursor_hot_x = hot_x;
+ amdgpu_crtc->cursor_hot_y = hot_y;
}
dce_v8_0_show_cursor(crtc);
@@ -2471,7 +2468,6 @@ unpin:
static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
{
struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct amdgpu_device *adev = crtc->dev->dev_private;
if (amdgpu_crtc->cursor_bo) {
dce_v8_0_lock_cursor(crtc, true);
@@ -2479,10 +2475,6 @@ static void dce_v8_0_cursor_reset(struct drm_crtc *crtc)
dce_v8_0_cursor_move_locked(crtc, amdgpu_crtc->cursor_x,
amdgpu_crtc->cursor_y);
- WREG32(mmCUR_SIZE + amdgpu_crtc->crtc_offset,
- (amdgpu_crtc->cursor_width - 1) << 16 |
- (amdgpu_crtc->cursor_height - 1));
-
dce_v8_0_show_cursor(crtc);
dce_v8_0_lock_cursor(crtc, false);
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index 762f8e8..e9a1768 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
- struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
- kfree(amdgpu_encoder->enc_priv);
drm_encoder_cleanup(encoder);
- kfree(amdgpu_encoder);
+ kfree(encoder);
}
static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index d0ec009..3733741 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -139,6 +139,13 @@ MODULE_FIRMWARE("amdgpu/polaris10_mec.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mec2.bin");
MODULE_FIRMWARE("amdgpu/polaris10_rlc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_ce.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_pfp.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_me.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mec2.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_rlc.bin");
+
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
@@ -689,6 +696,7 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(tonga_golden_common_all));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -903,6 +911,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_STONEY:
chip_name = "stoney";
break;
@@ -1768,6 +1779,7 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
gb_addr_config = TONGA_GB_ADDR_CONFIG_GOLDEN;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
ret = amdgpu_atombios_get_gfx_info(adev);
if (ret)
return ret;
@@ -2682,6 +2694,7 @@ static void gfx_v8_0_tiling_mode_table_init(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
modearray[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
PIPE_CONFIG(ADDR_SURF_P4_16x16) |
TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
@@ -3503,6 +3516,7 @@ gfx_v8_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
*rconf1 |= 0x0;
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
SE_XSEL(1) | SE_YSEL(1);
*rconf1 |= 0x0;
@@ -4021,7 +4035,8 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev)
cz_enable_cp_power_gating(adev, true);
else
cz_enable_cp_power_gating(adev, false);
- } else if (adev->asic_type == CHIP_POLARIS11) {
+ } else if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
gfx_v8_0_init_csb(adev);
gfx_v8_0_init_save_restore_list(adev);
gfx_v8_0_enable_save_restore_machine(adev);
@@ -4095,7 +4110,8 @@ static int gfx_v8_0_rlc_resume(struct amdgpu_device *adev)
RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);
if (adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmRLC_CGCG_CGLS_CTRL_3D);
tmp &= ~0x3;
WREG32(mmRLC_CGCG_CGLS_CTRL_3D, tmp);
@@ -4283,6 +4299,7 @@ static int gfx_v8_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_write(ring, 0x0000002A);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_ring_write(ring, 0x16000012);
amdgpu_ring_write(ring, 0x00000000);
break;
@@ -4664,7 +4681,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
(adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS11) ||
- (adev->asic_type == CHIP_POLARIS10)) {
+ (adev->asic_type == CHIP_POLARIS10) ||
+ (adev->asic_type == CHIP_POLARIS12)) {
WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
AMDGPU_DOORBELL_KIQ << 2);
WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
@@ -4700,7 +4718,8 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
mqd->cp_hqd_persistent_state = tmp;
if (adev->asic_type == CHIP_STONEY ||
adev->asic_type == CHIP_POLARIS11 ||
- adev->asic_type == CHIP_POLARIS10) {
+ adev->asic_type == CHIP_POLARIS10 ||
+ adev->asic_type == CHIP_POLARIS12) {
tmp = RREG32(mmCP_ME1_PIPE3_INT_CNTL);
tmp = REG_SET_FIELD(tmp, CP_ME1_PIPE3_INT_CNTL, GENERIC2_INT_ENABLE, 1);
WREG32(mmCP_ME1_PIPE3_INT_CNTL, tmp);
@@ -5279,7 +5298,8 @@ static int gfx_v8_0_late_init(void *handle)
static void gfx_v8_0_enable_gfx_static_mg_power_gating(struct amdgpu_device *adev,
bool enable)
{
- if (adev->asic_type == CHIP_POLARIS11)
+ if ((adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
/* Send msg to SMU via Powerplay */
amdgpu_set_powergating_state(adev,
AMD_IP_BLOCK_TYPE_SMC,
@@ -5353,6 +5373,7 @@ static int gfx_v8_0_set_powergating_state(void *handle,
gfx_v8_0_enable_gfx_dynamic_mg_power_gating(adev, false);
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable)
gfx_v8_0_enable_gfx_static_mg_power_gating(adev, true);
else
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 45a573e..e2b0b16 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -44,6 +44,7 @@ MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/pitcairn_mc.bin");
MODULE_FIRMWARE("radeon/verde_mc.bin");
MODULE_FIRMWARE("radeon/oland_mc.bin");
+MODULE_FIRMWARE("radeon/si58_mc.bin");
#define MC_SEQ_MISC0__MT__MASK 0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
@@ -113,6 +114,7 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
const char *chip_name;
char fw_name[30];
int err;
+ bool is_58_fw = false;
DRM_DEBUG("\n");
@@ -135,7 +137,14 @@ static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
default: BUG();
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
+ /* this memory configuration requires special firmware */
+ if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ is_58_fw = true;
+
+ if (is_58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
err = request_firmware(&adev->mc.fw, fw_name, adev->dev);
if (err)
goto out;
@@ -463,19 +472,11 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
WREG32(mmVM_CONTEXT1_CNTL,
VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
(1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
- ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT) |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_DEFAULT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
- VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);
+ ((amdgpu_vm_block_size - 9) << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
+ if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
+ gmc_v6_0_set_fault_enable_default(adev, false);
+ else
+ gmc_v6_0_set_fault_enable_default(adev, true);
gmc_v6_0_gart_flush_gpu_tlb(adev, 0);
dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
@@ -754,7 +755,10 @@ static int gmc_v6_0_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
+ return amdgpu_irq_get(adev, &adev->mc.vm_fault, 0);
+ else
+ return 0;
}
static int gmc_v6_0_sw_init(void *handle)
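
The firmware selection above keys off the memory configuration rather than the chip name: the top byte of MC_SEQ_MISC0 identifies the memory type/config, and the value 0x58 needs the dedicated si58_mc.bin image on any SI chip. The probe in isolation:

    u32 misc0 = RREG32(mmMC_SEQ_MISC0);
    bool is_58_fw = ((misc0 & 0xff000000) >> 24) == 0x58;
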
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 0daac3a..476bc9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -46,6 +46,7 @@ static int gmc_v8_0_wait_for_idle(void *handle);
MODULE_FIRMWARE("amdgpu/tonga_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_mc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_mc.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_mc.bin");
static const u32 golden_settings_tonga_a11[] =
{
@@ -130,6 +131,7 @@ static void gmc_v8_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -225,6 +227,9 @@ static int gmc_v8_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_FIJI:
case CHIP_CARRIZO:
case CHIP_STONEY:
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 1170a64..034ace7 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -60,6 +60,8 @@ MODULE_FIRMWARE("amdgpu/polaris10_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris10_sdma1.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma.bin");
MODULE_FIRMWARE("amdgpu/polaris11_sdma1.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_sdma1.bin");
static const u32 sdma_offsets[SDMA_MAX_INSTANCE] =
@@ -206,6 +208,7 @@ static void sdma_v3_0_init_golden_registers(struct amdgpu_device *adev)
(const u32)ARRAY_SIZE(golden_settings_tonga_a11));
break;
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
amdgpu_program_register_sequence(adev,
golden_settings_polaris11_a11,
(const u32)ARRAY_SIZE(golden_settings_polaris11_a11));
@@ -278,6 +281,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
case CHIP_POLARIS10:
chip_name = "polaris10";
break;
+ case CHIP_POLARIS12:
+ chip_name = "polaris12";
+ break;
case CHIP_CARRIZO:
chip_name = "carrizo";
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 6c65a1a2..6e150db 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -56,7 +56,6 @@
#define BIOS_SCRATCH_4 0x5cd
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
MODULE_FIRMWARE("radeon/verde_smc.bin");
@@ -65,6 +64,7 @@ MODULE_FIRMWARE("radeon/oland_smc.bin");
MODULE_FIRMWARE("radeon/oland_k_smc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
union power_info {
struct _ATOM_POWERPLAY_INFO info;
@@ -3488,30 +3488,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6817) ||
(adev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (adev->asic_type == CHIP_VERDE) {
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- } else if (adev->asic_type == CHIP_OLAND) {
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (adev->asic_type == CHIP_HAINAN) {
if ((adev->pdev->revision == 0x81) ||
(adev->pdev->revision == 0x83) ||
@@ -3520,7 +3496,6 @@ static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
(adev->pdev->device == 0x6665) ||
(adev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
@@ -7687,50 +7662,51 @@ static int si_dpm_init_microcode(struct amdgpu_device *adev)
chip_name = "tahiti";
break;
case CHIP_PITCAIRN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->device == 0x6810) ||
- (adev->pdev->device == 0x6811) ||
- (adev->pdev->device == 0x6816) ||
- (adev->pdev->device == 0x6817) ||
- (adev->pdev->device == 0x6806))
+ if ((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6810) ||
+ (adev->pdev->device == 0x6811)))
chip_name = "pitcairn_k";
else
chip_name = "pitcairn";
break;
case CHIP_VERDE:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6820) ||
- (adev->pdev->device == 0x6821) ||
- (adev->pdev->device == 0x6822) ||
- (adev->pdev->device == 0x6823) ||
- (adev->pdev->device == 0x682A) ||
- (adev->pdev->device == 0x682B))
+ if (((adev->pdev->device == 0x6820) &&
+ ((adev->pdev->revision == 0x81) ||
+ (adev->pdev->revision == 0x83))) ||
+ ((adev->pdev->device == 0x6821) &&
+ ((adev->pdev->revision == 0x83) ||
+ (adev->pdev->revision == 0x87))) ||
+ ((adev->pdev->revision == 0x87) &&
+ ((adev->pdev->device == 0x6823) ||
+ (adev->pdev->device == 0x682b))))
chip_name = "verde_k";
else
chip_name = "verde";
break;
case CHIP_OLAND:
- if ((adev->pdev->revision == 0xC7) ||
- (adev->pdev->revision == 0x80) ||
- (adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0x87) ||
- (adev->pdev->device == 0x6604) ||
- (adev->pdev->device == 0x6605))
+ if (((adev->pdev->revision == 0x81) &&
+ ((adev->pdev->device == 0x6600) ||
+ (adev->pdev->device == 0x6604) ||
+ (adev->pdev->device == 0x6605) ||
+ (adev->pdev->device == 0x6610))) ||
+ ((adev->pdev->revision == 0x83) &&
+ (adev->pdev->device == 0x6610)))
chip_name = "oland_k";
else
chip_name = "oland";
break;
case CHIP_HAINAN:
- if ((adev->pdev->revision == 0x81) ||
- (adev->pdev->revision == 0x83) ||
- (adev->pdev->revision == 0xC3) ||
- (adev->pdev->device == 0x6664) ||
- (adev->pdev->device == 0x6665) ||
- (adev->pdev->device == 0x6667))
+ if (((adev->pdev->revision == 0x81) &&
+ (adev->pdev->device == 0x6660)) ||
+ ((adev->pdev->revision == 0x83) &&
+ ((adev->pdev->device == 0x6660) ||
+ (adev->pdev->device == 0x6663) ||
+ (adev->pdev->device == 0x6665) ||
+ (adev->pdev->device == 0x6667))))
chip_name = "hainan_k";
+ else if ((adev->pdev->revision == 0xc3) &&
+ (adev->pdev->device == 0x6665))
+ chip_name = "banks_k_2";
else
chip_name = "hainan";
break;
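
The *_k microcode selection above replaces broad revision-only matches with exact (device, revision) pairs. A minimal sketch of the same idea as a table-driven lookup; the struct, the helper, and the two sample rows (taken from the Hainan branch above) are illustrative, not amdgpu code:

    #include <stddef.h>

    struct fw_quirk {
            unsigned short device;
            unsigned char revision;
            const char *name;
    };

    static const struct fw_quirk quirks[] = {
            { 0x6660, 0x81, "hainan_k" },
            { 0x6665, 0xc3, "banks_k_2" },
    };

    static const char *pick_fw_name(unsigned short device, unsigned char rev,
                                    const char *fallback)
    {
            size_t i;

            /* an exact (device, revision) match wins; else the plain name */
            for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                    if (quirks[i].device == device && quirks[i].revision == rev)
                            return quirks[i].name;
            return fallback;
    }

For example, pick_fw_name(0x6665, 0xc3, "hainan") returns "banks_k_2", mirroring the new banks_k_2 branch in the hunk.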
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 96444e4..7fb9137 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -40,13 +40,14 @@
#include "smu/smu_7_0_1_sh_mask.h"
static void uvd_v4_2_mc_resume(struct amdgpu_device *adev);
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev);
static void uvd_v4_2_set_ring_funcs(struct amdgpu_device *adev);
static void uvd_v4_2_set_irq_funcs(struct amdgpu_device *adev);
static int uvd_v4_2_start(struct amdgpu_device *adev);
static void uvd_v4_2_stop(struct amdgpu_device *adev);
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state);
+static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
+ bool sw_mode);
/**
* uvd_v4_2_ring_get_rptr - get read pointer
*
@@ -140,7 +141,8 @@ static int uvd_v4_2_sw_fini(void *handle)
return r;
}
-
+static void uvd_v4_2_enable_mgcg(struct amdgpu_device *adev,
+ bool enable);
/**
* uvd_v4_2_hw_init - start and test UVD block
*
@@ -155,8 +157,7 @@ static int uvd_v4_2_hw_init(void *handle)
uint32_t tmp;
int r;
- uvd_v4_2_init_cg(adev);
- uvd_v4_2_set_clockgating_state(adev, AMD_CG_STATE_GATE);
+ uvd_v4_2_enable_mgcg(adev, true);
amdgpu_asic_set_uvd_clocks(adev, 10000, 10000);
r = uvd_v4_2_start(adev);
if (r)
@@ -266,11 +267,13 @@ static int uvd_v4_2_start(struct amdgpu_device *adev)
struct amdgpu_ring *ring = &adev->uvd.ring;
uint32_t rb_bufsz;
int i, j, r;
-
/* disable byte swapping */
u32 lmi_swap_cntl = 0;
u32 mp_swap_cntl = 0;
+ WREG32(mmUVD_CGC_GATE, 0);
+ uvd_v4_2_set_dcm(adev, true);
+
uvd_v4_2_mc_resume(adev);
/* disable interrupt */
@@ -406,6 +409,8 @@ static void uvd_v4_2_stop(struct amdgpu_device *adev)
/* Unstall UMC and register bus */
WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));
+
+ uvd_v4_2_set_dcm(adev, false);
}
/**
@@ -619,19 +624,6 @@ static void uvd_v4_2_set_dcm(struct amdgpu_device *adev,
WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}
-static void uvd_v4_2_init_cg(struct amdgpu_device *adev)
-{
- bool hw_mode = true;
-
- if (hw_mode) {
- uvd_v4_2_set_dcm(adev, false);
- } else {
- u32 tmp = RREG32(mmUVD_CGC_CTRL);
- tmp &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
- WREG32(mmUVD_CGC_CTRL, tmp);
- }
-}
-
static bool uvd_v4_2_is_idle(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -685,17 +677,6 @@ static int uvd_v4_2_process_interrupt(struct amdgpu_device *adev,
static int uvd_v4_2_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
- bool gate = false;
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
- if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
- return 0;
-
- if (state == AMD_CG_STATE_GATE)
- gate = true;
-
- uvd_v4_2_enable_mgcg(adev, gate);
-
return 0;
}
@@ -711,9 +692,6 @@ static int uvd_v4_2_set_powergating_state(void *handle,
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
- return 0;
-
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index a79e283..6de6bec 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -791,15 +791,10 @@ static int uvd_v5_0_set_clockgating_state(void *handle,
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- static int curstate = -1;
if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
- if (curstate == state)
- return 0;
-
- curstate = state;
if (enable) {
/* wait for STATUS to clear */
if (uvd_v5_0_wait_for_idle(handle))
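
The deleted curstate cache was a function-local static: one variable shared by every UVD instance in the system and never reset across suspend/resume, so dropping it is the safe fix. If such a cache were wanted, it would have to live in per-device state; a rough sketch with hypothetical names:

    /* 'last_cg_gated' is an invented field, not part of amdgpu. */
    struct uvd_cg_state {
            int last_cg_gated;      /* -1 = unknown, 0 = ungated, 1 = gated */
    };

    static int set_uvd_clockgating(struct uvd_cg_state *s, int gate)
    {
            if (s->last_cg_gated == gate)
                    return 0;       /* per-device cache: safe with several GPUs */
            s->last_cg_gated = gate;
            /* ... program the MGCG registers here ... */
            return 0;
    }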
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index 6b3293a..37ca685 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -43,9 +43,13 @@
#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
+#define GRBM_GFX_INDEX__VCE_ALL_PIPE 0x07
+
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR0 0x8616
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR1 0x8617
#define mmVCE_LMI_VCPU_CACHE_40BIT_BAR2 0x8618
+#define mmGRBM_GFX_INDEX_DEFAULT 0xE0000000
+
#define VCE_STATUS_VCPU_REPORT_FW_LOADED_MASK 0x02
#define VCE_V3_0_FW_SIZE (384 * 1024)
@@ -54,6 +58,9 @@
#define FW_52_8_3 ((52 << 24) | (8 << 16) | (3 << 8))
+#define GET_VCE_INSTANCE(i) ((i) << GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT \
+ | GRBM_GFX_INDEX__VCE_ALL_PIPE)
+
static void vce_v3_0_mc_resume(struct amdgpu_device *adev, int idx);
static void vce_v3_0_set_ring_funcs(struct amdgpu_device *adev);
static void vce_v3_0_set_irq_funcs(struct amdgpu_device *adev);
@@ -175,7 +182,7 @@ static void vce_v3_0_set_vce_sw_clock_gating(struct amdgpu_device *adev,
WREG32(mmVCE_UENC_CLOCK_GATING_2, data);
data = RREG32(mmVCE_UENC_REG_CLOCK_GATING);
- data &= ~0xffc00000;
+ data &= ~0x3ff;
WREG32(mmVCE_UENC_REG_CLOCK_GATING, data);
data = RREG32(mmVCE_UENC_DMA_DCLK_CTRL);
@@ -249,7 +256,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
vce_v3_0_mc_resume(adev, idx);
WREG32_FIELD(VCE_STATUS, JOB_BUSY, 1);
@@ -273,7 +280,7 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
}
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -288,7 +295,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
if (adev->vce.harvest_config & (1 << idx))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, idx);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(idx));
if (adev->asic_type >= CHIP_STONEY)
WREG32_P(mmVCE_VCPU_CNTL, 0, ~0x200001);
@@ -306,7 +313,7 @@ static int vce_v3_0_stop(struct amdgpu_device *adev)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
@@ -320,11 +327,12 @@ static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
{
u32 tmp;
- /* Fiji, Stoney, Polaris10, Polaris11 are single pipe */
+ /* Fiji, Stoney, Polaris10, Polaris11, Polaris12 are single pipe */
if ((adev->asic_type == CHIP_FIJI) ||
(adev->asic_type == CHIP_STONEY) ||
(adev->asic_type == CHIP_POLARIS10) ||
- (adev->asic_type == CHIP_POLARIS11))
+ (adev->asic_type == CHIP_POLARIS11) ||
+ (adev->asic_type == CHIP_POLARIS12))
return AMDGPU_VCE_HARVEST_VCE1;
/* Tonga and CZ are dual or single pipe */
@@ -585,17 +593,17 @@ static bool vce_v3_0_check_soft_reset(void *handle)
* VCE team suggests using bits 3 to 6 for the busy status check
*/
mutex_lock(&adev->grbm_idx_mutex);
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0x10);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(1));
if (RREG32(mmVCE_STATUS) & AMDGPU_VCE_STATUS_BUSY_MASK) {
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE0, 1);
srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset, SRBM_SOFT_RESET, SOFT_RESET_VCE1, 1);
}
- WREG32_FIELD(GRBM_GFX_INDEX, INSTANCE_INDEX, 0);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(0));
mutex_unlock(&adev->grbm_idx_mutex);
if (srbm_soft_reset) {
@@ -733,7 +741,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
if (adev->vce.harvest_config & (1 << i))
continue;
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, i);
+ WREG32(mmGRBM_GFX_INDEX, GET_VCE_INSTANCE(i));
if (enable) {
/* initialize VCE_CLOCK_GATING_A: Clock ON/OFF delay */
@@ -752,7 +760,7 @@ static int vce_v3_0_set_clockgating_state(void *handle,
vce_v3_0_set_vce_sw_clock_gating(adev, enable);
}
- WREG32_FIELD(GRBM_GFX_INDEX, VCE_INSTANCE, 0);
+ WREG32(mmGRBM_GFX_INDEX, mmGRBM_GFX_INDEX_DEFAULT);
mutex_unlock(&adev->grbm_idx_mutex);
return 0;
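
The repeated GET_VCE_INSTANCE() writes above pack the VCE instance index together with the all-pipes bits into GRBM_GFX_INDEX. A worked expansion of the encoding, using the constants defined earlier in this file:

    /* GET_VCE_INSTANCE(i) = (i << 4) | 0x07, so:
     *
     *   GET_VCE_INSTANCE(0) == 0x07    instance 0, all pipes selected
     *   GET_VCE_INSTANCE(1) == 0x17    instance 1, all pipes selected
     *
     * Writing mmGRBM_GFX_INDEX_DEFAULT (0xE0000000) afterwards appears to
     * restore broadcast addressing, which is why each loop ends with it.
     */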
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index bf088d6..c2ac54f 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -88,6 +88,7 @@ MODULE_FIRMWARE("amdgpu/polaris10_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris10_smc_sk.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc.bin");
MODULE_FIRMWARE("amdgpu/polaris11_smc_sk.bin");
+MODULE_FIRMWARE("amdgpu/polaris12_smc.bin");
/*
* Indirect registers accessor
@@ -312,6 +313,7 @@ static void vi_init_golden_registers(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
default:
break;
}
@@ -671,6 +673,7 @@ static int vi_read_register(struct amdgpu_device *adev, u32 se_num,
case CHIP_TONGA:
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
case CHIP_CARRIZO:
case CHIP_STONEY:
asic_register_table = cz_allowed_read_registers;
@@ -994,6 +997,11 @@ static int vi_common_early_init(void *handle)
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x50;
break;
+ case CHIP_POLARIS12:
+ adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG;
+ adev->pg_flags = 0;
+ adev->external_rev_id = adev->rev_id + 0x64;
+ break;
case CHIP_CARRIZO:
adev->cg_flags = AMD_CG_SUPPORT_UVD_MGCG |
AMD_CG_SUPPORT_GFX_MGCG |
@@ -1346,6 +1354,7 @@ static int vi_common_set_clockgating_state(void *handle,
case CHIP_TONGA:
case CHIP_POLARIS10:
case CHIP_POLARIS11:
+ case CHIP_POLARIS12:
vi_common_set_clockgating_state_by_smu(adev, state);
default:
break;
@@ -1429,6 +1438,7 @@ int vi_set_ip_blocks(struct amdgpu_device *adev)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
amdgpu_ip_block_add(adev, &vi_common_ip_block);
amdgpu_ip_block_add(adev, &gmc_v8_1_ip_block);
amdgpu_ip_block_add(adev, &tonga_ih_ip_block);
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index c02469a..85f3587 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -23,7 +23,7 @@
#ifndef __AMD_SHARED_H__
#define __AMD_SHARED_H__
-#define AMD_MAX_USEC_TIMEOUT 100000 /* 100 ms */
+#define AMD_MAX_USEC_TIMEOUT 200000 /* 200 ms */
/*
* Supported ASIC types
@@ -46,6 +46,7 @@ enum amd_asic_type {
CHIP_STONEY,
CHIP_POLARIS10,
CHIP_POLARIS11,
+ CHIP_POLARIS12,
CHIP_LAST,
};
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
index b0c63c5..6bb79c9 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_clockpowergating.c
@@ -200,7 +200,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_CG_STATE_UNGATE);
+ AMD_CG_STATE_GATE);
cgs_set_powergating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
@@ -218,7 +218,7 @@ int cz_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
cgs_set_clockgating_state(
hwmgr->device,
AMD_IP_BLOCK_TYPE_VCE,
- AMD_PG_STATE_GATE);
+ AMD_PG_STATE_UNGATE);
cz_dpm_update_vce_dpm(hwmgr);
cz_enable_disable_vce_dpm(hwmgr, true);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 4b14f25..0fb4e8c 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -1402,14 +1402,22 @@ int cz_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
cz_hwmgr->vce_dpm.hard_min_clk,
PPSMC_MSG_SetEclkHardMin));
} else {
- /*EPR# 419220 -HW limitation to to */
- cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
- smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
- PPSMC_MSG_SetEclkHardMin,
- cz_get_eclk_level(hwmgr,
- cz_hwmgr->vce_dpm.hard_min_clk,
- PPSMC_MSG_SetEclkHardMin));
-
+ /* Program HardMin based on the vce_arbiter.ecclk */
+ if (hwmgr->vce_arbiter.ecclk == 0) {
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin, 0);
+ /* disable ECLK DPM 0. Otherwise VCE could hang if
+ * switching SCLK from DPM 0 to 6/7 */
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkSoftMin, 1);
+ } else {
+ cz_hwmgr->vce_dpm.hard_min_clk = hwmgr->vce_arbiter.ecclk;
+ smum_send_msg_to_smc_with_parameter(hwmgr->smumgr,
+ PPSMC_MSG_SetEclkHardMin,
+ cz_get_eclk_level(hwmgr,
+ cz_hwmgr->vce_dpm.hard_min_clk,
+ PPSMC_MSG_SetEclkHardMin));
+ }
}
return 0;
}
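
A condensed restatement of the ECLK hard-min branch added above. send_smc_msg() stands in for smum_send_msg_to_smc_with_parameter() and eclk_level_for() for cz_get_eclk_level(), so treat this as a sketch of the control flow, not driver code:

    extern void send_smc_msg(int msg, unsigned int param);      /* stand-in */
    extern unsigned int eclk_level_for(unsigned int ecclk);     /* stand-in */

    static void program_eclk_min(unsigned int ecclk)
    {
            if (ecclk == 0) {
                    send_smc_msg(PPSMC_MSG_SetEclkHardMin, 0);
                    /* keep ECLK off DPM 0: an SCLK jump from 0 to 6/7
                     * can otherwise hang VCE, per the comment above */
                    send_smc_msg(PPSMC_MSG_SetEclkSoftMin, 1);
            } else {
                    send_smc_msg(PPSMC_MSG_SetEclkHardMin,
                                 eclk_level_for(ecclk));
            }
    }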
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
index dc6700a..b036064 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hwmgr.c
@@ -95,6 +95,7 @@ int hwmgr_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris_set_asic_special_caps(hwmgr);
hwmgr->feature_mask &= ~(PP_UVD_HANDSHAKE_MASK);
break;
@@ -745,7 +746,7 @@ int polaris_set_asic_special_caps(struct pp_hwmgr *hwmgr)
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_TablelessHardwareInterface);
- if (hwmgr->chip_id == CHIP_POLARIS11)
+ if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12))
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_SPLLShutdownSupport);
return 0;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index 26477f0..6cd1287 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -521,7 +521,7 @@ int smu7_enable_didt_config(struct pp_hwmgr *hwmgr)
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris10);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
- } else if (hwmgr->chip_id == CHIP_POLARIS11) {
+ } else if ((hwmgr->chip_id == CHIP_POLARIS11) || (hwmgr->chip_id == CHIP_POLARIS12)) {
result = smu7_program_pt_config_registers(hwmgr, GCCACConfig_Polaris11);
PP_ASSERT_WITH_CODE((result == 0), "DIDT Config failed.", return result);
result = smu7_program_pt_config_registers(hwmgr, DIDTConfig_Polaris11);
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
index e5812aa..6e618aa 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/smumgr.c
@@ -65,6 +65,7 @@ int smum_init(struct amd_pp_init *pp_init, struct pp_instance *handle)
break;
case CHIP_POLARIS11:
case CHIP_POLARIS10:
+ case CHIP_POLARIS12:
polaris10_smum_init(smumgr);
break;
default:
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index 908011d..7abda94 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -113,6 +113,7 @@ struct ast_private {
struct ttm_bo_kmap_obj cache_kmap;
int next_cursor;
bool support_wide_screen;
+ bool DisableP2A;
enum ast_tx_chip tx_chip_type;
u8 dp501_maxclk;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index f75c642..533e762 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
} else
*need_post = false;
+ /* Check P2A Access */
+ ast->DisableP2A = true;
+ data = ast_read32(ast, 0xf004);
+ if (data != 0xFFFFFFFF)
+ ast->DisableP2A = false;
+
/* Check if we support wide screen */
switch (ast->chip) {
case AST1180:
@@ -140,15 +146,17 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
ast->support_wide_screen = true;
else {
ast->support_wide_screen = false;
- /* Read SCU7c (silicon revision register) */
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
- data = ast_read32(ast, 0x1207c);
- data &= 0x300;
- if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
- ast->support_wide_screen = true;
- if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
- ast->support_wide_screen = true;
+ if (ast->DisableP2A == false) {
+ /* Read SCU7c (silicon revision register) */
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x1207c);
+ data &= 0x300;
+ if (ast->chip == AST2300 && data == 0x0) /* ast1300 */
+ ast->support_wide_screen = true;
+ if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
+ ast->support_wide_screen = true;
+ }
}
break;
}
@@ -216,80 +224,81 @@ static int ast_get_dram_info(struct drm_device *dev)
uint32_t data, data2;
uint32_t denum, num, div, ref_pll;
- ast_write32(ast, 0xf004, 0x1e6e0000);
- ast_write32(ast, 0xf000, 0x1);
-
-
- ast_write32(ast, 0x10000, 0xfc600309);
-
- do {
- if (pci_channel_offline(dev->pdev))
- return -EIO;
- } while (ast_read32(ast, 0x10000) != 0x01);
- data = ast_read32(ast, 0x10004);
-
- if (data & 0x40)
+ if (ast->DisableP2A)
+ {
ast->dram_bus_width = 16;
+ ast->dram_type = AST_DRAM_1Gx16;
+ ast->mclk = 396;
+ }
else
- ast->dram_bus_width = 32;
+ {
+ ast_write32(ast, 0xf004, 0x1e6e0000);
+ ast_write32(ast, 0xf000, 0x1);
+ data = ast_read32(ast, 0x10004);
+
+ if (data & 0x40)
+ ast->dram_bus_width = 16;
+ else
+ ast->dram_bus_width = 32;
+
+ if (ast->chip == AST2300 || ast->chip == AST2400) {
+ switch (data & 0x03) {
+ case 0:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ default:
+ case 1:
+ ast->dram_type = AST_DRAM_1Gx16;
+ break;
+ case 2:
+ ast->dram_type = AST_DRAM_2Gx16;
+ break;
+ case 3:
+ ast->dram_type = AST_DRAM_4Gx16;
+ break;
+ }
+ } else {
+ switch (data & 0x0c) {
+ case 0:
+ case 4:
+ ast->dram_type = AST_DRAM_512Mx16;
+ break;
+ case 8:
+ if (data & 0x40)
+ ast->dram_type = AST_DRAM_1Gx16;
+ else
+ ast->dram_type = AST_DRAM_512Mx32;
+ break;
+ case 0xc:
+ ast->dram_type = AST_DRAM_1Gx32;
+ break;
+ }
+ }
- if (ast->chip == AST2300 || ast->chip == AST2400) {
- switch (data & 0x03) {
- case 0:
- ast->dram_type = AST_DRAM_512Mx16;
- break;
- default:
- case 1:
- ast->dram_type = AST_DRAM_1Gx16;
- break;
- case 2:
- ast->dram_type = AST_DRAM_2Gx16;
- break;
+ data = ast_read32(ast, 0x10120);
+ data2 = ast_read32(ast, 0x10170);
+ if (data2 & 0x2000)
+ ref_pll = 14318;
+ else
+ ref_pll = 12000;
+
+ denum = data & 0x1f;
+ num = (data & 0x3fe0) >> 5;
+ data = (data & 0xc000) >> 14;
+ switch (data) {
case 3:
- ast->dram_type = AST_DRAM_4Gx16;
- break;
- }
- } else {
- switch (data & 0x0c) {
- case 0:
- case 4:
- ast->dram_type = AST_DRAM_512Mx16;
+ div = 0x4;
break;
- case 8:
- if (data & 0x40)
- ast->dram_type = AST_DRAM_1Gx16;
- else
- ast->dram_type = AST_DRAM_512Mx32;
+ case 2:
+ case 1:
+ div = 0x2;
break;
- case 0xc:
- ast->dram_type = AST_DRAM_1Gx32;
+ default:
+ div = 0x1;
break;
}
+ ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
}
-
- data = ast_read32(ast, 0x10120);
- data2 = ast_read32(ast, 0x10170);
- if (data2 & 0x2000)
- ref_pll = 14318;
- else
- ref_pll = 12000;
-
- denum = data & 0x1f;
- num = (data & 0x3fe0) >> 5;
- data = (data & 0xc000) >> 14;
- switch (data) {
- case 3:
- div = 0x4;
- break;
- case 2:
- case 1:
- div = 0x2;
- break;
- default:
- div = 0x1;
- break;
- }
- ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
return 0;
}
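
For reference, the MCLK readout that now runs only when P2A is usable decodes SCU register 0x10120 into PLL fields. A hedged decode with an invented raw value:

    unsigned int data  = 0x2a8f;                 /* example value, made up  */
    unsigned int denum = data & 0x1f;            /* bits 0..4   -> 15       */
    unsigned int num   = (data & 0x3fe0) >> 5;   /* bits 5..13  -> 340      */
    unsigned int sel   = (data & 0xc000) >> 14;  /* bits 14..15 -> 0        */
    unsigned int div   = (sel == 3) ? 4 : (sel >= 1) ? 2 : 1;   /* -> 1    */
    /* ref_pll comes from bit 13 of register 0x10170 (14318 or 12000), and
     * mclk then follows from the PLL expression shown in the hunk above. */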
diff --git a/drivers/gpu/drm/ast/ast_post.c b/drivers/gpu/drm/ast/ast_post.c
index 810c51d..5331ee1 100644
--- a/drivers/gpu/drm/ast/ast_post.c
+++ b/drivers/gpu/drm/ast/ast_post.c
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
ast_open_key(ast);
ast_set_def_ext_reg(dev);
- if (ast->chip == AST2300 || ast->chip == AST2400)
- ast_init_dram_2300(dev);
- else
- ast_init_dram_reg(dev);
+ if (ast->DisableP2A == false)
+ {
+ if (ast->chip == AST2300 || ast->chip == AST2400)
+ ast_init_dram_2300(dev);
+ else
+ ast_init_dram_reg(dev);
- ast_init_3rdtx(dev);
+ ast_init_3rdtx(dev);
+ }
+ else
+ {
+ if (ast->tx_chip_type != AST_TX_NONE)
+ ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+ }
}
/* AST 2300 DRAM settings */
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index eb9bf87..18eefdc 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1382,6 +1382,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
pm_runtime_enable(dev);
+ pm_runtime_get_sync(dev);
phy_power_on(dp->phy);
analogix_dp_init_dp(dp);
@@ -1414,9 +1415,15 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
goto err_disable_pm_runtime;
}
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
+
return 0;
err_disable_pm_runtime:
+
+ phy_power_off(dp->phy);
+ pm_runtime_put(dev);
pm_runtime_disable(dev);
return ret;
diff --git a/drivers/gpu/drm/cirrus/Kconfig b/drivers/gpu/drm/cirrus/Kconfig
index 04b3c16..7f4cc6e 100644
--- a/drivers/gpu/drm/cirrus/Kconfig
+++ b/drivers/gpu/drm/cirrus/Kconfig
@@ -7,3 +7,12 @@ config DRM_CIRRUS_QEMU
This is a KMS driver for emulated cirrus device in qemu.
It is *NOT* intended for real cirrus devices. This requires
the modesetting userspace X.org driver.
+
+ Cirrus is obsolete; the hardware was designed in the '90s
+ and can't keep up with today's needs. More background:
+ https://www.kraxel.org/blog/2014/10/qemu-using-cirrus-considered-harmful/
+
+ Better alternatives are:
+ - stdvga (DRM_BOCHS, qemu -vga std, default in qemu 2.2+)
+ - qxl (DRM_QXL, qemu -vga qxl, works best with spice)
+ - virtio (DRM_VIRTIO_GPU, qemu -vga virtio)
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 6069748..50f5cf7 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);
static void set_out_fence_for_crtc(struct drm_atomic_state *state,
- struct drm_crtc *crtc, s64 __user *fence_ptr)
+ struct drm_crtc *crtc, s32 __user *fence_ptr)
{
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}
-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
struct drm_crtc *crtc)
{
- s64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
state->color_mgmt_changed |= replaced;
return ret;
} else if (property == config->prop_out_fence_ptr) {
- s64 __user *fence_ptr = u64_to_user_ptr(val);
+ s32 __user *fence_ptr = u64_to_user_ptr(val);
if (!fence_ptr)
return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
*/
struct drm_out_fence_state {
- s64 __user *out_fence_ptr;
+ s32 __user *out_fence_ptr;
struct sync_file *sync_file;
int fd;
};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
return 0;
for_each_crtc_in_state(state, crtc, crtc_state, i) {
- u64 __user *fence_ptr;
+ s32 __user *fence_ptr;
fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
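
The s64 -> s32 change matters because OUT_FENCE_PTR carries a userspace pointer to the slot where the kernel stores the new sync_file fd, and fds are 32-bit ints; a 64-bit store would write past a 32-bit slot. A hedged userspace-side sketch:

    #include <stdint.h>

    int32_t out_fence_fd = -1;      /* the fd slot must be 32 bits wide */
    uint64_t prop_val = (uint64_t)(uintptr_t)&out_fence_fd;
    /* prop_val is passed as the OUT_FENCE_PTR property value of an atomic
     * commit (e.g. via drmModeAtomicAddProperty()); after the commit
     * succeeds, out_fence_fd holds a pollable fence fd. */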
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 583f47f..34f757b 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1259,8 +1259,10 @@ int drm_atomic_helper_commit(struct drm_device *dev,
if (!nonblock) {
ret = drm_atomic_helper_wait_for_fences(dev, state, true);
- if (ret)
+ if (ret) {
+ drm_atomic_helper_cleanup_planes(dev, state);
return ret;
+ }
}
/*
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index ac6a352..e6b19bc 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1460,6 +1460,13 @@ drm_mode_create_from_cmdline_mode(struct drm_device *dev,
return NULL;
mode->type |= DRM_MODE_TYPE_USERDEF;
+ /* fix up 1368x768: GFT/CVT can't express 1366 width due to alignment */
+ if (cmd->xres == 1366 && mode->hdisplay == 1368) {
+ mode->hdisplay = 1366;
+ mode->hsync_start--;
+ mode->hsync_end--;
+ drm_mode_set_name(mode);
+ }
drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
return mode;
}
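
The fixup exists because CVT/GTF round hdisplay to a multiple of 8, so a video=1366x768 command line yields a 1368-wide mode. A worked trace of the adjustment, assuming that rounding behaviour:

    /* video=1366x768 -> CVT computes hdisplay = 1368, then the fixup does:
     *
     *   hdisplay:    1368 -> 1366   (the width the panel actually has)
     *   hsync_start: s    -> s - 1  (sync window follows the moved edge)
     *   hsync_end:   e    -> e - 1
     *
     * drm_mode_set_name() then renames the mode to "1366x768".
     */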
diff --git a/drivers/gpu/drm/drm_probe_helper.c b/drivers/gpu/drm/drm_probe_helper.c
index ac953f0..cf8f012 100644
--- a/drivers/gpu/drm/drm_probe_helper.c
+++ b/drivers/gpu/drm/drm_probe_helper.c
@@ -143,8 +143,18 @@ void drm_kms_helper_poll_enable_locked(struct drm_device *dev)
}
if (dev->mode_config.delayed_event) {
+ /*
+ * FIXME:
+ *
+ * Use short (1s) delay to handle the initial delayed event.
+ * This delay should not be needed, but Optimus/nouveau will
+ * fail in a mysterious way if the delayed event is handled as
+ * soon as possible like it is done in
+ * drm_helper_probe_single_connector_modes() in case the poll
+ * was enabled before.
+ */
poll = true;
- delay = 0;
+ delay = HZ;
}
if (poll)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
index 169ac96..fe0e85b 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_mmu.c
@@ -116,9 +116,14 @@ static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
struct list_head list;
bool found;
+ /*
+ * XXX: The DRM_MM_SEARCH_BELOW is really a hack to trick
+ * drm_mm into giving out a low IOVA after address space
+ * rollover. This needs a proper fix.
+ */
ret = drm_mm_insert_node_in_range(&mmu->mm, node,
size, 0, mmu->last_iova, ~0UL,
- DRM_MM_SEARCH_DEFAULT);
+ mmu->last_iova ? DRM_MM_SEARCH_DEFAULT : DRM_MM_SEARCH_BELOW);
if (ret != -ENOSPC)
break;
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 6ca1f31..75eeb83 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -46,7 +46,8 @@ enum decon_flag_bits {
BIT_CLKS_ENABLED,
BIT_IRQS_ENABLED,
BIT_WIN_UPDATED,
- BIT_SUSPENDED
+ BIT_SUSPENDED,
+ BIT_REQUEST_UPDATE
};
struct decon_context {
@@ -141,12 +142,6 @@ static void decon_commit(struct exynos_drm_crtc *crtc)
m->crtc_vsync_end = m->crtc_vsync_start + 1;
}
- decon_set_bits(ctx, DECON_VIDCON0, VIDCON0_ENVID, 0);
-
- /* enable clock gate */
- val = CMU_CLKGAGE_MODE_SFR_F | CMU_CLKGAGE_MODE_MEM_F;
- writel(val, ctx->addr + DECON_CMU);
-
if (ctx->out_type & (IFTYPE_I80 | I80_HW_TRG))
decon_setup_trigger(ctx);
@@ -315,6 +310,7 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
/* window enable */
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, ~0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_disable_plane(struct exynos_drm_crtc *crtc,
@@ -327,6 +323,7 @@ static void decon_disable_plane(struct exynos_drm_crtc *crtc,
return;
decon_set_bits(ctx, DECON_WINCONx(win), WINCONx_ENWIN_F, 0);
+ set_bit(BIT_REQUEST_UPDATE, &ctx->flags);
}
static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
@@ -340,8 +337,8 @@ static void decon_atomic_flush(struct exynos_drm_crtc *crtc)
for (i = ctx->first_win; i < WINDOWS_NR; i++)
decon_shadow_protect_win(ctx, i, false);
- /* standalone update */
- decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
+ if (test_and_clear_bit(BIT_REQUEST_UPDATE, &ctx->flags))
+ decon_set_bits(ctx, DECON_UPDATE, STANDALONE_UPDATE_F, ~0);
if (ctx->out_type & IFTYPE_I80)
set_bit(BIT_WIN_UPDATED, &ctx->flags);
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 0d41ebc..f7bce86 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -37,13 +37,6 @@
#include "i915_drv.h"
#include "gvt.h"
-#define MB_TO_BYTES(mb) ((mb) << 20ULL)
-#define BYTES_TO_MB(b) ((b) >> 20ULL)
-
-#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
-#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
-#define HOST_FENCE 4
-
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -165,6 +158,14 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
POSTING_READ(fence_reg_lo);
}
+static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
+{
+ int i;
+
+ for (i = 0; i < vgpu_fence_sz(vgpu); i++)
+ intel_vgpu_write_fence(vgpu, i, 0);
+}
+
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
@@ -178,9 +179,9 @@ static void free_vgpu_fence(struct intel_vgpu *vgpu)
intel_runtime_pm_get(dev_priv);
mutex_lock(&dev_priv->drm.struct_mutex);
+ _clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
- intel_vgpu_write_fence(vgpu, i, 0);
list_add_tail(&reg->link,
&dev_priv->mm.fence_list);
}
@@ -208,13 +209,14 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
continue;
list_del(pos);
vgpu->fence.regs[i] = reg;
- intel_vgpu_write_fence(vgpu, i, 0);
if (++i == vgpu_fence_sz(vgpu))
break;
}
if (i != vgpu_fence_sz(vgpu))
goto out_free_fence;
+ _clear_vgpu_fence(vgpu);
+
mutex_unlock(&dev_priv->drm.struct_mutex);
intel_runtime_pm_put(dev_priv);
return 0;
@@ -314,6 +316,22 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
}
/**
+ * intel_vgpu_reset_resource - reset resource state owned by a vGPU
+ * @vgpu: a vGPU
+ *
+ * This function is used to reset resource state owned by a vGPU.
+ *
+ */
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
+{
+ struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+
+ intel_runtime_pm_get(dev_priv);
+ _clear_vgpu_fence(vgpu);
+ intel_runtime_pm_put(dev_priv);
+}
+
+/**
* intel_alloc_vgpu_resource - allocate HW resource for a vGPU
* @vgpu: vGPU
* @param: vGPU creation params
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 711c31c..4a6a2ed 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -282,3 +282,77 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
}
return 0;
}
+
+/**
+ * intel_vgpu_init_cfg_space - init vGPU configuration space when creating a vGPU
+ *
+ * @vgpu: a vGPU
+ * @primary: is the vGPU presented as primary
+ *
+ */
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+ u16 *gmch_ctl;
+ int i;
+
+ memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
+ info->cfg_space_size);
+
+ if (!primary) {
+ vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+ }
+
+ /* Show guest that there isn't any stolen memory. */
+ gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
+ *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
+
+ intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
+ gvt_aperture_pa_base(gvt), true);
+
+ vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
+ | PCI_COMMAND_MEMORY
+ | PCI_COMMAND_MASTER);
+ /*
+ * Clear the upper 32 bits of the BARs and let the guest assign new values
+ */
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
+ memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
+
+ for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
+ vgpu->cfg_space.bar[i].size = pci_resource_len(
+ gvt->dev_priv->drm.pdev, i * 2);
+ vgpu->cfg_space.bar[i].tracked = false;
+ }
+}
+
+/**
+ * intel_vgpu_reset_cfg_space - reset vGPU configuration space
+ *
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu)
+{
+ u8 cmd = vgpu_cfg_space(vgpu)[PCI_COMMAND];
+ bool primary = vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] !=
+ INTEL_GVT_PCI_CLASS_VGA_OTHER;
+
+ if (cmd & PCI_COMMAND_MEMORY) {
+ trap_gttmmio(vgpu, false);
+ map_aperture(vgpu, false);
+ }
+
+ /**
+ * Currently we only do such a reset when the vGPU is not
+ * owned by any VM, so we simply restore the entire cfg
+ * space to its default values.
+ */
+ intel_vgpu_init_cfg_space(vgpu, primary);
+}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index d26a092..e456398 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -481,7 +481,6 @@ struct parser_exec_state {
(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
struct intel_gvt *gvt = s->vgpu->gvt;
- if (bypass_batch_buffer_scan)
- return 0;
-
if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
/* BDW decides privilege based on address space */
if (cmd_val(s, 0) & (1 << 8))
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index f32bb6f..3408373 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
-
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
- unsigned long add, int gmadr_bytes)
-{
- if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
- return -1;
-
- *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
- BATCH_BUFFER_ADDR_MASK;
- if (gmadr_bytes == 8) {
- *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
- add & BATCH_BUFFER_ADDR_HIGH_MASK;
- }
-
- return 0;
-}
-
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
- int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+ struct intel_shadow_bb_entry *entry_obj;
/* pin the gem object to ggtt */
- if (!list_empty(&workload->shadow_bb)) {
- struct intel_shadow_bb_entry *entry_obj =
- list_first_entry(&workload->shadow_bb,
- struct intel_shadow_bb_entry,
- list);
- struct intel_shadow_bb_entry *temp;
+ list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+ struct i915_vma *vma;
- list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
- list) {
- struct i915_vma *vma;
-
- vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
- 4, 0);
- if (IS_ERR(vma)) {
- gvt_err("Cannot pin\n");
- return;
- }
-
- /* FIXME: we are not tracking our pinned VMA leaving it
- * up to the core to fix up the stray pin_count upon
- * free.
- */
-
- /* update the relocate gma with shadow batch buffer*/
- set_gma_to_bb_cmd(entry_obj,
- i915_ggtt_offset(vma),
- gmadr_bytes);
+ vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+ if (IS_ERR(vma)) {
+ gvt_err("Cannot pin\n");
+ return;
}
+
+ /* FIXME: we are not tracking our pinned VMA, leaving it
+ * up to the core to fix up the stray pin_count upon
+ * free.
+ */
+
+ /* update the relocated gma with the shadow batch buffer */
+ entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+ if (gmadr_bytes == 8)
+ entry_obj->bb_start_cmd_va[2] = 0;
}
}
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
}
- vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+ vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
sizeof(struct intel_vgpu_workload), 0,
SLAB_HWCACHE_ALIGN,
NULL);
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 6c5fdf5..47dec4a 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -240,15 +240,8 @@ static inline int get_pse_type(int type)
static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
- u64 pte;
-#ifdef readq
- pte = readq(addr);
-#else
- pte = ioread32(addr);
- pte |= (u64)ioread32(addr + 4) << 32;
-#endif
- return pte;
+ return readq(addr);
}
static void write_pte64(struct drm_i915_private *dev_priv,
@@ -256,12 +249,8 @@ static void write_pte64(struct drm_i915_private *dev_priv,
{
void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
-#ifdef writeq
writeq(pte, addr);
-#else
- iowrite32((u32)pte, addr);
- iowrite32(pte >> 32, addr + 4);
-#endif
+
I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
POSTING_READ(GFX_FLSH_CNTL_GEN6);
}
@@ -1380,8 +1369,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
info->gtt_entry_size;
mem = kzalloc(mm->has_shadow_page_table ?
mm->page_table_entry_size * 2
- : mm->page_table_entry_size,
- GFP_ATOMIC);
+ : mm->page_table_entry_size, GFP_KERNEL);
if (!mem)
return -ENOMEM;
mm->virtual_page_table = mem;
@@ -1532,7 +1520,7 @@ struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
struct intel_vgpu_mm *mm;
int ret;
- mm = kzalloc(sizeof(*mm), GFP_ATOMIC);
+ mm = kzalloc(sizeof(*mm), GFP_KERNEL);
if (!mm) {
ret = -ENOMEM;
goto fail;
@@ -1886,30 +1874,27 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
- struct page *scratch_pt;
+ void *scratch_pt;
unsigned long mfn;
int i;
- void *p;
if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
- scratch_pt = alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+ scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
if (!scratch_pt) {
gvt_err("fail to allocate scratch page\n");
return -ENOMEM;
}
- p = kmap_atomic(scratch_pt);
- mfn = intel_gvt_hypervisor_virt_to_mfn(p);
+ mfn = intel_gvt_hypervisor_virt_to_mfn(scratch_pt);
if (mfn == INTEL_GVT_INVALID_ADDR) {
- gvt_err("fail to translate vaddr:0x%llx\n", (u64)p);
- kunmap_atomic(p);
- __free_page(scratch_pt);
+ gvt_err("fail to translate vaddr:0x%lx\n", (unsigned long)scratch_pt);
+ free_page((unsigned long)scratch_pt);
return -EFAULT;
}
gtt->scratch_pt[type].page_mfn = mfn;
- gtt->scratch_pt[type].page = scratch_pt;
+ gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
vgpu->id, type, mfn);
@@ -1918,7 +1903,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
* scratch_pt[type] indicate the scratch pt/scratch page used by the
* 'type' pt.
* e.g. scratch_pt[GTT_TYPE_PPGTT_PDE_PT] is used by
- * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scatch_pt it self
+ * GTT_TYPE_PPGTT_PDE_PT level pt, that means this scratch_pt itself
* is GTT_TYPE_PPGTT_PTE_PT, and fully filled by scratch page mfn.
*/
if (type > GTT_TYPE_PPGTT_PTE_PT && type < GTT_TYPE_MAX) {
@@ -1936,11 +1921,9 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
se.val64 |= PPAT_CACHED_INDEX;
for (i = 0; i < page_entry_num; i++)
- ops->set_entry(p, &se, i, false, 0, vgpu);
+ ops->set_entry(scratch_pt, &se, i, false, 0, vgpu);
}
- kunmap_atomic(p);
-
return 0;
}
@@ -2208,7 +2191,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
- void *page_addr;
+ void *page;
gvt_dbg_core("init gtt\n");
@@ -2221,17 +2204,14 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
return -ENODEV;
}
- gvt->gtt.scratch_ggtt_page =
- alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
- if (!gvt->gtt.scratch_ggtt_page) {
+ page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!page) {
gvt_err("fail to allocate scratch ggtt page\n");
return -ENOMEM;
}
+ gvt->gtt.scratch_ggtt_page = virt_to_page(page);
- page_addr = page_address(gvt->gtt.scratch_ggtt_page);
-
- gvt->gtt.scratch_ggtt_mfn =
- intel_gvt_hypervisor_virt_to_mfn(page_addr);
+ gvt->gtt.scratch_ggtt_mfn = intel_gvt_hypervisor_virt_to_mfn(page);
if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
gvt_err("fail to translate scratch ggtt page\n");
__free_page(gvt->gtt.scratch_ggtt_page);
@@ -2297,3 +2277,30 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
for (offset = 0; offset < num_entries; offset++)
ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
}
+
+/**
+ * intel_vgpu_reset_gtt - reset all GTT-related status
+ * @vgpu: a vGPU
+ * @dmlr: true for vGPU Device Model Level Reset, false for GT Reset
+ *
+ * This function is called from vfio core to reset all
+ * GTT related status, including GGTT, PPGTT, scratch page.
+ *
+ */
+void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr)
+{
+ int i;
+
+ ppgtt_free_all_shadow_page(vgpu);
+ if (!dmlr)
+ return;
+
+ intel_vgpu_reset_ggtt(vgpu);
+
+ /* clear scratch page for security */
+ for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
+ if (vgpu->gtt.scratch_pt[i].page != NULL)
+ memset(page_address(vgpu->gtt.scratch_pt[i].page),
+ 0, PAGE_SIZE);
+ }
+}
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b315ab3..f88eb5e 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -208,6 +208,7 @@ extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
+extern void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu, bool dmlr);
extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 398877c..e6bf5c5 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -201,6 +201,8 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
intel_gvt_clean_vgpu_types(gvt);
+ idr_destroy(&gvt->vgpu_idr);
+
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
@@ -237,6 +239,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
gvt_dbg_core("init gvt device\n");
+ idr_init(&gvt->vgpu_idr);
+
mutex_init(&gvt->lock);
gvt->dev_priv = dev_priv;
@@ -244,7 +248,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
ret = intel_gvt_setup_mmio_info(gvt);
if (ret)
- return ret;
+ goto out_clean_idr;
ret = intel_gvt_load_firmware(gvt);
if (ret)
@@ -313,6 +317,8 @@ out_free_firmware:
intel_gvt_free_firmware(gvt);
out_clean_mmio_info:
intel_gvt_clean_mmio_info(gvt);
+out_clean_idr:
+ idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
return ret;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index 0af1701..e227caf 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -323,6 +323,7 @@ struct intel_vgpu_creation_params {
int intel_vgpu_alloc_resource(struct intel_vgpu *vgpu,
struct intel_vgpu_creation_params *param);
+void intel_vgpu_reset_resource(struct intel_vgpu *vgpu);
void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value);
@@ -375,6 +376,8 @@ void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -411,6 +414,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
unsigned long *g_index);
+void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
+ bool primary);
+void intel_vgpu_reset_cfg_space(struct intel_vgpu *vgpu);
+
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
@@ -424,7 +431,6 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu);
int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa);
int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
-int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);
struct intel_gvt_ops {
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 5228097..ab2ea15 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -93,7 +93,8 @@ static void write_vreg(struct intel_vgpu *vgpu, unsigned int offset,
static int new_mmio_info(struct intel_gvt *gvt,
u32 offset, u32 flags, u32 size,
u32 addr_mask, u32 ro_mask, u32 device,
- void *read, void *write)
+ int (*read)(struct intel_vgpu *, unsigned int, void *, unsigned int),
+ int (*write)(struct intel_vgpu *, unsigned int, void *, unsigned int))
{
struct intel_gvt_mmio_info *info, *p;
u32 start, end, i;
@@ -219,7 +220,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
default:
/*should not hit here*/
gvt_err("invalid forcewake offset 0x%x\n", offset);
- return 1;
+ return -EINVAL;
}
} else {
ack_reg_offset = FORCEWAKE_ACK_HSW_REG;
@@ -230,77 +231,45 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
return 0;
}
-static int handle_device_reset(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes, unsigned long bitmap)
-{
- struct intel_gvt_workload_scheduler *scheduler =
- &vgpu->gvt->scheduler;
-
- vgpu->resetting = true;
-
- intel_vgpu_stop_schedule(vgpu);
- /*
- * The current_vgpu will set to NULL after stopping the
- * scheduler when the reset is triggered by current vgpu.
- */
- if (scheduler->current_vgpu == NULL) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_gvt_wait_vgpu_idle(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- }
-
- intel_vgpu_reset_execlist(vgpu, bitmap);
-
- /* full GPU reset */
- if (bitmap == 0xff) {
- mutex_unlock(&vgpu->gvt->lock);
- intel_vgpu_clean_gtt(vgpu);
- mutex_lock(&vgpu->gvt->lock);
- setup_vgpu_mmio(vgpu);
- populate_pvinfo_page(vgpu);
- intel_vgpu_init_gtt(vgpu);
- }
-
- vgpu->resetting = false;
-
- return 0;
-}
-
static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
- void *p_data, unsigned int bytes)
+ void *p_data, unsigned int bytes)
{
+ unsigned int engine_mask = 0;
u32 data;
- u64 bitmap = 0;
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
if (data & GEN6_GRDOM_FULL) {
gvt_dbg_mmio("vgpu%d: request full GPU reset\n", vgpu->id);
- bitmap = 0xff;
- }
- if (data & GEN6_GRDOM_RENDER) {
- gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
- bitmap |= (1 << RCS);
- }
- if (data & GEN6_GRDOM_MEDIA) {
- gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
- bitmap |= (1 << VCS);
- }
- if (data & GEN6_GRDOM_BLT) {
- gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
- bitmap |= (1 << BCS);
- }
- if (data & GEN6_GRDOM_VECS) {
- gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
- bitmap |= (1 << VECS);
- }
- if (data & GEN8_GRDOM_MEDIA2) {
- gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
- if (HAS_BSD2(vgpu->gvt->dev_priv))
- bitmap |= (1 << VCS2);
+ engine_mask = ALL_ENGINES;
+ } else {
+ if (data & GEN6_GRDOM_RENDER) {
+ gvt_dbg_mmio("vgpu%d: request RCS reset\n", vgpu->id);
+ engine_mask |= (1 << RCS);
+ }
+ if (data & GEN6_GRDOM_MEDIA) {
+ gvt_dbg_mmio("vgpu%d: request VCS reset\n", vgpu->id);
+ engine_mask |= (1 << VCS);
+ }
+ if (data & GEN6_GRDOM_BLT) {
+ gvt_dbg_mmio("vgpu%d: request BCS Reset\n", vgpu->id);
+ engine_mask |= (1 << BCS);
+ }
+ if (data & GEN6_GRDOM_VECS) {
+ gvt_dbg_mmio("vgpu%d: request VECS Reset\n", vgpu->id);
+ engine_mask |= (1 << VECS);
+ }
+ if (data & GEN8_GRDOM_MEDIA2) {
+ gvt_dbg_mmio("vgpu%d: request VCS2 Reset\n", vgpu->id);
+ if (HAS_BSD2(vgpu->gvt->dev_priv))
+ engine_mask |= (1 << VCS2);
+ }
}
- return handle_device_reset(vgpu, offset, p_data, bytes, bitmap);
+
+ intel_gvt_reset_vgpu_locked(vgpu, false, engine_mask);
+
+ return 0;
}
static int gmbus_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
@@ -974,7 +943,7 @@ static int sbi_data_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
-static bool sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+static int sbi_ctl_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data;
@@ -1366,7 +1335,6 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- int rc = 0;
unsigned int id = 0;
write_vreg(vgpu, offset, p_data, bytes);
@@ -1389,12 +1357,11 @@ static int gvt_reg_tlb_control_handler(struct intel_vgpu *vgpu,
id = VECS;
break;
default:
- rc = -EINVAL;
- break;
+ return -EINVAL;
}
set_bit(id, (void *)vgpu->tlb_handle_pending);
- return rc;
+ return 0;
}
static int ring_reset_ctl_write(struct intel_vgpu *vgpu,
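
At its core, the rewritten gdrst_mmio_write() above is a GDRST-bit to engine-mask translation followed by one call into the common reset path. A condensed restatement (the VCS2/HAS_BSD2 case is omitted for brevity):

    static unsigned int gdrst_to_engine_mask(u32 data)
    {
            unsigned int mask = 0;

            if (data & GEN6_GRDOM_FULL)
                    return ALL_ENGINES;             /* full GPU reset */
            if (data & GEN6_GRDOM_RENDER)
                    mask |= 1 << RCS;
            if (data & GEN6_GRDOM_MEDIA)
                    mask |= 1 << VCS;
            if (data & GEN6_GRDOM_BLT)
                    mask |= 1 << BCS;
            if (data & GEN6_GRDOM_VECS)
                    mask |= 1 << VECS;
            return mask;
    }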
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index faaae07..3f656e3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
return NULL;
}
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
- char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+ struct device *dev, char *buf)
{
struct intel_vgpu_type *type;
unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
type->fence);
}
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);
static struct attribute *type_attrs[] = {
- &mdev_type_attr_available_instance.attr,
+ &mdev_type_attr_available_instances.attr,
&mdev_type_attr_device_api.attr,
&mdev_type_attr_description.attr,
NULL,
@@ -398,6 +398,7 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
struct intel_vgpu_type *type;
struct device *pdev;
void *gvt;
+ int ret;
pdev = mdev_parent_dev(mdev);
gvt = kdev_to_i915(pdev)->gvt;
@@ -406,13 +407,15 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
if (!type) {
gvt_err("failed to find type %s to create\n",
kobject_name(kobj));
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
vgpu = intel_gvt_ops->vgpu_create(gvt, type);
if (IS_ERR_OR_NULL(vgpu)) {
- gvt_err("create intel vgpu failed\n");
- return -EINVAL;
+ ret = vgpu == NULL ? -EFAULT : PTR_ERR(vgpu);
+ gvt_err("failed to create intel vgpu: %d\n", ret);
+ goto out;
}
INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
@@ -422,7 +425,10 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
dev_name(mdev_dev(mdev)));
- return 0;
+ ret = 0;
+
+out:
+ return ret;
}
static int intel_vgpu_remove(struct mdev_device *mdev)
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index 09c9450..4df078b 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -125,25 +125,12 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
- mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
- if (!mmio && !vgpu->mmio.disable_warn_untrack) {
- gvt_err("vgpu%d: read untracked MMIO %x len %d val %x\n",
- vgpu->id, offset, bytes, *(u32 *)p_data);
-
- if (offset == 0x206c) {
- gvt_err("------------------------------------------\n");
- gvt_err("vgpu%d: likely triggers a gfx reset\n",
- vgpu->id);
- gvt_err("------------------------------------------\n");
- vgpu->mmio.disable_warn_untrack = true;
- }
- }
-
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
if (WARN_ON(!IS_ALIGNED(offset, bytes)))
goto err;
}
+ mmio = intel_gvt_find_mmio_info(gvt, rounddown(offset, 4));
if (mmio) {
if (!intel_gvt_mmio_is_unalign(gvt, mmio->offset)) {
if (WARN_ON(offset + bytes > mmio->offset + mmio->size))
@@ -152,9 +139,23 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
goto err;
}
ret = mmio->read(vgpu, offset, p_data, bytes);
- } else
+ } else {
ret = intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
+ if (!vgpu->mmio.disable_warn_untrack) {
+ gvt_err("vgpu%d: read untracked MMIO %x(%dB) val %x\n",
+ vgpu->id, offset, bytes, *(u32 *)p_data);
+
+ if (offset == 0x206c) {
+ gvt_err("------------------------------------------\n");
+ gvt_err("vgpu%d: likely triggers a gfx reset\n",
+ vgpu->id);
+ gvt_err("------------------------------------------\n");
+ vgpu->mmio.disable_warn_untrack = true;
+ }
+ }
+ }
+
if (ret)
goto err;
@@ -302,3 +303,56 @@ err:
mutex_unlock(&gvt->lock);
return ret;
}
+
+
+/**
+ * intel_vgpu_reset_mmio - reset virtual MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ const struct intel_gvt_device_info *info = &gvt->device_info;
+
+ memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
+ memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
+
+ vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+
+ /* set bits 0:2 (Core C-State) to C0 */
+ vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+}
+
+/**
+ * intel_vgpu_init_mmio - init MMIO space
+ * @vgpu: a vGPU
+ *
+ * Returns:
+ * Zero on success, negative error code if failed
+ */
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
+{
+ const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;
+
+ vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
+ if (!vgpu->mmio.vreg)
+ return -ENOMEM;
+
+ vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
+
+ intel_vgpu_reset_mmio(vgpu);
+
+ return 0;
+}
+
+/**
+ * intel_vgpu_clean_mmio - clean MMIO space
+ * @vgpu: a vGPU
+ *
+ */
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
+{
+ vfree(vgpu->mmio.vreg);
+ vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+}
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 87d5b5e..3bc620f 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -86,6 +86,10 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
*offset; \
})
+int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu);
+void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
+
int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
diff --git a/drivers/gpu/drm/i915/gvt/opregion.c b/drivers/gpu/drm/i915/gvt/opregion.c
index 81cd921..d9fb41a 100644
--- a/drivers/gpu/drm/i915/gvt/opregion.c
+++ b/drivers/gpu/drm/i915/gvt/opregion.c
@@ -36,9 +36,9 @@ static int init_vgpu_opregion(struct intel_vgpu *vgpu, u32 gpa)
vgpu->id))
return -EINVAL;
- vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_ATOMIC |
- GFP_DMA32 | __GFP_ZERO,
- INTEL_GVT_OPREGION_PORDER);
+ vgpu_opregion(vgpu)->va = (void *)__get_free_pages(GFP_KERNEL |
+ __GFP_ZERO,
+ get_order(INTEL_GVT_OPREGION_SIZE));
if (!vgpu_opregion(vgpu)->va)
return -ENOMEM;
@@ -97,7 +97,7 @@ void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
map_vgpu_opregion(vgpu, false);
free_pages((unsigned long)vgpu_opregion(vgpu)->va,
- INTEL_GVT_OPREGION_PORDER);
+ get_order(INTEL_GVT_OPREGION_SIZE));
vgpu_opregion(vgpu)->va = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h
index 0dfe789..fbd023a 100644
--- a/drivers/gpu/drm/i915/gvt/reg.h
+++ b/drivers/gpu/drm/i915/gvt/reg.h
@@ -50,8 +50,7 @@
#define INTEL_GVT_OPREGION_PARM 0x204
#define INTEL_GVT_OPREGION_PAGES 2
-#define INTEL_GVT_OPREGION_PORDER 1
-#define INTEL_GVT_OPREGION_SIZE (2 * 4096)
+#define INTEL_GVT_OPREGION_SIZE (INTEL_GVT_OPREGION_PAGES * PAGE_SIZE)
#define VGT_SPRSTRIDE(pipe) _PIPE(pipe, _SPRA_STRIDE, _PLANE_STRIDE_2_B)
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 4db2422..e91885d 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -350,13 +350,15 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
+ struct intel_vgpu *vgpu;
int event;
mutex_lock(&gvt->lock);
workload = scheduler->current_workload[ring_id];
+ vgpu = workload->vgpu;
- if (!workload->status && !workload->vgpu->resetting) {
+ if (!workload->status && !vgpu->resetting) {
wait_event(workload->shadow_ctx_status_wq,
!atomic_read(&workload->shadow_ctx_active));
@@ -364,8 +366,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
- intel_vgpu_trigger_virtual_event(workload->vgpu,
- event);
+ intel_vgpu_trigger_virtual_event(vgpu, event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
@@ -373,11 +374,10 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
scheduler->current_workload[ring_id] = NULL;
- atomic_dec(&workload->vgpu->running_workload_num);
-
list_del_init(&workload->list);
workload->complete(workload);
+ atomic_dec(&vgpu->running_workload_num);
wake_up(&scheduler->workload_complete_wq);
mutex_unlock(&gvt->lock);
}
@@ -459,11 +459,11 @@ complete:
gvt_dbg_sched("will complete workload %p\n, status: %d\n",
workload, workload->status);
- complete_current_workload(gvt, ring_id);
-
if (workload->req)
i915_gem_request_put(fetch_and_zero(&workload->req));
+ complete_current_workload(gvt, ring_id);
+
if (need_force_wake)
intel_uncore_forcewake_put(gvt->dev_priv,
FORCEWAKE_ALL);
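The scheduler change above is a use-after-free fix: workload->complete() may free the workload, so the vgpu pointer is cached up front and the running_workload_num decrement is moved after completion, where it touches only the cached pointer. A hedged sketch of the caching pattern (types and names are illustrative):

struct vgpu {
    int running_workloads;
};

struct workload {
    struct vgpu *vgpu;
    void (*complete)(struct workload *w);    /* may free w */
};

static void finish(struct workload *w)
{
    struct vgpu *vgpu = w->vgpu;    /* cache before complete() can free w */

    w->complete(w);                 /* w must not be touched past here */
    vgpu->running_workloads--;      /* uses only the cached pointer */
}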
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 3b30c28..2833dfa 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
- void *bb_start_cmd_va;
+ u32 *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 536d2b9..7295bc8 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -35,79 +35,6 @@
#include "gvt.h"
#include "i915_pvinfo.h"
-static void clean_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- vfree(vgpu->mmio.vreg);
- vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
-}
-
-int setup_vgpu_mmio(struct intel_vgpu *vgpu)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
-
- if (vgpu->mmio.vreg)
- memset(vgpu->mmio.vreg, 0, info->mmio_size * 2);
- else {
- vgpu->mmio.vreg = vzalloc(info->mmio_size * 2);
- if (!vgpu->mmio.vreg)
- return -ENOMEM;
- }
-
- vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
- memcpy(vgpu->mmio.vreg, gvt->firmware.mmio, info->mmio_size);
- memcpy(vgpu->mmio.sreg, gvt->firmware.mmio, info->mmio_size);
-
- vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
-
- /* set the bit 0:2(Core C-State ) to C0 */
- vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
- return 0;
-}
-
-static void setup_vgpu_cfg_space(struct intel_vgpu *vgpu,
- struct intel_vgpu_creation_params *param)
-{
- struct intel_gvt *gvt = vgpu->gvt;
- const struct intel_gvt_device_info *info = &gvt->device_info;
- u16 *gmch_ctl;
- int i;
-
- memcpy(vgpu_cfg_space(vgpu), gvt->firmware.cfg_space,
- info->cfg_space_size);
-
- if (!param->primary) {
- vgpu_cfg_space(vgpu)[PCI_CLASS_DEVICE] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- vgpu_cfg_space(vgpu)[PCI_CLASS_PROG] =
- INTEL_GVT_PCI_CLASS_VGA_OTHER;
- }
-
- /* Show guest that there isn't any stolen memory.*/
- gmch_ctl = (u16 *)(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_GMCH_CONTROL);
- *gmch_ctl &= ~(BDW_GMCH_GMS_MASK << BDW_GMCH_GMS_SHIFT);
-
- intel_vgpu_write_pci_bar(vgpu, PCI_BASE_ADDRESS_2,
- gvt_aperture_pa_base(gvt), true);
-
- vgpu_cfg_space(vgpu)[PCI_COMMAND] &= ~(PCI_COMMAND_IO
- | PCI_COMMAND_MEMORY
- | PCI_COMMAND_MASTER);
- /*
- * Clear the bar upper 32bit and let guest to assign the new value
- */
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_1, 0, 4);
- memset(vgpu_cfg_space(vgpu) + PCI_BASE_ADDRESS_3, 0, 4);
- memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
-
- for (i = 0; i < INTEL_GVT_MAX_BAR_NUM; i++) {
- vgpu->cfg_space.bar[i].size = pci_resource_len(
- gvt->dev_priv->drm.pdev, i * 2);
- vgpu->cfg_space.bar[i].tracked = false;
- }
-}
-
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
/* setup the ballooning information */
@@ -177,7 +104,7 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
if (low_avail / min_low == 0)
break;
gvt->types[i].low_gm_size = min_low;
- gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
+ gvt->types[i].high_gm_size = max((min_low<<3), MB_TO_BYTES(384U));
gvt->types[i].fence = 4;
gvt->types[i].max_instance = low_avail / min_low;
gvt->types[i].avail_instance = gvt->types[i].max_instance;
@@ -217,7 +144,7 @@ static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
*/
low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
- high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
+ high_gm_avail = MB_TO_BYTES(256) * 8UL - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
@@ -268,7 +195,7 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
intel_vgpu_clean_gtt(vgpu);
intel_gvt_hypervisor_detach_vgpu(vgpu);
intel_vgpu_free_resource(vgpu);
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
vfree(vgpu);
intel_gvt_update_vgpu_types(gvt);
@@ -300,11 +227,11 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
bitmap_zero(vgpu->tlb_handle_pending, I915_NUM_ENGINES);
- setup_vgpu_cfg_space(vgpu, param);
+ intel_vgpu_init_cfg_space(vgpu, param->primary);
- ret = setup_vgpu_mmio(vgpu);
+ ret = intel_vgpu_init_mmio(vgpu);
if (ret)
- goto out_free_vgpu;
+ goto out_clean_idr;
ret = intel_vgpu_alloc_resource(vgpu, param);
if (ret)
@@ -354,7 +281,9 @@ out_detach_hypervisor_vgpu:
out_clean_vgpu_resource:
intel_vgpu_free_resource(vgpu);
out_clean_vgpu_mmio:
- clean_vgpu_mmio(vgpu);
+ intel_vgpu_clean_mmio(vgpu);
+out_clean_idr:
+ idr_remove(&gvt->vgpu_idr, vgpu->id);
out_free_vgpu:
vfree(vgpu);
mutex_unlock(&gvt->lock);
@@ -398,7 +327,75 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
}
/**
- * intel_gvt_reset_vgpu - reset a virtual GPU
+ * intel_gvt_reset_vgpu_locked - reset a virtual GPU by DMLR or GT reset
+ * @vgpu: virtual GPU
+ * @dmlr: vGPU Device Model Level Reset or GT Reset
+ * @engine_mask: engines to reset for GT reset
+ *
+ * This function is called when the user wants to reset a virtual GPU through
+ * device model reset or GT reset. The caller should hold the gvt lock.
+ *
+ * vGPU Device Model Level Reset (DMLR) simulates the PCI level reset to reset
+ * the whole vGPU to the default state, as when it is created. This vGPU
+ * function is required both for functionality and security concerns. The
+ * ultimate goal of vGPU FLR is to allow a vGPU instance to be reused by
+ * virtual machines. When we assign a vGPU to a virtual machine we must
+ * issue such a reset first.
+ *
+ * Full GT Reset and Per-Engine GT Reset are soft reset flow for GPU engines
+ * (Render, Blitter, Video, Video Enhancement). It is defined by GPU Spec.
+ * Unlike the FLR, a GT reset only resets particular resources of a vGPU per
+ * the reset request. The guest driver can issue a GT reset by programming the
+ * virtual GDRST register to reset specific virtual GPU engine or all
+ * engines.
+ *
+ * The parameter dmlr identifies whether we will do a DMLR or a GT reset.
+ * The parameter engine_mask specifies the engines that need to be
+ * reset. If the value ALL_ENGINES is given for engine_mask, it means
+ * the caller requests a full GT reset in which we reset all virtual
+ * GPU engines. For FLR, engine_mask is ignored.
+ */
+void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
+ unsigned int engine_mask)
+{
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+
+ gvt_dbg_core("------------------------------------------\n");
+ gvt_dbg_core("resseting vgpu%d, dmlr %d, engine_mask %08x\n",
+ vgpu->id, dmlr, engine_mask);
+ vgpu->resetting = true;
+
+ intel_vgpu_stop_schedule(vgpu);
+ /*
+ * The current_vgpu will set to NULL after stopping the
+ * scheduler when the reset is triggered by current vgpu.
+ */
+ if (scheduler->current_vgpu == NULL) {
+ mutex_unlock(&gvt->lock);
+ intel_gvt_wait_vgpu_idle(vgpu);
+ mutex_lock(&gvt->lock);
+ }
+
+ intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+
+ /* full GPU reset or device model level reset */
+ if (engine_mask == ALL_ENGINES || dmlr) {
+ intel_vgpu_reset_gtt(vgpu, dmlr);
+ intel_vgpu_reset_resource(vgpu);
+ intel_vgpu_reset_mmio(vgpu);
+ populate_pvinfo_page(vgpu);
+
+ if (dmlr)
+ intel_vgpu_reset_cfg_space(vgpu);
+ }
+
+ vgpu->resetting = false;
+ gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
+ gvt_dbg_core("------------------------------------------\n");
+}
+
+/**
+ * intel_gvt_reset_vgpu - reset a virtual GPU (Function Level)
* @vgpu: virtual GPU
*
* This function is called when user wants to reset a virtual GPU.
@@ -406,4 +403,7 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
+ mutex_lock(&vgpu->gvt->lock);
+ intel_gvt_reset_vgpu_locked(vgpu, true, 0);
+ mutex_unlock(&vgpu->gvt->lock);
}
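The reset rework above splits the path into a _locked worker that assumes gvt->lock is held and a thin public wrapper whose only job is to take the lock, so internal callers that already hold it can reuse the worker without deadlocking. A minimal sketch of this common split, using pthreads in place of the kernel mutex:

#include <pthread.h>

static pthread_mutex_t gvt_lock = PTHREAD_MUTEX_INITIALIZER;

/* Worker: assumes the caller already holds gvt_lock, so paths that
 * hold it for other reasons can call this directly. */
static void device_reset_locked(int full_reset)
{
    (void)full_reset;    /* ... reset state here, lock held ... */
}

/* Public wrapper: its only job is to take and drop the lock. */
static void device_reset(void)
{
    pthread_mutex_lock(&gvt_lock);
    device_reset_locked(1);
    pthread_mutex_unlock(&gvt_lock);
}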
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 445fec9c..b2c4a0b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
assert_forcewakes_inactive(dev_priv);
- if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
DRM_DEBUG_KMS("Device suspended\n");
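The one-liner above is a De Morgan fix: !IS_VALLEYVIEW(...) || !IS_CHERRYVIEW(...) is true on every device, since no part is both, so HPD polling was re-initialised unconditionally; && restricts it to devices that are neither. A tiny standalone demonstration of the two forms:

#include <assert.h>
#include <stdbool.h>

int main(void)
{
    bool vlv = true, chv = false;    /* a Valleyview part */

    /* Buggy form: true whenever at least one test is false, i.e. on
     * every device, since nothing is both VLV and CHV. */
    assert((!vlv || !chv) == true);

    /* Fixed form: true only when the device is neither. */
    assert((!vlv && !chv) == false);

    vlv = chv = false;               /* neither platform */
    assert((!vlv || !chv) == true);
    assert((!vlv && !chv) == true);
    return 0;
}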
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 243224a..69bc3b0c4 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
struct i915_frontbuffer_tracking fb_tracking;
+ struct intel_atomic_helper {
+ struct llist_head free_list;
+ struct work_struct free_work;
+ } atomic_helper;
+
u16 orig_clock;
bool mchbar_need_disable;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3dd7fc6..4b23a78 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -595,47 +595,21 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
- int ret;
/* We manually control the domain here and pretend that it
* remains coherent i.e. in the GTT domain, like shmem_pwrite.
*/
- lockdep_assert_held(&obj->base.dev->struct_mutex);
- ret = i915_gem_object_wait(obj,
- I915_WAIT_INTERRUPTIBLE |
- I915_WAIT_LOCKED |
- I915_WAIT_ALL,
- MAX_SCHEDULE_TIMEOUT,
- to_rps_client(file));
- if (ret)
- return ret;
-
intel_fb_obj_invalidate(obj, ORIGIN_CPU);
- if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
- unsigned long unwritten;
-
- /* The physical object once assigned is fixed for the lifetime
- * of the obj, so we can safely drop the lock and continue
- * to access vaddr.
- */
- mutex_unlock(&dev->struct_mutex);
- unwritten = copy_from_user(vaddr, user_data, args->size);
- mutex_lock(&dev->struct_mutex);
- if (unwritten) {
- ret = -EFAULT;
- goto out;
- }
- }
+ if (copy_from_user(vaddr, user_data, args->size))
+ return -EFAULT;
drm_clflush_virt_range(vaddr, args->size);
- i915_gem_chipset_flush(to_i915(dev));
+ i915_gem_chipset_flush(to_i915(obj->base.dev));
-out:
intel_fb_obj_flush(obj, false, ORIGIN_CPU);
- return ret;
+ return 0;
}
void *i915_gem_object_alloc(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index bd08814..d534a31 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -199,6 +199,7 @@ found:
}
/* Unbinding will emit any required flushes */
+ ret = 0;
while (!list_empty(&eviction_list)) {
vma = list_first_entry(&eviction_list,
struct i915_vma,
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index a792dcb..e924a95 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
return ret;
}
+ trace_i915_vma_bind(vma, bind_flags);
ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 86ecec5..588470e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
struct edid *edid;
struct i2c_adapter *i2c;
+ bool ret = false;
BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
*/
if (!is_digital) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
- return true;
+ ret = true;
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
-
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
} else {
DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
- return false;
+ return ret;
}
static enum drm_connector_status
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 3dc8724..77f7b1d 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
* We only keep the x/y offsets, so push all of the
* gtt offset into the x/y offsets.
*/
- _intel_adjust_tile_offset(&x, &y, tile_size,
- tile_width, tile_height, pitch_tiles,
+ _intel_adjust_tile_offset(&x, &y,
+ tile_width, tile_height,
+ tile_size, pitch_tiles,
gtt_offset_rotated * tile_size, 0);
gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -2967,6 +2968,9 @@ int skl_check_plane_surface(struct intel_plane_state *plane_state)
unsigned int rotation = plane_state->base.rotation;
int ret;
+ if (!plane_state->base.visible)
+ return 0;
+
/* Rotate src coordinates to match rotated GTT view */
if (drm_rotation_90_or_270(rotation))
drm_rect_rotate(&plane_state->base.src,
@@ -6846,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
}
state = drm_atomic_state_alloc(crtc->dev);
+ if (!state) {
+ DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.id, crtc->name);
+ return;
+ }
+
state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
/* Everything's already locked, -EDEADLK can't happen. */
@@ -11243,6 +11253,7 @@ found:
}
old->restore_state = restore_state;
+ drm_atomic_state_put(state);
/* let the connector get through one full cycle before testing */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14512,8 +14523,14 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
break;
case FENCE_FREE:
- drm_atomic_state_put(&state->base);
- break;
+ {
+ struct intel_atomic_helper *helper =
+ &to_i915(state->base.dev)->atomic_helper;
+
+ if (llist_add(&state->freed, &helper->free_list))
+ schedule_work(&helper->free_work);
+ break;
+ }
}
return NOTIFY_DONE;
@@ -16392,6 +16409,18 @@ fail:
drm_modeset_acquire_fini(&ctx);
}
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+ struct intel_atomic_state *state, *next;
+ struct llist_node *freed;
+
+ freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+ llist_for_each_entry_safe(state, next, freed, freed)
+ drm_atomic_state_put(&state->base);
+}
+
int intel_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16411,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
dev->mode_config.funcs = &intel_mode_funcs;
+ INIT_WORK(&dev_priv->atomic_helper.free_work,
+ intel_atomic_helper_free_state);
+
intel_init_quirks(dev);
intel_init_pm(dev_priv);
@@ -17024,7 +17056,8 @@ void intel_display_resume(struct drm_device *dev)
if (ret)
DRM_ERROR("Restoring old state failed with %i\n", ret);
- drm_atomic_state_put(state);
+ if (state)
+ drm_atomic_state_put(state);
}
void intel_modeset_gem_init(struct drm_device *dev)
@@ -17094,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ flush_work(&dev_priv->atomic_helper.free_work);
+ WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
intel_disable_gt_powersave(dev_priv);
/*
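The atomic_helper machinery added above defers freeing commit state from the fence callback to a workqueue: states are pushed onto a lock-free llist, the first push schedules the worker, and the worker detaches the whole list with llist_del_all() before freeing each entry. A simplified single-threaded sketch of that shape (the real llist is lock-free; no attempt is made to reproduce that here):

#include <stdlib.h>

struct node {
    struct node *next;
};

/* Push one node; returns nonzero when the list was previously empty,
 * which is the cue to schedule the worker (llist_add + schedule_work). */
static int push(struct node **head, struct node *n)
{
    n->next = *head;
    *head = n;
    return n->next == NULL;
}

/* Worker: detach the whole list in one step (llist_del_all), then walk
 * and free the entries without holding anything. */
static void drain(struct node **head)
{
    struct node *n = *head;

    *head = NULL;
    while (n) {
        struct node *next = n->next;

        free(n);
        n = next;
    }
}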
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index cd132c2..cd72ae1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -370,6 +370,8 @@ struct intel_atomic_state {
struct skl_wm_values wm_results;
struct i915_sw_fence commit_ready;
+
+ struct llist_node freed;
};
struct intel_plane_state {
diff --git a/drivers/gpu/drm/i915/intel_fbdev.c b/drivers/gpu/drm/i915/intel_fbdev.c
index beb0898..8cf2d80 100644
--- a/drivers/gpu/drm/i915/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/intel_fbdev.c
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
+ if (!ifbdev)
+ return;
+
ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index d4961fa..beabc17 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -979,18 +979,8 @@ static inline int gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine,
uint32_t *batch,
uint32_t index)
{
- struct drm_i915_private *dev_priv = engine->i915;
uint32_t l3sqc4_flush = (0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES);
- /*
- * WaDisableLSQCROPERFforOCL:kbl
- * This WA is implemented in skl_init_clock_gating() but since
- * this batch updates GEN8_L3SQCREG4 with default value we need to
- * set this bit here to retain the WA during flush.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- l3sqc4_flush |= GEN8_LQSC_RO_PERF_DIS;
-
wa_ctx_emit(batch, index, (MI_STORE_REGISTER_MEM_GEN8 |
MI_SRM_LRM_GLOBAL_GTT));
wa_ctx_emit_reg(batch, index, GEN8_L3SQCREG4);
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index aeb637d..91cb4c4 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -1095,14 +1095,6 @@ static int kbl_init_workarounds(struct intel_engine_cs *engine)
WA_SET_BIT_MASKED(HDC_CHICKEN0,
HDC_FENCE_DEST_SLM_DISABLE);
- /* GEN8_L3SQCREG4 has a dependency with WA batch so any new changes
- * involving this register should also be added to WA batch as required.
- */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_E0))
- /* WaDisableLSQCROPERFforOCL:kbl */
- I915_WRITE(GEN8_L3SQCREG4, I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_RO_PERF_DIS);
-
/* WaToEnableHwFixForPushConstHWBug:kbl */
if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
diff --git a/drivers/gpu/drm/meson/meson_plane.c b/drivers/gpu/drm/meson/meson_plane.c
index 4942ca0..7890e30 100644
--- a/drivers/gpu/drm/meson/meson_plane.c
+++ b/drivers/gpu/drm/meson/meson_plane.c
@@ -51,6 +51,9 @@ static int meson_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc_state *crtc_state;
struct drm_rect clip = { 0, };
+ if (!state->crtc)
+ return 0;
+
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
if (IS_ERR(crtc_state))
return PTR_ERR(crtc_state);
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index d836b22..f7c87017 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -38,6 +38,11 @@
* - TV Panel encoding via ENCT
*/
+/* HHI Registers */
+#define HHI_VDAC_CNTL0 0x2F4 /* 0xbd offset in data sheet */
+#define HHI_VDAC_CNTL1 0x2F8 /* 0xbe offset in data sheet */
+#define HHI_HDMI_PHY_CNTL0 0x3a0 /* 0xe8 offset in data sheet */
+
struct meson_cvbs_enci_mode meson_cvbs_enci_pal = {
.mode_tag = MESON_VENC_MODE_CVBS_PAL,
.hso_begin = 3,
@@ -242,6 +247,20 @@ void meson_venc_disable_vsync(struct meson_drm *priv)
void meson_venc_init(struct meson_drm *priv)
{
+ /* Disable CVBS VDAC */
+ regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
+
+ /* Power Down Dacs */
+ writel_relaxed(0xff, priv->io_base + _REG(VENC_VDAC_SETTING));
+
+ /* Disable HDMI PHY */
+ regmap_write(priv->hhi, HHI_HDMI_PHY_CNTL0, 0);
+
+ /* Disable HDMI */
+ writel_bits_relaxed(0x3, 0,
+ priv->io_base + _REG(VPU_HDMI_SETTING));
+
/* Disable all encoders */
writel_relaxed(0, priv->io_base + _REG(ENCI_VIDEO_EN));
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index c809c08..a2bcc70 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -167,7 +167,7 @@ static void meson_venc_cvbs_encoder_disable(struct drm_encoder *encoder)
/* Disable CVBS VDAC */
regmap_write(priv->hhi, HHI_VDAC_CNTL0, 0);
- regmap_write(priv->hhi, HHI_VDAC_CNTL1, 0);
+ regmap_write(priv->hhi, HHI_VDAC_CNTL1, 8);
}
static void meson_venc_cvbs_encoder_enable(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a181261..686a580 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -213,7 +213,14 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
void adreno_flush(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /*
+ * Mask wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -338,7 +345,6 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_mmu *mmu;
int ret;
adreno_gpu->funcs = funcs;
@@ -378,8 +384,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->aspace->mmu;
- if (mmu) {
+ if (gpu->aspace && gpu->aspace->mmu) {
+ struct msm_mmu *mmu = gpu->aspace->mmu;
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
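The adreno fix above masks the write pointer so a command that ends exactly at the top of the ring wraps to 0 instead of being written one slot out of range; this only works because the ring size is a power of two, which the msm_ringbuffer change below now enforces. A small standalone check of the masking arithmetic:

#include <assert.h>
#include <stdint.h>

#define RING_DWORDS 1024u    /* must be a power of two for the mask */

static uint32_t wrap_wptr(uint32_t next_dword)
{
    /* Same arithmetic as the masking above: an index that advanced
     * exactly to RING_DWORDS wraps back to 0. */
    return next_dword & (RING_DWORDS - 1);
}

int main(void)
{
    assert(wrap_wptr(10) == 10);
    assert(wrap_wptr(RING_DWORDS) == 0);     /* the exact-fit case */
    assert(wrap_wptr(RING_DWORDS + 3) == 3);
    return 0;
}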
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 5f6cd87..c396d45 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -119,13 +119,7 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
{
- int i;
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct drm_plane *plane;
- struct drm_plane_state *plane_state;
-
- for_each_plane_in_state(state, plane, plane_state, i)
- mdp5_plane_complete_commit(plane, plane_state);
if (mdp5_kms->smp)
mdp5_smp_complete_commit(mdp5_kms->smp, &mdp5_kms->state->smp);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 17b0cc1..cdfc63d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -104,8 +104,6 @@ struct mdp5_plane_state {
/* assigned by crtc blender */
enum mdp_mixer_stage_id stage;
-
- bool pending : 1;
};
#define to_mdp5_plane_state(x) \
container_of(x, struct mdp5_plane_state, base)
@@ -232,8 +230,6 @@ int mdp5_irq_domain_init(struct mdp5_kms *mdp5_kms);
void mdp5_irq_domain_fini(struct mdp5_kms *mdp5_kms);
uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state);
enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index c099da7..25d9d0a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -179,7 +179,6 @@ mdp5_plane_atomic_print_state(struct drm_printer *p,
drm_printf(p, "\tzpos=%u\n", pstate->zpos);
drm_printf(p, "\talpha=%u\n", pstate->alpha);
drm_printf(p, "\tstage=%s\n", stage2name(pstate->stage));
- drm_printf(p, "\tpending=%u\n", pstate->pending);
}
static void mdp5_plane_reset(struct drm_plane *plane)
@@ -220,8 +219,6 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
if (mdp5_state && mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
- mdp5_state->pending = false;
-
return &mdp5_state->base;
}
@@ -288,13 +285,6 @@ static int mdp5_plane_atomic_check(struct drm_plane *plane,
DBG("%s: check (%d -> %d)", plane->name,
plane_enabled(old_state), plane_enabled(state));
- /* We don't allow faster-than-vblank updates.. if we did add this
- * some day, we would need to disallow in cases where hwpipe
- * changes
- */
- if (WARN_ON(to_mdp5_plane_state(old_state)->pending))
- return -EBUSY;
-
max_width = config->hw->lm.max_width << 16;
max_height = config->hw->lm.max_height << 16;
@@ -370,12 +360,9 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = plane->state;
- struct mdp5_plane_state *mdp5_state = to_mdp5_plane_state(state);
DBG("%s: update", plane->name);
- mdp5_state->pending = true;
-
if (plane_enabled(state)) {
int ret;
@@ -851,15 +838,6 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
return pstate->hwpipe->flush_mask;
}
-/* called after vsync in thread context */
-void mdp5_plane_complete_commit(struct drm_plane *plane,
- struct drm_plane_state *state)
-{
- struct mdp5_plane_state *pstate = to_mdp5_plane_state(plane->state);
-
- pstate->pending = false;
-}
-
/* initialize plane */
struct drm_plane *mdp5_plane_init(struct drm_device *dev, bool primary)
{
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index d8bc59c..8098677 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -294,6 +294,8 @@ put_iova(struct drm_gem_object *obj)
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
+ if (!priv->aspace[id])
+ continue;
msm_gem_unmap_vma(priv->aspace[id],
&msm_obj->domain[id], msm_obj->sgt);
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 166e84e..4896765 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -106,7 +106,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -290,7 +291,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
- int ret;
+ int ret = 0;
if (offset % 4) {
DRM_ERROR("non-aligned cmdstream buffer: %u\n", offset);
@@ -318,12 +319,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
if (ret)
- return -EFAULT;
+ goto out;
if (submit_reloc.submit_offset % 4) {
DRM_ERROR("non-aligned reloc offset: %u\n",
submit_reloc.submit_offset);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
/* offset in dwords: */
@@ -332,12 +334,13 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
if ((off >= (obj->base.size / 4)) ||
(off < last_offset)) {
DRM_ERROR("invalid offset %u at reloc %u\n", off, i);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
if (ret)
- return ret;
+ goto out;
if (valid)
continue;
@@ -354,9 +357,10 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
last_offset = off;
}
+out:
msm_gem_put_vaddr_locked(&obj->base);
- return 0;
+ return ret;
}
static void submit_cleanup(struct msm_gem_submit *submit)
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index f326cf6..67b34e0 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -23,7 +23,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+ if (WARN_ON(!is_power_of_2(size)))
+ return ERR_PTR(-EINVAL);
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index cef08da..6a15776 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -411,7 +411,8 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable polling for external displays */
- drm_kms_helper_poll_enable(dev);
+ if (!dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(dev);
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 59348fc..bc85a45 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
pci_set_master(pdev);
ret = nouveau_do_resume(drm_dev, true);
- drm_kms_helper_poll_enable(drm_dev);
+
+ if (!drm_dev->mode_config.poll_enabled)
+ drm_kms_helper_poll_enable(drm_dev);
+
/* do magic */
nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 8d5ed5b..42c1fa5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -165,6 +165,8 @@ struct nouveau_drm {
struct backlight_device *backlight;
struct list_head bl_connectors;
struct work_struct hpd_work;
+ struct work_struct fbcon_work;
+ int fbcon_new_state;
#ifdef CONFIG_ACPI
struct notifier_block acpi_nb;
#endif
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 2f2a3dc..fa2d0a9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -470,19 +470,43 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
.fb_probe = nouveau_fbcon_create,
};
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
+{
+ struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+ int state = READ_ONCE(drm->fbcon_new_state);
+
+ if (state == FBINFO_STATE_RUNNING)
+ pm_runtime_get_sync(drm->dev->dev);
+
+ console_lock();
+ if (state == FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_restore(drm->dev);
+ drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
+ if (state != FBINFO_STATE_RUNNING)
+ nouveau_fbcon_accel_save_disable(drm->dev);
+ console_unlock();
+
+ if (state == FBINFO_STATE_RUNNING) {
+ pm_runtime_mark_last_busy(drm->dev->dev);
+ pm_runtime_put_sync(drm->dev->dev);
+ }
+}
+
void
nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
{
struct nouveau_drm *drm = nouveau_drm(dev);
- if (drm->fbcon) {
- console_lock();
- if (state == FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_restore(dev);
- drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
- if (state != FBINFO_STATE_RUNNING)
- nouveau_fbcon_accel_save_disable(dev);
- console_unlock();
- }
+
+ if (!drm->fbcon)
+ return;
+
+ drm->fbcon_new_state = state;
+ /* Since runtime resume can happen as a result of a sysfs operation,
+ * it's possible we already have the console locked. So handle fbcon
+ * init/deinit from a separate work thread
+ */
+ schedule_work(&drm->fbcon_work);
}
int
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
return -ENOMEM;
drm->fbcon = fbcon;
+ INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
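The nouveau change above moves the console-lock work into a worker because runtime resume can be triggered from sysfs with the console lock already held; the requested state is recorded and the worker snapshots it once, as READ_ONCE() does in the patch. A rough sketch of that hand-off using C11 atomics (illustrative only):

#include <stdatomic.h>

static atomic_int fbcon_new_state;

/* Caller side: record the latest requested state and kick the worker;
 * safe even if this context already holds the console lock. */
static void fbcon_set_suspend(int state)
{
    atomic_store(&fbcon_new_state, state);
    /* schedule_work(&drm->fbcon_work) in the real driver */
}

/* Worker side: snapshot the state once, as READ_ONCE() does, so a
 * racing update cannot give a torn or inconsistent view. */
static void fbcon_suspend_work(void)
{
    int state = atomic_load(&fbcon_new_state);

    (void)state;    /* take the console lock and apply 'state' here */
}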
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 00ea000..e0c143b 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -366,11 +366,10 @@ static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
/* if we are running in a VM, make sure the device
- * torn down properly on reboot/shutdown.
- * unfortunately we can't detect certain
- * hypervisors so just do this all the time.
+ * is torn down properly on reboot/shutdown
*/
- radeon_pci_remove(pdev);
+ if (radeon_device_is_virtual())
+ radeon_pci_remove(pdev);
}
static int radeon_pmops_suspend(struct device *dev)
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index ad4d7b8..4147768 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -50,7 +50,6 @@ MODULE_FIRMWARE("radeon/tahiti_ce.bin");
MODULE_FIRMWARE("radeon/tahiti_mc.bin");
MODULE_FIRMWARE("radeon/tahiti_rlc.bin");
MODULE_FIRMWARE("radeon/tahiti_smc.bin");
-MODULE_FIRMWARE("radeon/tahiti_k_smc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
@@ -115,6 +114,9 @@ MODULE_FIRMWARE("radeon/hainan_mc.bin");
MODULE_FIRMWARE("radeon/hainan_rlc.bin");
MODULE_FIRMWARE("radeon/hainan_smc.bin");
MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
+MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
+
+MODULE_FIRMWARE("radeon/si58_mc.bin");
static u32 si_get_cu_active_bitmap(struct radeon_device *rdev, u32 se, u32 sh);
static void si_pcie_gen3_enable(struct radeon_device *rdev);
@@ -1651,15 +1653,14 @@ static int si_init_microcode(struct radeon_device *rdev)
int err;
int new_fw = 0;
bool new_smc = false;
+ bool si58_fw = false;
+ bool banks2_fw = false;
DRM_DEBUG("\n");
switch (rdev->family) {
case CHIP_TAHITI:
chip_name = "TAHITI";
- /* XXX: figure out which Tahitis need the new ucode */
- if (0)
- new_smc = true;
new_chip_name = "tahiti";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1671,12 +1672,9 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_PITCAIRN:
chip_name = "PITCAIRN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->device == 0x6810) ||
- (rdev->pdev->device == 0x6811) ||
- (rdev->pdev->device == 0x6816) ||
- (rdev->pdev->device == 0x6817) ||
- (rdev->pdev->device == 0x6806))
+ if ((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6810) ||
+ (rdev->pdev->device == 0x6811)))
new_smc = true;
new_chip_name = "pitcairn";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1689,15 +1687,15 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_VERDE:
chip_name = "VERDE";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B))
+ if (((rdev->pdev->device == 0x6820) &&
+ ((rdev->pdev->revision == 0x81) ||
+ (rdev->pdev->revision == 0x83))) ||
+ ((rdev->pdev->device == 0x6821) &&
+ ((rdev->pdev->revision == 0x83) ||
+ (rdev->pdev->revision == 0x87))) ||
+ ((rdev->pdev->revision == 0x87) &&
+ ((rdev->pdev->device == 0x6823) ||
+ (rdev->pdev->device == 0x682b))))
new_smc = true;
new_chip_name = "verde";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1710,13 +1708,13 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_OLAND:
chip_name = "OLAND";
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605))
+ if (((rdev->pdev->revision == 0x81) &&
+ ((rdev->pdev->device == 0x6600) ||
+ (rdev->pdev->device == 0x6604) ||
+ (rdev->pdev->device == 0x6605) ||
+ (rdev->pdev->device == 0x6610))) ||
+ ((rdev->pdev->revision == 0x83) &&
+ (rdev->pdev->device == 0x6610)))
new_smc = true;
new_chip_name = "oland";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
@@ -1728,13 +1726,17 @@ static int si_init_microcode(struct radeon_device *rdev)
break;
case CHIP_HAINAN:
chip_name = "HAINAN";
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0xC3) ||
- (rdev->pdev->device == 0x6664) ||
- (rdev->pdev->device == 0x6665) ||
- (rdev->pdev->device == 0x6667))
+ if (((rdev->pdev->revision == 0x81) &&
+ (rdev->pdev->device == 0x6660)) ||
+ ((rdev->pdev->revision == 0x83) &&
+ ((rdev->pdev->device == 0x6660) ||
+ (rdev->pdev->device == 0x6663) ||
+ (rdev->pdev->device == 0x6665) ||
+ (rdev->pdev->device == 0x6667))))
new_smc = true;
+ else if ((rdev->pdev->revision == 0xc3) &&
+ (rdev->pdev->device == 0x6665))
+ banks2_fw = true;
new_chip_name = "hainan";
pfp_req_size = SI_PFP_UCODE_SIZE * 4;
me_req_size = SI_PM4_UCODE_SIZE * 4;
@@ -1746,6 +1748,10 @@ static int si_init_microcode(struct radeon_device *rdev)
default: BUG();
}
+ /* this memory configuration requires special firmware */
+ if (((RREG32(MC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
+ si58_fw = true;
+
DRM_INFO("Loading %s Microcode\n", new_chip_name);
snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", new_chip_name);
@@ -1849,7 +1855,10 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
+ if (si58_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/si58_mc.bin");
+ else
+ snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", new_chip_name);
err = request_firmware(&rdev->mc_fw, fw_name, rdev->dev);
if (err) {
snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc2.bin", chip_name);
@@ -1880,7 +1889,9 @@ static int si_init_microcode(struct radeon_device *rdev)
}
}
- if (new_smc)
+ if (banks2_fw)
+ snprintf(fw_name, sizeof(fw_name), "radeon/banks_k_2_smc.bin");
+ else if (new_smc)
snprintf(fw_name, sizeof(fw_name), "radeon/%s_k_smc.bin", new_chip_name);
else
snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", new_chip_name);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8b5e697..2944916 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -3008,30 +3008,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6817) ||
(rdev->pdev->device == 0x6806))
max_mclk = 120000;
- } else if (rdev->family == CHIP_VERDE) {
- if ((rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6820) ||
- (rdev->pdev->device == 0x6821) ||
- (rdev->pdev->device == 0x6822) ||
- (rdev->pdev->device == 0x6823) ||
- (rdev->pdev->device == 0x682A) ||
- (rdev->pdev->device == 0x682B)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
- } else if (rdev->family == CHIP_OLAND) {
- if ((rdev->pdev->revision == 0xC7) ||
- (rdev->pdev->revision == 0x80) ||
- (rdev->pdev->revision == 0x81) ||
- (rdev->pdev->revision == 0x83) ||
- (rdev->pdev->revision == 0x87) ||
- (rdev->pdev->device == 0x6604) ||
- (rdev->pdev->device == 0x6605)) {
- max_sclk = 75000;
- max_mclk = 80000;
- }
} else if (rdev->family == CHIP_HAINAN) {
if ((rdev->pdev->revision == 0x81) ||
(rdev->pdev->revision == 0x83) ||
@@ -3040,7 +3016,6 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
(rdev->pdev->device == 0x6665) ||
(rdev->pdev->device == 0x6667)) {
max_sclk = 75000;
- max_mclk = 80000;
}
}
/* Apply dpm quirks */
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
index 725dffa..6dfdb14 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_crtc.c
@@ -856,7 +856,7 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
struct tilcdc_crtc *tilcdc_crtc = to_tilcdc_crtc(crtc);
struct drm_device *dev = crtc->dev;
struct tilcdc_drm_private *priv = dev->dev_private;
- uint32_t stat;
+ uint32_t stat, reg;
stat = tilcdc_read_irqstatus(dev);
tilcdc_clear_irqstatus(dev, stat);
@@ -921,17 +921,26 @@ irqreturn_t tilcdc_crtc_irq(struct drm_crtc *crtc)
dev_err_ratelimited(dev->dev, "%s(0x%08x): Sync lost",
__func__, stat);
tilcdc_crtc->frame_intact = false;
- if (tilcdc_crtc->sync_lost_count++ >
- SYNC_LOST_COUNT_LIMIT) {
- dev_err(dev->dev, "%s(0x%08x): Sync lost flood detected, recovering", __func__, stat);
- queue_work(system_wq, &tilcdc_crtc->recover_work);
- if (priv->rev == 1)
+ if (priv->rev == 1) {
+ reg = tilcdc_read(dev, LCDC_RASTER_CTRL_REG);
+ if (reg & LCDC_RASTER_ENABLE) {
tilcdc_clear(dev, LCDC_RASTER_CTRL_REG,
- LCDC_V1_SYNC_LOST_INT_ENA);
- else
+ LCDC_RASTER_ENABLE);
+ tilcdc_set(dev, LCDC_RASTER_CTRL_REG,
+ LCDC_RASTER_ENABLE);
+ }
+ } else {
+ if (tilcdc_crtc->sync_lost_count++ >
+ SYNC_LOST_COUNT_LIMIT) {
+ dev_err(dev->dev,
+ "%s(0x%08x): Sync lost flood detected, recovering",
+ __func__, stat);
+ queue_work(system_wq,
+ &tilcdc_crtc->recover_work);
tilcdc_write(dev, LCDC_INT_ENABLE_CLR_REG,
LCDC_SYNC_LOST);
- tilcdc_crtc->sync_lost_count = 0;
+ tilcdc_crtc->sync_lost_count = 0;
+ }
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index a0fd3e6..7aadce1 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
}
- __drm_atomic_helper_crtc_destroy_state(state);
+ drm_atomic_helper_crtc_destroy_state(crtc, state);
}
static const struct drm_crtc_funcs vc4_crtc_funcs = {
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index db92077..ab30169 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
args->shader_rec_count);
struct vc4_bo *bo;
- if (uniforms_offset < shader_rec_offset ||
+ if (shader_rec_offset < args->bin_cl_size ||
+ uniforms_offset < shader_rec_offset ||
exec_size < uniforms_offset ||
args->shader_rec_count >= (UINT_MAX /
sizeof(struct vc4_shader_state)) ||
temp_size < exec_size) {
DRM_ERROR("overflow in exec arguments\n");
+ ret = -EINVAL;
goto fail;
}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 08886a3..5cdd003 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
}
ret = vc4_full_res_bounds_check(exec, *obj, surf);
- if (!ret)
+ if (ret)
return ret;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index dd21f95..cde9f37 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -331,7 +331,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
info->fbops = &virtio_gpufb_ops;
info->pixmap.flags = FB_PIXMAP_SYSTEM;
- info->screen_base = obj->vmap;
+ info->screen_buffer = obj->vmap;
info->screen_size = obj->gem_base.size;
drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(info, &vfbdev->helper,
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index cff060b..ea36b55 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -2496,6 +2496,7 @@ static const struct hid_device_id hid_ignore_list[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0002) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0003) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PANJIT, 0x0004) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_PETZL, USB_DEVICE_ID_PETZL_HEADLAMP) },
{ HID_USB_DEVICE(USB_VENDOR_ID_PHILIPS, USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_POWERCOM, USB_DEVICE_ID_POWERCOM_UPS) },
#if IS_ENABLED(CONFIG_MOUSE_SYNAPTICS_USB)
diff --git a/drivers/hid/hid-corsair.c b/drivers/hid/hid-corsair.c
index 717704e..c0303f6 100644
--- a/drivers/hid/hid-corsair.c
+++ b/drivers/hid/hid-corsair.c
@@ -148,26 +148,36 @@ static enum led_brightness k90_backlight_get(struct led_classdev *led_cdev)
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int brightness;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 5) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
brightness = data[4];
if (brightness < 0 || brightness > 3) {
dev_warn(dev,
"Read invalid backlight brightness: %02hhx.\n",
data[4]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return brightness;
+ ret = brightness;
+out:
+ kfree(data);
+
+ return ret;
}
static enum led_brightness k90_record_led_get(struct led_classdev *led_cdev)
@@ -253,17 +263,22 @@ static ssize_t k90_show_macro_mode(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
const char *macro_mode;
- char data[8];
+ char *data;
+
+ data = kmalloc(2, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_GET_MODE,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 2,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 1) {
dev_warn(dev, "Failed to get K90 initial mode (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
switch (data[0]) {
@@ -277,10 +292,15 @@ static ssize_t k90_show_macro_mode(struct device *dev,
default:
dev_warn(dev, "K90 in unknown mode: %02hhx.\n",
data[0]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+ ret = snprintf(buf, PAGE_SIZE, "%s\n", macro_mode);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_macro_mode(struct device *dev,
@@ -320,26 +340,36 @@ static ssize_t k90_show_current_profile(struct device *dev,
struct usb_interface *usbif = to_usb_interface(dev->parent);
struct usb_device *usbdev = interface_to_usbdev(usbif);
int current_profile;
- char data[8];
+ char *data;
+
+ data = kmalloc(8, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
K90_REQUEST_STATUS,
USB_DIR_IN | USB_TYPE_VENDOR |
USB_RECIP_DEVICE, 0, 0, data, 8,
USB_CTRL_SET_TIMEOUT);
- if (ret < 0) {
+ if (ret < 8) {
dev_warn(dev, "Failed to get K90 initial state (error %d).\n",
ret);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
current_profile = data[7];
if (current_profile < 1 || current_profile > 3) {
dev_warn(dev, "Read invalid current profile: %02hhx.\n",
data[7]);
- return -EIO;
+ ret = -EIO;
+ goto out;
}
- return snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+ ret = snprintf(buf, PAGE_SIZE, "%d\n", current_profile);
+out:
+ kfree(data);
+
+ return ret;
}
static ssize_t k90_store_current_profile(struct device *dev,
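The hid-corsair rework above swaps on-stack buffers for kmalloc'd ones, since USB control transfers need DMA-capable memory, and tightens the return-value checks so short transfers can no longer let the code index uninitialised bytes. A hedged userspace sketch of the pattern (control_read is a stand-in for usb_control_msg, not a real API):

#include <stdlib.h>
#include <string.h>

/* Stand-in for a transfer that may return fewer bytes than asked. */
static int control_read(unsigned char *buf, int len)
{
    memset(buf, 0, len);
    buf[4] = 2;        /* pretend brightness level 2 */
    return len;
}

static int read_brightness(void)
{
    unsigned char *data;
    int ret;

    /* Heap allocation mirrors the kmalloc() fix above: USB control
     * transfers must not use on-stack buffers, which may not be
     * DMA-capable on all architectures. */
    data = malloc(8);
    if (!data)
        return -1;

    ret = control_read(data, 8);
    if (ret < 5) {      /* need at least 5 bytes for data[4] to be valid */
        ret = -1;
        goto out;
    }
    ret = data[4];
out:
    free(data);
    return ret;
}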
diff --git a/drivers/hid/hid-cypress.c b/drivers/hid/hid-cypress.c
index 1b764d1..1689568 100644
--- a/drivers/hid/hid-cypress.c
+++ b/drivers/hid/hid-cypress.c
@@ -39,6 +39,9 @@ static __u8 *cp_report_fixup(struct hid_device *hdev, __u8 *rdesc,
if (!(quirks & CP_RDESC_SWAPPED_MIN_MAX))
return rdesc;
+ if (*rsize < 4)
+ return rdesc;
+
for (i = 0; i < *rsize - 4; i++)
if (rdesc[i] == 0x29 && rdesc[i + 2] == 0x19) {
rdesc[i] = 0x19;
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 54bd22d..f46f2c5 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -816,6 +816,9 @@
#define USB_VENDOR_ID_PETALYNX 0x18b1
#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
+#define USB_VENDOR_ID_PETZL 0x2122
+#define USB_DEVICE_ID_PETZL_HEADLAMP 0x1234
+
#define USB_VENDOR_ID_PHILIPS 0x0471
#define USB_DEVICE_ID_PHILIPS_IEEE802154_DONGLE 0x0617
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index 78fb32a..ea3c354 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -426,6 +426,15 @@ static int i2c_hid_hwreset(struct i2c_client *client)
if (ret)
goto out_unlock;
+ /*
+ * The HID over I2C specification states that if a DEVICE needs time
+ * after the PWR_ON request, it should utilise CLOCK stretching.
+ * However, it has been observed that the Windows driver provides a
+ * 1ms sleep between the PWR_ON and RESET requests and that some devices
+ * rely on this.
+ */
+ usleep_range(1000, 5000);
+
i2c_hid_dbg(ihid, "resetting...\n");
ret = i2c_hid_command(client, &hid_reset_cmd, NULL, 0);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index b9779bc..8aeca03 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -740,6 +740,11 @@ static int wacom_add_shared_data(struct hid_device *hdev)
return retval;
}
+ if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
+ wacom_wac->shared->touch = hdev;
+ else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
+ wacom_wac->shared->pen = hdev;
+
out:
mutex_unlock(&wacom_udev_list_lock);
return retval;
@@ -2036,10 +2041,6 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
if (error)
goto fail;
- error = wacom_add_shared_data(hdev);
- if (error)
- goto fail;
-
/*
* Bamboo Pad has a generic hid handling for the Pen, and we switch it
* into debug mode for the touch part.
@@ -2080,10 +2081,9 @@ static int wacom_parse_and_register(struct wacom *wacom, bool wireless)
wacom_update_name(wacom, wireless ? " (WL)" : "");
- if (wacom_wac->features.device_type & WACOM_DEVICETYPE_TOUCH)
- wacom_wac->shared->touch = hdev;
- else if (wacom_wac->features.device_type & WACOM_DEVICETYPE_PEN)
- wacom_wac->shared->pen = hdev;
+ error = wacom_add_shared_data(hdev);
+ if (error)
+ goto fail;
if (!(features->device_type & WACOM_DEVICETYPE_WL_MONITOR) &&
(features->quirks & WACOM_QUIRK_BATTERY)) {
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index b1a9a3c..0884dc9 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2187,6 +2187,16 @@ void wacom_wac_report(struct hid_device *hdev, struct hid_report *report)
wacom_report_events(hdev, report);
+ /*
+ * Non-input reports may be sent prior to the device being
+ * completely initialized. Since only their events need
+ * to be processed, exit after 'wacom_report_events' has
+ * been called to prevent potential crashes in the report-
+ * processing functions.
+ */
+ if (report->type != HID_INPUT_REPORT)
+ return;
+
if (WACOM_PAD_FIELD(field)) {
wacom_wac_pad_battery_report(hdev, report);
if (wacom->wacom_wac.pad_input)
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index c2268cd..e34d82e 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -585,10 +585,29 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
u8 command, int size, union i2c_smbus_data *data)
{
struct i2c_piix4_adapdata *adapdata = i2c_get_adapdata(adap);
+ unsigned short piix4_smba = adapdata->smba;
+ int retries = MAX_TIMEOUT;
+ int smbslvcnt;
u8 smba_en_lo;
u8 port;
int retval;
+ /* Request the SMBus semaphore to avoid conflicts with the IMC */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ do {
+ outb_p(smbslvcnt | 0x10, SMBSLVCNT);
+
+ /* Check the semaphore status */
+ smbslvcnt = inb_p(SMBSLVCNT);
+ if (smbslvcnt & 0x10)
+ break;
+
+ usleep_range(1000, 2000);
+ } while (--retries);
+ /* The SMBus is still owned by the IMC, so give up */
+ if (!retries)
+ return -EBUSY;
+
mutex_lock(&piix4_mutex_sb800);
outb_p(piix4_port_sel_sb800, SB800_PIIX4_SMB_IDX);
@@ -606,6 +625,9 @@ static s32 piix4_access_sb800(struct i2c_adapter *adap, u16 addr,
mutex_unlock(&piix4_mutex_sb800);
+ /* Release the semaphore */
+ outb_p(smbslvcnt | 0x20, SMBSLVCNT);
+
return retval;
}
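The piix4 patch above adds a request/poll/release handshake on SMBSLVCNT so the host driver does not race the IMC for the SMBus. A rough sketch of the retry-loop shape (the hardware bits and the sleep are stubbed out; this is not the driver code):

#include <stdbool.h>

#define MAX_TRIES 500

static bool imc_owns_bus;    /* stand-in for the IMC holding SMBSLVCNT */

/* Set the request bit and read the status back, as the patch does with
 * bit 0x10 of SMBSLVCNT; here the hardware is reduced to one flag. */
static bool try_take_semaphore(void)
{
    return !imc_owns_bus;
}

static int with_bus_semaphore(void (*xact)(void))
{
    int retries = MAX_TRIES;

    do {
        if (try_take_semaphore())
            break;
        /* usleep_range(1000, 2000) in the driver */
    } while (--retries);

    if (!retries)
        return -1;    /* -EBUSY: the IMC never let go */

    xact();           /* run the SMBus transaction */
    /* write bit 0x20 to SMBSLVCNT to release, in the real driver */
    return 0;
}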
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index cf9e396..583e950 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -931,7 +931,10 @@ static int i2c_device_probe(struct device *dev)
if (!client->irq) {
int irq = -ENOENT;
- if (dev->of_node) {
+ if (client->flags & I2C_CLIENT_HOST_NOTIFY) {
+ dev_dbg(dev, "Using Host Notify IRQ\n");
+ irq = i2c_smbus_host_notify_to_irq(client);
+ } else if (dev->of_node) {
irq = of_irq_get_byname(dev->of_node, "irq");
if (irq == -EINVAL || irq == -ENODATA)
irq = of_irq_get(dev->of_node, 0);
@@ -940,14 +943,7 @@ static int i2c_device_probe(struct device *dev)
}
if (irq == -EPROBE_DEFER)
return irq;
- /*
- * ACPI and OF did not find any useful IRQ, try to see
- * if Host Notify can be used.
- */
- if (irq < 0) {
- dev_dbg(dev, "Using Host Notify IRQ\n");
- irq = i2c_smbus_host_notify_to_irq(client);
- }
+
if (irq < 0)
irq = 0;
@@ -1708,7 +1704,7 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
if (i2c_check_addr_validity(addr, info.flags)) {
dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
- info.addr, node->full_name);
+ addr, node->full_name);
return ERR_PTR(-EINVAL);
}
@@ -1716,6 +1712,9 @@ static struct i2c_client *of_i2c_register_device(struct i2c_adapter *adap,
info.of_node = of_node_get(node);
info.archdata = &dev_ad;
+ if (of_property_read_bool(node, "host-notify"))
+ info.flags |= I2C_CLIENT_HOST_NOTIFY;
+
if (of_get_property(node, "wakeup-source", NULL))
info.flags |= I2C_CLIENT_WAKE;
@@ -3633,7 +3632,7 @@ int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb)
int ret;
if (!client || !slave_cb) {
- WARN(1, "insufficent data\n");
+ WARN(1, "insufficient data\n");
return -EINVAL;
}
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 66f323f..6f638bb 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -331,7 +331,7 @@ static noinline int i2cdev_ioctl_smbus(struct i2c_client *client,
unsigned long arg)
{
struct i2c_smbus_ioctl_data data_arg;
- union i2c_smbus_data temp;
+ union i2c_smbus_data temp = {};
int datasize, res;
if (copy_from_user(&data_arg,
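The i2c-dev one-liner above zero-initialises the union before the transaction so that a later copy back to user space cannot leak stale kernel stack bytes when the call fills in less than the full union. A minimal illustration of the idiom:

#include <string.h>

union smbus_data {
    unsigned char byte;
    unsigned short word;
    unsigned char block[34];    /* sized like I2C_SMBUS_BLOCK_MAX + 2 */
};

/* '= {0}' zeroes every byte of the union, so copying 'temp' back to the
 * caller can never expose stale stack contents when the transaction
 * fills in less than the full union. */
void smbus_example(void)
{
    union smbus_data temp = {0};

    memset(&temp.block[1], 0xaa, 4);    /* partial fill, rest stays 0 */
    (void)temp;
}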
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index e7dcfac..3e70a9c 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -2811,7 +2811,8 @@ static int cma_bind_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
if (!src_addr || !src_addr->sa_family) {
src_addr = (struct sockaddr *) &id->route.addr.src_addr;
src_addr->sa_family = dst_addr->sa_family;
- if (dst_addr->sa_family == AF_INET6) {
+ if (IS_ENABLED(CONFIG_IPV6) &&
+ dst_addr->sa_family == AF_INET6) {
struct sockaddr_in6 *src_addr6 = (struct sockaddr_in6 *) src_addr;
struct sockaddr_in6 *dst_addr6 = (struct sockaddr_in6 *) dst_addr;
src_addr6->sin6_scope_id = dst_addr6->sin6_scope_id;
diff --git a/drivers/infiniband/core/umem.c b/drivers/infiniband/core/umem.c
index 1e62a5f..4609b92 100644
--- a/drivers/infiniband/core/umem.c
+++ b/drivers/infiniband/core/umem.c
@@ -134,6 +134,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_MW_BIND));
if (access & IB_ACCESS_ON_DEMAND) {
+ put_pid(umem->pid);
ret = ib_umem_odp_get(context, umem);
if (ret) {
kfree(umem);
@@ -149,6 +150,7 @@ struct ib_umem *ib_umem_get(struct ib_ucontext *context, unsigned long addr,
page_list = (struct page **) __get_free_page(GFP_KERNEL);
if (!page_list) {
+ put_pid(umem->pid);
kfree(umem);
return ERR_PTR(-ENOMEM);
}
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 9d5fe18..6262dc0 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -1135,16 +1135,7 @@ static int iwch_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
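This is the first of four identical cleanups in this series (cxgb4, i40iw, and nes follow): an open-coded MTU ladder becomes a call to ib_mtu_int_to_enum(). From the removed code, the helper evidently maps a byte MTU to the largest IB_MTU_* enum value that does not exceed it; a reconstruction of that mapping (the function body is inferred from the removed ladder, not copied from the core headers):

/* Inferred behaviour of ib_mtu_int_to_enum(). */
enum ib_mtu {
	IB_MTU_256 = 1,
	IB_MTU_512 = 2,
	IB_MTU_1024 = 3,
	IB_MTU_2048 = 4,
	IB_MTU_4096 = 5
};

static enum ib_mtu ib_mtu_int_to_enum_sketch(int mtu)
{
	if (mtu >= 4096)
		return IB_MTU_4096;
	if (mtu >= 2048)
		return IB_MTU_2048;
	if (mtu >= 1024)
		return IB_MTU_1024;
	if (mtu >= 512)
		return IB_MTU_512;
	return IB_MTU_256;
}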
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index f1510cc..9398143 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1804,20 +1804,21 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
skb_trim(skb, dlen);
mutex_lock(&ep->com.mutex);
- /* update RX credits */
- update_rx_credits(ep, dlen);
-
switch (ep->com.state) {
case MPA_REQ_SENT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_reply(ep, skb);
break;
case MPA_REQ_WAIT:
+ update_rx_credits(ep, dlen);
ep->rcv_seq += dlen;
disconnect = process_mpa_request(ep, skb);
break;
case FPDU_MODE: {
struct c4iw_qp_attributes attrs;
+
+ update_rx_credits(ep, dlen);
BUG_ON(!ep->com.qp);
if (status)
pr_err("%s Unexpected streaming data." \
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index 19c6477..bec82a6 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -505,6 +505,15 @@ static int poll_cq(struct t4_wq *wq, struct t4_cq *cq, struct t4_cqe *cqe,
}
/*
+ * Special cqe for drain WR completions...
+ */
+ if (CQE_OPCODE(hw_cqe) == C4IW_DRAIN_OPCODE) {
+ *cookie = CQE_DRAIN_COOKIE(hw_cqe);
+ *cqe = *hw_cqe;
+ goto skip_cqe;
+ }
+
+ /*
* Gotta tweak READ completions:
* 1) the cqe doesn't contain the sq_wptr from the wr.
* 2) opcode not reflected from the wr.
@@ -753,6 +762,9 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
c4iw_invalidate_mr(qhp->rhp,
CQE_WRID_FR_STAG(&cqe));
break;
+ case C4IW_DRAIN_OPCODE:
+ wc->opcode = IB_WC_SEND;
+ break;
default:
printk(KERN_ERR MOD "Unexpected opcode %d "
"in the CQE received for QPID=0x%0x\n",
@@ -817,15 +829,8 @@ static int c4iw_poll_cq_one(struct c4iw_cq *chp, struct ib_wc *wc)
}
}
out:
- if (wq) {
- if (unlikely(qhp->attr.state != C4IW_QP_STATE_RTS)) {
- if (t4_sq_empty(wq))
- complete(&qhp->sq_drained);
- if (t4_rq_empty(wq))
- complete(&qhp->rq_drained);
- }
+ if (wq)
spin_unlock(&qhp->lock);
- }
return ret;
}
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 516b0ae..40c0e7b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -846,9 +846,17 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
}
}
+ rdev->free_workq = create_singlethread_workqueue("iw_cxgb4_free");
+ if (!rdev->free_workq) {
+ err = -ENOMEM;
+ goto err_free_status_page;
+ }
+
rdev->status_page->db_off = 0;
return 0;
+err_free_status_page:
+ free_page((unsigned long)rdev->status_page);
destroy_ocqp_pool:
c4iw_ocqp_pool_destroy(rdev);
destroy_rqtpool:
@@ -862,6 +870,7 @@ destroy_resource:
static void c4iw_rdev_close(struct c4iw_rdev *rdev)
{
+ destroy_workqueue(rdev->free_workq);
kfree(rdev->wr_log);
free_page((unsigned long)rdev->status_page);
c4iw_pblpool_destroy(rdev);
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 4788e1a..8cd4d05 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -45,6 +45,7 @@
#include <linux/kref.h>
#include <linux/timer.h>
#include <linux/io.h>
+#include <linux/workqueue.h>
#include <asm/byteorder.h>
@@ -107,6 +108,7 @@ struct c4iw_dev_ucontext {
struct list_head qpids;
struct list_head cqids;
struct mutex lock;
+ struct kref kref;
};
enum c4iw_rdev_flags {
@@ -183,6 +185,7 @@ struct c4iw_rdev {
atomic_t wr_log_idx;
struct wr_log_entry *wr_log;
int wr_log_size;
+ struct workqueue_struct *free_workq;
};
static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -480,8 +483,8 @@ struct c4iw_qp {
wait_queue_head_t wait;
struct timer_list timer;
int sq_sig_all;
- struct completion rq_drained;
- struct completion sq_drained;
+ struct work_struct free_work;
+ struct c4iw_ucontext *ucontext;
};
static inline struct c4iw_qp *to_c4iw_qp(struct ib_qp *ibqp)
@@ -495,6 +498,7 @@ struct c4iw_ucontext {
u32 key;
spinlock_t mmap_lock;
struct list_head mmaps;
+ struct kref kref;
};
static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
@@ -502,6 +506,18 @@ static inline struct c4iw_ucontext *to_c4iw_ucontext(struct ib_ucontext *c)
return container_of(c, struct c4iw_ucontext, ibucontext);
}
+void _c4iw_free_ucontext(struct kref *kref);
+
+static inline void c4iw_put_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_put(&ucontext->kref, _c4iw_free_ucontext);
+}
+
+static inline void c4iw_get_ucontext(struct c4iw_ucontext *ucontext)
+{
+ kref_get(&ucontext->kref);
+}
+
struct c4iw_mm_entry {
struct list_head entry;
u64 addr;
@@ -615,6 +631,8 @@ static inline int to_ib_qp_state(int c4iw_qp_state)
return IB_QPS_ERR;
}
+#define C4IW_DRAIN_OPCODE FW_RI_SGE_EC_CR_RETURN
+
static inline u32 c4iw_ib_to_tpt_access(int a)
{
return (a & IB_ACCESS_REMOTE_WRITE ? FW_RI_MEM_ACCESS_REM_WRITE : 0) |
@@ -997,8 +1015,6 @@ extern int c4iw_wr_log;
extern int db_fc_threshold;
extern int db_coalescing_threshold;
extern int use_dsgl;
-void c4iw_drain_rq(struct ib_qp *qp);
-void c4iw_drain_sq(struct ib_qp *qp);
void c4iw_invalidate_mr(struct c4iw_dev *rhp, u32 rkey);
#endif
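The new kref on c4iw_ucontext follows the standard kernel lifetime idiom: kref_init() when the context is allocated, c4iw_get_ucontext() for each additional holder (each QP that caches a ucontext pointer), and c4iw_put_ucontext() whose release callback frees the object only when the last reference drops. A generic, self-contained sketch of the pattern (the 'thing' object is hypothetical; the kref calls are the real API):

/* Generic kref lifetime pattern, as applied to c4iw_ucontext above. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct thing {
	struct kref kref;
	int payload;
};

static void thing_release(struct kref *kref)
{
	struct thing *t = container_of(kref, struct thing, kref);

	kfree(t);			/* last reference is gone */
}

static struct thing *thing_alloc(void)
{
	struct thing *t = kzalloc(sizeof(*t), GFP_KERNEL);

	if (t)
		kref_init(&t->kref);	/* refcount starts at 1 */
	return t;
}

static void thing_get(struct thing *t)
{
	kref_get(&t->kref);
}

static void thing_put(struct thing *t)
{
	kref_put(&t->kref, thing_release);
}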
diff --git a/drivers/infiniband/hw/cxgb4/provider.c b/drivers/infiniband/hw/cxgb4/provider.c
index 49b51b7..3345e1c 100644
--- a/drivers/infiniband/hw/cxgb4/provider.c
+++ b/drivers/infiniband/hw/cxgb4/provider.c
@@ -93,17 +93,28 @@ static int c4iw_process_mad(struct ib_device *ibdev, int mad_flags,
return -ENOSYS;
}
-static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+void _c4iw_free_ucontext(struct kref *kref)
{
- struct c4iw_dev *rhp = to_c4iw_dev(context->device);
- struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_dev *rhp;
struct c4iw_mm_entry *mm, *tmp;
- PDBG("%s context %p\n", __func__, context);
+ ucontext = container_of(kref, struct c4iw_ucontext, kref);
+ rhp = to_c4iw_dev(ucontext->ibucontext.device);
+
+ PDBG("%s ucontext %p\n", __func__, ucontext);
list_for_each_entry_safe(mm, tmp, &ucontext->mmaps, entry)
kfree(mm);
c4iw_release_dev_ucontext(&rhp->rdev, &ucontext->uctx);
kfree(ucontext);
+}
+
+static int c4iw_dealloc_ucontext(struct ib_ucontext *context)
+{
+ struct c4iw_ucontext *ucontext = to_c4iw_ucontext(context);
+
+ PDBG("%s context %p\n", __func__, context);
+ c4iw_put_ucontext(ucontext);
return 0;
}
@@ -127,6 +138,7 @@ static struct ib_ucontext *c4iw_alloc_ucontext(struct ib_device *ibdev,
c4iw_init_dev_ucontext(&rhp->rdev, &context->uctx);
INIT_LIST_HEAD(&context->mmaps);
spin_lock_init(&context->mmap_lock);
+ kref_init(&context->kref);
if (udata->outlen < sizeof(uresp) - sizeof(uresp.reserved)) {
if (!warned++)
@@ -361,16 +373,7 @@ static int c4iw_query_port(struct ib_device *ibdev, u8 port,
memset(props, 0, sizeof(struct ib_port_attr));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
if (!netif_carrier_ok(netdev))
props->state = IB_PORT_DOWN;
@@ -607,8 +610,6 @@ int c4iw_register_device(struct c4iw_dev *dev)
dev->ibdev.uverbs_abi_ver = C4IW_UVERBS_ABI_VERSION;
dev->ibdev.get_port_immutable = c4iw_port_immutable;
dev->ibdev.get_dev_fw_str = get_dev_fw_str;
- dev->ibdev.drain_sq = c4iw_drain_sq;
- dev->ibdev.drain_rq = c4iw_drain_rq;
dev->ibdev.iwcm = kmalloc(sizeof(struct iw_cm_verbs), GFP_KERNEL);
if (!dev->ibdev.iwcm)
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cda5542..04c1c38 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -715,13 +715,32 @@ static int build_inv_stag(union t4_wr *wqe, struct ib_send_wr *wr, u8 *len16)
return 0;
}
-static void _free_qp(struct kref *kref)
+static void free_qp_work(struct work_struct *work)
+{
+ struct c4iw_ucontext *ucontext;
+ struct c4iw_qp *qhp;
+ struct c4iw_dev *rhp;
+
+ qhp = container_of(work, struct c4iw_qp, free_work);
+ ucontext = qhp->ucontext;
+ rhp = qhp->rhp;
+
+ PDBG("%s qhp %p ucontext %p\n", __func__, qhp, ucontext);
+ destroy_qp(&rhp->rdev, &qhp->wq,
+ ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+
+ if (ucontext)
+ c4iw_put_ucontext(ucontext);
+ kfree(qhp);
+}
+
+static void queue_qp_free(struct kref *kref)
{
struct c4iw_qp *qhp;
qhp = container_of(kref, struct c4iw_qp, kref);
PDBG("%s qhp %p\n", __func__, qhp);
- kfree(qhp);
+ queue_work(qhp->rhp->rdev.free_workq, &qhp->free_work);
}
void c4iw_qp_add_ref(struct ib_qp *qp)
@@ -733,7 +752,7 @@ void c4iw_qp_add_ref(struct ib_qp *qp)
void c4iw_qp_rem_ref(struct ib_qp *qp)
{
PDBG("%s ib_qp %p\n", __func__, qp);
- kref_put(&to_c4iw_qp(qp)->kref, _free_qp);
+ kref_put(&to_c4iw_qp(qp)->kref, queue_qp_free);
}
static void add_to_fc_list(struct list_head *head, struct list_head *entry)
@@ -776,6 +795,64 @@ static int ring_kernel_rq_db(struct c4iw_qp *qhp, u16 inc)
return 0;
}
+static void complete_sq_drain_wr(struct c4iw_qp *qhp, struct ib_send_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *schp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ schp = to_c4iw_cq(qhp->ibqp.send_cq);
+ cq = &schp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(1) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&schp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&schp->lock, flag);
+
+ spin_lock_irqsave(&schp->comp_handler_lock, flag);
+ (*schp->ibcq.comp_handler)(&schp->ibcq,
+ schp->ibcq.cq_context);
+ spin_unlock_irqrestore(&schp->comp_handler_lock, flag);
+}
+
+static void complete_rq_drain_wr(struct c4iw_qp *qhp, struct ib_recv_wr *wr)
+{
+ struct t4_cqe cqe = {};
+ struct c4iw_cq *rchp;
+ unsigned long flag;
+ struct t4_cq *cq;
+
+ rchp = to_c4iw_cq(qhp->ibqp.recv_cq);
+ cq = &rchp->cq;
+
+ cqe.u.drain_cookie = wr->wr_id;
+ cqe.header = cpu_to_be32(CQE_STATUS_V(T4_ERR_SWFLUSH) |
+ CQE_OPCODE_V(C4IW_DRAIN_OPCODE) |
+ CQE_TYPE_V(0) |
+ CQE_SWCQE_V(1) |
+ CQE_QPID_V(qhp->wq.sq.qid));
+
+ spin_lock_irqsave(&rchp->lock, flag);
+ cqe.bits_type_ts = cpu_to_be64(CQE_GENBIT_V((u64)cq->gen));
+ cq->sw_queue[cq->sw_pidx] = cqe;
+ t4_swcq_produce(cq);
+ spin_unlock_irqrestore(&rchp->lock, flag);
+
+ spin_lock_irqsave(&rchp->comp_handler_lock, flag);
+ (*rchp->ibcq.comp_handler)(&rchp->ibcq,
+ rchp->ibcq.cq_context);
+ spin_unlock_irqrestore(&rchp->comp_handler_lock, flag);
+}
+
int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
@@ -794,8 +871,8 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_sq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_sq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -937,8 +1014,8 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
spin_lock_irqsave(&qhp->lock, flag);
if (t4_wq_in_error(&qhp->wq)) {
spin_unlock_irqrestore(&qhp->lock, flag);
- *bad_wr = wr;
- return -EINVAL;
+ complete_rq_drain_wr(qhp, wr);
+ return err;
}
num_wrs = t4_rq_avail(&qhp->wq);
if (num_wrs == 0) {
@@ -1550,7 +1627,12 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
}
break;
case C4IW_QP_STATE_CLOSING:
- if (!internal) {
+
+ /*
+ * Allow kernel users to move to ERROR for qp draining.
+ */
+ if (!internal && (qhp->ibqp.uobject || attrs->next_state !=
+ C4IW_QP_STATE_ERROR)) {
ret = -EINVAL;
goto out;
}
@@ -1643,7 +1725,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
struct c4iw_dev *rhp;
struct c4iw_qp *qhp;
struct c4iw_qp_attributes attrs;
- struct c4iw_ucontext *ucontext;
qhp = to_c4iw_qp(ib_qp);
rhp = qhp->rhp;
@@ -1663,11 +1744,6 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
spin_unlock_irq(&rhp->lock);
free_ird(rhp, qhp->attr.max_ird);
- ucontext = ib_qp->uobject ?
- to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
- destroy_qp(&rhp->rdev, &qhp->wq,
- ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
-
c4iw_qp_rem_ref(ib_qp);
PDBG("%s ib_qp %p qpid 0x%0x\n", __func__, ib_qp, qhp->wq.sq.qid);
@@ -1763,11 +1839,10 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
qhp->attr.max_ird = 0;
qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
spin_lock_init(&qhp->lock);
- init_completion(&qhp->sq_drained);
- init_completion(&qhp->rq_drained);
mutex_init(&qhp->mutex);
init_waitqueue_head(&qhp->wait);
kref_init(&qhp->kref);
+ INIT_WORK(&qhp->free_work, free_qp_work);
ret = insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.sq.qid);
if (ret)
@@ -1854,6 +1929,9 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
ma_sync_key_mm->len = PAGE_SIZE;
insert_mmap(ucontext, ma_sync_key_mm);
}
+
+ c4iw_get_ucontext(ucontext);
+ qhp->ucontext = ucontext;
}
qhp->ibqp.qp_num = qhp->wq.sq.qid;
init_timer(&(qhp->timer));
@@ -1958,40 +2036,3 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
return 0;
}
-
-static void move_qp_to_err(struct c4iw_qp *qp)
-{
- struct c4iw_qp_attributes attrs = { .next_state = C4IW_QP_STATE_ERROR };
-
- (void)c4iw_modify_qp(qp->rhp, qp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
-}
-
-void c4iw_drain_sq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_sq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->sq_drained);
-}
-
-void c4iw_drain_rq(struct ib_qp *ibqp)
-{
- struct c4iw_qp *qp = to_c4iw_qp(ibqp);
- unsigned long flag;
- bool need_to_wait;
-
- move_qp_to_err(qp);
- spin_lock_irqsave(&qp->lock, flag);
- need_to_wait = !t4_rq_empty(&qp->wq);
- spin_unlock_irqrestore(&qp->lock, flag);
-
- if (need_to_wait)
- wait_for_completion(&qp->rq_drained);
-}
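The net effect of the qp.c rework: the final kref_put() on a QP no longer frees it inline. queue_qp_free() only queues qhp->free_work, and free_qp_work() later runs destroy_qp() and kfree() from the dedicated iw_cxgb4_free workqueue, where sleeping is allowed regardless of the context that dropped the last reference. A condensed sketch of that defer-to-workqueue release shape (obj and its helpers are invented; the patch uses its own workqueue rather than schedule_work()):

/* kref release that defers blocking teardown to process context. */
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct obj {
	struct kref kref;
	struct work_struct free_work;
};

static void obj_free_work(struct work_struct *work)
{
	struct obj *o = container_of(work, struct obj, free_work);

	/* blocking teardown (the destroy_qp() equivalent) goes here */
	kfree(o);
}

static void obj_queue_free(struct kref *kref)
{
	struct obj *o = container_of(kref, struct obj, kref);

	schedule_work(&o->free_work);	/* safe even from atomic context */
}

static struct obj *obj_alloc(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (o) {
		kref_init(&o->kref);
		INIT_WORK(&o->free_work, obj_free_work);
	}
	return o;
}

static void obj_put(struct obj *o)
{
	kref_put(&o->kref, obj_queue_free);
}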
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 862381a..640d221 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -179,6 +179,7 @@ struct t4_cqe {
__be32 wrid_hi;
__be32 wrid_low;
} gen;
+ u64 drain_cookie;
} u;
__be64 reserved;
__be64 bits_type_ts;
@@ -238,6 +239,7 @@ struct t4_cqe {
/* generic accessor macros */
#define CQE_WRID_HI(x) (be32_to_cpu((x)->u.gen.wrid_hi))
#define CQE_WRID_LOW(x) (be32_to_cpu((x)->u.gen.wrid_low))
+#define CQE_DRAIN_COOKIE(x) ((x)->u.drain_cookie)
/* macros for flit 3 of the cqe */
#define CQE_GENBIT_S 63
diff --git a/drivers/infiniband/hw/i40iw/i40iw_verbs.c b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
index 29e97df..4c000d6 100644
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -100,16 +100,7 @@ static int i40iw_query_port(struct ib_device *ibdev,
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
if (netif_carrier_ok(iwdev->netdev))
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index a191b93..9d85353 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -53,6 +53,7 @@
#include <linux/in.h>
#include <linux/etherdevice.h>
#include <linux/mlx5/fs.h>
+#include <linux/mlx5/vport.h>
#include "mlx5_ib.h"
#define DRIVER_NAME "mlx5_ib"
@@ -1202,6 +1203,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
resp.response_length += sizeof(resp.cmds_supp_uhw);
}
+ if (field_avail(typeof(resp), eth_min_inline, udata->outlen)) {
+ if (mlx5_ib_port_link_layer(ibdev, 1) == IB_LINK_LAYER_ETHERNET) {
+ mlx5_query_min_inline(dev->mdev, &resp.eth_min_inline);
+ resp.eth_min_inline++;
+ }
+ resp.response_length += sizeof(resp.eth_min_inline);
+ }
+
/*
* We don't want to expose information from the PCI bar that is located
* after 4096 bytes, so if the arch only supports larger pages, let's
@@ -1704,9 +1713,9 @@ static int parse_flow_attr(u32 *match_c, u32 *match_v,
if (ib_spec->eth.mask.vlan_tag) {
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
first_vid, ntohs(ib_spec->eth.mask.vlan_tag));
diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
index aff9fb1..5a31f3c 100644
--- a/drivers/infiniband/hw/nes/nes_verbs.c
+++ b/drivers/infiniband/hw/nes/nes_verbs.c
@@ -478,17 +478,7 @@ static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr
memset(props, 0, sizeof(*props));
props->max_mtu = IB_MTU_4096;
-
- if (netdev->mtu >= 4096)
- props->active_mtu = IB_MTU_4096;
- else if (netdev->mtu >= 2048)
- props->active_mtu = IB_MTU_2048;
- else if (netdev->mtu >= 1024)
- props->active_mtu = IB_MTU_1024;
- else if (netdev->mtu >= 512)
- props->active_mtu = IB_MTU_512;
- else
- props->active_mtu = IB_MTU_256;
+ props->active_mtu = ib_mtu_int_to_enum(netdev->mtu);
props->lid = 1;
props->lmc = 0;
diff --git a/drivers/infiniband/hw/qedr/main.c b/drivers/infiniband/hw/qedr/main.c
index 7b74d09..3ac8aa5 100644
--- a/drivers/infiniband/hw/qedr/main.c
+++ b/drivers/infiniband/hw/qedr/main.c
@@ -576,8 +576,7 @@ static int qedr_set_device_attr(struct qedr_dev *dev)
return 0;
}
-void qedr_unaffiliated_event(void *context,
- u8 event_code)
+void qedr_unaffiliated_event(void *context, u8 event_code)
{
pr_err("unaffiliated event not implemented yet\n");
}
@@ -792,6 +791,9 @@ static struct qedr_dev *qedr_add(struct qed_dev *cdev, struct pci_dev *pdev,
if (device_create_file(&dev->ibdev.dev, qedr_attributes[i]))
goto sysfs_err;
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+
DP_DEBUG(dev, QEDR_MSG_INIT, "qedr driver loaded successfully\n");
return dev;
@@ -824,11 +826,10 @@ static void qedr_remove(struct qedr_dev *dev)
ib_dealloc_device(&dev->ibdev);
}
-static int qedr_close(struct qedr_dev *dev)
+static void qedr_close(struct qedr_dev *dev)
{
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ERR);
-
- return 0;
+ if (test_and_clear_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ERR);
}
static void qedr_shutdown(struct qedr_dev *dev)
@@ -837,6 +838,12 @@ static void qedr_shutdown(struct qedr_dev *dev)
qedr_remove(dev);
}
+static void qedr_open(struct qedr_dev *dev)
+{
+ if (!test_and_set_bit(QEDR_ENET_STATE_BIT, &dev->enet_state))
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_PORT_ACTIVE);
+}
+
static void qedr_mac_address_change(struct qedr_dev *dev)
{
union ib_gid *sgid = &dev->sgid_tbl[0];
@@ -863,7 +870,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
ether_addr_copy(dev->gsi_ll2_mac_address, dev->ndev->dev_addr);
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_GID_CHANGE);
+ qedr_ib_dispatch_event(dev, QEDR_PORT, IB_EVENT_GID_CHANGE);
if (rc)
DP_ERR(dev, "Error updating mac filter\n");
@@ -877,7 +884,7 @@ static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
{
switch (event) {
case QEDE_UP:
- qedr_ib_dispatch_event(dev, 1, IB_EVENT_PORT_ACTIVE);
+ qedr_open(dev);
break;
case QEDE_DOWN:
qedr_close(dev);
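qedr_open()/qedr_close() now funnel port state through a single atomic bit: test_and_set_bit() lets only the first 0->1 transition dispatch IB_EVENT_PORT_ACTIVE, and test_and_clear_bit() lets only the first 1->0 transition dispatch IB_EVENT_PORT_ERR, so repeated QEDE_UP/QEDE_DOWN notifications cannot produce duplicate events. A minimal sketch of the idiom (the bit name and callback are placeholders):

/* One-shot edge detection with an atomic state bit. */
#include <linux/bitops.h>

#define LINK_UP_BIT 0

static unsigned long link_state;

static void report_up(void (*notify)(int up))
{
	if (!test_and_set_bit(LINK_UP_BIT, &link_state))
		notify(1);	/* fires only on the 0 -> 1 transition */
}

static void report_down(void (*notify)(int up))
{
	if (test_and_clear_bit(LINK_UP_BIT, &link_state))
		notify(0);	/* fires only on the 1 -> 0 transition */
}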
diff --git a/drivers/infiniband/hw/qedr/qedr.h b/drivers/infiniband/hw/qedr/qedr.h
index 620badd..bb32e47 100644
--- a/drivers/infiniband/hw/qedr/qedr.h
+++ b/drivers/infiniband/hw/qedr/qedr.h
@@ -113,6 +113,8 @@ struct qedr_device_attr {
struct qed_rdma_events events;
};
+#define QEDR_ENET_STATE_BIT (0)
+
struct qedr_dev {
struct ib_device ibdev;
struct qed_dev *cdev;
@@ -153,6 +155,8 @@ struct qedr_dev {
struct qedr_cq *gsi_sqcq;
struct qedr_cq *gsi_rqcq;
struct qedr_qp *gsi_qp;
+
+ unsigned long enet_state;
};
#define QEDR_MAX_SQ_PBL (0x8000)
@@ -188,6 +192,7 @@ struct qedr_dev {
#define QEDR_ROCE_MAX_CNQ_SIZE (0x4000)
#define QEDR_MAX_PORT (1)
+#define QEDR_PORT (1)
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)
@@ -251,9 +256,6 @@ struct qedr_cq {
u16 icid;
- /* Lock to protect completion handler */
- spinlock_t comp_handler_lock;
-
/* Lock to protect multiple CQs */
spinlock_t cq_lock;
u8 arm_flags;
diff --git a/drivers/infiniband/hw/qedr/qedr_cm.c b/drivers/infiniband/hw/qedr/qedr_cm.c
index 63890eb..a9a8d87 100644
--- a/drivers/infiniband/hw/qedr/qedr_cm.c
+++ b/drivers/infiniband/hw/qedr/qedr_cm.c
@@ -87,11 +87,8 @@ void qedr_ll2_tx_cb(void *_qdev, struct qed_roce_ll2_packet *pkt)
qedr_inc_sw_gsi_cons(&qp->sq);
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
@@ -113,11 +110,8 @@ void qedr_ll2_rx_cb(void *_dev, struct qed_roce_ll2_packet *pkt,
spin_unlock_irqrestore(&qp->q_lock, flags);
- if (cq->ibcq.comp_handler) {
- spin_lock_irqsave(&cq->comp_handler_lock, flags);
+ if (cq->ibcq.comp_handler)
(*cq->ibcq.comp_handler) (&cq->ibcq, cq->ibcq.cq_context);
- spin_unlock_irqrestore(&cq->comp_handler_lock, flags);
- }
}
static void qedr_destroy_gsi_cq(struct qedr_dev *dev,
@@ -404,9 +398,9 @@ static inline int qedr_gsi_build_packet(struct qedr_dev *dev,
}
if (ether_addr_equal(udh.eth.smac_h, udh.eth.dmac_h))
- packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
- else
packet->tx_dest = QED_ROCE_LL2_TX_DEST_LB;
+ else
+ packet->tx_dest = QED_ROCE_LL2_TX_DEST_NW;
packet->roce_mode = roce_mode;
memcpy(packet->header.vaddr, ud_header_buffer, header_size);
diff --git a/drivers/infiniband/hw/qedr/verbs.c b/drivers/infiniband/hw/qedr/verbs.c
index 57c8de2..c7d6c9a 100644
--- a/drivers/infiniband/hw/qedr/verbs.c
+++ b/drivers/infiniband/hw/qedr/verbs.c
@@ -471,8 +471,6 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
struct ib_ucontext *context, struct ib_udata *udata)
{
struct qedr_dev *dev = get_qedr_dev(ibdev);
- struct qedr_ucontext *uctx = NULL;
- struct qedr_alloc_pd_uresp uresp;
struct qedr_pd *pd;
u16 pd_id;
int rc;
@@ -489,21 +487,33 @@ struct ib_pd *qedr_alloc_pd(struct ib_device *ibdev,
if (!pd)
return ERR_PTR(-ENOMEM);
- dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ rc = dev->ops->rdma_alloc_pd(dev->rdma_ctx, &pd_id);
+ if (rc)
+ goto err;
- uresp.pd_id = pd_id;
pd->pd_id = pd_id;
if (udata && context) {
+ struct qedr_alloc_pd_uresp uresp;
+
+ uresp.pd_id = pd_id;
+
rc = ib_copy_to_udata(udata, &uresp, sizeof(uresp));
- if (rc)
+ if (rc) {
DP_ERR(dev, "copy error pd_id=0x%x.\n", pd_id);
- uctx = get_qedr_ucontext(context);
- uctx->pd = pd;
- pd->uctx = uctx;
+ dev->ops->rdma_dealloc_pd(dev->rdma_ctx, pd_id);
+ goto err;
+ }
+
+ pd->uctx = get_qedr_ucontext(context);
+ pd->uctx->pd = pd;
}
return &pd->ibpd;
+
+err:
+ kfree(pd);
+ return ERR_PTR(rc);
}
int qedr_dealloc_pd(struct ib_pd *ibpd)
@@ -1600,7 +1610,7 @@ err0:
return ERR_PTR(-EFAULT);
}
-enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
+static enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
{
switch (qp_state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1621,7 +1631,8 @@ enum ib_qp_state qedr_get_ibqp_state(enum qed_roce_qp_state qp_state)
return IB_QPS_ERR;
}
-enum qed_roce_qp_state qedr_get_state_from_ibqp(enum ib_qp_state qp_state)
+static enum qed_roce_qp_state qedr_get_state_from_ibqp(
+ enum ib_qp_state qp_state)
{
switch (qp_state) {
case IB_QPS_RESET:
@@ -1657,7 +1668,7 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
int status = 0;
if (new_state == qp->state)
- return 1;
+ return 0;
switch (qp->state) {
case QED_ROCE_QP_STATE_RESET:
@@ -1733,6 +1744,14 @@ static int qedr_update_qp_state(struct qedr_dev *dev,
/* ERR->XXX */
switch (new_state) {
case QED_ROCE_QP_STATE_RESET:
+ if ((qp->rq.prod != qp->rq.cons) ||
+ (qp->sq.prod != qp->sq.cons)) {
+ DP_NOTICE(dev,
+ "Error->Reset with rq/sq not empty rq.prod=%x rq.cons=%x sq.prod=%x sq.cons=%x\n",
+ qp->rq.prod, qp->rq.cons, qp->sq.prod,
+ qp->sq.cons);
+ status = -EINVAL;
+ }
break;
default:
status = -EINVAL;
@@ -1865,7 +1884,6 @@ int qedr_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
qp_params.sgid.dwords[2], qp_params.sgid.dwords[3]);
DP_DEBUG(dev, QEDR_MSG_QP, "remote_mac=[%pM]\n",
qp_params.remote_mac_addr);
-;
qp_params.mtu = qp->mtu;
qp_params.lb_indication = false;
@@ -2016,7 +2034,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->qp_state = qedr_get_ibqp_state(params.state);
qp_attr->cur_qp_state = qedr_get_ibqp_state(params.state);
- qp_attr->path_mtu = iboe_get_mtu(params.mtu);
+ qp_attr->path_mtu = ib_mtu_int_to_enum(params.mtu);
qp_attr->path_mig_state = IB_MIG_MIGRATED;
qp_attr->rq_psn = params.rq_psn;
qp_attr->sq_psn = params.sq_psn;
@@ -2028,7 +2046,7 @@ int qedr_query_qp(struct ib_qp *ibqp,
qp_attr->cap.max_recv_wr = qp->rq.max_wr;
qp_attr->cap.max_send_sge = qp->sq.max_sges;
qp_attr->cap.max_recv_sge = qp->rq.max_sges;
- qp_attr->cap.max_inline_data = qp->max_inline_data;
+ qp_attr->cap.max_inline_data = ROCE_REQ_MAX_INLINE_DATA_SIZE;
qp_init_attr->cap = qp_attr->cap;
memcpy(&qp_attr->ah_attr.grh.dgid.raw[0], &params.dgid.bytes[0],
@@ -2302,7 +2320,8 @@ int qedr_dereg_mr(struct ib_mr *ib_mr)
return rc;
}
-struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd, int max_page_list_len)
+static struct qedr_mr *__qedr_alloc_mr(struct ib_pd *ibpd,
+ int max_page_list_len)
{
struct qedr_pd *pd = get_qedr_pd(ibpd);
struct qedr_dev *dev = get_qedr_dev(ibpd->device);
@@ -2704,7 +2723,7 @@ static int qedr_prepare_reg(struct qedr_qp *qp,
return 0;
}
-enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
+static enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
{
switch (opcode) {
case IB_WR_RDMA_WRITE:
@@ -2729,7 +2748,7 @@ enum ib_wc_opcode qedr_ib_to_wc_opcode(enum ib_wr_opcode opcode)
}
}
-inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
+static inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
{
int wq_is_full, err_wr, pbl_is_full;
struct qedr_dev *dev = qp->dev;
@@ -2766,7 +2785,7 @@ inline bool qedr_can_post_send(struct qedr_qp *qp, struct ib_send_wr *wr)
return true;
}
-int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
+static int __qedr_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
struct ib_send_wr **bad_wr)
{
struct qedr_dev *dev = get_qedr_dev(ibqp->device);
@@ -3234,9 +3253,10 @@ static int qedr_poll_cq_req(struct qedr_dev *dev,
IB_WC_SUCCESS, 0);
break;
case RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR:
- DP_ERR(dev,
- "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
- cq->icid, qp->icid);
+ if (qp->state != QED_ROCE_QP_STATE_ERR)
+ DP_ERR(dev,
+ "Error: POLL CQ with RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR. CQ icid=0x%x, QP icid=0x%x\n",
+ cq->icid, qp->icid);
cnt = process_req(dev, qp, cq, num_entries, wc, req->sq_cons,
IB_WC_WR_FLUSH_ERR, 1);
break;
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
index 231a1ce..bd8fbd3 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_main.c
@@ -1029,7 +1029,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
if (ret) {
dev_err(&pdev->dev, "failed to allocate interrupts\n");
ret = -ENOMEM;
- goto err_netdevice;
+ goto err_free_cq_ring;
}
/* Allocate UAR table. */
@@ -1092,8 +1092,6 @@ err_free_uar_table:
err_free_intrs:
pvrdma_free_irq(dev);
pvrdma_disable_msi_all(dev);
-err_netdevice:
- unregister_netdevice_notifier(&dev->nb_netdev);
err_free_cq_ring:
pvrdma_page_dir_cleanup(dev, &dev->cq_pdir);
err_free_async_ring:
diff --git a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
index 5489137..c2aa526 100644
--- a/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
+++ b/drivers/infiniband/hw/vmw_pvrdma/pvrdma_verbs.c
@@ -306,7 +306,7 @@ struct ib_ucontext *pvrdma_alloc_ucontext(struct ib_device *ibdev,
union pvrdma_cmd_resp rsp;
struct pvrdma_cmd_create_uc *cmd = &req.create_uc;
struct pvrdma_cmd_create_uc_resp *resp = &rsp.create_uc_resp;
- struct pvrdma_alloc_ucontext_resp uresp;
+ struct pvrdma_alloc_ucontext_resp uresp = {0};
int ret;
void *ptr;
diff --git a/drivers/infiniband/sw/rxe/rxe_net.c b/drivers/infiniband/sw/rxe/rxe_net.c
index 342e781..4abdeb3 100644
--- a/drivers/infiniband/sw/rxe/rxe_net.c
+++ b/drivers/infiniband/sw/rxe/rxe_net.c
@@ -555,7 +555,7 @@ struct rxe_dev *rxe_net_add(struct net_device *ndev)
}
spin_lock_bh(&dev_list_lock);
- list_add_tail(&rxe_dev_list, &rxe->list);
+ list_add_tail(&rxe->list, &rxe_dev_list);
spin_unlock_bh(&dev_list_lock);
return rxe;
}
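The rxe_net_add() fix is a pure argument-order bug: list_add_tail() takes the new entry first and the list head second, so the original call spliced the global rxe_dev_list head onto the device's private list instead of the other way round. A small sketch of the correct orientation (struct and function names invented):

/* list_add_tail(new, head): the first argument is the entry being added. */
#include <linux/list.h>

static LIST_HEAD(dev_list);		/* global list head */

struct my_dev {
	struct list_head list;		/* per-object linkage */
};

static void register_dev(struct my_dev *d)
{
	list_add_tail(&d->list, &dev_list);	/* correct: entry, then head */
	/* list_add_tail(&dev_list, &d->list) would corrupt dev_list --
	 * exactly the bug fixed above. */
}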
diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
index 486d576..44b2108 100644
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -813,8 +813,7 @@ void rxe_qp_destroy(struct rxe_qp *qp)
del_timer_sync(&qp->rnr_nak_timer);
rxe_cleanup_task(&qp->req.task);
- if (qp_type(qp) == IB_QPT_RC)
- rxe_cleanup_task(&qp->comp.task);
+ rxe_cleanup_task(&qp->comp.task);
/* flush out any receive wr's or pending requests */
__rxe_do_task(&qp->req.task);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 9104e6b..e71af71 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -651,13 +651,6 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
SHOST_DIX_GUARD_CRC);
}
- /*
- * Limit the sg_tablesize and max_sectors based on the device
- * max fastreg page list length.
- */
- shost->sg_tablesize = min_t(unsigned short, shost->sg_tablesize,
- ib_conn->device->ib_device->attrs.max_fast_reg_page_list_len);
-
if (iscsi_host_add(shost,
ib_conn->device->ib_device->dma_device)) {
mutex_unlock(&iser_conn->state_mutex);
@@ -679,6 +672,10 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
max_fr_sectors = ((shost->sg_tablesize - 1) * PAGE_SIZE) >> 9;
shost->max_sectors = min(iser_max_sectors, max_fr_sectors);
+ iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
+ iser_conn, shost->sg_tablesize,
+ shost->max_sectors);
+
if (cmds_max > max_cmds) {
iser_info("cmds_max changed from %u to %u\n",
cmds_max, max_cmds);
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.h b/drivers/infiniband/ulp/iser/iscsi_iser.h
index 0be6a7c..9d0b22a 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.h
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.h
@@ -496,7 +496,6 @@ struct ib_conn {
* @rx_descs: rx buffers array (cyclic buffer)
* @num_rx_descs: number of rx descriptors
* @scsi_sg_tablesize: scsi host sg_tablesize
- * @scsi_max_sectors: scsi host max sectors
*/
struct iser_conn {
struct ib_conn ib_conn;
@@ -519,7 +518,6 @@ struct iser_conn {
struct iser_rx_desc *rx_descs;
u32 num_rx_descs;
unsigned short scsi_sg_tablesize;
- unsigned int scsi_max_sectors;
bool snd_w_inv;
};
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c
index 8ae7a3b..6a9d1cb 100644
--- a/drivers/infiniband/ulp/iser/iser_verbs.c
+++ b/drivers/infiniband/ulp/iser/iser_verbs.c
@@ -707,18 +707,7 @@ iser_calc_scsi_params(struct iser_conn *iser_conn,
sup_sg_tablesize = min_t(unsigned, ISCSI_ISER_MAX_SG_TABLESIZE,
device->ib_device->attrs.max_fast_reg_page_list_len);
- if (sg_tablesize > sup_sg_tablesize) {
- sg_tablesize = sup_sg_tablesize;
- iser_conn->scsi_max_sectors = sg_tablesize * SIZE_4K / 512;
- } else {
- iser_conn->scsi_max_sectors = max_sectors;
- }
-
- iser_conn->scsi_sg_tablesize = sg_tablesize;
-
- iser_dbg("iser_conn %p, sg_tablesize %u, max_sectors %u\n",
- iser_conn, iser_conn->scsi_sg_tablesize,
- iser_conn->scsi_max_sectors);
+ iser_conn->scsi_sg_tablesize = min(sg_tablesize, sup_sg_tablesize);
}
/**
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 8ddc071..79bf484 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -371,6 +371,7 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
struct srp_fr_desc *d;
struct ib_mr *mr;
int i, ret = -EINVAL;
+ enum ib_mr_type mr_type;
if (pool_size <= 0)
goto err;
@@ -384,9 +385,13 @@ static struct srp_fr_pool *srp_create_fr_pool(struct ib_device *device,
spin_lock_init(&pool->lock);
INIT_LIST_HEAD(&pool->free_list);
+ if (device->attrs.device_cap_flags & IB_DEVICE_SG_GAPS_REG)
+ mr_type = IB_MR_TYPE_SG_GAPS;
+ else
+ mr_type = IB_MR_TYPE_MEM_REG;
+
for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) {
- mr = ib_alloc_mr(pd, IB_MR_TYPE_MEM_REG,
- max_page_list_len);
+ mr = ib_alloc_mr(pd, mr_type, max_page_list_len);
if (IS_ERR(mr)) {
ret = PTR_ERR(mr);
if (ret == -ENOMEM)
@@ -3694,6 +3699,12 @@ static int __init srp_init_module(void)
indirect_sg_entries = cmd_sg_entries;
}
+ if (indirect_sg_entries > SG_MAX_SEGMENTS) {
+ pr_warn("Clamping indirect_sg_entries to %u\n",
+ SG_MAX_SEGMENTS);
+ indirect_sg_entries = SG_MAX_SEGMENTS;
+ }
+
srp_remove_wq = create_workqueue("srp_remove");
if (!srp_remove_wq) {
ret = -ENOMEM;
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index f3135ae..abd18f3 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -22,7 +22,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
-#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/init.h>
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6d94996..c7d5b2b 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1377,6 +1377,12 @@ static int xpad_init_input(struct usb_xpad *xpad)
input_dev->name = xpad->name;
input_dev->phys = xpad->phys;
usb_to_input_id(xpad->udev, &input_dev->id);
+
+ if (xpad->xtype == XTYPE_XBOX360W) {
+ /* x360w controllers and the receiver have different ids */
+ input_dev->id.product = 0x02a1;
+ }
+
input_dev->dev.parent = &xpad->intf->dev;
input_set_drvdata(input_dev, xpad);
diff --git a/drivers/input/misc/adxl34x-i2c.c b/drivers/input/misc/adxl34x-i2c.c
index a8b0a2e..7fed92f 100644
--- a/drivers/input/misc/adxl34x-i2c.c
+++ b/drivers/input/misc/adxl34x-i2c.c
@@ -136,7 +136,6 @@ static const struct i2c_device_id adxl34x_id[] = {
MODULE_DEVICE_TABLE(i2c, adxl34x_id);
-#ifdef CONFIG_OF
static const struct of_device_id adxl34x_of_id[] = {
/*
* The ADXL346 is backward-compatible with the ADXL345. Differences are
@@ -153,13 +152,12 @@ static const struct of_device_id adxl34x_of_id[] = {
};
MODULE_DEVICE_TABLE(of, adxl34x_of_id);
-#endif
static struct i2c_driver adxl34x_driver = {
.driver = {
.name = "adxl34x",
.pm = &adxl34x_i2c_pm,
- .of_match_table = of_match_ptr(adxl34x_of_id),
+ .of_match_table = adxl34x_of_id,
},
.probe = adxl34x_i2c_probe,
.remove = adxl34x_i2c_remove,
diff --git a/drivers/input/mouse/alps.h b/drivers/input/mouse/alps.h
index cde6f4b..6d279aa 100644
--- a/drivers/input/mouse/alps.h
+++ b/drivers/input/mouse/alps.h
@@ -114,7 +114,7 @@ enum SS4_PACKET_ID {
(_b[1] & 0x7F) \
)
-#define SS4_TS_Y_V2(_b) (s8)( \
+#define SS4_TS_Y_V2(_b) -(s8)( \
((_b[3] & 0x01) << 7) | \
(_b[2] & 0x7F) \
)
diff --git a/drivers/input/mouse/synaptics_i2c.c b/drivers/input/mouse/synaptics_i2c.c
index aa7c5da..cb2bf20 100644
--- a/drivers/input/mouse/synaptics_i2c.c
+++ b/drivers/input/mouse/synaptics_i2c.c
@@ -29,7 +29,7 @@
* after soft reset, we should wait for 1 ms
* before the device becomes operational
*/
-#define SOFT_RESET_DELAY_MS 3
+#define SOFT_RESET_DELAY_US 3000
/* and after hard reset, we should wait for max 500ms */
#define HARD_RESET_DELAY_MS 500
@@ -311,7 +311,7 @@ static int synaptics_i2c_reset_config(struct i2c_client *client)
if (ret) {
dev_err(&client->dev, "Unable to reset device\n");
} else {
- msleep(SOFT_RESET_DELAY_MS);
+ usleep_range(SOFT_RESET_DELAY_US, SOFT_RESET_DELAY_US + 100);
ret = synaptics_i2c_config(client);
if (ret)
dev_err(&client->dev, "Unable to config device\n");
diff --git a/drivers/input/rmi4/Kconfig b/drivers/input/rmi4/Kconfig
index 30cc627..8993983 100644
--- a/drivers/input/rmi4/Kconfig
+++ b/drivers/input/rmi4/Kconfig
@@ -41,7 +41,8 @@ config RMI4_SMB
config RMI4_F03
bool "RMI4 Function 03 (PS2 Guest)"
- depends on RMI4_CORE && SERIO
+ depends on RMI4_CORE
+ depends on SERIO=y || RMI4_CORE=SERIO
help
Say Y here if you want to add support for RMI4 function 03.
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index 77551f5..a761877 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -211,6 +211,12 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"),
},
},
+ {
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "PEGATRON CORPORATION"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "C15B"),
+ },
+ },
{ }
};
diff --git a/drivers/input/touchscreen/elants_i2c.c b/drivers/input/touchscreen/elants_i2c.c
index 02aec28..3e6003d 100644
--- a/drivers/input/touchscreen/elants_i2c.c
+++ b/drivers/input/touchscreen/elants_i2c.c
@@ -914,9 +914,9 @@ static irqreturn_t elants_i2c_irq(int irq, void *_dev)
case QUEUE_HEADER_NORMAL:
report_count = ts->buf[FW_HDR_COUNT];
- if (report_count > 3) {
+ if (report_count == 0 || report_count > 3) {
dev_err(&client->dev,
- "too large report count: %*ph\n",
+ "bad report count: %*ph\n",
HEADER_SIZE, ts->buf);
break;
}
diff --git a/drivers/isdn/hardware/eicon/message.c b/drivers/isdn/hardware/eicon/message.c
index 1a1d997..3b11422 100644
--- a/drivers/isdn/hardware/eicon/message.c
+++ b/drivers/isdn/hardware/eicon/message.c
@@ -147,7 +147,7 @@ static word plci_remove_check(PLCI *);
static void listen_check(DIVA_CAPI_ADAPTER *);
static byte AddInfo(byte **, byte **, byte *, byte *);
static byte getChannel(API_PARSE *);
-static void IndParse(PLCI *, word *, byte **, byte);
+static void IndParse(PLCI *, const word *, byte **, byte);
static byte ie_compare(byte *, byte *);
static word find_cip(DIVA_CAPI_ADAPTER *, byte *, byte *);
static word CPN_filter_ok(byte *cpn, DIVA_CAPI_ADAPTER *, word);
@@ -4858,7 +4858,7 @@ static void sig_ind(PLCI *plci)
/* included before the ESC_MSGTYPE and MAXPARMSIDS has to be incremented */
/* SMSG is situated at the end because its 0 (for compatibility reasons */
/* (see Info_Mask Bit 4, first IE. then the message type) */
- word parms_id[] =
+ static const word parms_id[] =
{MAXPARMSIDS, CPN, 0xff, DSA, OSA, BC, LLC, HLC, ESC_CAUSE, DSP, DT, CHA,
UUI, CONG_RR, CONG_RNR, ESC_CHI, KEY, CHI, CAU, ESC_LAW,
RDN, RDX, CONN_NR, RIN, NI, CAI, ESC_CR,
@@ -4866,12 +4866,12 @@ static void sig_ind(PLCI *plci)
/* 14 FTY repl by ESC_CHI */
/* 18 PI repl by ESC_LAW */
/* removed OAD changed to 0xff for future use, OAD is multiIE now */
- word multi_fac_id[] = {1, FTY};
- word multi_pi_id[] = {1, PI};
- word multi_CiPN_id[] = {1, OAD};
- word multi_ssext_id[] = {1, ESC_SSEXT};
+ static const word multi_fac_id[] = {1, FTY};
+ static const word multi_pi_id[] = {1, PI};
+ static const word multi_CiPN_id[] = {1, OAD};
+ static const word multi_ssext_id[] = {1, ESC_SSEXT};
- word multi_vswitch_id[] = {1, ESC_VSWITCH};
+ static const word multi_vswitch_id[] = {1, ESC_VSWITCH};
byte *cau;
word ncci;
@@ -8924,7 +8924,7 @@ static void listen_check(DIVA_CAPI_ADAPTER *a)
/* functions for all parameters sent in INDs */
/*------------------------------------------------------------------*/
-static void IndParse(PLCI *plci, word *parms_id, byte **parms, byte multiIEsize)
+static void IndParse(PLCI *plci, const word *parms_id, byte **parms, byte multiIEsize)
{
word ploc; /* points to current location within packet */
byte w;
@@ -11297,7 +11297,8 @@ static void mixer_notify_update(PLCI *plci, byte others)
((CAPI_MSG *) msg)->header.ncci = 0;
((CAPI_MSG *) msg)->info.facility_req.Selector = SELECTOR_LINE_INTERCONNECT;
((CAPI_MSG *) msg)->info.facility_req.structs[0] = 3;
- PUT_WORD(&(((CAPI_MSG *) msg)->info.facility_req.structs[1]), LI_REQ_SILENT_UPDATE);
+ ((CAPI_MSG *) msg)->info.facility_req.structs[1] = LI_REQ_SILENT_UPDATE & 0xff;
+ ((CAPI_MSG *) msg)->info.facility_req.structs[2] = LI_REQ_SILENT_UPDATE >> 8;
((CAPI_MSG *) msg)->info.facility_req.structs[3] = 0;
w = api_put(notify_plci->appl, (CAPI_MSG *) msg);
if (w != _QUEUE_FULL)
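Two changes run through the message.c hunks: the parameter-ID lookup tables become static const, so they live in read-only data instead of being rebuilt on the stack on every sig_ind() call, and the PUT_WORD() store is replaced by two explicit byte writes, which keeps the 16-bit LI_REQ_SILENT_UPDATE value in little-endian byte order without treating the byte-sized structs[] array as a wider object. A minimal version of the byte-wise store (helper name invented):

/* Endian-explicit 16-bit store into a byte stream. */
#include <linux/types.h>

static void put_word_le(u8 *p, u16 v)
{
	p[0] = v & 0xff;	/* low byte first, matching the hunk above */
	p[1] = v >> 8;
}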
diff --git a/drivers/md/md.h b/drivers/md/md.h
index e38936d..2a51403 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -212,6 +212,7 @@ extern int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
int is_new);
struct md_cluster_info;
+/* change UNSUPPORTED_MDDEV_FLAGS for each array type if new flag is added */
enum mddev_flags {
MD_ARRAY_FIRST_USE, /* First use of array, needs initialization */
MD_CLOSING, /* If set, we are closing the array, do not open
@@ -702,4 +703,11 @@ static inline int mddev_is_clustered(struct mddev *mddev)
{
return mddev->cluster_info && mddev->bitmap_info.nodes > 1;
}
+
+/* clear unsupported mddev_flags */
+static inline void mddev_clear_unsupported_flags(struct mddev *mddev,
+ unsigned long unsupported_flags)
+{
+ mddev->flags &= ~unsupported_flags;
+}
#endif /* _MD_MD_H */
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a162fed..848365d 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -26,6 +26,11 @@
#include "raid0.h"
#include "raid5.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN) | \
+ (1L << MD_FAILFAST_SUPPORTED))
+
static int raid0_congested(struct mddev *mddev, int bits)
{
struct r0conf *conf = mddev->private;
@@ -539,8 +544,7 @@ static void *raid0_takeover_raid45(struct mddev *mddev)
mddev->delta_disks = -1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
@@ -583,7 +587,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
mddev->degraded = 0;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
@@ -626,7 +630,7 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
mddev->raid_disks = 1;
/* make sure it will be not marked as dirty */
mddev->recovery_cp = MaxSector;
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev, UNSUPPORTED_MDDEV_FLAGS);
create_strip_zones(mddev, &priv_conf);
return priv_conf;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a1f3fbe..7b0f647 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -42,6 +42,10 @@
#include "raid1.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS \
+ ((1L << MD_HAS_JOURNAL) | \
+ (1L << MD_JOURNAL_CLEAN))
+
/*
* Number of guaranteed r1bios in case of extreme VM load:
*/
@@ -1066,17 +1070,107 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void raid1_make_request(struct mddev *mddev, struct bio * bio)
+static void raid1_read_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
{
struct r1conf *conf = mddev->private;
struct raid1_info *mirror;
- struct r1bio *r1_bio;
struct bio *read_bio;
+ struct bitmap *bitmap = mddev->bitmap;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ int rdisk;
+
+ wait_barrier(conf, bio);
+
+read_again:
+ rdisk = read_balance(conf, r1_bio, &max_sectors);
+
+ if (rdisk < 0) {
+ /* couldn't find anywhere to read from */
+ raid_end_bio_io(r1_bio);
+ return;
+ }
+ mirror = conf->mirrors + rdisk;
+
+ if (test_bit(WriteMostly, &mirror->rdev->flags) &&
+ bitmap) {
+ /*
+ * Reading from a write-mostly device must take care not to
+ * over-take any writes that are 'behind'
+ */
+ raid1_log(mddev, "wait behind writes");
+ wait_event(bitmap->behind_wait,
+ atomic_read(&bitmap->behind_writes) == 0);
+ }
+ r1_bio->read_disk = rdisk;
+ r1_bio->start_next_window = 0;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r1_bio->bios[rdisk] = read_bio;
+
+ read_bio->bi_iter.bi_sector = r1_bio->sector +
+ mirror->rdev->data_offset;
+ read_bio->bi_bdev = mirror->rdev->bdev;
+ read_bio->bi_end_io = raid1_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &mirror->rdev->flags) &&
+ test_bit(R1BIO_FailFast, &r1_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r1_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r1_bio->sector);
+
+ if (max_sectors < r1_bio->sectors) {
+ /*
+ * could not read all from this device, so we will need another
+ * r1_bio.
+ */
+ sectors_handled = (r1_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r1_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __make_request and subsequent mempool_alloc might
+ * block waiting for it. So hand bio over to raid1d.
+ */
+ reschedule_retry(r1_bio);
+
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+}
+
+static void raid1_write_request(struct mddev *mddev, struct bio *bio,
+ struct r1bio *r1_bio)
+{
+ struct r1conf *conf = mddev->private;
int i, disks;
- struct bitmap *bitmap;
+ struct bitmap *bitmap = mddev->bitmap;
unsigned long flags;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_flush_fua = (bio->bi_opf &
(REQ_PREFLUSH | REQ_FUA));
@@ -1096,15 +1190,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
md_write_start(mddev, bio); /* wait on superblock update early */
- if (bio_data_dir(bio) == WRITE &&
- ((bio_end_sector(bio) > mddev->suspend_lo &&
+ if ((bio_end_sector(bio) > mddev->suspend_lo &&
bio->bi_iter.bi_sector < mddev->suspend_hi) ||
(mddev_is_clustered(mddev) &&
md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
- /* As the suspend_* range is controlled by
- * userspace, we want an interruptible
- * wait.
+ bio->bi_iter.bi_sector, bio_end_sector(bio)))) {
+
+ /*
+ * As the suspend_* range is controlled by userspace, we want
+ * an interruptible wait.
*/
DEFINE_WAIT(w);
for (;;) {
@@ -1115,128 +1209,15 @@ static void raid1_make_request(struct mddev *mddev, struct bio * bio)
bio->bi_iter.bi_sector >= mddev->suspend_hi ||
(mddev_is_clustered(mddev) &&
!md_cluster_ops->area_resyncing(mddev, WRITE,
- bio->bi_iter.bi_sector, bio_end_sector(bio))))
+ bio->bi_iter.bi_sector,
+ bio_end_sector(bio))))
break;
schedule();
}
finish_wait(&conf->wait_barrier, &w);
}
-
start_next_window = wait_barrier(conf, bio);
- bitmap = mddev->bitmap;
-
- /*
- * make_request() can abort the operation when read-ahead is being
- * used and no empty request is available.
- *
- */
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio);
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r1_bio and no locking
- * will be needed when requests complete. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- int rdisk;
-
-read_again:
- rdisk = read_balance(conf, r1_bio, &max_sectors);
-
- if (rdisk < 0) {
- /* couldn't find anywhere to read from */
- raid_end_bio_io(r1_bio);
- return;
- }
- mirror = conf->mirrors + rdisk;
-
- if (test_bit(WriteMostly, &mirror->rdev->flags) &&
- bitmap) {
- /* Reading from a write-mostly device must
- * take care not to over-take any writes
- * that are 'behind'
- */
- raid1_log(mddev, "wait behind writes");
- wait_event(bitmap->behind_wait,
- atomic_read(&bitmap->behind_writes) == 0);
- }
- r1_bio->read_disk = rdisk;
- r1_bio->start_next_window = 0;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r1_bio->bios[rdisk] = read_bio;
-
- read_bio->bi_iter.bi_sector = r1_bio->sector +
- mirror->rdev->data_offset;
- read_bio->bi_bdev = mirror->rdev->bdev;
- read_bio->bi_end_io = raid1_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &mirror->rdev->flags) &&
- test_bit(R1BIO_FailFast, &r1_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r1_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r1_bio->sector);
-
- if (max_sectors < r1_bio->sectors) {
- /* could not read all from this device, so we will
- * need another r1_bio.
- */
-
- sectors_handled = (r1_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r1_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __make_request
- * and subsequent mempool_alloc might block waiting
- * for it. So hand bio over to raid1d.
- */
- reschedule_retry(r1_bio);
-
- r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
-
- r1_bio->master_bio = bio;
- r1_bio->sectors = bio_sectors(bio) - sectors_handled;
- r1_bio->state = 0;
- r1_bio->mddev = mddev;
- r1_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid1_log(mddev, "wait queued");
@@ -1280,8 +1261,7 @@ read_again:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, r1_bio->sector,
- max_sectors,
+ is_bad = is_badblock(rdev, r1_bio->sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* mustn't write here until the bad block is
@@ -1370,7 +1350,8 @@ read_again:
continue;
mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
+ bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
if (first_clone) {
/* do behind I/O ?
@@ -1464,6 +1445,40 @@ read_again:
wake_up(&conf->wait_barrier);
}
+static void raid1_make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r1conf *conf = mddev->private;
+ struct r1bio *r1_bio;
+
+ /*
+ * make_request() can abort the operation when read-ahead is being
+ * used and no empty request is available.
+ *
+ */
+ r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
+
+ r1_bio->master_bio = bio;
+ r1_bio->sectors = bio_sectors(bio);
+ r1_bio->state = 0;
+ r1_bio->mddev = mddev;
+ r1_bio->sector = bio->bi_iter.bi_sector;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r1_bio and
+ * no locking will be needed when requests complete. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid1_read_request(mddev, bio, r1_bio);
+ else
+ raid1_write_request(mddev, bio, r1_bio);
+}
+
static void raid1_status(struct seq_file *seq, struct mddev *mddev)
{
struct r1conf *conf = mddev->private;
@@ -3246,8 +3261,8 @@ static void *raid1_takeover(struct mddev *mddev)
if (!IS_ERR(conf)) {
/* Array must appear to be quiesced */
conf->array_frozen = 1;
- clear_bit(MD_HAS_JOURNAL, &mddev->flags);
- clear_bit(MD_JOURNAL_CLEAN, &mddev->flags);
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
}
return conf;
}
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index ab5e862..1920756 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1087,23 +1087,122 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
kfree(plug);
}
-static void __make_request(struct mddev *mddev, struct bio *bio)
+static void raid10_read_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
{
struct r10conf *conf = mddev->private;
- struct r10bio *r10_bio;
struct bio *read_bio;
+ const int op = bio_op(bio);
+ const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
+ int sectors_handled;
+ int max_sectors;
+ sector_t sectors;
+ struct md_rdev *rdev;
+ int slot;
+
+ /*
+ * Register the new request and wait if the reconstruction
+ * thread has put up a bar for new requests.
+ * Continue immediately if no resync is active currently.
+ */
+ wait_barrier(conf);
+
+ sectors = bio_sectors(bio);
+ while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
+ bio->bi_iter.bi_sector < conf->reshape_progress &&
+ bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
+ */
+ raid10_log(conf->mddev, "wait reshape");
+ allow_barrier(conf);
+ wait_event(conf->wait_barrier,
+ conf->reshape_progress <= bio->bi_iter.bi_sector ||
+ conf->reshape_progress >= bio->bi_iter.bi_sector +
+ sectors);
+ wait_barrier(conf);
+ }
+
+read_again:
+ rdev = read_balance(conf, r10_bio, &max_sectors);
+ if (!rdev) {
+ raid_end_bio_io(r10_bio);
+ return;
+ }
+ slot = r10_bio->read_slot;
+
+ read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
+ bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
+ max_sectors);
+
+ r10_bio->devs[slot].bio = read_bio;
+ r10_bio->devs[slot].rdev = rdev;
+
+ read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
+ choose_data_offset(r10_bio, rdev);
+ read_bio->bi_bdev = rdev->bdev;
+ read_bio->bi_end_io = raid10_end_read_request;
+ bio_set_op_attrs(read_bio, op, do_sync);
+ if (test_bit(FailFast, &rdev->flags) &&
+ test_bit(R10BIO_FailFast, &r10_bio->state))
+ read_bio->bi_opf |= MD_FAILFAST;
+ read_bio->bi_private = r10_bio;
+
+ if (mddev->gendisk)
+ trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
+ read_bio, disk_devt(mddev->gendisk),
+ r10_bio->sector);
+ if (max_sectors < r10_bio->sectors) {
+ /*
+ * Could not read all from this device, so we will need another
+ * r10_bio.
+ */
+ sectors_handled = (r10_bio->sector + max_sectors
+ - bio->bi_iter.bi_sector);
+ r10_bio->sectors = max_sectors;
+ spin_lock_irq(&conf->device_lock);
+ if (bio->bi_phys_segments == 0)
+ bio->bi_phys_segments = 2;
+ else
+ bio->bi_phys_segments++;
+ spin_unlock_irq(&conf->device_lock);
+ /*
+ * Cannot call generic_make_request directly as that will be
+ * queued in __generic_make_request and subsequent
+ * mempool_alloc might block waiting for it. So hand bio over
+ * to raid10d.
+ */
+ reschedule_retry(r10_bio);
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio) - sectors_handled;
+ r10_bio->state = 0;
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
+ goto read_again;
+ } else
+ generic_make_request(read_bio);
+ return;
+}
+
+static void raid10_write_request(struct mddev *mddev, struct bio *bio,
+ struct r10bio *r10_bio)
+{
+ struct r10conf *conf = mddev->private;
int i;
const int op = bio_op(bio);
- const int rw = bio_data_dir(bio);
const unsigned long do_sync = (bio->bi_opf & REQ_SYNC);
const unsigned long do_fua = (bio->bi_opf & REQ_FUA);
unsigned long flags;
struct md_rdev *blocked_rdev;
struct blk_plug_cb *cb;
struct raid10_plug_cb *plug = NULL;
+ sector_t sectors;
int sectors_handled;
int max_sectors;
- int sectors;
md_write_start(mddev, bio);
@@ -1118,8 +1217,9 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
bio->bi_iter.bi_sector < conf->reshape_progress &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
- /* IO spans the reshape position. Need to wait for
- * reshape to pass
+ /*
+ * IO spans the reshape position. Need to wait for reshape to
+ * pass
*/
raid10_log(conf->mddev, "wait reshape");
allow_barrier(conf);
@@ -1129,8 +1229,8 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
sectors);
wait_barrier(conf);
}
+
if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
- bio_data_dir(bio) == WRITE &&
(mddev->reshape_backwards
? (bio->bi_iter.bi_sector < conf->reshape_safe &&
bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
@@ -1148,98 +1248,6 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
conf->reshape_safe = mddev->reshape_position;
}
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = sectors;
-
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector;
- r10_bio->state = 0;
-
- /* We might need to issue multiple reads to different
- * devices if there are bad blocks around, so we keep
- * track of the number of reads in bio->bi_phys_segments.
- * If this is 0, there is only one r10_bio and no locking
- * will be needed when the request completes. If it is
- * non-zero, then it is the number of not-completed requests.
- */
- bio->bi_phys_segments = 0;
- bio_clear_flag(bio, BIO_SEG_VALID);
-
- if (rw == READ) {
- /*
- * read balancing logic:
- */
- struct md_rdev *rdev;
- int slot;
-
-read_again:
- rdev = read_balance(conf, r10_bio, &max_sectors);
- if (!rdev) {
- raid_end_bio_io(r10_bio);
- return;
- }
- slot = r10_bio->read_slot;
-
- read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
- bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
- max_sectors);
-
- r10_bio->devs[slot].bio = read_bio;
- r10_bio->devs[slot].rdev = rdev;
-
- read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
- choose_data_offset(r10_bio, rdev);
- read_bio->bi_bdev = rdev->bdev;
- read_bio->bi_end_io = raid10_end_read_request;
- bio_set_op_attrs(read_bio, op, do_sync);
- if (test_bit(FailFast, &rdev->flags) &&
- test_bit(R10BIO_FailFast, &r10_bio->state))
- read_bio->bi_opf |= MD_FAILFAST;
- read_bio->bi_private = r10_bio;
-
- if (mddev->gendisk)
- trace_block_bio_remap(bdev_get_queue(read_bio->bi_bdev),
- read_bio, disk_devt(mddev->gendisk),
- r10_bio->sector);
- if (max_sectors < r10_bio->sectors) {
- /* Could not read all from this device, so we will
- * need another r10_bio.
- */
- sectors_handled = (r10_bio->sector + max_sectors
- - bio->bi_iter.bi_sector);
- r10_bio->sectors = max_sectors;
- spin_lock_irq(&conf->device_lock);
- if (bio->bi_phys_segments == 0)
- bio->bi_phys_segments = 2;
- else
- bio->bi_phys_segments++;
- spin_unlock_irq(&conf->device_lock);
- /* Cannot call generic_make_request directly
- * as that will be queued in __generic_make_request
- * and subsequent mempool_alloc might block
- * waiting for it. so hand bio over to raid10d.
- */
- reschedule_retry(r10_bio);
-
- r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
-
- r10_bio->master_bio = bio;
- r10_bio->sectors = bio_sectors(bio) - sectors_handled;
- r10_bio->state = 0;
- r10_bio->mddev = mddev;
- r10_bio->sector = bio->bi_iter.bi_sector +
- sectors_handled;
- goto read_again;
- } else
- generic_make_request(read_bio);
- return;
- }
-
- /*
- * WRITE:
- */
if (conf->pending_count >= max_queued_requests) {
md_wakeup_thread(mddev->thread);
raid10_log(mddev, "wait queued");
@@ -1300,8 +1308,7 @@ retry_write:
int bad_sectors;
int is_bad;
- is_bad = is_badblock(rdev, dev_sector,
- max_sectors,
+ is_bad = is_badblock(rdev, dev_sector, max_sectors,
&first_bad, &bad_sectors);
if (is_bad < 0) {
/* Mustn't write here until the bad block
@@ -1405,8 +1412,7 @@ retry_write:
r10_bio->devs[i].bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
- choose_data_offset(r10_bio,
- rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1457,8 +1463,7 @@ retry_write:
r10_bio->devs[i].repl_bio = mbio;
mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
- choose_data_offset(
- r10_bio, rdev));
+ choose_data_offset(r10_bio, rdev));
mbio->bi_bdev = rdev->bdev;
mbio->bi_end_io = raid10_end_write_request;
bio_set_op_attrs(mbio, op, do_sync | do_fua);
@@ -1503,6 +1508,36 @@ retry_write:
one_write_done(r10_bio);
}
+static void __make_request(struct mddev *mddev, struct bio *bio)
+{
+ struct r10conf *conf = mddev->private;
+ struct r10bio *r10_bio;
+
+ r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
+
+ r10_bio->master_bio = bio;
+ r10_bio->sectors = bio_sectors(bio);
+
+ r10_bio->mddev = mddev;
+ r10_bio->sector = bio->bi_iter.bi_sector;
+ r10_bio->state = 0;
+
+ /*
+ * We might need to issue multiple reads to different devices if there
+ * are bad blocks around, so we keep track of the number of reads in
+ * bio->bi_phys_segments. If this is 0, there is only one r10_bio and
+ * no locking will be needed when the request completes. If it is
+ * non-zero, then it is the number of not-completed requests.
+ */
+ bio->bi_phys_segments = 0;
+ bio_clear_flag(bio, BIO_SEG_VALID);
+
+ if (bio_data_dir(bio) == READ)
+ raid10_read_request(mddev, bio, r10_bio);
+ else
+ raid10_write_request(mddev, bio, r10_bio);
+}
+
static void raid10_make_request(struct mddev *mddev, struct bio *bio)
{
struct r10conf *conf = mddev->private;
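Note on the refactor above: the bi_phys_segments convention (0 means a single r10_bio; otherwise it holds the number of not-yet-completed sub-requests, seeded to 2 on the first split) is the subtle part. A minimal userspace model of just that counter, with the device_lock and the struct name simplified as assumptions:

	#include <assert.h>

	struct bio_model { int phys_segments; };

	/* Called each time a request must be split into one more r10_bio. */
	static void account_extra_request(struct bio_model *bio)
	{
		if (bio->phys_segments == 0)
			bio->phys_segments = 2;	/* first split: original + one more */
		else
			bio->phys_segments++;	/* each further split adds one */
	}

	int main(void)
	{
		struct bio_model bio = { 0 };

		account_extra_request(&bio);	/* read split at a bad block */
		assert(bio.phys_segments == 2);
		account_extra_request(&bio);	/* split again */
		assert(bio.phys_segments == 3);
		return 0;
	}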
diff --git a/drivers/md/raid5-cache.c b/drivers/md/raid5-cache.c
index d7bfb6f..0e8ed2c 100644
--- a/drivers/md/raid5-cache.c
+++ b/drivers/md/raid5-cache.c
@@ -1682,8 +1682,7 @@ out:
static struct stripe_head *
r5c_recovery_alloc_stripe(struct r5conf *conf,
- sector_t stripe_sect,
- sector_t log_start)
+ sector_t stripe_sect)
{
struct stripe_head *sh;
@@ -1692,7 +1691,6 @@ r5c_recovery_alloc_stripe(struct r5conf *conf,
return NULL; /* no more stripe available */
r5l_recovery_reset_stripe(sh);
- sh->log_start = log_start;
return sh;
}
@@ -1862,7 +1860,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
stripe_sect);
if (!sh) {
- sh = r5c_recovery_alloc_stripe(conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf, stripe_sect);
/*
* cannot get stripe from raid5_get_active_stripe
* try replay some stripes
@@ -1871,7 +1869,7 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
r5c_recovery_replay_stripes(
cached_stripe_list, ctx);
sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ conf, stripe_sect);
}
if (!sh) {
pr_debug("md/raid:%s: Increasing stripe cache size to %d to recovery data on journal.\n",
@@ -1879,8 +1877,8 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
conf->min_nr_stripes * 2);
raid5_set_cache_size(mddev,
conf->min_nr_stripes * 2);
- sh = r5c_recovery_alloc_stripe(
- conf, stripe_sect, ctx->pos);
+ sh = r5c_recovery_alloc_stripe(conf,
+ stripe_sect);
}
if (!sh) {
pr_err("md/raid:%s: Cannot get enough stripes due to memory pressure. Recovery failed.\n",
@@ -1894,7 +1892,6 @@ r5c_recovery_analyze_meta_block(struct r5l_log *log,
if (!test_bit(STRIPE_R5C_CACHING, &sh->state) &&
test_bit(R5_Wantwrite, &sh->dev[sh->pd_idx].flags)) {
r5l_recovery_replay_one_stripe(conf, sh, ctx);
- sh->log_start = ctx->pos;
list_move_tail(&sh->lru, cached_stripe_list);
}
r5l_recovery_load_data(log, sh, ctx, payload,
@@ -1933,8 +1930,6 @@ static void r5c_recovery_load_one_stripe(struct r5l_log *log,
set_bit(R5_UPTODATE, &dev->flags);
}
}
- list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
- atomic_inc(&log->stripe_in_journal_count);
}
/*
@@ -2070,6 +2065,7 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
struct stripe_head *sh, *next;
struct mddev *mddev = log->rdev->mddev;
struct page *page;
+ sector_t next_checkpoint = MaxSector;
page = alloc_page(GFP_KERNEL);
if (!page) {
@@ -2078,6 +2074,8 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
return -ENOMEM;
}
+ WARN_ON(list_empty(&ctx->cached_list));
+
list_for_each_entry_safe(sh, next, &ctx->cached_list, lru) {
struct r5l_meta_block *mb;
int i;
@@ -2123,12 +2121,15 @@ r5c_recovery_rewrite_data_only_stripes(struct r5l_log *log,
sync_page_io(log->rdev, ctx->pos, PAGE_SIZE, page,
REQ_OP_WRITE, REQ_FUA, false);
sh->log_start = ctx->pos;
+ list_add_tail(&sh->r5c, &log->stripe_in_journal_list);
+ atomic_inc(&log->stripe_in_journal_count);
ctx->pos = write_pos;
ctx->seq += 1;
-
+ next_checkpoint = sh->log_start;
list_del_init(&sh->lru);
raid5_release_stripe(sh);
}
+ log->next_checkpoint = next_checkpoint;
__free_page(page);
return 0;
}
@@ -2139,7 +2140,6 @@ static int r5l_recovery_log(struct r5l_log *log)
struct r5l_recovery_ctx ctx;
int ret;
sector_t pos;
- struct stripe_head *sh;
ctx.pos = log->last_checkpoint;
ctx.seq = log->last_cp_seq;
@@ -2164,16 +2164,13 @@ static int r5l_recovery_log(struct r5l_log *log)
log->next_checkpoint = ctx.pos;
r5l_log_write_empty_meta_block(log, ctx.pos, ctx.seq++);
ctx.pos = r5l_ring_add(log, ctx.pos, BLOCK_SECTORS);
- } else {
- sh = list_last_entry(&ctx.cached_list, struct stripe_head, lru);
- log->next_checkpoint = sh->log_start;
}
if ((ctx.data_only_stripes == 0) && (ctx.data_parity_stripes == 0))
pr_debug("md/raid:%s: starting from clean shutdown\n",
mdname(mddev));
else {
- pr_debug("md/raid:%s: recoverying %d data-only stripes and %d data-parity stripes\n",
+ pr_debug("md/raid:%s: recovering %d data-only stripes and %d data-parity stripes\n",
mdname(mddev), ctx.data_only_stripes,
ctx.data_parity_stripes);
@@ -2418,9 +2415,6 @@ void r5c_finish_stripe_write_out(struct r5conf *conf,
if (do_wakeup)
wake_up(&conf->wait_for_overlap);
- if (conf->log->r5c_journal_mode == R5C_JOURNAL_MODE_WRITE_THROUGH)
- return;
-
spin_lock_irq(&conf->log->stripe_in_journal_lock);
list_del_init(&sh->r5c);
spin_unlock_irq(&conf->log->stripe_in_journal_lock);
@@ -2639,14 +2633,16 @@ int r5l_init_log(struct r5conf *conf, struct md_rdev *rdev)
spin_lock_init(&log->stripe_in_journal_lock);
atomic_set(&log->stripe_in_journal_count, 0);
+ rcu_assign_pointer(conf->log, log);
+
if (r5l_load_log(log))
goto error;
- rcu_assign_pointer(conf->log, log);
set_bit(MD_HAS_JOURNAL, &conf->mddev->flags);
return 0;
error:
+ rcu_assign_pointer(conf->log, NULL);
md_unregister_thread(&log->reclaim_thread);
reclaim_thread:
mempool_destroy(log->meta_pool);
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 06d7279..36c13e4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -62,6 +62,8 @@
#include "raid0.h"
#include "bitmap.h"
+#define UNSUPPORTED_MDDEV_FLAGS (1L << MD_FAILFAST_SUPPORTED)
+
#define cpu_to_group(cpu) cpu_to_node(cpu)
#define ANY_GROUP NUMA_NO_NODE
@@ -7829,8 +7831,9 @@ static void *raid5_takeover_raid1(struct mddev *mddev)
mddev->new_chunk_sectors = chunksect;
ret = setup_conf(mddev);
- if (!IS_ERR_VALUE(ret))
- clear_bit(MD_FAILFAST_SUPPORTED, &mddev->flags);
+ if (!IS_ERR(ret))
+ mddev_clear_unsupported_flags(mddev,
+ UNSUPPORTED_MDDEV_FLAGS);
return ret;
}
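Both takeover paths (raid1.c earlier and raid5.c here) now funnel through mddev_clear_unsupported_flags(). Its body is not shown in this diff; judging from the call sites it is presumably a plain mask-clear helper, sketched standalone below (the bit value and struct layout are stand-ins):

	#include <assert.h>

	#define MD_FAILFAST_SUPPORTED	10	/* bit number; stand-in value */
	#define UNSUPPORTED_MDDEV_FLAGS	(1L << MD_FAILFAST_SUPPORTED)

	struct mddev { unsigned long flags; };

	static void mddev_clear_unsupported_flags(struct mddev *mddev,
						  unsigned long unsupported)
	{
		mddev->flags &= ~unsupported;	/* drop every unsupported bit */
	}

	int main(void)
	{
		struct mddev md = { .flags = UNSUPPORTED_MDDEV_FLAGS | 0x3UL };

		mddev_clear_unsupported_flags(&md, UNSUPPORTED_MDDEV_FLAGS);
		assert(md.flags == 0x3UL);	/* other flags survive */
		return 0;
	}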
diff --git a/drivers/media/cec/cec-adap.c b/drivers/media/cec/cec-adap.c
index 0ea4efb..ebb5e391 100644
--- a/drivers/media/cec/cec-adap.c
+++ b/drivers/media/cec/cec-adap.c
@@ -30,8 +30,9 @@
#include "cec-priv.h"
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx);
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx);
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx);
/*
* 400 ms is the time it takes for one 16 byte message to be
@@ -288,10 +289,10 @@ static void cec_data_cancel(struct cec_data *data)
/* Mark it as an error */
data->msg.tx_ts = ktime_get_ns();
- data->msg.tx_status = CEC_TX_STATUS_ERROR |
- CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_status |= CEC_TX_STATUS_ERROR |
+ CEC_TX_STATUS_MAX_RETRIES;
+ data->msg.tx_error_cnt++;
data->attempts = 0;
- data->msg.tx_error_cnt = 1;
/* Queue transmitted message for monitoring purposes */
cec_queue_msg_monitor(data->adap, &data->msg, 1);
@@ -851,7 +852,7 @@ static const u8 cec_msg_size[256] = {
[CEC_MSG_REQUEST_ARC_TERMINATION] = 2 | DIRECTED,
[CEC_MSG_TERMINATE_ARC] = 2 | DIRECTED,
[CEC_MSG_REQUEST_CURRENT_LATENCY] = 4 | BCAST,
- [CEC_MSG_REPORT_CURRENT_LATENCY] = 7 | BCAST,
+ [CEC_MSG_REPORT_CURRENT_LATENCY] = 6 | BCAST,
[CEC_MSG_CDC_MESSAGE] = 2 | BCAST,
};
@@ -1250,30 +1251,49 @@ configured:
for (i = 1; i < las->num_log_addrs; i++)
las->log_addr[i] = CEC_LOG_ADDR_INVALID;
}
+ for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
+ las->log_addr[i] = CEC_LOG_ADDR_INVALID;
adap->is_configured = true;
adap->is_configuring = false;
cec_post_state_event(adap);
- mutex_unlock(&adap->lock);
+ /*
+ * Now post the Report Features and Report Physical Address broadcast
+ * messages. Note that these are non-blocking transmits, meaning that
+ * they are just queued up and once adap->lock is unlocked the main
+ * thread will kick in and start transmitting these.
+ *
+ * If after this function is done (but before one or more of these
+ * messages are actually transmitted) the CEC adapter is unconfigured,
+ * then any remaining messages will be dropped by the main thread.
+ */
for (i = 0; i < las->num_log_addrs; i++) {
+ struct cec_msg msg = {};
+
if (las->log_addr[i] == CEC_LOG_ADDR_INVALID ||
(las->flags & CEC_LOG_ADDRS_FL_CDC_ONLY))
continue;
- /*
- * Report Features must come first according
- * to CEC 2.0
- */
- if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED)
- cec_report_features(adap, i);
- cec_report_phys_addr(adap, i);
+ msg.msg[0] = (las->log_addr[i] << 4) | 0x0f;
+
+ /* Report Features must come first according to CEC 2.0 */
+ if (las->log_addr[i] != CEC_LOG_ADDR_UNREGISTERED &&
+ adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0) {
+ cec_fill_msg_report_features(adap, &msg, i);
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
+ }
+
+ /* Report Physical Address */
+ cec_msg_report_physical_addr(&msg, adap->phys_addr,
+ las->primary_device_type[i]);
+ dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
+ las->log_addr[i],
+ cec_phys_addr_exp(adap->phys_addr));
+ cec_transmit_msg_fh(adap, &msg, NULL, false);
}
- for (i = las->num_log_addrs; i < CEC_MAX_LOG_ADDRS; i++)
- las->log_addr[i] = CEC_LOG_ADDR_INVALID;
- mutex_lock(&adap->lock);
adap->kthread_config = NULL;
- mutex_unlock(&adap->lock);
complete(&adap->config_completion);
+ mutex_unlock(&adap->lock);
return 0;
unconfigure:
@@ -1526,52 +1546,32 @@ EXPORT_SYMBOL_GPL(cec_s_log_addrs);
/* High-level core CEC message handling */
-/* Transmit the Report Features message */
-static int cec_report_features(struct cec_adapter *adap, unsigned int la_idx)
+/* Fill in the Report Features message */
+static void cec_fill_msg_report_features(struct cec_adapter *adap,
+ struct cec_msg *msg,
+ unsigned int la_idx)
{
- struct cec_msg msg = { };
const struct cec_log_addrs *las = &adap->log_addrs;
const u8 *features = las->features[la_idx];
bool op_is_dev_features = false;
unsigned int idx;
- /* This is 2.0 and up only */
- if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
- return 0;
-
/* Report Features */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- msg.len = 4;
- msg.msg[1] = CEC_MSG_REPORT_FEATURES;
- msg.msg[2] = adap->log_addrs.cec_version;
- msg.msg[3] = las->all_device_types[la_idx];
+ msg->msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
+ msg->len = 4;
+ msg->msg[1] = CEC_MSG_REPORT_FEATURES;
+ msg->msg[2] = adap->log_addrs.cec_version;
+ msg->msg[3] = las->all_device_types[la_idx];
/* Write RC Profiles first, then Device Features */
for (idx = 0; idx < ARRAY_SIZE(las->features[0]); idx++) {
- msg.msg[msg.len++] = features[idx];
+ msg->msg[msg->len++] = features[idx];
if ((features[idx] & CEC_OP_FEAT_EXT) == 0) {
if (op_is_dev_features)
break;
op_is_dev_features = true;
}
}
- return cec_transmit_msg(adap, &msg, false);
-}
-
-/* Transmit the Report Physical Address message */
-static int cec_report_phys_addr(struct cec_adapter *adap, unsigned int la_idx)
-{
- const struct cec_log_addrs *las = &adap->log_addrs;
- struct cec_msg msg = { };
-
- /* Report Physical Address */
- msg.msg[0] = (las->log_addr[la_idx] << 4) | 0x0f;
- cec_msg_report_physical_addr(&msg, adap->phys_addr,
- las->primary_device_type[la_idx]);
- dprintk(2, "config: la %d pa %x.%x.%x.%x\n",
- las->log_addr[la_idx],
- cec_phys_addr_exp(adap->phys_addr));
- return cec_transmit_msg(adap, &msg, false);
}
/* Transmit the Feature Abort message */
@@ -1777,9 +1777,10 @@ static int cec_receive_notify(struct cec_adapter *adap, struct cec_msg *msg,
}
case CEC_MSG_GIVE_FEATURES:
- if (adap->log_addrs.cec_version >= CEC_OP_CEC_VERSION_2_0)
- return cec_report_features(adap, la_idx);
- return 0;
+ if (adap->log_addrs.cec_version < CEC_OP_CEC_VERSION_2_0)
+ return cec_feature_abort(adap, msg);
+ cec_fill_msg_report_features(adap, &tx_cec_msg, la_idx);
+ return cec_transmit_msg(adap, &tx_cec_msg, false);
default:
/*
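Every message built in the cec-adap configuration loop above starts from the same header byte, (logical address << 4) | 0x0f: the initiator's logical address in the high nibble, the broadcast destination 0xF in the low nibble. A standalone check of that packing:

	#include <assert.h>
	#include <stdint.h>

	/* msg[0] = (initiator << 4) | 0x0f, as built in the hunks above */
	static uint8_t cec_bcast_header(uint8_t initiator_la)
	{
		return (uint8_t)((initiator_la << 4) | 0x0f);
	}

	int main(void)
	{
		assert(cec_bcast_header(0x4) == 0x4f);	/* LA 4 -> broadcast */
		assert(cec_bcast_header(0xf) == 0xff);	/* unregistered -> broadcast */
		return 0;
	}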
diff --git a/drivers/media/dvb-core/dvb_net.c b/drivers/media/dvb-core/dvb_net.c
index bc5e8cf..8f11d7e 100644
--- a/drivers/media/dvb-core/dvb_net.c
+++ b/drivers/media/dvb-core/dvb_net.c
@@ -719,6 +719,9 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
skb_copy_from_linear_data(h->priv->ule_skb, dest_addr,
ETH_ALEN);
skb_pull(h->priv->ule_skb, ETH_ALEN);
+ } else {
+ /* dest_addr buffer is only valid if h->priv->ule_dbit == 0 */
+ eth_zero_addr(dest_addr);
}
/* Handle ULE Extension Headers. */
@@ -750,16 +753,8 @@ static void dvb_net_ule_check_crc(struct dvb_net_ule_handle *h,
if (!h->priv->ule_bridged) {
skb_push(h->priv->ule_skb, ETH_HLEN);
h->ethh = (struct ethhdr *)h->priv->ule_skb->data;
- if (!h->priv->ule_dbit) {
- /*
- * dest_addr buffer is only valid if
- * h->priv->ule_dbit == 0
- */
- memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
- eth_zero_addr(h->ethh->h_source);
- } else /* zeroize source and dest */
- memset(h->ethh, 0, ETH_ALEN * 2);
-
+ memcpy(h->ethh->h_dest, dest_addr, ETH_ALEN);
+ eth_zero_addr(h->ethh->h_source);
h->ethh->h_proto = htons(h->priv->ule_sndu_type);
}
/* else: skb is in correct state; nothing to do. */
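The dvb_net change works by establishing an invariant rather than branching at the use site: dest_addr is now always initialized (copied from the SNDU when the D-bit is clear, zeroed otherwise), so the single memcpy() into the Ethernet header is defined in both cases. A compact userspace model of the invariant:

	#include <assert.h>
	#include <string.h>

	#define ETH_ALEN 6

	int main(void)
	{
		unsigned char dest_addr[ETH_ALEN];
		unsigned char h_dest[ETH_ALEN];
		const unsigned char sndu_addr[ETH_ALEN] = { 0x02, 0, 0, 0, 0, 1 };
		int ule_dbit = 1;	/* D-bit set: SNDU carries no address */

		if (!ule_dbit)
			memcpy(dest_addr, sndu_addr, ETH_ALEN);
		else
			memset(dest_addr, 0, ETH_ALEN);	/* eth_zero_addr() */

		/* later, unconditionally, as in the simplified hunk: */
		memcpy(h_dest, dest_addr, ETH_ALEN);
		assert(h_dest[0] == 0);
		return 0;
	}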
diff --git a/drivers/media/i2c/Kconfig b/drivers/media/i2c/Kconfig
index b31fa6f..b979ea1 100644
--- a/drivers/media/i2c/Kconfig
+++ b/drivers/media/i2c/Kconfig
@@ -655,6 +655,7 @@ config VIDEO_S5K6A3
config VIDEO_S5K4ECGX
tristate "Samsung S5K4ECGX sensor support"
depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
+ select CRC32
---help---
This is a V4L2 sensor-level driver for Samsung S5K4ECGX 5M
camera sensor with an embedded SoC image signal processor.
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 59872b3..f4e92bd 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2741,9 +2741,7 @@ static const struct v4l2_subdev_internal_ops smiapp_internal_ops = {
* I2C Driver
*/
-#ifdef CONFIG_PM
-
-static int smiapp_suspend(struct device *dev)
+static int __maybe_unused smiapp_suspend(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2768,7 +2766,7 @@ static int smiapp_suspend(struct device *dev)
return 0;
}
-static int smiapp_resume(struct device *dev)
+static int __maybe_unused smiapp_resume(struct device *dev)
{
struct i2c_client *client = to_i2c_client(dev);
struct v4l2_subdev *subdev = i2c_get_clientdata(client);
@@ -2783,13 +2781,6 @@ static int smiapp_resume(struct device *dev)
return rval;
}
-#else
-
-#define smiapp_suspend NULL
-#define smiapp_resume NULL
-
-#endif /* CONFIG_PM */
-
static struct smiapp_hwconfig *smiapp_get_hwconfig(struct device *dev)
{
struct smiapp_hwconfig *hwcfg;
@@ -2913,13 +2904,9 @@ static int smiapp_probe(struct i2c_client *client,
if (IS_ERR(sensor->xshutdown))
return PTR_ERR(sensor->xshutdown);
- pm_runtime_enable(&client->dev);
-
- rval = pm_runtime_get_sync(&client->dev);
- if (rval < 0) {
- rval = -ENODEV;
- goto out_power_off;
- }
+ rval = smiapp_power_on(&client->dev);
+ if (rval < 0)
+ return rval;
rval = smiapp_identify_module(sensor);
if (rval) {
@@ -3100,6 +3087,9 @@ static int smiapp_probe(struct i2c_client *client,
if (rval < 0)
goto out_media_entity_cleanup;
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_enable(&client->dev);
pm_runtime_set_autosuspend_delay(&client->dev, 1000);
pm_runtime_use_autosuspend(&client->dev);
pm_runtime_put_autosuspend(&client->dev);
@@ -3113,8 +3103,7 @@ out_cleanup:
smiapp_cleanup(sensor);
out_power_off:
- pm_runtime_put(&client->dev);
- pm_runtime_disable(&client->dev);
+ smiapp_power_off(&client->dev);
return rval;
}
@@ -3127,8 +3116,10 @@ static int smiapp_remove(struct i2c_client *client)
v4l2_async_unregister_subdev(subdev);
- pm_runtime_suspend(&client->dev);
pm_runtime_disable(&client->dev);
+ if (!pm_runtime_status_suspended(&client->dev))
+ smiapp_power_off(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
for (i = 0; i < sensor->ssds_used; i++) {
v4l2_device_unregister_subdev(&sensor->ssds[i].sd);
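Replacing the #ifdef CONFIG_PM block with __maybe_unused in the smiapp hunks above means the suspend/resume functions are always compiled (so they cannot silently bit-rot) while the attribute suppresses the unused-function warning on builds that never reference them. A minimal illustration, with the macro expanded roughly as in the kernel headers of this era:

	#include <stdio.h>

	#define __maybe_unused __attribute__((__unused__))

	/* Always compiled, but no -Wunused-function warning when a
	 * !CONFIG_PM-style build never references it. */
	static int __maybe_unused demo_suspend(void)
	{
		return 0;
	}

	int main(void)
	{
		puts("builds cleanly with -Wall whether or not demo_suspend is used");
		return 0;
	}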
diff --git a/drivers/media/i2c/tvp5150.c b/drivers/media/i2c/tvp5150.c
index 3a0fe8c..48646a7 100644
--- a/drivers/media/i2c/tvp5150.c
+++ b/drivers/media/i2c/tvp5150.c
@@ -291,8 +291,12 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
tvp5150_write(sd, TVP5150_OP_MODE_CTL, opmode);
tvp5150_write(sd, TVP5150_VD_IN_SRC_SEL_1, input);
- /* Svideo should enable YCrCb output and disable GPCL output
- * For Composite and TV, it should be the reverse
+ /*
+ * Setup the FID/GLCO/VLK/HVLK and INTREQ/GPCL/VBLK output signals. For
+ * S-Video we output the vertical lock (VLK) signal on FID/GLCO/VLK/HVLK
+ * and set INTREQ/GPCL/VBLK to logic 0. For composite we output the
+ * field indicator (FID) signal on FID/GLCO/VLK/HVLK and set
+ * INTREQ/GPCL/VBLK to logic 1.
*/
val = tvp5150_read(sd, TVP5150_MISC_CTL);
if (val < 0) {
@@ -301,9 +305,9 @@ static void tvp5150_selmux(struct v4l2_subdev *sd)
}
if (decoder->input == TVP5150_SVIDEO)
- val = (val & ~0x40) | 0x10;
+ val = (val & ~TVP5150_MISC_CTL_GPCL) | TVP5150_MISC_CTL_HVLK;
else
- val = (val & ~0x10) | 0x40;
+ val = (val & ~TVP5150_MISC_CTL_HVLK) | TVP5150_MISC_CTL_GPCL;
tvp5150_write(sd, TVP5150_MISC_CTL, val);
};
@@ -455,7 +459,12 @@ static const struct i2c_reg_value tvp5150_init_enable[] = {
},{ /* Automatic offset and AGC enabled */
TVP5150_ANAL_CHL_CTL, 0x15
},{ /* Activate YCrCb output 0x9 or 0xd ? */
- TVP5150_MISC_CTL, 0x6f
+ TVP5150_MISC_CTL, TVP5150_MISC_CTL_GPCL |
+ TVP5150_MISC_CTL_INTREQ_OE |
+ TVP5150_MISC_CTL_YCBCR_OE |
+ TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_VBLANK |
+ TVP5150_MISC_CTL_CLOCK_OE,
},{ /* Activates video std autodetection for all standards */
TVP5150_AUTOSW_MSK, 0x0
},{ /* Default format: 0x47. For 4:2:2: 0x40 */
@@ -861,8 +870,6 @@ static int tvp5150_fill_fmt(struct v4l2_subdev *sd,
f = &format->format;
- tvp5150_reset(sd, 0);
-
f->width = decoder->rect.width;
f->height = decoder->rect.height / 2;
@@ -1051,21 +1058,27 @@ static const struct media_entity_operations tvp5150_sd_media_ops = {
static int tvp5150_s_stream(struct v4l2_subdev *sd, int enable)
{
struct tvp5150 *decoder = to_tvp5150(sd);
- /* Output format: 8-bit ITU-R BT.656 with embedded syncs */
- int val = 0x09;
-
- /* Output format: 8-bit 4:2:2 YUV with discrete sync */
- if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
- val = 0x0d;
+ int val;
- /* Initializes TVP5150 to its default values */
- /* # set PCLK (27MHz) */
- tvp5150_write(sd, TVP5150_CONF_SHARED_PIN, 0x00);
+ /* Enable or disable the video output signals. */
+ val = tvp5150_read(sd, TVP5150_MISC_CTL);
+ if (val < 0)
+ return val;
+
+ val &= ~(TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_SYNC_OE |
+ TVP5150_MISC_CTL_CLOCK_OE);
+
+ if (enable) {
+ /*
+ * Enable the YCbCr and clock outputs. In discrete sync mode
+ * (non-BT.656) additionally enable the sync outputs.
+ */
+ val |= TVP5150_MISC_CTL_YCBCR_OE | TVP5150_MISC_CTL_CLOCK_OE;
+ if (decoder->mbus_type == V4L2_MBUS_PARALLEL)
+ val |= TVP5150_MISC_CTL_SYNC_OE;
+ }
- if (enable)
- tvp5150_write(sd, TVP5150_MISC_CTL, val);
- else
- tvp5150_write(sd, TVP5150_MISC_CTL, 0x00);
+ tvp5150_write(sd, TVP5150_MISC_CTL, val);
return 0;
}
@@ -1524,7 +1537,6 @@ static int tvp5150_probe(struct i2c_client *c,
res = core->hdl.error;
goto err;
}
- v4l2_ctrl_handler_setup(&core->hdl);
/* Default is no cropping */
core->rect.top = 0;
@@ -1535,6 +1547,8 @@ static int tvp5150_probe(struct i2c_client *c,
core->rect.left = 0;
core->rect.width = TVP5150_H_MAX;
+ tvp5150_reset(sd, 0); /* Calls v4l2_ctrl_handler_setup() */
+
res = v4l2_async_register_subdev(sd);
if (res < 0)
goto err;
diff --git a/drivers/media/i2c/tvp5150_reg.h b/drivers/media/i2c/tvp5150_reg.h
index 25a9949..30a48c2 100644
--- a/drivers/media/i2c/tvp5150_reg.h
+++ b/drivers/media/i2c/tvp5150_reg.h
@@ -9,6 +9,15 @@
#define TVP5150_ANAL_CHL_CTL 0x01 /* Analog channel controls */
#define TVP5150_OP_MODE_CTL 0x02 /* Operation mode controls */
#define TVP5150_MISC_CTL 0x03 /* Miscellaneous controls */
+#define TVP5150_MISC_CTL_VBLK_GPCL BIT(7)
+#define TVP5150_MISC_CTL_GPCL BIT(6)
+#define TVP5150_MISC_CTL_INTREQ_OE BIT(5)
+#define TVP5150_MISC_CTL_HVLK BIT(4)
+#define TVP5150_MISC_CTL_YCBCR_OE BIT(3)
+#define TVP5150_MISC_CTL_SYNC_OE BIT(2)
+#define TVP5150_MISC_CTL_VBLANK BIT(1)
+#define TVP5150_MISC_CTL_CLOCK_OE BIT(0)
+
#define TVP5150_AUTOSW_MSK 0x04 /* Autoswitch mask: TVP5150A / TVP5150AM */
/* Reserved 05h */
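The tvp5150 init-table hunk above replaces the magic MISC_CTL value 0x6f with named bits; the following standalone check confirms the named constants reproduce the old constant bit for bit:

	#include <assert.h>

	#define BIT(n) (1u << (n))

	#define TVP5150_MISC_CTL_GPCL		BIT(6)
	#define TVP5150_MISC_CTL_INTREQ_OE	BIT(5)
	#define TVP5150_MISC_CTL_YCBCR_OE	BIT(3)
	#define TVP5150_MISC_CTL_SYNC_OE	BIT(2)
	#define TVP5150_MISC_CTL_VBLANK		BIT(1)
	#define TVP5150_MISC_CTL_CLOCK_OE	BIT(0)

	int main(void)
	{
		unsigned int val = TVP5150_MISC_CTL_GPCL |
				   TVP5150_MISC_CTL_INTREQ_OE |
				   TVP5150_MISC_CTL_YCBCR_OE |
				   TVP5150_MISC_CTL_SYNC_OE |
				   TVP5150_MISC_CTL_VBLANK |
				   TVP5150_MISC_CTL_CLOCK_OE;

		assert(val == 0x6f);	/* the old magic constant, bit for bit */
		return 0;
	}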
diff --git a/drivers/media/pci/cobalt/cobalt-driver.c b/drivers/media/pci/cobalt/cobalt-driver.c
index 9796340..d5c911c 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.c
+++ b/drivers/media/pci/cobalt/cobalt-driver.c
@@ -308,9 +308,7 @@ static void cobalt_pci_iounmap(struct cobalt *cobalt, struct pci_dev *pci_dev)
static void cobalt_free_msi(struct cobalt *cobalt, struct pci_dev *pci_dev)
{
free_irq(pci_dev->irq, (void *)cobalt);
-
- if (cobalt->msi_enabled)
- pci_disable_msi(pci_dev);
+ pci_free_irq_vectors(pci_dev);
}
static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
@@ -387,14 +385,12 @@ static int cobalt_setup_pci(struct cobalt *cobalt, struct pci_dev *pci_dev,
from being generated. */
cobalt_set_interrupt(cobalt, false);
- if (pci_enable_msi_range(pci_dev, 1, 1) < 1) {
+ if (pci_alloc_irq_vectors(pci_dev, 1, 1, PCI_IRQ_MSI) < 1) {
cobalt_err("Could not enable MSI\n");
- cobalt->msi_enabled = false;
ret = -EIO;
goto err_release;
}
msi_config_show(cobalt, pci_dev);
- cobalt->msi_enabled = true;
/* Register IRQ */
if (request_irq(pci_dev->irq, cobalt_irq_handler, IRQF_SHARED,
diff --git a/drivers/media/pci/cobalt/cobalt-driver.h b/drivers/media/pci/cobalt/cobalt-driver.h
index ed00dc9..00f773e 100644
--- a/drivers/media/pci/cobalt/cobalt-driver.h
+++ b/drivers/media/pci/cobalt/cobalt-driver.h
@@ -287,8 +287,6 @@ struct cobalt {
u32 irq_none;
u32 irq_full_fifo;
- bool msi_enabled;
-
/* omnitek dma */
int dma_channels;
int first_fifo_channel;
diff --git a/drivers/media/usb/dvb-usb/pctv452e.c b/drivers/media/usb/dvb-usb/pctv452e.c
index 07fa08b..d54ebe7 100644
--- a/drivers/media/usb/dvb-usb/pctv452e.c
+++ b/drivers/media/usb/dvb-usb/pctv452e.c
@@ -97,14 +97,13 @@ struct pctv452e_state {
u8 c; /* transaction counter, wraps around... */
u8 initialized; /* set to 1 if 0x15 has been sent */
u16 last_rc_key;
-
- unsigned char data[80];
};
static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
unsigned int write_len, unsigned int read_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
unsigned int rlen;
int ret;
@@ -114,36 +113,39 @@ static int tt3650_ci_msg(struct dvb_usb_device *d, u8 cmd, u8 *data,
return -EIO;
}
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = cmd;
- state->data[3] = write_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = cmd;
+ buf[3] = write_len;
- memcpy(state->data + 4, data, write_len);
+ memcpy(buf + 4, data, write_len);
rlen = (read_len > 0) ? 64 : 0;
- ret = dvb_usb_generic_rw(d, state->data, 4 + write_len,
- state->data, rlen, /* delay_ms */ 0);
+ ret = dvb_usb_generic_rw(d, buf, 4 + write_len,
+ buf, rlen, /* delay_ms */ 0);
if (0 != ret)
goto failed;
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
- memcpy(data, state->data + 4, read_len);
+ memcpy(data, buf + 4, read_len);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return 0;
failed:
err("CI error %d; %02X %02X %02X -> %*ph.",
- ret, SYNC_BYTE_OUT, id, cmd, 3, state->data);
+ ret, SYNC_BYTE_OUT, id, cmd, 3, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -410,53 +412,57 @@ static int pctv452e_i2c_msg(struct dvb_usb_device *d, u8 addr,
u8 *rcv_buf, u8 rcv_len)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *buf;
u8 id;
int ret;
- mutex_lock(&state->ca_mutex);
+ buf = kmalloc(64, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
id = state->c++;
ret = -EINVAL;
if (snd_len > 64 - 7 || rcv_len > 64 - 7)
goto failed;
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_I2C;
- state->data[3] = snd_len + 3;
- state->data[4] = addr << 1;
- state->data[5] = snd_len;
- state->data[6] = rcv_len;
+ buf[0] = SYNC_BYTE_OUT;
+ buf[1] = id;
+ buf[2] = PCTV_CMD_I2C;
+ buf[3] = snd_len + 3;
+ buf[4] = addr << 1;
+ buf[5] = snd_len;
+ buf[6] = rcv_len;
- memcpy(state->data + 7, snd_buf, snd_len);
+ memcpy(buf + 7, snd_buf, snd_len);
- ret = dvb_usb_generic_rw(d, state->data, 7 + snd_len,
- state->data, /* rcv_len */ 64,
+ ret = dvb_usb_generic_rw(d, buf, 7 + snd_len,
+ buf, /* rcv_len */ 64,
/* delay_ms */ 0);
if (ret < 0)
goto failed;
/* TT USB protocol error. */
ret = -EIO;
- if (SYNC_BYTE_IN != state->data[0] || id != state->data[1])
+ if (SYNC_BYTE_IN != buf[0] || id != buf[1])
goto failed;
/* I2C device didn't respond as expected. */
ret = -EREMOTEIO;
- if (state->data[5] < snd_len || state->data[6] < rcv_len)
+ if (buf[5] < snd_len || buf[6] < rcv_len)
goto failed;
- memcpy(rcv_buf, state->data + 7, rcv_len);
- mutex_unlock(&state->ca_mutex);
+ memcpy(rcv_buf, buf + 7, rcv_len);
+ kfree(buf);
return rcv_len;
failed:
err("I2C error %d; %02X %02X %02X %02X %02X -> %*ph",
ret, SYNC_BYTE_OUT, id, addr << 1, snd_len, rcv_len,
- 7, state->data);
+ 7, buf);
- mutex_unlock(&state->ca_mutex);
+ kfree(buf);
return ret;
}
@@ -505,7 +511,7 @@ static u32 pctv452e_i2c_func(struct i2c_adapter *adapter)
static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
- u8 *rx;
+ u8 *b0, *rx;
int ret;
info("%s: %d\n", __func__, i);
@@ -516,11 +522,12 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
if (state->initialized)
return 0;
- rx = kmalloc(PCTV_ANSWER_LEN, GFP_KERNEL);
- if (!rx)
+ b0 = kmalloc(5 + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b0)
return -ENOMEM;
- mutex_lock(&state->ca_mutex);
+ rx = b0 + 5;
+
/* hmm, where should this go? */
ret = usb_set_interface(d->udev, 0, ISOC_INTERFACE_ALTERNATIVE);
if (ret != 0)
@@ -528,66 +535,70 @@ static int pctv452e_power_ctrl(struct dvb_usb_device *d, int i)
__func__, ret);
/* this is a one-time initialization, don't know where to put it */
- state->data[0] = 0xaa;
- state->data[1] = state->c++;
- state->data[2] = PCTV_CMD_RESET;
- state->data[3] = 1;
- state->data[4] = 0;
+ b0[0] = 0xaa;
+ b0[1] = state->c++;
+ b0[2] = PCTV_CMD_RESET;
+ b0[3] = 1;
+ b0[4] = 0;
/* reset board */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
- state->data[1] = state->c++;
- state->data[4] = 1;
+ b0[1] = state->c++;
+ b0[4] = 1;
/* reset board (again?) */
- ret = dvb_usb_generic_rw(d, state->data, 5, rx, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b0, 5, rx, PCTV_ANSWER_LEN, 0);
if (ret)
goto ret;
state->initialized = 1;
ret:
- mutex_unlock(&state->ca_mutex);
- kfree(rx);
+ kfree(b0);
return ret;
}
static int pctv452e_rc_query(struct dvb_usb_device *d)
{
struct pctv452e_state *state = (struct pctv452e_state *)d->priv;
+ u8 *b, *rx;
int ret, i;
u8 id;
- mutex_lock(&state->ca_mutex);
+ b = kmalloc(CMD_BUFFER_SIZE + PCTV_ANSWER_LEN, GFP_KERNEL);
+ if (!b)
+ return -ENOMEM;
+
+ rx = b + CMD_BUFFER_SIZE;
+
id = state->c++;
/* prepare command header */
- state->data[0] = SYNC_BYTE_OUT;
- state->data[1] = id;
- state->data[2] = PCTV_CMD_IR;
- state->data[3] = 0;
+ b[0] = SYNC_BYTE_OUT;
+ b[1] = id;
+ b[2] = PCTV_CMD_IR;
+ b[3] = 0;
/* send ir request */
- ret = dvb_usb_generic_rw(d, state->data, 4,
- state->data, PCTV_ANSWER_LEN, 0);
+ ret = dvb_usb_generic_rw(d, b, 4, rx, PCTV_ANSWER_LEN, 0);
if (ret != 0)
goto ret;
if (debug > 3) {
- info("%s: read: %2d: %*ph: ", __func__, ret, 3, state->data);
- for (i = 0; (i < state->data[3]) && ((i + 3) < PCTV_ANSWER_LEN); i++)
- info(" %02x", state->data[i + 3]);
+ info("%s: read: %2d: %*ph: ", __func__, ret, 3, rx);
+ for (i = 0; (i < rx[3]) && ((i+3) < PCTV_ANSWER_LEN); i++)
+ info(" %02x", rx[i+3]);
info("\n");
}
- if ((state->data[3] == 9) && (state->data[12] & 0x01)) {
+ if ((rx[3] == 9) && (rx[12] & 0x01)) {
/* got a "press" event */
- state->last_rc_key = RC_SCANCODE_RC5(state->data[7], state->data[6]);
+ state->last_rc_key = RC_SCANCODE_RC5(rx[7], rx[6]);
if (debug > 2)
info("%s: cmd=0x%02x sys=0x%02x\n",
- __func__, state->data[6], state->data[7]);
+ __func__, rx[6], rx[7]);
rc_keydown(d->rc_dev, RC_TYPE_RC5, state->last_rc_key, 0);
} else if (state->last_rc_key) {
@@ -595,7 +606,7 @@ static int pctv452e_rc_query(struct dvb_usb_device *d)
state->last_rc_key = 0;
}
ret:
- mutex_unlock(&state->ca_mutex);
+ kfree(b);
return ret;
}
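All three pctv452e paths now build their command in a freshly kmalloc'ed per-call buffer instead of the shared state->data array, which removes the need to serialize on ca_mutex while keeping the USB transfer buffer heap-allocated. A userspace sketch of the 64-byte command framing used above (the SYNC_BYTE_OUT value 0xaa is inferred from the power-control hunk; treat it as an assumption):

	#include <assert.h>
	#include <string.h>

	#define SYNC_BYTE_OUT 0xaa	/* assumption: value not shown here */

	int main(void)
	{
		unsigned char buf[64];
		const unsigned char payload[2] = { 0x12, 0x34 };
		unsigned char id = 7;	/* state->c++, wrapping counter */

		buf[0] = SYNC_BYTE_OUT;		/* sync */
		buf[1] = id;			/* echoed back by the device */
		buf[2] = 0x01;			/* command */
		buf[3] = sizeof(payload);	/* payload length */
		memcpy(buf + 4, payload, sizeof(payload));

		assert(buf[3] + 4 <= (int)sizeof(buf));	/* fits one 64-byte URB */
		return 0;
	}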
diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c
index a0547db..76382c8 100644
--- a/drivers/memstick/core/memstick.c
+++ b/drivers/memstick/core/memstick.c
@@ -330,7 +330,7 @@ static int h_memstick_read_dev_id(struct memstick_dev *card,
struct ms_id_register id_reg;
if (!(*mrq)) {
- memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, NULL,
+ memstick_init_req(&card->current_mrq, MS_TPC_READ_REG, &id_reg,
sizeof(struct ms_id_register));
*mrq = &card->current_mrq;
return 0;
diff --git a/drivers/misc/mei/bus-fixup.c b/drivers/misc/mei/bus-fixup.c
index 18e05ca..3600c99 100644
--- a/drivers/misc/mei/bus-fixup.c
+++ b/drivers/misc/mei/bus-fixup.c
@@ -152,6 +152,9 @@ static void mei_mkhi_fix(struct mei_cl_device *cldev)
{
int ret;
+ if (!cldev->bus->hbm_f_os_supported)
+ return;
+
ret = mei_cldev_enable(cldev);
if (ret)
return;
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index c6c051b..c6217a4 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -180,6 +180,8 @@ static ssize_t mei_dbgfs_read_devstate(struct file *fp, char __user *ubuf,
dev->hbm_f_ev_supported);
pos += scnprintf(buf + pos, bufsz - pos, "\tFA: %01d\n",
dev->hbm_f_fa_supported);
+ pos += scnprintf(buf + pos, bufsz - pos, "\tOS: %01d\n",
+ dev->hbm_f_os_supported);
}
pos += scnprintf(buf + pos, bufsz - pos, "pg: %s, %s\n",
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index dd7f15a..25b4a1b 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -989,6 +989,10 @@ static void mei_hbm_config_features(struct mei_device *dev)
/* Fixed Address Client Support */
if (dev->version.major_version >= HBM_MAJOR_VERSION_FA)
dev->hbm_f_fa_supported = 1;
+
+ /* OS ver message Support */
+ if (dev->version.major_version >= HBM_MAJOR_VERSION_OS)
+ dev->hbm_f_os_supported = 1;
}
/**
diff --git a/drivers/misc/mei/hw.h b/drivers/misc/mei/hw.h
index 9daf3f9..e1e4d47 100644
--- a/drivers/misc/mei/hw.h
+++ b/drivers/misc/mei/hw.h
@@ -76,6 +76,12 @@
#define HBM_MINOR_VERSION_FA 0
#define HBM_MAJOR_VERSION_FA 2
+/*
+ * MEI version with OS ver message support
+ */
+#define HBM_MINOR_VERSION_OS 0
+#define HBM_MAJOR_VERSION_OS 2
+
/* Host bus message command opcode */
#define MEI_HBM_CMD_OP_MSK 0x7f
/* Host bus message command RESPONSE */
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 699693c..8dadb98 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -406,6 +406,7 @@ const char *mei_pg_state_str(enum mei_pg_state state);
* @hbm_f_ev_supported : hbm feature event notification
* @hbm_f_fa_supported : hbm feature fixed address client
* @hbm_f_ie_supported : hbm feature immediate reply to enum request
+ * @hbm_f_os_supported : hbm feature support OS ver message
*
* @me_clients_rwsem: rw lock over me_clients list
* @me_clients : list of FW clients
@@ -487,6 +488,7 @@ struct mei_device {
unsigned int hbm_f_ev_supported:1;
unsigned int hbm_f_fa_supported:1;
unsigned int hbm_f_ie_supported:1;
+ unsigned int hbm_f_os_supported:1;
struct rw_semaphore me_clients_rwsem;
struct list_head me_clients;
diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
index b11c345..e6ea850 100644
--- a/drivers/mmc/core/mmc_ops.c
+++ b/drivers/mmc/core/mmc_ops.c
@@ -506,9 +506,6 @@ static int mmc_poll_for_busy(struct mmc_card *card, unsigned int timeout_ms,
}
} while (busy);
- if (host->ops->card_busy && send_status)
- return mmc_switch_status(card);
-
return 0;
}
@@ -577,24 +574,26 @@ int __mmc_switch(struct mmc_card *card, u8 set, u8 index, u8 value,
if (!use_busy_signal)
goto out;
- /* Switch to new timing before poll and check switch status. */
- if (timing)
- mmc_set_timing(host, timing);
-
/* If SPI, or HW busy detection was used above, then we don't need to poll. */
if (((host->caps & MMC_CAP_WAIT_WHILE_BUSY) && use_r1b_resp) ||
- mmc_host_is_spi(host)) {
- if (send_status)
- err = mmc_switch_status(card);
+ mmc_host_is_spi(host))
goto out_tim;
- }
/* Let's try to poll to find out when the command is completed. */
err = mmc_poll_for_busy(card, timeout_ms, send_status, retry_crc_err);
+ if (err)
+ goto out;
out_tim:
- if (err && timing)
- mmc_set_timing(host, old_timing);
+ /* Switch to new timing before check switch status. */
+ if (timing)
+ mmc_set_timing(host, timing);
+
+ if (send_status) {
+ err = mmc_switch_status(card);
+ if (err && timing)
+ mmc_set_timing(host, old_timing);
+ }
out:
mmc_retune_release(host);
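The __mmc_switch() reordering above moves the host timing change to after the busy polling: poll first, then switch timing, then confirm with a status read, rolling the timing back if that fails. A stubbed userspace model of the new control flow (function names are stand-ins, not the real mmc host API):

	#include <stdio.h>

	static int poll_for_busy(void)	{ return 0; }	/* 0 = card left busy */
	static int switch_status(void)	{ return 0; }	/* 0 = switch took effect */
	static void set_timing(int t)	{ printf("timing -> %d\n", t); }

	static int mmc_switch_model(int new_timing, int old_timing)
	{
		int err = poll_for_busy();

		if (err)
			return err;

		set_timing(new_timing);		/* only after busy clears */
		err = switch_status();
		if (err)
			set_timing(old_timing);	/* roll back a failed switch */
		return err;
	}

	int main(void) { return mmc_switch_model(2, 1); }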
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index b44306b..73db085 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -3354,10 +3354,11 @@ int dw_mci_runtime_resume(struct device *dev)
if (!slot)
continue;
- if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER) {
+ if (slot->mmc->pm_flags & MMC_PM_KEEP_POWER)
dw_mci_set_ios(slot->mmc, &slot->mmc->ios);
- dw_mci_setup_bus(slot, true);
- }
+
+ /* Force setup bus to guarantee available clock output */
+ dw_mci_setup_bus(slot, true);
}
/* Now that slots are all setup, we can enable card detect */
diff --git a/drivers/mmc/host/meson-gx-mmc.c b/drivers/mmc/host/meson-gx-mmc.c
index b352760..0973935 100644
--- a/drivers/mmc/host/meson-gx-mmc.c
+++ b/drivers/mmc/host/meson-gx-mmc.c
@@ -578,13 +578,15 @@ static irqreturn_t meson_mmc_irq(int irq, void *dev_id)
{
struct meson_host *host = dev_id;
struct mmc_request *mrq;
- struct mmc_command *cmd = host->cmd;
+ struct mmc_command *cmd;
u32 irq_en, status, raw_status;
irqreturn_t ret = IRQ_HANDLED;
if (WARN_ON(!host))
return IRQ_NONE;
+ cmd = host->cmd;
+
mrq = host->mrq;
if (WARN_ON(!mrq))
@@ -670,10 +672,10 @@ static irqreturn_t meson_mmc_irq_thread(int irq, void *dev_id)
int ret = IRQ_HANDLED;
if (WARN_ON(!mrq))
- ret = IRQ_NONE;
+ return IRQ_NONE;
if (WARN_ON(!cmd))
- ret = IRQ_NONE;
+ return IRQ_NONE;
data = cmd->data;
if (data) {
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 44ecebd..c8b8ac6 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -309,6 +309,9 @@ static void mxs_mmc_ac(struct mxs_mmc_host *host)
cmd0 = BF_SSP(cmd->opcode, CMD0_CMD);
cmd1 = cmd->arg;
+ if (cmd->opcode == MMC_STOP_TRANSMISSION)
+ cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
+
if (host->sdio_irq_en) {
ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
cmd0 |= BM_SSP_CMD0_CONT_CLKING_EN | BM_SSP_CMD0_SLOW_CLKING_EN;
@@ -417,8 +420,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
ssp->base + HW_SSP_BLOCK_SIZE);
}
- if ((cmd->opcode == MMC_STOP_TRANSMISSION) ||
- (cmd->opcode == SD_IO_RW_EXTENDED))
+ if (cmd->opcode == SD_IO_RW_EXTENDED)
cmd0 |= BM_SSP_CMD0_APPEND_8CYC;
cmd1 = cmd->arg;
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index 160f695..278a5a4 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -395,7 +395,8 @@ static int sdhci_acpi_probe(struct platform_device *pdev)
/* Power on the SDHCI controller and its children */
acpi_device_fix_up_power(device);
list_for_each_entry(child, &device->children, node)
- acpi_device_fix_up_power(child);
+ if (child->status.present && child->status.enabled)
+ acpi_device_fix_up_power(child);
if (acpi_bus_get_status(device) || !device->status.present)
return -ENODEV;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 353a9dd..9ce5dcb 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -426,6 +426,7 @@ config MTD_NAND_ORION
config MTD_NAND_OXNAS
tristate "NAND Flash support for Oxford Semiconductor SoC"
+ depends on HAS_IOMEM
help
This enables the NAND flash controller on Oxford Semiconductor SoCs.
@@ -540,7 +541,7 @@ config MTD_NAND_FSMC
Flexible Static Memory Controller (FSMC)
config MTD_NAND_XWAY
- tristate "Support for NAND on Lantiq XWAY SoC"
+ bool "Support for NAND on Lantiq XWAY SoC"
depends on LANTIQ && SOC_TYPE_XWAY
help
Enables support for NAND Flash chips on Lantiq XWAY SoCs. NAND is attached
diff --git a/drivers/mtd/nand/lpc32xx_mlc.c b/drivers/mtd/nand/lpc32xx_mlc.c
index 5553a5d..846a66c 100644
--- a/drivers/mtd/nand/lpc32xx_mlc.c
+++ b/drivers/mtd/nand/lpc32xx_mlc.c
@@ -775,7 +775,7 @@ static int lpc32xx_nand_probe(struct platform_device *pdev)
init_completion(&host->comp_controller);
host->irq = platform_get_irq(pdev, 0);
- if ((host->irq < 0) || (host->irq >= NR_IRQS)) {
+ if (host->irq < 0) {
dev_err(&pdev->dev, "failed to get platform irq\n");
res = -EINVAL;
goto err_exit3;
diff --git a/drivers/mtd/nand/tango_nand.c b/drivers/mtd/nand/tango_nand.c
index 28c7f47..4a5e948 100644
--- a/drivers/mtd/nand/tango_nand.c
+++ b/drivers/mtd/nand/tango_nand.c
@@ -632,11 +632,13 @@ static int tango_nand_probe(struct platform_device *pdev)
if (IS_ERR(nfc->pbus_base))
return PTR_ERR(nfc->pbus_base);
+ writel_relaxed(MODE_RAW, nfc->pbus_base + PBUS_PAD_MODE);
+
clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(clk))
return PTR_ERR(clk);
- nfc->chan = dma_request_chan(&pdev->dev, "nfc_sbox");
+ nfc->chan = dma_request_chan(&pdev->dev, "rxtx");
if (IS_ERR(nfc->chan))
return PTR_ERR(nfc->chan);
diff --git a/drivers/mtd/nand/xway_nand.c b/drivers/mtd/nand/xway_nand.c
index 1f2948c..895101a 100644
--- a/drivers/mtd/nand/xway_nand.c
+++ b/drivers/mtd/nand/xway_nand.c
@@ -232,7 +232,6 @@ static const struct of_device_id xway_nand_match[] = {
{ .compatible = "lantiq,nand-xway" },
{},
};
-MODULE_DEVICE_TABLE(of, xway_nand_match);
static struct platform_driver xway_nand_driver = {
.probe = xway_nand_probe,
@@ -243,6 +242,4 @@ static struct platform_driver xway_nand_driver = {
},
};
-module_platform_driver(xway_nand_driver);
-
-MODULE_LICENSE("GPL");
+builtin_platform_driver(xway_nand_driver);
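With the Kconfig symbol demoted from tristate to bool above, the module boilerplate goes away; builtin_platform_driver() registers the driver at device_initcall time and provides no unload path. Its expansion at the time was roughly the following (simplified and illustrative, not verbatim kernel source):

	static int __init xway_nand_driver_init(void)
	{
		return platform_driver_register(&xway_nand_driver);
	}
	device_initcall(xway_nand_driver_init);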
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index b8c2933..a306de4 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -190,7 +190,7 @@ static netdev_tx_t ipddp_xmit(struct sk_buff *skb, struct net_device *dev)
*/
static int ipddp_create(struct ipddp_route *new_rt)
{
- struct ipddp_route *rt = kmalloc(sizeof(*rt), GFP_KERNEL);
+ struct ipddp_route *rt = kzalloc(sizeof(*rt), GFP_KERNEL);
if (rt == NULL)
return -ENOMEM;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index 8f5e93c..0e0df0b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -813,7 +813,7 @@ static int at91_poll(struct napi_struct *napi, int quota)
u32 reg_ier = AT91_IRQ_ERR_FRAME;
reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
at91_write(priv, AT91_IER, reg_ier);
}
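This at91 hunk is the first of several identical napi_complete() to napi_complete_done() conversions below; reporting the real packet count lets the core make better completion decisions. The contract the drivers rely on, modeled in userspace: process at most budget packets, and complete only when fewer than budget were handled:

	#include <assert.h>

	static int pending = 5;		/* packets waiting (model state) */

	static int poll(int budget, int *completed)
	{
		int done = pending < budget ? pending : budget;

		pending -= done;
		*completed = done < budget;	/* napi_complete_done(napi, done) */
		return done;
	}

	int main(void)
	{
		int completed;

		assert(poll(4, &completed) == 4 && !completed);	/* budget used up */
		assert(poll(4, &completed) == 1 && completed);	/* drained: done */
		return 0;
	}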
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index e3dccd3..606b7d8 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -1070,7 +1070,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
end:
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* enable all IRQs if we are not in bus off state */
if (priv->can.state != CAN_STATE_BUS_OFF)
c_can_irq_control(priv, true);
diff --git a/drivers/net/can/c_can/c_can_pci.c b/drivers/net/can/c_can/c_can_pci.c
index 7be393c..cf7c189 100644
--- a/drivers/net/can/c_can/c_can_pci.c
+++ b/drivers/net/can/c_can/c_can_pci.c
@@ -161,6 +161,7 @@ static int c_can_pci_probe(struct pci_dev *pdev,
dev->irq = pdev->irq;
priv->base = addr;
+ priv->device = &pdev->dev;
if (!c_can_pci_data->freq) {
dev_err(&pdev->dev, "no clock frequency defined\n");
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 8d6208c..611d16a 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -279,25 +279,45 @@ static int can_fixup_bittiming(struct net_device *dev, struct can_bittiming *bt,
return 0;
}
+/* Checks the validity of predefined bitrate settings */
+static int can_validate_bitrate(struct net_device *dev, struct can_bittiming *bt,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
+{
+ struct can_priv *priv = netdev_priv(dev);
+ unsigned int i;
+
+ for (i = 0; i < bitrate_const_cnt; i++) {
+ if (bt->bitrate == bitrate_const[i])
+ break;
+ }
+
+ if (i >= priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ return 0;
+}
+
static int can_get_bittiming(struct net_device *dev, struct can_bittiming *bt,
- const struct can_bittiming_const *btc)
+ const struct can_bittiming_const *btc,
+ const u32 *bitrate_const,
+ const unsigned int bitrate_const_cnt)
{
int err;
- /* Check if the CAN device has bit-timing parameters */
- if (!btc)
- return -EOPNOTSUPP;
-
/*
* Depending on the given can_bittiming parameter structure the CAN
* timing parameters are calculated based on the provided bitrate OR
* alternatively the CAN timing parameters (tq, prop_seg, etc.) are
* provided directly which are then checked and fixed up.
*/
- if (!bt->tq && bt->bitrate)
+ if (!bt->tq && bt->bitrate && btc)
err = can_calc_bittiming(dev, bt, btc);
- else if (bt->tq && !bt->bitrate)
+ else if (bt->tq && !bt->bitrate && btc)
err = can_fixup_bittiming(dev, bt, btc);
+ else if (!bt->tq && bt->bitrate && bitrate_const)
+ err = can_validate_bitrate(dev, bt, bitrate_const,
+ bitrate_const_cnt);
else
err = -EINVAL;
@@ -872,8 +892,20 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->bittiming_const && !priv->do_set_bittiming)
+ return -EOPNOTSUPP;
+
memcpy(&bt, nla_data(data[IFLA_CAN_BITTIMING]), sizeof(bt));
- err = can_get_bittiming(dev, &bt, priv->bittiming_const);
+ err = can_get_bittiming(dev, &bt,
+ priv->bittiming_const,
+ priv->bitrate_const,
+ priv->bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->bittiming, &bt, sizeof(bt));
@@ -943,9 +975,21 @@ static int can_changelink(struct net_device *dev,
/* Do not allow changing bittiming while running */
if (dev->flags & IFF_UP)
return -EBUSY;
+
+ /* Calculate bittiming parameters based on
+ * data_bittiming_const if set, otherwise pass bitrate
+ * directly via do_set_data_bittiming(). Bail out if neither
+ * is given.
+ */
+ if (!priv->data_bittiming_const && !priv->do_set_data_bittiming)
+ return -EOPNOTSUPP;
+
memcpy(&dbt, nla_data(data[IFLA_CAN_DATA_BITTIMING]),
sizeof(dbt));
- err = can_get_bittiming(dev, &dbt, priv->data_bittiming_const);
+ err = can_get_bittiming(dev, &dbt,
+ priv->data_bittiming_const,
+ priv->data_bitrate_const,
+ priv->data_bitrate_const_cnt);
if (err)
return err;
memcpy(&priv->data_bittiming, &dbt, sizeof(dbt));
@@ -958,6 +1002,30 @@ static int can_changelink(struct net_device *dev,
}
}
+ if (data[IFLA_CAN_TERMINATION]) {
+ const u16 termval = nla_get_u16(data[IFLA_CAN_TERMINATION]);
+ const unsigned int num_term = priv->termination_const_cnt;
+ unsigned int i;
+
+ if (!priv->do_set_termination)
+ return -EOPNOTSUPP;
+
+ /* check whether given value is supported by the interface */
+ for (i = 0; i < num_term; i++) {
+ if (termval == priv->termination_const[i])
+ break;
+ }
+ if (i >= num_term)
+ return -EINVAL;
+
+ /* Finally, set the termination value */
+ err = priv->do_set_termination(dev, termval);
+ if (err)
+ return err;
+
+ priv->termination = termval;
+ }
+
return 0;
}
@@ -980,6 +1048,17 @@ static size_t can_get_size(const struct net_device *dev)
size += nla_total_size(sizeof(struct can_bittiming));
if (priv->data_bittiming_const) /* IFLA_CAN_DATA_BITTIMING_CONST */
size += nla_total_size(sizeof(struct can_bittiming_const));
+ if (priv->termination_const) {
+ size += nla_total_size(sizeof(priv->termination)); /* IFLA_CAN_TERMINATION */
+ size += nla_total_size(sizeof(*priv->termination_const) * /* IFLA_CAN_TERMINATION_CONST */
+ priv->termination_const_cnt);
+ }
+ if (priv->bitrate_const) /* IFLA_CAN_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt);
+ if (priv->data_bitrate_const) /* IFLA_CAN_DATA_BITRATE_CONST */
+ size += nla_total_size(sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt);
return size;
}
@@ -1018,7 +1097,28 @@ static int can_fill_info(struct sk_buff *skb, const struct net_device *dev)
(priv->data_bittiming_const &&
nla_put(skb, IFLA_CAN_DATA_BITTIMING_CONST,
sizeof(*priv->data_bittiming_const),
- priv->data_bittiming_const)))
+ priv->data_bittiming_const)) ||
+
+ (priv->termination_const &&
+ (nla_put_u16(skb, IFLA_CAN_TERMINATION, priv->termination) ||
+ nla_put(skb, IFLA_CAN_TERMINATION_CONST,
+ sizeof(*priv->termination_const) *
+ priv->termination_const_cnt,
+ priv->termination_const))) ||
+
+ (priv->bitrate_const &&
+ nla_put(skb, IFLA_CAN_BITRATE_CONST,
+ sizeof(*priv->bitrate_const) *
+ priv->bitrate_const_cnt,
+ priv->bitrate_const)) ||
+
+ (priv->data_bitrate_const &&
+ nla_put(skb, IFLA_CAN_DATA_BITRATE_CONST,
+ sizeof(*priv->data_bitrate_const) *
+ priv->data_bitrate_const_cnt,
+ priv->data_bitrate_const))
+ )
+
return -EMSGSIZE;
return 0;
@@ -1073,6 +1173,22 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
*/
int register_candev(struct net_device *dev)
{
+ struct can_priv *priv = netdev_priv(dev);
+
+ /* Ensure termination_const, termination_const_cnt and
+ * do_set_termination consistency. All must be either set or
+ * unset.
+ */
+ if ((!priv->termination_const != !priv->termination_const_cnt) ||
+ (!priv->termination_const != !priv->do_set_termination))
+ return -EINVAL;
+
+ if (!priv->bitrate_const != !priv->bitrate_const_cnt)
+ return -EINVAL;
+
+ if (!priv->data_bitrate_const != !priv->data_bitrate_const_cnt)
+ return -EINVAL;
+
dev->rtnl_link_ops = &can_link_ops;
return register_netdev(dev);
}
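register_candev() now rejects half-configured drivers using the !x != !y idiom: logical negation normalizes a pointer or count to 0 or 1, so the inequality holds exactly when one of the pair is set without the other. A standalone demonstration:

	#include <assert.h>
	#include <stddef.h>

	/* true when exactly one of (table, count) is provided */
	static int inconsistent(const void *ptr, unsigned int cnt)
	{
		return !ptr != !cnt;
	}

	int main(void)
	{
		static const unsigned int table[2] = { 120, 60 };

		assert(!inconsistent(NULL, 0));		/* neither set: ok */
		assert(!inconsistent(table, 2));	/* both set: ok */
		assert(inconsistent(table, 0));		/* table, no count: bad */
		assert(inconsistent(NULL, 2));		/* count, no table: bad */
		return 0;
	}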
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 16f7cad..43cfce8 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -703,7 +703,7 @@ static int flexcan_poll(struct napi_struct *napi, int quota)
work_done += flexcan_poll_bus_err(dev, reg_esr);
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* enable IRQs */
flexcan_write(FLEXCAN_IFLAG_DEFAULT, &regs->imask1);
flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
diff --git a/drivers/net/can/ifi_canfd/ifi_canfd.c b/drivers/net/can/ifi_canfd/ifi_canfd.c
index 368bb07..138f5ae 100644
--- a/drivers/net/can/ifi_canfd/ifi_canfd.c
+++ b/drivers/net/can/ifi_canfd/ifi_canfd.c
@@ -578,7 +578,7 @@ static int ifi_canfd_poll(struct napi_struct *napi, int quota)
work_done += ifi_canfd_do_rx_poll(ndev, quota - work_done);
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ifi_canfd_irq_enable(ndev, 1);
}
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index f13bb8d..2ba1a81 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1475,7 +1475,7 @@ static int ican3_napi(struct napi_struct *napi, int budget)
/* We have processed all packets that the adapter had, but it
* was less than our budget, stop polling */
if (received < budget)
- napi_complete(napi);
+ napi_complete_done(napi, received);
spin_lock_irqsave(&mod->lock, flags);
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 195f15e..7a6554e 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -730,7 +730,7 @@ static int m_can_poll(struct napi_struct *napi, int quota)
work_done += m_can_do_rx_poll(dev, (quota - work_done));
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
m_can_enable_all_interrupts(priv);
}
diff --git a/drivers/net/can/rcar/rcar_can.c b/drivers/net/can/rcar/rcar_can.c
index 788459f..caed4e6 100644
--- a/drivers/net/can/rcar/rcar_can.c
+++ b/drivers/net/can/rcar/rcar_can.c
@@ -695,7 +695,7 @@ static int rcar_can_rx_poll(struct napi_struct *napi, int quota)
}
/* All packets processed */
if (num_pkts < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, num_pkts);
priv->ier |= RCAR_CAN_IER_RXFIE;
writeb(priv->ier, &priv->regs->ier);
}
diff --git a/drivers/net/can/rcar/rcar_canfd.c b/drivers/net/can/rcar/rcar_canfd.c
index 43cdd55..4ef07d9 100644
--- a/drivers/net/can/rcar/rcar_canfd.c
+++ b/drivers/net/can/rcar/rcar_canfd.c
@@ -1512,7 +1512,7 @@ static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota)
/* All packets processed */
if (num_pkts < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, num_pkts);
/* Enable Rx FIFO interrupts */
rcar_canfd_set_bit(priv->base, RCANFD_RFCC(ridx),
RCANFD_RFCC_RFIE);
diff --git a/drivers/net/can/softing/softing_cs.c b/drivers/net/can/softing/softing_cs.c
index cdc0c74..4d44928 100644
--- a/drivers/net/can/softing/softing_cs.c
+++ b/drivers/net/can/softing/softing_cs.c
@@ -310,7 +310,7 @@ pcmcia_bad:
pcmcia_failed:
pcmcia_disable_device(pcmcia);
pcmcia->priv = NULL;
- return ret ?: -ENODEV;
+ return ret;
}
static const struct pcmcia_device_id softingcs_ids[] = {
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index 680d1ff..6749b18 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -948,7 +948,12 @@ static int ti_hecc_probe(struct platform_device *pdev)
netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
HECC_DEF_NAPI_WEIGHT);
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err) {
+ dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
+ goto probe_exit_clk;
+ }
+
err = register_candev(ndev);
if (err) {
dev_err(&pdev->dev, "register_candev() failed\n");
@@ -981,7 +986,7 @@ static int ti_hecc_remove(struct platform_device *pdev)
struct ti_hecc_priv *priv = netdev_priv(ndev);
unregister_candev(ndev);
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
clk_put(priv->clk);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
iounmap(priv->base);
@@ -1006,7 +1011,7 @@ static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_SLEEPING;
- clk_disable(priv->clk);
+ clk_disable_unprepare(priv->clk);
return 0;
}
@@ -1015,8 +1020,11 @@ static int ti_hecc_resume(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
struct ti_hecc_priv *priv = netdev_priv(dev);
+ int err;
- clk_enable(priv->clk);
+ err = clk_prepare_enable(priv->clk);
+ if (err)
+ return err;
hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
priv->can.state = CAN_STATE_ERROR_ACTIVE;
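
Unlike clk_enable(), which may only be called on a clock that has already been prepared, clk_prepare_enable() performs both steps and can fail, which is why the probe and resume paths above now check its return value. The balanced idiom, as a hedged sketch:

	err = clk_prepare_enable(priv->clk);	/* prepare + enable, may sleep */
	if (err)
		return err;

	/* ... hardware in use ... */

	clk_disable_unprepare(priv->clk);	/* undo both steps */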
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index c71a035..89aec07 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -726,7 +726,7 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
can_led_event(ndev, CAN_LED_EVENT_RX);
if (work_done < quota) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ier = priv->read_reg(priv, XCAN_IER_OFFSET);
ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
priv->write_reg(priv, XCAN_IER_OFFSET, ier);
diff --git a/drivers/net/dsa/Makefile b/drivers/net/dsa/Makefile
index 8346e4f..a3c9416 100644
--- a/drivers/net/dsa/Makefile
+++ b/drivers/net/dsa/Makefile
@@ -1,5 +1,6 @@
obj-$(CONFIG_NET_DSA_MV88E6060) += mv88e6060.o
-obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm_sf2.o
+obj-$(CONFIG_NET_DSA_BCM_SF2) += bcm-sf2.o
+bcm-sf2-objs := bcm_sf2.o bcm_sf2_cfp.o
obj-$(CONFIG_NET_DSA_QCA8K) += qca8k.o
obj-y += b53/
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 5102a37..8cf4801 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1308,7 +1308,7 @@ int b53_fdb_dump(struct dsa_switch *ds, int port,
}
EXPORT_SYMBOL(b53_fdb_dump);
-int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge)
+int b53_br_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
s8 cpu_port = ds->dst->cpu_port;
@@ -1326,11 +1326,10 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge)
b53_write16(dev, B53_VLAN_PAGE, B53_JOIN_ALL_VLAN_EN, reg);
}
- dev->ports[port].bridge_dev = bridge;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), &pvlan);
b53_for_each_port(dev, i) {
- if (dev->ports[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
/* Add this local port to the remote port VLAN control
@@ -1354,10 +1353,9 @@ int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge)
}
EXPORT_SYMBOL(b53_br_join);
-void b53_br_leave(struct dsa_switch *ds, int port)
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct b53_device *dev = ds->priv;
- struct net_device *bridge = dev->ports[port].bridge_dev;
struct b53_vlan *vl = &dev->vlans[0];
s8 cpu_port = ds->dst->cpu_port;
unsigned int i;
@@ -1367,7 +1365,7 @@ void b53_br_leave(struct dsa_switch *ds, int port)
b53_for_each_port(dev, i) {
/* Don't touch the remaining ports */
- if (dev->ports[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
b53_read16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(i), &reg);
@@ -1382,7 +1380,6 @@ void b53_br_leave(struct dsa_switch *ds, int port)
b53_write16(dev, B53_PVLAN_PAGE, B53_PVLAN_PORT_MASK(port), pvlan);
dev->ports[port].vlan_ctl_mask = pvlan;
- dev->ports[port].bridge_dev = NULL;
if (is5325(dev) || is5365(dev))
pvid = 1;
@@ -1453,6 +1450,71 @@ static enum dsa_tag_protocol b53_get_tag_protocol(struct dsa_switch *ds)
return DSA_TAG_PROTO_NONE;
}
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress)
+{
+ struct b53_device *dev = ds->priv;
+ u16 reg, loc;
+
+ if (ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg &= ~MIRROR_MASK;
+ reg |= BIT(port);
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ reg &= ~CAP_PORT_MASK;
+ reg |= mirror->to_local_port;
+ reg |= MIRROR_EN;
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+
+ return 0;
+}
+EXPORT_SYMBOL(b53_mirror_add);
+
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror)
+{
+ struct b53_device *dev = ds->priv;
+ bool loc_disable = false, other_loc_disable = false;
+ u16 reg, loc;
+
+ if (mirror->ingress)
+ loc = B53_IG_MIR_CTL;
+ else
+ loc = B53_EG_MIR_CTL;
+
+ /* Update the desired ingress/egress register */
+ b53_read16(dev, B53_MGMT_PAGE, loc, &reg);
+ reg &= ~BIT(port);
+ if (!(reg & MIRROR_MASK))
+ loc_disable = true;
+ b53_write16(dev, B53_MGMT_PAGE, loc, reg);
+
+ /* Now look at the other one to know if we can disable mirroring
+ * entirely
+ */
+ if (mirror->ingress)
+ b53_read16(dev, B53_MGMT_PAGE, B53_EG_MIR_CTL, &reg);
+ else
+ b53_read16(dev, B53_MGMT_PAGE, B53_IG_MIR_CTL, &reg);
+ if (!(reg & MIRROR_MASK))
+ other_loc_disable = true;
+
+ b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
+ /* Both no longer have ports, let's disable mirroring */
+ if (loc_disable && other_loc_disable) {
+ reg &= ~MIRROR_EN;
+ reg &= ~mirror->to_local_port;
+ }
+ b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
+}
+EXPORT_SYMBOL(b53_mirror_del);
+
static const struct dsa_switch_ops b53_switch_ops = {
.get_tag_protocol = b53_get_tag_protocol,
.setup = b53_setup,
@@ -1477,6 +1539,8 @@ static const struct dsa_switch_ops b53_switch_ops = {
.port_fdb_dump = b53_fdb_dump,
.port_fdb_add = b53_fdb_add,
.port_fdb_del = b53_fdb_del,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
};
struct b53_chip_data {
@@ -1685,6 +1749,18 @@ static const struct b53_chip_data b53_switch_chips[] = {
.jumbo_pm_reg = B53_JUMBO_PORT_MASK,
.jumbo_size_reg = B53_JUMBO_MAX_SIZE,
},
+ {
+ .chip_id = BCM7278_DEVICE_ID,
+ .dev_name = "BCM7278",
+ .vlans = 4096,
+ .enabled_ports = 0x1ff,
+ .arl_entries = 4,
+ .cpu_port = B53_CPU_PORT,
+ .vta_regs = B53_VTA_REGS,
+ .duplex_reg = B53_DUPLEX_STAT_GE,
+ .jumbo_pm_reg = B53_JUMBO_PORT_MASK,
+ .jumbo_size_reg = B53_JUMBO_MAX_SIZE,
+ },
};
static int b53_switch_init(struct b53_device *dev)
@@ -1778,14 +1854,15 @@ struct b53_device *b53_switch_alloc(struct device *base,
struct dsa_switch *ds;
struct b53_device *dev;
- ds = devm_kzalloc(base, sizeof(*ds) + sizeof(*dev), GFP_KERNEL);
+ ds = dsa_switch_alloc(base, DSA_MAX_PORTS);
if (!ds)
return NULL;
- dev = (struct b53_device *)(ds + 1);
+ dev = devm_kzalloc(base, sizeof(*dev), GFP_KERNEL);
+ if (!dev)
+ return NULL;
ds->priv = dev;
- ds->dev = base;
dev->dev = base;
dev->ds = ds;
@@ -1882,7 +1959,7 @@ int b53_switch_register(struct b53_device *dev)
pr_info("found switch: %s, rev %i\n", dev->name, dev->core_rev);
- return dsa_register_switch(dev->ds, dev->ds->dev->of_node);
+ return dsa_register_switch(dev->ds, dev->ds->dev);
}
EXPORT_SYMBOL(b53_switch_register);
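
The b53_mirror_add()/b53_mirror_del() pair added above is built entirely from 16-bit read-modify-write cycles against the management page. The capture-port update, isolated as an illustrative sketch (not a new helper in the patch):

	static void example_set_capture_port(struct b53_device *dev,
					     u8 capture_port)
	{
		u16 reg;

		/* Select the port that receives mirrored traffic and arm
		 * the global mirror enable bit.
		 */
		b53_read16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, &reg);
		reg &= ~CAP_PORT_MASK;
		reg |= capture_port | MIRROR_EN;
		b53_write16(dev, B53_MGMT_PAGE, B53_MIR_CAP_CTL, reg);
	}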
diff --git a/drivers/net/dsa/b53/b53_mdio.c b/drivers/net/dsa/b53/b53_mdio.c
index 477a16b..fa7556f 100644
--- a/drivers/net/dsa/b53/b53_mdio.c
+++ b/drivers/net/dsa/b53/b53_mdio.c
@@ -375,18 +375,7 @@ static struct mdio_driver b53_mdio_driver = {
.of_match_table = b53_of_match,
},
};
-
-static int __init b53_mdio_driver_register(void)
-{
- return mdio_driver_register(&b53_mdio_driver);
-}
-module_init(b53_mdio_driver_register);
-
-static void __exit b53_mdio_driver_unregister(void)
-{
- mdio_driver_unregister(&b53_mdio_driver);
-}
-module_exit(b53_mdio_driver_unregister);
+mdio_module_driver(b53_mdio_driver);
MODULE_DESCRIPTION("B53 MDIO access driver");
MODULE_LICENSE("Dual BSD/GPL");
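
mdio_module_driver() packs the removed init/exit boilerplate into a single macro; its definition in include/linux/mdio.h expands to roughly:

	#define mdio_module_driver(_mdio_driver)			\
	static int __init mdio_module_init(void)			\
	{								\
		return mdio_driver_register(&_mdio_driver);		\
	}								\
	module_init(mdio_module_init);					\
	static void __exit mdio_module_exit(void)			\
	{								\
		mdio_driver_unregister(&_mdio_driver);			\
	}								\
	module_exit(mdio_module_exit)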
diff --git a/drivers/net/dsa/b53/b53_priv.h b/drivers/net/dsa/b53/b53_priv.h
index 86f125d..a9dc90a 100644
--- a/drivers/net/dsa/b53/b53_priv.h
+++ b/drivers/net/dsa/b53/b53_priv.h
@@ -62,6 +62,7 @@ enum {
BCM53019_DEVICE_ID = 0x53019,
BCM58XX_DEVICE_ID = 0x5800,
BCM7445_DEVICE_ID = 0x7445,
+ BCM7278_DEVICE_ID = 0x7278,
};
#define B53_N_PORTS 9
@@ -69,7 +70,6 @@ enum {
struct b53_port {
u16 vlan_ctl_mask;
- struct net_device *bridge_dev;
};
struct b53_vlan {
@@ -179,7 +179,8 @@ static inline int is5301x(struct b53_device *dev)
static inline int is58xx(struct b53_device *dev)
{
return dev->chip_id == BCM58XX_DEVICE_ID ||
- dev->chip_id == BCM7445_DEVICE_ID;
+ dev->chip_id == BCM7445_DEVICE_ID ||
+ dev->chip_id == BCM7278_DEVICE_ID;
}
#define B53_CPU_PORT_25 5
@@ -380,7 +381,7 @@ void b53_get_strings(struct dsa_switch *ds, int port, uint8_t *data);
void b53_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data);
int b53_get_sset_count(struct dsa_switch *ds);
int b53_br_join(struct dsa_switch *ds, int port, struct net_device *bridge);
-void b53_br_leave(struct dsa_switch *ds, int port);
+void b53_br_leave(struct dsa_switch *ds, int port, struct net_device *bridge);
void b53_br_set_stp_state(struct dsa_switch *ds, int port, u8 state);
void b53_br_fast_age(struct dsa_switch *ds, int port);
int b53_vlan_filtering(struct dsa_switch *ds, int port, bool vlan_filtering);
@@ -406,5 +407,9 @@ int b53_fdb_del(struct dsa_switch *ds, int port,
int b53_fdb_dump(struct dsa_switch *ds, int port,
struct switchdev_obj_port_fdb *fdb,
int (*cb)(struct switchdev_obj *obj));
+int b53_mirror_add(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror, bool ingress);
+void b53_mirror_del(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
#endif
diff --git a/drivers/net/dsa/b53/b53_regs.h b/drivers/net/dsa/b53/b53_regs.h
index dac0af4..9fd24c4 100644
--- a/drivers/net/dsa/b53/b53_regs.h
+++ b/drivers/net/dsa/b53/b53_regs.h
@@ -206,6 +206,38 @@
#define BRCM_HDR_P8_EN BIT(0) /* Enable tagging on port 8 */
#define BRCM_HDR_P5_EN BIT(1) /* Enable tagging on port 5 */
+/* Mirror capture control register (16 bit) */
+#define B53_MIR_CAP_CTL 0x10
+#define CAP_PORT_MASK 0xf
+#define BLK_NOT_MIR BIT(14)
+#define MIRROR_EN BIT(15)
+
+/* Ingress mirror control register (16 bit) */
+#define B53_IG_MIR_CTL 0x12
+#define MIRROR_MASK 0x1ff
+#define DIV_EN BIT(13)
+#define MIRROR_FILTER_MASK 0x3
+#define MIRROR_FILTER_SHIFT 14
+#define MIRROR_ALL 0
+#define MIRROR_DA 1
+#define MIRROR_SA 2
+
+/* Ingress mirror divider register (16 bit) */
+#define B53_IG_MIR_DIV 0x14
+#define IN_MIRROR_DIV_MASK 0x3ff
+
+/* Ingress mirror MAC address register (48 bit) */
+#define B53_IG_MIR_MAC 0x16
+
+/* Egress mirror control register (16 bit) */
+#define B53_EG_MIR_CTL 0x1C
+
+/* Egress mirror divider register (16 bit) */
+#define B53_EG_MIR_DIV 0x1E
+
+/* Egress mirror MAC address register (48 bit) */
+#define B53_EG_MIR_MAC 0x20
+
/* Device ID register (8 or 32 bit) */
#define B53_DEVICE_ID 0x30
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 31d0170..2be9632 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -61,30 +61,10 @@ static void bcm_sf2_imp_vlan_setup(struct dsa_switch *ds, int cpu_port)
}
}
-static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+static void bcm_sf2_brcm_hdr_setup(struct bcm_sf2_priv *priv, int port)
{
- struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
u32 reg, val;
- /* Enable the port memories */
- reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
- reg &= ~P_TXQ_PSM_VDD(port);
- core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
-
- /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
- reg = core_readl(priv, CORE_IMP_CTL);
- reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
- reg &= ~(RX_DIS | TX_DIS);
- core_writel(priv, reg, CORE_IMP_CTL);
-
- /* Enable forwarding */
- core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
-
- /* Enable IMP port in dumb mode */
- reg = core_readl(priv, CORE_SWITCH_CTRL);
- reg |= MII_DUMB_FWDG_EN;
- core_writel(priv, reg, CORE_SWITCH_CTRL);
-
/* Resolve which bit controls the Broadcom tag */
switch (port) {
case 8:
@@ -119,11 +99,43 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
reg = core_readl(priv, CORE_BRCM_HDR_TX_DIS);
reg &= ~(1 << port);
core_writel(priv, reg, CORE_BRCM_HDR_TX_DIS);
+}
+
+static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ u32 reg, offset;
+
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_IMP;
+ else
+ offset = CORE_STS_OVERRIDE_IMP2;
+
+ /* Enable the port memories */
+ reg = core_readl(priv, CORE_MEM_PSM_VDD_CTRL);
+ reg &= ~P_TXQ_PSM_VDD(port);
+ core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+
+ /* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
+ reg = core_readl(priv, CORE_IMP_CTL);
+ reg |= (RX_BCST_EN | RX_MCST_EN | RX_UCST_EN);
+ reg &= ~(RX_DIS | TX_DIS);
+ core_writel(priv, reg, CORE_IMP_CTL);
+
+ /* Enable forwarding */
+ core_writel(priv, SW_FWDG_EN, CORE_SWMODE);
+
+ /* Enable IMP port in dumb mode */
+ reg = core_readl(priv, CORE_SWITCH_CTRL);
+ reg |= MII_DUMB_FWDG_EN;
+ core_writel(priv, reg, CORE_SWITCH_CTRL);
+
+ bcm_sf2_brcm_hdr_setup(priv, port);
/* Force link status for IMP port */
- reg = core_readl(priv, CORE_STS_OVERRIDE_IMP);
+ reg = core_readl(priv, offset);
reg |= (MII_SW_OR | LINK_STS);
- core_writel(priv, reg, CORE_STS_OVERRIDE_IMP);
+ core_writel(priv, reg, offset);
}
static void bcm_sf2_eee_enable_set(struct dsa_switch *ds, int port, bool enable)
@@ -217,6 +229,7 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
s8 cpu_port = ds->dst[ds->index].cpu_port;
+ unsigned int i;
u32 reg;
/* Clear the memory power down */
@@ -224,6 +237,18 @@ static int bcm_sf2_port_setup(struct dsa_switch *ds, int port,
reg &= ~P_TXQ_PSM_VDD(port);
core_writel(priv, reg, CORE_MEM_PSM_VDD_CTRL);
+ /* Enable Broadcom tags for that port if requested */
+ if (priv->brcm_tag_mask & BIT(port))
+ bcm_sf2_brcm_hdr_setup(priv, port);
+
+ /* Configure the Traffic Class to QoS mapping, allowing each priority
+ * to map to a different queue number
+ */
+ reg = core_readl(priv, CORE_PORT_TC2_QOS_MAP_PORT(port));
+ for (i = 0; i < 8; i++)
+ reg |= i << (PRT_TO_QID_SHIFT * i);
+ core_writel(priv, reg, CORE_PORT_TC2_QOS_MAP_PORT(port));
+
/* Clear the Rx and Tx disable bits and set to no spanning tree */
core_writel(priv, 0, CORE_G_PCTL_PORT(port));
@@ -503,6 +528,9 @@ static void bcm_sf2_identify_ports(struct bcm_sf2_priv *priv,
if (mode == PHY_INTERFACE_MODE_MOCA)
priv->moca_port = port_num;
+
+ if (of_property_read_bool(port, "brcm,use-bcm-hdr"))
+ priv->brcm_tag_mask |= 1 << port_num;
}
}
@@ -591,7 +619,12 @@ static void bcm_sf2_sw_adjust_link(struct dsa_switch *ds, int port,
struct ethtool_eee *p = &priv->port_sts[port].eee;
u32 id_mode_dis = 0, port_mode;
const char *str = NULL;
- u32 reg;
+ u32 reg, offset;
+
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ else
+ offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
switch (phydev->interface) {
case PHY_INTERFACE_MODE_RGMII:
@@ -662,7 +695,7 @@ force_link:
if (phydev->duplex == DUPLEX_FULL)
reg |= DUPLX_MODE;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ core_writel(priv, reg, offset);
if (!phydev->is_pseudo_fixed_link)
p->eee_enabled = bcm_sf2_eee_init(ds, port, phydev);
@@ -672,9 +705,14 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
struct fixed_phy_status *status)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- u32 duplex, pause;
+ u32 duplex, pause, offset;
u32 reg;
+ if (priv->type == BCM7445_DEVICE_ID)
+ offset = CORE_STS_OVERRIDE_GMIIP_PORT(port);
+ else
+ offset = CORE_STS_OVERRIDE_GMIIP2_PORT(port);
+
duplex = core_readl(priv, CORE_DUPSTS);
pause = core_readl(priv, CORE_PAUSESTS);
@@ -703,13 +741,13 @@ static void bcm_sf2_sw_fixed_link_update(struct dsa_switch *ds, int port,
status->duplex = !!(duplex & (1 << port));
}
- reg = core_readl(priv, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ reg = core_readl(priv, offset);
reg |= SW_OVERRIDE;
if (status->link)
reg |= LINK_STS;
else
reg &= ~LINK_STS;
- core_writel(priv, reg, CORE_STS_OVERRIDE_GMIIP_PORT(port));
+ core_writel(priv, reg, offset);
if ((pause & (1 << port)) &&
(pause & (1 << (port + PAUSESTS_TX_PAUSE_SHIFT)))) {
@@ -1007,12 +1045,80 @@ static const struct dsa_switch_ops bcm_sf2_ops = {
.port_fdb_dump = b53_fdb_dump,
.port_fdb_add = b53_fdb_add,
.port_fdb_del = b53_fdb_del,
+ .get_rxnfc = bcm_sf2_get_rxnfc,
+ .set_rxnfc = bcm_sf2_set_rxnfc,
+ .port_mirror_add = b53_mirror_add,
+ .port_mirror_del = b53_mirror_del,
+};
+
+struct bcm_sf2_of_data {
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+};
+
+/* Register offsets for the SWITCH_REG_* block */
+static const u16 bcm_sf2_7445_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0C,
+ [REG_SWITCH_REVISION] = 0x18,
+ [REG_PHY_REVISION] = 0x1C,
+ [REG_SPHY_CNTRL] = 0x2C,
+ [REG_RGMII_0_CNTRL] = 0x34,
+ [REG_RGMII_1_CNTRL] = 0x40,
+ [REG_RGMII_2_CNTRL] = 0x4c,
+ [REG_LED_0_CNTRL] = 0x90,
+ [REG_LED_1_CNTRL] = 0x94,
+ [REG_LED_2_CNTRL] = 0x98,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7445_data = {
+ .type = BCM7445_DEVICE_ID,
+ .core_reg_align = 0,
+ .reg_offsets = bcm_sf2_7445_reg_offsets,
+};
+
+static const u16 bcm_sf2_7278_reg_offsets[] = {
+ [REG_SWITCH_CNTRL] = 0x00,
+ [REG_SWITCH_STATUS] = 0x04,
+ [REG_DIR_DATA_WRITE] = 0x08,
+ [REG_DIR_DATA_READ] = 0x0c,
+ [REG_SWITCH_REVISION] = 0x10,
+ [REG_PHY_REVISION] = 0x14,
+ [REG_SPHY_CNTRL] = 0x24,
+ [REG_RGMII_0_CNTRL] = 0xe0,
+ [REG_RGMII_1_CNTRL] = 0xec,
+ [REG_RGMII_2_CNTRL] = 0xf8,
+ [REG_LED_0_CNTRL] = 0x40,
+ [REG_LED_1_CNTRL] = 0x4c,
+ [REG_LED_2_CNTRL] = 0x58,
+};
+
+static const struct bcm_sf2_of_data bcm_sf2_7278_data = {
+ .type = BCM7278_DEVICE_ID,
+ .core_reg_align = 1,
+ .reg_offsets = bcm_sf2_7278_reg_offsets,
};
+static const struct of_device_id bcm_sf2_of_match[] = {
+ { .compatible = "brcm,bcm7445-switch-v4.0",
+ .data = &bcm_sf2_7445_data
+ },
+ { .compatible = "brcm,bcm7278-switch-v4.0",
+ .data = &bcm_sf2_7278_data
+ },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
+
static int bcm_sf2_sw_probe(struct platform_device *pdev)
{
const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
struct device_node *dn = pdev->dev.of_node;
+ const struct of_device_id *of_id = NULL;
+ const struct bcm_sf2_of_data *data;
struct b53_platform_data *pdata;
struct dsa_switch_ops *ops;
struct bcm_sf2_priv *priv;
@@ -1040,11 +1146,22 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
if (!pdata)
return -ENOMEM;
+ of_id = of_match_node(bcm_sf2_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ data = of_id->data;
+
+ /* Set SWITCH_REG register offsets and SWITCH_CORE align factor */
+ priv->type = data->type;
+ priv->reg_offsets = data->reg_offsets;
+ priv->core_reg_align = data->core_reg_align;
+
/* Auto-detection using standard registers will not work, so
* provide an indication of what kind of device we are for
* b53_common to work with
*/
- pdata->chip_id = BCM7445_DEVICE_ID;
+ pdata->chip_id = priv->type;
dev->pdata = pdata;
priv->dev = dev;
@@ -1055,6 +1172,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
spin_lock_init(&priv->indir_lock);
mutex_init(&priv->stats_mutex);
+ mutex_init(&priv->cfp.lock);
+
+ /* CFP rule #0 cannot be used for specific classifications; flag it as
+ * permanently used
+ */
+ set_bit(0, priv->cfp.used);
bcm_sf2_identify_ports(priv, dn->child);
@@ -1084,6 +1207,12 @@ static int bcm_sf2_sw_probe(struct platform_device *pdev)
return ret;
}
+ ret = bcm_sf2_cfp_rst(priv);
+ if (ret) {
+ pr_err("failed to reset CFP\n");
+ goto out_mdio;
+ }
+
/* Disable all interrupts and request them */
bcm_sf2_intr_disable(priv);
@@ -1190,11 +1319,6 @@ static int bcm_sf2_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(bcm_sf2_pm_ops,
bcm_sf2_suspend, bcm_sf2_resume);
-static const struct of_device_id bcm_sf2_of_match[] = {
- { .compatible = "brcm,bcm7445-switch-v4.0" },
- { /* sentinel */ },
-};
-MODULE_DEVICE_TABLE(of, bcm_sf2_of_match);
static struct platform_driver bcm_sf2_driver = {
.probe = bcm_sf2_sw_probe,
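
The TC2QOS loop added to bcm_sf2_port_setup() above packs one 3-bit queue number per priority (PRT_TO_QID_SHIFT is 3), and since it ORs i into slot i it programs an identity mapping. Worked out:

	/* reg |= i << (PRT_TO_QID_SHIFT * i), for i = 0..7:
	 *   0<<0 | 1<<3 | 2<<6 | 3<<9 | 4<<12 | 5<<15 | 6<<18 | 7<<21
	 *   == 0x00fac688, i.e. priority i is steered to queue i
	 */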
diff --git a/drivers/net/dsa/bcm_sf2.h b/drivers/net/dsa/bcm_sf2.h
index 4469267..7d3030e 100644
--- a/drivers/net/dsa/bcm_sf2.h
+++ b/drivers/net/dsa/bcm_sf2.h
@@ -52,6 +52,13 @@ struct bcm_sf2_port_status {
struct ethtool_eee eee;
};
+struct bcm_sf2_cfp_priv {
+ /* Mutex protecting concurrent accesses to the CFP registers */
+ struct mutex lock;
+ DECLARE_BITMAP(used, CFP_NUM_RULES);
+ unsigned int rules_cnt;
+};
+
struct bcm_sf2_priv {
/* Base registers, keep those in order with BCM_SF2_REGS_NAME */
void __iomem *core;
@@ -61,6 +68,11 @@ struct bcm_sf2_priv {
void __iomem *fcb;
void __iomem *acb;
+ /* Register offsets indirection tables */
+ u32 type;
+ const u16 *reg_offsets;
+ unsigned int core_reg_align;
+
/* spinlock protecting access to the indirect registers */
spinlock_t indir_lock;
@@ -95,6 +107,12 @@ struct bcm_sf2_priv {
struct device_node *master_mii_dn;
struct mii_bus *slave_mii_bus;
struct mii_bus *master_mii_bus;
+
+ /* Bitmask of ports needing BRCM tags */
+ unsigned int brcm_tag_mask;
+
+ /* CFP rules context */
+ struct bcm_sf2_cfp_priv cfp;
};
static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
@@ -104,6 +122,11 @@ static inline struct bcm_sf2_priv *bcm_sf2_to_priv(struct dsa_switch *ds)
return dev->priv;
}
+static inline u32 bcm_sf2_mangle_addr(struct bcm_sf2_priv *priv, u32 off)
+{
+ return off << priv->core_reg_align;
+}
+
#define SF2_IO_MACRO(name) \
static inline u32 name##_readl(struct bcm_sf2_priv *priv, u32 off) \
{ \
@@ -125,7 +148,7 @@ static inline u64 name##_readq(struct bcm_sf2_priv *priv, u32 off) \
{ \
u32 indir, dir; \
spin_lock(&priv->indir_lock); \
- dir = __raw_readl(priv->name + off); \
+ dir = name##_readl(priv, off); \
indir = reg_readl(priv, REG_DIR_DATA_READ); \
spin_unlock(&priv->indir_lock); \
return (u64)indir << 32 | dir; \
@@ -135,7 +158,7 @@ static inline void name##_writeq(struct bcm_sf2_priv *priv, u64 val, \
{ \
spin_lock(&priv->indir_lock); \
reg_writel(priv, upper_32_bits(val), REG_DIR_DATA_WRITE); \
- __raw_writel(lower_32_bits(val), priv->name + off); \
+ name##_writel(priv, lower_32_bits(val), off); \
spin_unlock(&priv->indir_lock); \
}
@@ -153,8 +176,28 @@ static inline void intrl2_##which##_mask_set(struct bcm_sf2_priv *priv, \
priv->irq##which##_mask |= (mask); \
} \
-SF2_IO_MACRO(core);
-SF2_IO_MACRO(reg);
+static inline u32 core_readl(struct bcm_sf2_priv *priv, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ return __raw_readl(priv->core + tmp);
+}
+
+static inline void core_writel(struct bcm_sf2_priv *priv, u32 val, u32 off)
+{
+ u32 tmp = bcm_sf2_mangle_addr(priv, off);
+ __raw_writel(val, priv->core + tmp);
+}
+
+static inline u32 reg_readl(struct bcm_sf2_priv *priv, u16 off)
+{
+ return __raw_readl(priv->reg + priv->reg_offsets[off]);
+}
+
+static inline void reg_writel(struct bcm_sf2_priv *priv, u32 val, u16 off)
+{
+ __raw_writel(val, priv->reg + priv->reg_offsets[off]);
+}
+
SF2_IO64_MACRO(core);
SF2_IO_MACRO(intrl2_0);
SF2_IO_MACRO(intrl2_1);
@@ -164,4 +207,11 @@ SF2_IO_MACRO(acb);
SWITCH_INTR_L2(0);
SWITCH_INTR_L2(1);
+/* RXNFC */
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc);
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv);
+
#endif /* __BCM_SF2_H */
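
With core_reg_align now factored into core_readl()/core_writel(), every SWITCH_CORE offset is shifted left by the alignment before being added to the mapped base, so the 7278 (align 1) sees doubled offsets. For instance (register value taken from bcm_sf2_regs.h):

	/* 7445: core_reg_align == 0, CORE_NEW_CTRL (0x0084) -> base + 0x0084
	 * 7278: core_reg_align == 1, CORE_NEW_CTRL (0x0084) -> base + 0x0108
	 */
	reg = core_readl(priv, CORE_NEW_CTRL);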
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
new file mode 100644
index 0000000..c71be3e
--- /dev/null
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -0,0 +1,613 @@
+/*
+ * Broadcom Starfighter 2 DSA switch CFP support
+ *
+ * Copyright (C) 2016, Broadcom
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/list.h>
+#include <net/dsa.h>
+#include <linux/ethtool.h>
+#include <linux/if_ether.h>
+#include <linux/in.h>
+#include <linux/bitmap.h>
+
+#include "bcm_sf2.h"
+#include "bcm_sf2_regs.h"
+
+struct cfp_udf_layout {
+ u8 slices[UDF_NUM_SLICES];
+ u32 mask_value;
+
+};
+
+/* UDF slices layout for a TCPv4/UDPv4 specification */
+static const struct cfp_udf_layout udf_tcpip4_layout = {
+ .slices = {
+ /* End of L2, byte offset 12, src IP[0:15] */
+ CFG_UDF_EOL2 | 6,
+ /* End of L2, byte offset 14, src IP[16:31] */
+ CFG_UDF_EOL2 | 7,
+ /* End of L2, byte offset 16, dst IP[0:15] */
+ CFG_UDF_EOL2 | 8,
+ /* End of L2, byte offset 18, dst IP[16:31] */
+ CFG_UDF_EOL2 | 9,
+ /* End of L3, byte offset 0, src port */
+ CFG_UDF_EOL3 | 0,
+ /* End of L3, byte offset 2, dst port */
+ CFG_UDF_EOL3 | 1,
+ 0, 0, 0
+ },
+ .mask_value = L3_FRAMING_MASK | IPPROTO_MASK | IP_FRAG,
+};
+
+static inline unsigned int bcm_sf2_get_num_udf_slices(const u8 *layout)
+{
+ unsigned int i, count = 0;
+
+ for (i = 0; i < UDF_NUM_SLICES; i++) {
+ if (layout[i] != 0)
+ count++;
+ }
+
+ return count;
+}
+
+static void bcm_sf2_cfp_udf_set(struct bcm_sf2_priv *priv,
+ unsigned int slice_num,
+ const u8 *layout)
+{
+ u32 offset = CORE_UDF_0_A_0_8_PORT_0 + slice_num * UDF_SLICE_OFFSET;
+ unsigned int i;
+
+ for (i = 0; i < UDF_NUM_SLICES; i++)
+ core_writel(priv, layout[i], offset + i * 4);
+}
+
+static int bcm_sf2_cfp_op(struct bcm_sf2_priv *priv, unsigned int op)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+ reg |= OP_STR_DONE | op;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & OP_STR_DONE))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline void bcm_sf2_cfp_rule_addr_set(struct bcm_sf2_priv *priv,
+ unsigned int addr)
+{
+ u32 reg;
+
+ WARN_ON(addr >= CFP_NUM_RULES);
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+ reg |= addr << XCESS_ADDR_SHIFT;
+ core_writel(priv, reg, CORE_CFP_ACC);
+}
+
+static inline unsigned int bcm_sf2_cfp_rule_size(struct bcm_sf2_priv *priv)
+{
+ /* Entry #0 is reserved */
+ return CFP_NUM_RULES - 1;
+}
+
+static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct ethtool_tcpip4_spec *v4_spec;
+ const struct cfp_udf_layout *layout;
+ unsigned int slice_num, rule_index;
+ unsigned int queue_num, port_num;
+ u8 ip_proto, ip_frag;
+ u8 num_udf;
+ u32 reg;
+ int ret;
+
+ /* Check for unsupported extensions */
+ if ((fs->flow_type & FLOW_EXT) &&
+ (fs->m_ext.vlan_etype || fs->m_ext.data[1]))
+ return -EINVAL;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ test_bit(fs->location, priv->cfp.used))
+ return -EBUSY;
+
+ if (fs->location != RX_CLS_LOC_ANY &&
+ fs->location > bcm_sf2_cfp_rule_size(priv))
+ return -EINVAL;
+
+ ip_frag = be32_to_cpu(fs->m_ext.data[0]);
+
+ /* We do not support discarding packets; check that the
+ * destination port is enabled and that we are within the
+ * number of ports supported by the switch
+ */
+ port_num = fs->ring_cookie / 8;
+
+ if (fs->ring_cookie == RX_CLS_FLOW_DISC ||
+ !(BIT(port_num) & ds->enabled_port_mask) ||
+ port_num >= priv->hw_params.num_ports)
+ return -EINVAL;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V4_FLOW:
+ ip_proto = IPPROTO_TCP;
+ v4_spec = &fs->h_u.tcp_ip4_spec;
+ break;
+ case UDP_V4_FLOW:
+ ip_proto = IPPROTO_UDP;
+ v4_spec = &fs->h_u.udp_ip4_spec;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* We only use one UDF slice for now */
+ slice_num = 1;
+ layout = &udf_tcpip4_layout;
+ num_udf = bcm_sf2_get_num_udf_slices(layout->slices);
+
+ /* Apply the UDF layout for this filter */
+ bcm_sf2_cfp_udf_set(priv, slice_num, layout->slices);
+
+ /* Apply to all packets received through this port */
+ core_writel(priv, BIT(port), CORE_CFP_DATA_PORT(7));
+
+ /* S-Tag status [31:30]
+ * C-Tag status [29:28]
+ * L2 framing [27:26]
+ * L3 framing [25:24]
+ * IP ToS [23:16]
+ * IP proto [15:08]
+ * IP Fragm [7]
+ * Non 1st frag [6]
+ * IP Authen [5]
+ * TTL range [4:3]
+ * PPPoE session [2]
+ * Reserved [1]
+ * UDF_Valid[8] [0]
+ */
+ core_writel(priv, v4_spec->tos << 16 | ip_proto << 8 | ip_frag << 7,
+ CORE_CFP_DATA_PORT(6));
+
+ /* UDF_Valid[7:0] [31:24]
+ * S-Tag [23:8]
+ * C-Tag [7:0]
+ */
+ core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_DATA_PORT(5));
+
+ /* C-Tag [31:24]
+ * UDF_n_A8 [23:8]
+ * UDF_n_A7 [7:0]
+ */
+ core_writel(priv, 0, CORE_CFP_DATA_PORT(4));
+
+ /* UDF_n_A7 [31:24]
+ * UDF_n_A6 [23:8]
+ * UDF_n_A5 [7:0]
+ */
+ core_writel(priv, be16_to_cpu(v4_spec->pdst) >> 8,
+ CORE_CFP_DATA_PORT(3));
+
+ /* UDF_n_A5 [31:24]
+ * UDF_n_A4 [23:8]
+ * UDF_n_A3 [7:0]
+ */
+ reg = (be16_to_cpu(v4_spec->pdst) & 0xff) << 24 |
+ (u32)be16_to_cpu(v4_spec->psrc) << 8 |
+ (be32_to_cpu(v4_spec->ip4dst) & 0x0000ff00) >> 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(2));
+
+ /* UDF_n_A3 [31:24]
+ * UDF_n_A2 [23:8]
+ * UDF_n_A1 [7:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4dst) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4dst) >> 16) << 8 |
+ (be32_to_cpu(v4_spec->ip4src) & 0x0000ff00) >> 8;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(1));
+
+ /* UDF_n_A1 [31:24]
+ * UDF_n_A0 [23:8]
+ * Reserved [7:4]
+ * Slice ID [3:2]
+ * Slice valid [1:0]
+ */
+ reg = (u32)(be32_to_cpu(v4_spec->ip4src) & 0xff) << 24 |
+ (u32)(be32_to_cpu(v4_spec->ip4src) >> 16) << 8 |
+ SLICE_NUM(slice_num) | SLICE_VALID;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+ /* Source port map match */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(7));
+
+ /* Mask with the specific layout for IPv4 packets */
+ core_writel(priv, layout->mask_value, CORE_CFP_MASK_PORT(6));
+
+ /* Mask all but valid UDFs */
+ core_writel(priv, GENMASK(num_udf - 1, 0) << 24, CORE_CFP_MASK_PORT(5));
+
+ /* Mask all */
+ core_writel(priv, 0, CORE_CFP_MASK_PORT(4));
+
+ /* All other UDFs should be matched with the filter */
+ core_writel(priv, 0xff, CORE_CFP_MASK_PORT(3));
+ core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(2));
+ core_writel(priv, 0xffffffff, CORE_CFP_MASK_PORT(1));
+ core_writel(priv, 0xffffff0f, CORE_CFP_MASK_PORT(0));
+
+ /* Locate the first rule available */
+ if (fs->location == RX_CLS_LOC_ANY)
+ rule_index = find_first_zero_bit(priv->cfp.used,
+ bcm_sf2_cfp_rule_size(priv));
+ else
+ rule_index = fs->location;
+
+ /* Insert into TCAM now */
+ bcm_sf2_cfp_rule_addr_set(priv, rule_index);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret) {
+ pr_err("TCAM entry at addr %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Replace the ARL-derived destination with the DST_MAP-derived one and
+ * define which port and queue this should be forwarded to.
+ *
+ * There is a small oddity: port 6 does not have a valid bit here,
+ * so we subtract one.
+ */
+ queue_num = fs->ring_cookie % 8;
+ if (port_num >= 7)
+ port_num -= 1;
+
+ reg = CHANGE_FWRD_MAP_IB_REP_ARL | BIT(port_num + DST_MAP_IB_SHIFT) |
+ CHANGE_TC | queue_num << NEW_TC_SHIFT;
+
+ core_writel(priv, reg, CORE_ACT_POL_DATA0);
+
+ /* Set classification ID that needs to be put in Broadcom tag */
+ core_writel(priv, rule_index << CHAIN_ID_SHIFT,
+ CORE_ACT_POL_DATA1);
+
+ core_writel(priv, 0, CORE_ACT_POL_DATA2);
+
+ /* Configure policer RAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | ACT_POL_RAM);
+ if (ret) {
+ pr_err("Policer entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Disable the policer */
+ core_writel(priv, POLICER_MODE_DISABLE, CORE_RATE_METER0);
+
+ /* Now the rate meter */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | RATE_METER_RAM);
+ if (ret) {
+ pr_err("Meter entry at %d failed\n", rule_index);
+ return ret;
+ }
+
+ /* Turn on CFP for this rule now */
+ reg = core_readl(priv, CORE_CFP_CTL_REG);
+ reg |= BIT(port);
+ core_writel(priv, reg, CORE_CFP_CTL_REG);
+
+ /* Flag the rule as being used and return it */
+ set_bit(rule_index, priv->cfp.used);
+ fs->location = rule_index;
+
+ return 0;
+}
+
+static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
+ u32 loc)
+{
+ int ret;
+ u32 reg;
+
+ /* Refuse to delete unused rules as well as the default reserved rule */
+ if (!test_bit(loc, priv->cfp.used) || loc == 0)
+ return -EINVAL;
+
+ /* Indicate which rule we want to read */
+ bcm_sf2_cfp_rule_addr_set(priv, loc);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ /* Clear its valid bits */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+ reg &= ~SLICE_VALID;
+ core_writel(priv, reg, CORE_CFP_DATA_PORT(0));
+
+ /* Write back this entry into the TCAM now */
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL);
+ if (ret)
+ return ret;
+
+ clear_bit(loc, priv->cfp.used);
+
+ return 0;
+}
+
+static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
+{
+ unsigned int i;
+
+ for (i = 0; i < sizeof(flow->m_u); i++)
+ flow->m_u.hdata[i] ^= 0xff;
+
+ flow->m_ext.vlan_etype ^= cpu_to_be16(~0);
+ flow->m_ext.vlan_tci ^= cpu_to_be16(~0);
+ flow->m_ext.data[0] ^= cpu_to_be32(~0);
+ flow->m_ext.data[1] ^= cpu_to_be32(~0);
+}
+
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rxnfc *nfc, bool search)
+{
+ struct ethtool_tcpip4_spec *v4_spec;
+ unsigned int queue_num;
+ u16 src_dst_port;
+ u32 reg, ipv4;
+ int ret;
+
+ if (!search) {
+ bcm_sf2_cfp_rule_addr_set(priv, nfc->fs.location);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | ACT_POL_RAM);
+ if (ret)
+ return ret;
+
+ reg = core_readl(priv, CORE_ACT_POL_DATA0);
+
+ ret = bcm_sf2_cfp_op(priv, OP_SEL_READ | TCAM_SEL);
+ if (ret)
+ return ret;
+ } else {
+ reg = core_readl(priv, CORE_ACT_POL_DATA0);
+ }
+
+ /* Extract the destination port */
+ nfc->fs.ring_cookie = fls((reg >> DST_MAP_IB_SHIFT) &
+ DST_MAP_IB_MASK) - 1;
+
+ /* There is no Port 6, so we compensate for that here */
+ if (nfc->fs.ring_cookie >= 6)
+ nfc->fs.ring_cookie++;
+ nfc->fs.ring_cookie *= 8;
+
+ /* Extract the destination queue */
+ queue_num = (reg >> NEW_TC_SHIFT) & NEW_TC_MASK;
+ nfc->fs.ring_cookie += queue_num;
+
+ /* Extract the IP protocol */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(6));
+ switch ((reg & IPPROTO_MASK) >> IPPROTO_SHIFT) {
+ case IPPROTO_TCP:
+ nfc->fs.flow_type = TCP_V4_FLOW;
+ v4_spec = &nfc->fs.h_u.tcp_ip4_spec;
+ break;
+ case IPPROTO_UDP:
+ nfc->fs.flow_type = UDP_V4_FLOW;
+ v4_spec = &nfc->fs.h_u.udp_ip4_spec;
+ break;
+ default:
+ /* Clear to exit the search process */
+ if (search)
+ core_readl(priv, CORE_CFP_DATA_PORT(7));
+ return -EINVAL;
+ }
+
+ v4_spec->tos = (reg >> 16) & 0xff;
+ nfc->fs.m_ext.data[0] = cpu_to_be32((reg >> 7) & 1);
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(3));
+ /* src port [15:8] */
+ src_dst_port = reg << 8;
+
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(2));
+ /* src port [7:0] */
+ src_dst_port |= (reg >> 24);
+
+ v4_spec->pdst = cpu_to_be16(src_dst_port);
+ nfc->fs.m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
+ v4_spec->psrc = cpu_to_be16((u16)(reg >> 8));
+ nfc->fs.m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);
+
+ /* IPv4 dst [15:8] */
+ ipv4 = (u16)(reg & 0xff) << 8;
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(1));
+ /* IPv4 dst [31:16] */
+ ipv4 |= (u32)((reg >> 8) & 0xffffff) << 16;
+ /* IPv4 dst [7:0] */
+ ipv4 |= (reg >> 24) & 0xff;
+ v4_spec->ip4dst = cpu_to_be32(ipv4);
+ nfc->fs.m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);
+
+ /* IPv4 src [15:8] */
+ ipv4 = (u16)(reg & 0xff) << 8;
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(0));
+
+ if (!(reg & SLICE_VALID))
+ return -EINVAL;
+
+ /* IPv4 src [7:0] */
+ ipv4 |= (reg >> 24) & 0xff;
+ /* IPv4 src [31:16] */
+ ipv4 |= ((reg >> 8) & 0xffffff) << 16;
+ v4_spec->ip4src = cpu_to_be32(ipv4);
+ nfc->fs.m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);
+
+ /* Read last to avoid the next entry clobbering the results during
+ * search operations
+ */
+ reg = core_readl(priv, CORE_CFP_DATA_PORT(7));
+ if (!(reg & 1 << port))
+ return -EINVAL;
+
+ bcm_sf2_invert_masks(&nfc->fs);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+ return 0;
+}
+
+/* We implement the search doing a TCAM search operation */
+static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
+ int port, struct ethtool_rxnfc *nfc,
+ u32 *rule_locs)
+{
+ unsigned int index = 1, rules_cnt = 0;
+ int ret;
+ u32 reg;
+
+ /* OP_STR_DONE is not self-clearing for search operations, so we
+ * cannot use bcm_sf2_cfp_op() here: it polls for OP_STR_DONE to
+ * clear, which only happens once the entire search operation is
+ * over.
+ */
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg &= ~(XCESS_ADDR_MASK << XCESS_ADDR_SHIFT);
+ reg |= index << XCESS_ADDR_SHIFT;
+ reg &= ~(OP_SEL_MASK | RAM_SEL_MASK);
+ reg |= OP_SEL_SEARCH | TCAM_SEL | OP_STR_DONE;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ /* Wait for results to be ready */
+ reg = core_readl(priv, CORE_CFP_ACC);
+
+ /* Extract the address we are searching */
+ index = reg >> XCESS_ADDR_SHIFT;
+ index &= XCESS_ADDR_MASK;
+
+ /* We have a valid search result, so flag it accordingly */
+ if (reg & SEARCH_STS) {
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc, true);
+ if (ret)
+ continue;
+
+ rule_locs[rules_cnt] = index;
+ rules_cnt++;
+ }
+
+ /* Search is over, break out */
+ if (!(reg & OP_STR_DONE))
+ break;
+
+ } while (index < CFP_NUM_RULES);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+ nfc->rule_cnt = rules_cnt;
+
+ return 0;
+}
+
+int bcm_sf2_get_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_GRXCLSRLCNT:
+ /* Subtract the default, unusable rule */
+ nfc->rule_cnt = bitmap_weight(priv->cfp.used,
+ CFP_NUM_RULES) - 1;
+ /* We support specifying rule locations */
+ nfc->data |= RX_CLS_LOC_SPECIAL;
+ break;
+ case ETHTOOL_GRXCLSRULE:
+ ret = bcm_sf2_cfp_rule_get(priv, port, nfc, false);
+ break;
+ case ETHTOOL_GRXCLSRLALL:
+ ret = bcm_sf2_cfp_rule_get_all(priv, port, nfc, rule_locs);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ return ret;
+}
+
+int bcm_sf2_set_rxnfc(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ int ret = 0;
+
+ mutex_lock(&priv->cfp.lock);
+
+ switch (nfc->cmd) {
+ case ETHTOOL_SRXCLSRLINS:
+ ret = bcm_sf2_cfp_rule_set(ds, port, &nfc->fs);
+ break;
+
+ case ETHTOOL_SRXCLSRLDEL:
+ ret = bcm_sf2_cfp_rule_del(priv, port, nfc->fs.location);
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ mutex_unlock(&priv->cfp.lock);
+
+ return ret;
+}
+
+int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
+{
+ unsigned int timeout = 1000;
+ u32 reg;
+
+ reg = core_readl(priv, CORE_CFP_ACC);
+ reg |= TCAM_RESET;
+ core_writel(priv, reg, CORE_CFP_ACC);
+
+ do {
+ reg = core_readl(priv, CORE_CFP_ACC);
+ if (!(reg & TCAM_RESET))
+ break;
+
+ cpu_relax();
+ } while (timeout--);
+
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ return 0;
+}
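
bcm_sf2_cfp_rule_set() consumes the same ethtool_rx_flow_spec that user space passes through the ETHTOOL_SRXCLSRLINS ioctl. A hypothetical TCPv4 rule, with every value purely illustrative, steering traffic from 192.168.1.1 with destination port 5001 to switch port 2, queue 1:

	struct ethtool_rx_flow_spec fs = {
		.flow_type   = TCP_V4_FLOW,
		.ring_cookie = 2 * 8 + 1,	/* port 2, queue 1 */
		.location    = RX_CLS_LOC_ANY,	/* let the driver pick a slot */
	};

	fs.h_u.tcp_ip4_spec.ip4src = cpu_to_be32(0xc0a80101);	/* 192.168.1.1 */
	fs.h_u.tcp_ip4_spec.pdst   = cpu_to_be16(5001);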
diff --git a/drivers/net/dsa/bcm_sf2_regs.h b/drivers/net/dsa/bcm_sf2_regs.h
index 838fe37..2605245 100644
--- a/drivers/net/dsa/bcm_sf2_regs.h
+++ b/drivers/net/dsa/bcm_sf2_regs.h
@@ -12,22 +12,36 @@
#define __BCM_SF2_REGS_H
/* Register set relative to 'REG' */
-#define REG_SWITCH_CNTRL 0x00
-#define MDIO_MASTER_SEL (1 << 0)
-#define REG_SWITCH_STATUS 0x04
-#define REG_DIR_DATA_WRITE 0x08
-#define REG_DIR_DATA_READ 0x0C
+enum bcm_sf2_reg_offs {
+ REG_SWITCH_CNTRL = 0,
+ REG_SWITCH_STATUS,
+ REG_DIR_DATA_WRITE,
+ REG_DIR_DATA_READ,
+ REG_SWITCH_REVISION,
+ REG_PHY_REVISION,
+ REG_SPHY_CNTRL,
+ REG_RGMII_0_CNTRL,
+ REG_RGMII_1_CNTRL,
+ REG_RGMII_2_CNTRL,
+ REG_LED_0_CNTRL,
+ REG_LED_1_CNTRL,
+ REG_LED_2_CNTRL,
+ REG_SWITCH_REG_MAX,
+};
+
+/* Relative to REG_SWITCH_CNTRL */
+#define MDIO_MASTER_SEL (1 << 0)
-#define REG_SWITCH_REVISION 0x18
+/* Relative to REG_SWITCH_REVISION */
#define SF2_REV_MASK 0xffff
#define SWITCH_TOP_REV_SHIFT 16
#define SWITCH_TOP_REV_MASK 0xffff
-#define REG_PHY_REVISION 0x1C
+/* Relative to REG_PHY_REVISION */
#define PHY_REVISION_MASK 0xffff
-#define REG_SPHY_CNTRL 0x2C
+/* Relative to REG_SPHY_CNTRL */
#define IDDQ_BIAS (1 << 0)
#define EXT_PWR_DOWN (1 << 1)
#define FORCE_DLL_EN (1 << 2)
@@ -37,13 +51,8 @@
#define PHY_PHYAD_SHIFT 8
#define PHY_PHYAD_MASK 0x1F
-#define REG_RGMII_0_BASE 0x34
-#define REG_RGMII_CNTRL 0x00
-#define REG_RGMII_IB_STATUS 0x04
-#define REG_RGMII_RX_CLOCK_DELAY_CNTRL 0x08
-#define REG_RGMII_CNTRL_SIZE 0x0C
-#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_BASE + \
- ((x) * REG_RGMII_CNTRL_SIZE))
+#define REG_RGMII_CNTRL_P(x) (REG_RGMII_0_CNTRL + (x))
+
/* Relative to REG_RGMII_CNTRL */
#define RGMII_MODE_EN (1 << 0)
#define ID_MODE_DIS (1 << 1)
@@ -61,8 +70,8 @@
#define LPI_COUNT_SHIFT 9
#define LPI_COUNT_MASK 0x3F
-#define REG_LED_CNTRL_BASE 0x90
-#define REG_LED_CNTRL(x) (REG_LED_CNTRL_BASE + (x) * 4)
+#define REG_LED_CNTRL(x) (REG_LED_0_CNTRL + (x))
+
#define SPDLNK_SRC_SEL (1 << 24)
/* Register set relative to 'INTRL2_0' and 'INTRL2_1' */
@@ -125,6 +134,9 @@
#define GMII_SPEED_UP_2G (1 << 6)
#define MII_SW_OR (1 << 7)
+/* Alternate layout for e.g. the 7278 */
+#define CORE_STS_OVERRIDE_IMP2 0x39040
+
#define CORE_NEW_CTRL 0x00084
#define IP_MC (1 << 0)
#define OUTRANGEERR_DISCARD (1 << 1)
@@ -142,6 +154,7 @@
#define SW_LEARN_CNTL(x) (1 << (x))
#define CORE_STS_OVERRIDE_GMIIP_PORT(x) (0x160 + (x) * 4)
+#define CORE_STS_OVERRIDE_GMIIP2_PORT(x) (0x39000 + (x) * 8)
#define LINK_STS (1 << 0)
#define DUPLX_MODE (1 << 1)
#define SPEED_SHIFT 2
@@ -225,6 +238,10 @@
#define P_TXQ_PSM_VDD(x) (P_TXQ_PSM_VDD_MASK << \
((x) * P_TXQ_PSM_VDD_SHIFT))
+#define CORE_PORT_TC2_QOS_MAP_PORT(x) (0xc1c0 + ((x) * 0x10))
+#define PRT_TO_QID_MASK 0x3
+#define PRT_TO_QID_SHIFT 3
+
#define CORE_PORT_VLAN_CTL_PORT(x) (0xc400 + ((x) * 0x8))
#define PORT_VLAN_CTRL_MASK 0x1ff
@@ -238,4 +255,150 @@
#define CORE_EEE_EN_CTRL 0x24800
#define CORE_EEE_LPI_INDICATE 0x24810
+#define CORE_CFP_ACC 0x28000
+#define OP_STR_DONE (1 << 0)
+#define OP_SEL_SHIFT 1
+#define OP_SEL_READ (1 << OP_SEL_SHIFT)
+#define OP_SEL_WRITE (2 << OP_SEL_SHIFT)
+#define OP_SEL_SEARCH (4 << OP_SEL_SHIFT)
+#define OP_SEL_MASK (7 << OP_SEL_SHIFT)
+#define CFP_RAM_CLEAR (1 << 4)
+#define RAM_SEL_SHIFT 10
+#define TCAM_SEL (1 << RAM_SEL_SHIFT)
+#define ACT_POL_RAM (2 << RAM_SEL_SHIFT)
+#define RATE_METER_RAM (4 << RAM_SEL_SHIFT)
+#define GREEN_STAT_RAM (8 << RAM_SEL_SHIFT)
+#define YELLOW_STAT_RAM (16 << RAM_SEL_SHIFT)
+#define RED_STAT_RAM (24 << RAM_SEL_SHIFT)
+#define RAM_SEL_MASK (0x1f << RAM_SEL_SHIFT)
+#define TCAM_RESET (1 << 15)
+#define XCESS_ADDR_SHIFT 16
+#define XCESS_ADDR_MASK 0xff
+#define SEARCH_STS (1 << 27)
+#define RD_STS_SHIFT 28
+#define RD_STS_TCAM (1 << RD_STS_SHIFT)
+#define RD_STS_ACT_POL_RAM (2 << RD_STS_SHIFT)
+#define RD_STS_RATE_METER_RAM (4 << RD_STS_SHIFT)
+#define RD_STS_STAT_RAM (8 << RD_STS_SHIFT)
+
+#define CORE_CFP_RATE_METER_GLOBAL_CTL 0x28010
+
+#define CORE_CFP_DATA_PORT_0 0x28040
+#define CORE_CFP_DATA_PORT(x) (CORE_CFP_DATA_PORT_0 + \
+ (x) * 0x10)
+
+/* UDF_DATA7 */
+#define L3_FRAMING_SHIFT 24
+#define L3_FRAMING_MASK (0x3 << L3_FRAMING_SHIFT)
+#define IPPROTO_SHIFT 8
+#define IPPROTO_MASK (0xff << IPPROTO_SHIFT)
+#define IP_FRAG (1 << 7)
+
+/* UDF_DATA0 */
+#define SLICE_VALID 3
+#define SLICE_NUM_SHIFT 2
+#define SLICE_NUM(x) ((x) << SLICE_NUM_SHIFT)
+
+#define CORE_CFP_MASK_PORT_0 0x280c0
+
+#define CORE_CFP_MASK_PORT(x) (CORE_CFP_MASK_PORT_0 + \
+ (x) * 0x10)
+
+#define CORE_ACT_POL_DATA0 0x28140
+#define VLAN_BYP (1 << 0)
+#define EAP_BYP (1 << 1)
+#define STP_BYP (1 << 2)
+#define REASON_CODE_SHIFT 3
+#define REASON_CODE_MASK 0x3f
+#define LOOP_BK_EN (1 << 9)
+#define NEW_TC_SHIFT 10
+#define NEW_TC_MASK 0x7
+#define CHANGE_TC (1 << 13)
+#define DST_MAP_IB_SHIFT 14
+#define DST_MAP_IB_MASK 0x1ff
+#define CHANGE_FWRD_MAP_IB_SHIFT 24
+#define CHANGE_FWRD_MAP_IB_MASK 0x3
+#define CHANGE_FWRD_MAP_IB_NO_DEST (0 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REM_ARL (1 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_REP_ARL (2 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define CHANGE_FWRD_MAP_IB_ADD_DST (3 << CHANGE_FWRD_MAP_IB_SHIFT)
+#define NEW_DSCP_IB_SHIFT 26
+#define NEW_DSCP_IB_MASK 0x3f
+
+#define CORE_ACT_POL_DATA1 0x28150
+#define CHANGE_DSCP_IB (1 << 0)
+#define DST_MAP_OB_SHIFT 1
+#define DST_MAP_OB_MASK 0x3ff
+#define CHANGE_FWRD_MAP_OB_SHIFT 11
+#define CHANGE_FWRD_MAP_OB_MASK 0x3
+#define NEW_DSCP_OB_SHIFT 13
+#define NEW_DSCP_OB_MASK 0x3f
+#define CHANGE_DSCP_OB (1 << 19)
+#define CHAIN_ID_SHIFT 20
+#define CHAIN_ID_MASK 0xff
+#define CHANGE_COLOR (1 << 28)
+#define NEW_COLOR_SHIFT 29
+#define NEW_COLOR_MASK 0x3
+#define NEW_COLOR_GREEN (0 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_YELLOW (1 << NEW_COLOR_SHIFT)
+#define NEW_COLOR_RED (2 << NEW_COLOR_SHIFT)
+#define RED_DEFAULT (1 << 31)
+
+#define CORE_ACT_POL_DATA2 0x28160
+#define MAC_LIMIT_BYPASS (1 << 0)
+#define CHANGE_TC_O (1 << 1)
+#define NEW_TC_O_SHIFT 2
+#define NEW_TC_O_MASK 0x7
+#define SPCP_RMK_DISABLE (1 << 5)
+#define CPCP_RMK_DISABLE (1 << 6)
+#define DEI_RMK_DISABLE (1 << 7)
+
+#define CORE_RATE_METER0 0x28180
+#define COLOR_MODE (1 << 0)
+#define POLICER_ACTION (1 << 1)
+#define COUPLING_FLAG (1 << 2)
+#define POLICER_MODE_SHIFT 3
+#define POLICER_MODE_MASK 0x3
+#define POLICER_MODE_RFC2698 (0 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_RFC4115 (1 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_MEF (2 << POLICER_MODE_SHIFT)
+#define POLICER_MODE_DISABLE (3 << POLICER_MODE_SHIFT)
+
+#define CORE_RATE_METER1 0x28190
+#define EIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER2 0x281a0
+#define EIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER3 0x281b0
+#define EIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_RATE_METER4 0x281c0
+#define CIR_TK_BKT_MASK 0x7fffff
+
+#define CORE_RATE_METER5 0x281d0
+#define CIR_BKT_SIZE_MASK 0xfffff
+
+#define CORE_RATE_METER6 0x281e0
+#define CIR_REF_CNT_MASK 0x7ffff
+
+#define CORE_CFP_CTL_REG 0x28400
+#define CFP_EN_MAP_MASK 0x1ff
+
+/* IPv4 slices, 3 of them */
+#define CORE_UDF_0_A_0_8_PORT_0 0x28440
+#define CFG_UDF_OFFSET_MASK 0x1f
+#define CFG_UDF_OFFSET_BASE_SHIFT 5
+#define CFG_UDF_SOF (0 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL2 (2 << CFG_UDF_OFFSET_BASE_SHIFT)
+#define CFG_UDF_EOL3 (3 << CFG_UDF_OFFSET_BASE_SHIFT)
+
+/* Number of slices for IPv4, IPv6 and non-IP */
+#define UDF_NUM_SLICES 9
+
+/* Spacing between different slices */
+#define UDF_SLICE_OFFSET 0x40
+
+#define CFP_NUM_RULES 256
+
#endif /* __BCM_SF2_REGS_H */
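
Composed from the fields above, the CORE_CFP_ACC word for, say, a TCAM write of rule 5 (what bcm_sf2_cfp_rule_addr_set() followed by bcm_sf2_cfp_op(priv, OP_SEL_WRITE | TCAM_SEL) ends up issuing) looks like:

	u32 cmd = (5 << XCESS_ADDR_SHIFT)	/* rule address */
		| OP_SEL_WRITE | TCAM_SEL	/* write the TCAM entry */
		| OP_STR_DONE;			/* start; hardware clears when done */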
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index eea8e01..bf38537 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -222,26 +222,62 @@ int mv88e6xxx_write(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val)
return 0;
}
+static int mv88e6165_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
+{
+ return mv88e6xxx_read(chip, addr, reg, val);
+}
+
+static int mv88e6165_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
+{
+ return mv88e6xxx_write(chip, addr, reg, val);
+}
+
+static struct mii_bus *mv88e6xxx_default_mdio_bus(struct mv88e6xxx_chip *chip)
+{
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+
+ mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
+ list);
+ if (!mdio_bus)
+ return NULL;
+
+ return mdio_bus->bus;
+}
+
static int mv88e6xxx_phy_read(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 *val)
{
int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
if (!chip->info->ops->phy_read)
return -EOPNOTSUPP;
- return chip->info->ops->phy_read(chip, addr, reg, val);
+ return chip->info->ops->phy_read(chip, bus, addr, reg, val);
}
static int mv88e6xxx_phy_write(struct mv88e6xxx_chip *chip, int phy,
int reg, u16 val)
{
int addr = phy; /* PHY devices addresses start at 0x0 */
+ struct mii_bus *bus;
+
+ bus = mv88e6xxx_default_mdio_bus(chip);
+ if (!bus)
+ return -EOPNOTSUPP;
if (!chip->info->ops->phy_write)
return -EOPNOTSUPP;
- return chip->info->ops->phy_write(chip, addr, reg, val);
+ return chip->info->ops->phy_write(chip, bus, addr, reg, val);
}
static int mv88e6xxx_phy_page_get(struct mv88e6xxx_chip *chip, int phy, u8 page)
@@ -611,8 +647,9 @@ static void mv88e6xxx_ppu_state_destroy(struct mv88e6xxx_chip *chip)
del_timer_sync(&chip->ppu_timer);
}
-static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 *val)
+static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
int err;
@@ -625,8 +662,9 @@ static int mv88e6xxx_phy_ppu_read(struct mv88e6xxx_chip *chip, int addr,
return err;
}
-static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip, int addr,
- int reg, u16 val)
+static int mv88e6xxx_phy_ppu_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
{
int err;
@@ -664,6 +702,11 @@ static bool mv88e6xxx_6320_family(struct mv88e6xxx_chip *chip)
return chip->info->family == MV88E6XXX_FAMILY_6320;
}
+static bool mv88e6xxx_6341_family(struct mv88e6xxx_chip *chip)
+{
+ return chip->info->family == MV88E6XXX_FAMILY_6341;
+}
+
static bool mv88e6xxx_6351_family(struct mv88e6xxx_chip *chip)
{
return chip->info->family == MV88E6XXX_FAMILY_6351;
@@ -1209,8 +1252,8 @@ static int _mv88e6xxx_atu_remove(struct mv88e6xxx_chip *chip, u16 fid,
static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
{
- struct net_device *bridge = chip->ports[port].bridge_dev;
struct dsa_switch *ds = chip->ds;
+ struct net_device *bridge = ds->ports[port].bridge_dev;
u16 output_ports = 0;
int i;
@@ -1220,7 +1263,7 @@ static int _mv88e6xxx_port_based_vlan_map(struct mv88e6xxx_chip *chip, int port)
} else {
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
/* allow sending frames to every group member */
- if (bridge && chip->ports[i].bridge_dev == bridge)
+ if (bridge && ds->ports[i].bridge_dev == bridge)
output_ports |= BIT(i);
/* allow sending frames to CPU port and DSA link(s) */
@@ -1688,7 +1731,8 @@ static int _mv88e6xxx_vtu_new(struct mv88e6xxx_chip *chip, u16 vid,
: GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
if (mv88e6xxx_6097_family(chip) || mv88e6xxx_6165_family(chip) ||
- mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip)) {
+ mv88e6xxx_6351_family(chip) || mv88e6xxx_6352_family(chip) ||
+ mv88e6xxx_6341_family(chip)) {
struct mv88e6xxx_vtu_entry vstp;
/* Adding a VTU entry requires a valid STU entry. As VSTP is not
@@ -1782,17 +1826,17 @@ static int mv88e6xxx_port_check_hw_vlan(struct dsa_switch *ds, int port,
GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
continue;
- if (chip->ports[i].bridge_dev ==
- chip->ports[port].bridge_dev)
+ if (ds->ports[i].bridge_dev ==
+ ds->ports[port].bridge_dev)
break; /* same bridge, check next VLAN */
- if (!chip->ports[i].bridge_dev)
+ if (!ds->ports[i].bridge_dev)
continue;
netdev_warn(ds->ports[port].netdev,
"hardware VLAN %d already used by %s\n",
vlan.vid,
- netdev_name(chip->ports[i].bridge_dev));
+ netdev_name(ds->ports[i].bridge_dev));
err = -EOPNOTSUPP;
goto unlock;
}
@@ -2282,18 +2326,16 @@ static int mv88e6xxx_port_fdb_dump(struct dsa_switch *ds, int port,
}
static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+ struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
int i, err = 0;
mutex_lock(&chip->reg_lock);
- /* Assign the bridge and remap each port's VLANTable */
- chip->ports[port].bridge_dev = bridge;
-
+ /* Remap each port's VLANTable */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i) {
- if (chip->ports[i].bridge_dev == bridge) {
+ if (ds->ports[i].bridge_dev == br) {
err = _mv88e6xxx_port_based_vlan_map(chip, i);
if (err)
break;
@@ -2305,19 +2347,17 @@ static int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port,
return err;
}
-static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port)
+static void mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port,
+ struct net_device *br)
{
struct mv88e6xxx_chip *chip = ds->priv;
- struct net_device *bridge = chip->ports[port].bridge_dev;
int i;
mutex_lock(&chip->reg_lock);
- /* Unassign the bridge and remap each port's VLANTable */
- chip->ports[port].bridge_dev = NULL;
-
+ /* Remap each port's VLANTable */
for (i = 0; i < mv88e6xxx_num_ports(chip); ++i)
- if (i == port || chip->ports[i].bridge_dev == bridge)
+ if (i == port || ds->ports[i].bridge_dev == br)
if (_mv88e6xxx_port_based_vlan_map(chip, i))
netdev_warn(ds->ports[i].netdev,
"failed to remap\n");
@@ -2543,7 +2583,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
mv88e6xxx_6095_family(chip) || mv88e6xxx_6320_family(chip) ||
- mv88e6xxx_6185_family(chip))
+ mv88e6xxx_6185_family(chip) || mv88e6xxx_6341_family(chip))
reg = PORT_CONTROL_2_MAP_DA;
if (mv88e6xxx_6095_family(chip) || mv88e6xxx_6185_family(chip)) {
@@ -2597,7 +2637,7 @@ static int mv88e6xxx_setup_port(struct mv88e6xxx_chip *chip, int port)
if (mv88e6xxx_6352_family(chip) || mv88e6xxx_6351_family(chip) ||
mv88e6xxx_6165_family(chip) || mv88e6xxx_6097_family(chip) ||
- mv88e6xxx_6320_family(chip)) {
+ mv88e6xxx_6320_family(chip) || mv88e6xxx_6341_family(chip)) {
/* Port ATU control: disable limiting the number of
* address database entries that this port is allowed
* to use.
@@ -2821,7 +2861,7 @@ static int mv88e6xxx_setup(struct dsa_switch *ds)
int i;
chip->ds = ds;
- ds->slave_mii_bus = chip->mdio_bus;
+ ds->slave_mii_bus = mv88e6xxx_default_mdio_bus(chip);
mutex_lock(&chip->reg_lock);
@@ -2878,15 +2918,16 @@ static int mv88e6xxx_set_addr(struct dsa_switch *ds, u8 *addr)
static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
{
- struct mv88e6xxx_chip *chip = bus->priv;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
u16 val;
int err;
- if (phy >= mv88e6xxx_num_ports(chip))
- return 0xffff;
+ if (!chip->info->ops->phy_read)
+ return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_read(chip, phy, reg, &val);
+ err = chip->info->ops->phy_read(chip, bus, phy, reg, &val);
mutex_unlock(&chip->reg_lock);
return err ? err : val;
@@ -2894,34 +2935,39 @@ static int mv88e6xxx_mdio_read(struct mii_bus *bus, int phy, int reg)
static int mv88e6xxx_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
{
- struct mv88e6xxx_chip *chip = bus->priv;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
+ struct mv88e6xxx_chip *chip = mdio_bus->chip;
int err;
- if (phy >= mv88e6xxx_num_ports(chip))
- return 0xffff;
+ if (!chip->info->ops->phy_write)
+ return -EOPNOTSUPP;
mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_write(chip, phy, reg, val);
+ err = chip->info->ops->phy_write(chip, bus, phy, reg, val);
mutex_unlock(&chip->reg_lock);
return err;
}
static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
- struct device_node *np)
+ struct device_node *np,
+ bool external)
{
static int index;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
struct mii_bus *bus;
int err;
- if (np)
- chip->mdio_np = of_get_child_by_name(np, "mdio");
-
- bus = devm_mdiobus_alloc(chip->dev);
+ bus = devm_mdiobus_alloc_size(chip->dev, sizeof(*mdio_bus));
if (!bus)
return -ENOMEM;
- bus->priv = (void *)chip;
+ mdio_bus = bus->priv;
+ mdio_bus->bus = bus;
+ mdio_bus->chip = chip;
+ INIT_LIST_HEAD(&mdio_bus->list);
+ mdio_bus->external = external;
+
if (np) {
bus->name = np->full_name;
snprintf(bus->id, MII_BUS_ID_SIZE, "%s", np->full_name);
@@ -2934,183 +2980,73 @@ static int mv88e6xxx_mdio_register(struct mv88e6xxx_chip *chip,
bus->write = mv88e6xxx_mdio_write;
bus->parent = chip->dev;
- if (chip->mdio_np)
- err = of_mdiobus_register(bus, chip->mdio_np);
+ if (np)
+ err = of_mdiobus_register(bus, np);
else
err = mdiobus_register(bus);
if (err) {
dev_err(chip->dev, "Cannot register MDIO bus (%d)\n", err);
- goto out;
+ return err;
}
- chip->mdio_bus = bus;
-
- return 0;
-
-out:
- if (chip->mdio_np)
- of_node_put(chip->mdio_np);
-
- return err;
-}
-
-static void mv88e6xxx_mdio_unregister(struct mv88e6xxx_chip *chip)
-
-{
- struct mii_bus *bus = chip->mdio_bus;
-
- mdiobus_unregister(bus);
-
- if (chip->mdio_np)
- of_node_put(chip->mdio_np);
-}
-#ifdef CONFIG_NET_DSA_HWMON
-
-static int mv88e61xx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- u16 val;
- int ret;
-
- *temp = 0;
-
- mutex_lock(&chip->reg_lock);
-
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x6);
- if (ret < 0)
- goto error;
-
- /* Enable temperature sensor */
- ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
- if (ret < 0)
- goto error;
-
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val | (1 << 5));
- if (ret < 0)
- goto error;
-
- /* Wait for temperature to stabilize */
- usleep_range(10000, 12000);
-
- ret = mv88e6xxx_phy_read(chip, 0x0, 0x1a, &val);
- if (ret < 0)
- goto error;
-
- /* Disable temperature sensor */
- ret = mv88e6xxx_phy_write(chip, 0x0, 0x1a, val & ~(1 << 5));
- if (ret < 0)
- goto error;
-
- *temp = ((val & 0x1f) - 5) * 5;
-
-error:
- mv88e6xxx_phy_write(chip, 0x0, 0x16, 0x0);
- mutex_unlock(&chip->reg_lock);
- return ret;
-}
-
-static int mv88e63xx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- *temp = 0;
-
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 27, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
-
- *temp = (val & 0xff) - 25;
+ if (external)
+ list_add_tail(&mdio_bus->list, &chip->mdios);
+ else
+ list_add(&mdio_bus->list, &chip->mdios);
return 0;
}
-static int mv88e6xxx_get_temp(struct dsa_switch *ds, int *temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP))
- return -EOPNOTSUPP;
-
- if (mv88e6xxx_6320_family(chip) || mv88e6xxx_6352_family(chip))
- return mv88e63xx_get_temp(ds, temp);
-
- return mv88e61xx_get_temp(ds, temp);
-}
+static const struct of_device_id mv88e6xxx_mdio_external_match[] = {
+ { .compatible = "marvell,mv88e6xxx-mdio-external",
+ .data = (void *)true },
+ { },
+};
-static int mv88e6xxx_get_temp_limit(struct dsa_switch *ds, int *temp)
+static int mv88e6xxx_mdios_register(struct mv88e6xxx_chip *chip,
+ struct device_node *np)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- *temp = 0;
+ const struct of_device_id *match;
+ struct device_node *child;
+ int err;
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
+ /* Always register one mdio bus for the internal/default mdio
+	 * bus. This may be represented in the device tree, but is
+ * optional.
+ */
+ child = of_get_child_by_name(np, "mdio");
+ err = mv88e6xxx_mdio_register(chip, child, false);
+ if (err)
+ return err;
- *temp = (((val >> 8) & 0x1f) * 5) - 25;
+ /* Walk the device tree, and see if there are any other nodes
+ * which say they are compatible with the external mdio
+ * bus.
+ */
+ for_each_available_child_of_node(np, child) {
+ match = of_match_node(mv88e6xxx_mdio_external_match, child);
+ if (match) {
+ err = mv88e6xxx_mdio_register(chip, child, true);
+ if (err)
+ return err;
+ }
+ }
return 0;
}
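
The ordering above is deliberate: list_add() keeps the internal bus at the head of chip->mdios while list_add_tail() appends external ones, presumably so the default bus can always be found first. A minimal sketch of such a lookup, assuming the struct mv88e6xxx_mdio_bus added later in this patch (the helper itself is hypothetical, not part of the patch):

static struct mii_bus *example_default_mdio_bus(struct mv88e6xxx_chip *chip)
{
	struct mv88e6xxx_mdio_bus *mdio_bus;

	/* mv88e6xxx_mdios_register() puts the internal bus at the head */
	if (list_empty(&chip->mdios))
		return NULL;

	mdio_bus = list_first_entry(&chip->mdios, struct mv88e6xxx_mdio_bus,
				    list);
	return mdio_bus->bus;
}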
-static int mv88e6xxx_set_temp_limit(struct dsa_switch *ds, int temp)
-{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int err;
+static void mv88e6xxx_mdios_unregister(struct mv88e6xxx_chip *chip)
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- mutex_lock(&chip->reg_lock);
- err = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- if (err)
- goto unlock;
- temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
- err = mv88e6xxx_phy_page_write(chip, phy, 6, 26,
- (val & 0xe0ff) | (temp << 8));
-unlock:
- mutex_unlock(&chip->reg_lock);
-
- return err;
-}
-
-static int mv88e6xxx_get_temp_alarm(struct dsa_switch *ds, bool *alarm)
{
- struct mv88e6xxx_chip *chip = ds->priv;
- int phy = mv88e6xxx_6320_family(chip) ? 3 : 0;
- u16 val;
- int ret;
-
- if (!mv88e6xxx_has(chip, MV88E6XXX_FLAG_TEMP_LIMIT))
- return -EOPNOTSUPP;
-
- *alarm = false;
-
- mutex_lock(&chip->reg_lock);
- ret = mv88e6xxx_phy_page_read(chip, phy, 6, 26, &val);
- mutex_unlock(&chip->reg_lock);
- if (ret < 0)
- return ret;
+ struct mv88e6xxx_mdio_bus *mdio_bus;
+ struct mii_bus *bus;
- *alarm = !!(val & 0x40);
+ list_for_each_entry(mdio_bus, &chip->mdios, list) {
+ bus = mdio_bus->bus;
- return 0;
+ mdiobus_unregister(bus);
+ }
}
-#endif /* CONFIG_NET_DSA_HWMON */
static int mv88e6xxx_get_eeprom_len(struct dsa_switch *ds)
{
@@ -3233,8 +3169,8 @@ static const struct mv88e6xxx_ops mv88e6097_ops = {
static const struct mv88e6xxx_ops mv88e6123_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3280,8 +3216,8 @@ static const struct mv88e6xxx_ops mv88e6131_ops = {
static const struct mv88e6xxx_ops mv88e6161_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3305,8 +3241,8 @@ static const struct mv88e6xxx_ops mv88e6161_ops = {
static const struct mv88e6xxx_ops mv88e6165_ops = {
/* MV88E6XXX_FAMILY_6165 */
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
- .phy_read = mv88e6xxx_read,
- .phy_write = mv88e6xxx_write,
+ .phy_read = mv88e6165_phy_read,
+ .phy_write = mv88e6165_phy_write,
.port_set_link = mv88e6xxx_port_set_link,
.port_set_duplex = mv88e6xxx_port_set_duplex,
.port_set_speed = mv88e6185_port_set_speed,
@@ -3453,6 +3389,8 @@ static const struct mv88e6xxx_ops mv88e6185_ops = {
static const struct mv88e6xxx_ops mv88e6190_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3478,6 +3416,8 @@ static const struct mv88e6xxx_ops mv88e6190_ops = {
static const struct mv88e6xxx_ops mv88e6190x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3503,6 +3443,8 @@ static const struct mv88e6xxx_ops mv88e6190x_ops = {
static const struct mv88e6xxx_ops mv88e6191_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3556,6 +3498,8 @@ static const struct mv88e6xxx_ops mv88e6240_ops = {
static const struct mv88e6xxx_ops mv88e6290_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3712,8 +3656,66 @@ static const struct mv88e6xxx_ops mv88e6352_ops = {
.reset = mv88e6352_g1_reset,
};
+static const struct mv88e6xxx_ops mv88e6141_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
+static const struct mv88e6xxx_ops mv88e6341_ops = {
+ /* MV88E6XXX_FAMILY_6341 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
+ .set_switch_mac = mv88e6xxx_g2_set_switch_mac,
+ .phy_read = mv88e6xxx_g2_smi_phy_read,
+ .phy_write = mv88e6xxx_g2_smi_phy_write,
+ .port_set_link = mv88e6xxx_port_set_link,
+ .port_set_duplex = mv88e6xxx_port_set_duplex,
+ .port_set_rgmii_delay = mv88e6390_port_set_rgmii_delay,
+ .port_set_speed = mv88e6390_port_set_speed,
+ .port_tag_remap = mv88e6095_port_tag_remap,
+ .port_set_frame_mode = mv88e6351_port_set_frame_mode,
+ .port_set_egress_unknowns = mv88e6351_port_set_egress_unknowns,
+ .port_set_ether_type = mv88e6351_port_set_ether_type,
+ .port_jumbo_config = mv88e6165_port_jumbo_config,
+ .port_egress_rate_limiting = mv88e6097_port_egress_rate_limiting,
+ .port_pause_config = mv88e6097_port_pause_config,
+ .stats_snapshot = mv88e6390_g1_stats_snapshot,
+ .stats_get_sset_count = mv88e6320_stats_get_sset_count,
+ .stats_get_strings = mv88e6320_stats_get_strings,
+ .stats_get_stats = mv88e6390_stats_get_stats,
+ .g1_set_cpu_port = mv88e6390_g1_set_cpu_port,
+ .g1_set_egress_port = mv88e6390_g1_set_egress_port,
+ .mgmt_rsvd2cpu = mv88e6390_g1_mgmt_rsvd2cpu,
+ .reset = mv88e6352_g1_reset,
+};
+
static const struct mv88e6xxx_ops mv88e6390_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3741,6 +3743,8 @@ static const struct mv88e6xxx_ops mv88e6390_ops = {
static const struct mv88e6xxx_ops mv88e6390x_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -3768,6 +3772,8 @@ static const struct mv88e6xxx_ops mv88e6390x_ops = {
static const struct mv88e6xxx_ops mv88e6391_ops = {
/* MV88E6XXX_FAMILY_6390 */
+ .get_eeprom = mv88e6xxx_g2_get_eeprom8,
+ .set_eeprom = mv88e6xxx_g2_set_eeprom8,
.set_switch_mac = mv88e6xxx_g2_set_switch_mac,
.phy_read = mv88e6xxx_g2_smi_phy_read,
.phy_write = mv88e6xxx_g2_smi_phy_write,
@@ -4093,6 +4099,34 @@ static const struct mv88e6xxx_info mv88e6xxx_table[] = {
.ops = &mv88e6321_ops,
},
+ [MV88E6141] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6141,
+ .family = MV88E6XXX_FAMILY_6341,
+		.name = "Marvell 88E6141",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 3750,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+ .ops = &mv88e6141_ops,
+ },
+
+ [MV88E6341] = {
+ .prod_num = PORT_SWITCH_ID_PROD_NUM_6341,
+ .family = MV88E6XXX_FAMILY_6341,
+ .name = "Marvell 88E6341",
+ .num_databases = 4096,
+ .num_ports = 6,
+ .port_base_addr = 0x10,
+ .global1_addr = 0x1b,
+ .age_time_coeff = 3750,
+ .tag_protocol = DSA_TAG_PROTO_EDSA,
+ .flags = MV88E6XXX_FLAGS_FAMILY_6341,
+ .ops = &mv88e6341_ops,
+ },
+
[MV88E6350] = {
.prod_num = PORT_SWITCH_ID_PROD_NUM_6350,
.family = MV88E6XXX_FAMILY_6351,
@@ -4222,6 +4256,7 @@ static struct mv88e6xxx_chip *mv88e6xxx_alloc_chip(struct device *dev)
chip->dev = dev;
mutex_init(&chip->reg_lock);
+ INIT_LIST_HEAD(&chip->mdios);
return chip;
}
@@ -4296,7 +4331,7 @@ static const char *mv88e6xxx_drv_probe(struct device *dsa_dev,
mv88e6xxx_phy_init(chip);
- err = mv88e6xxx_mdio_register(chip, NULL);
+ err = mv88e6xxx_mdios_register(chip, NULL);
if (err)
goto free;
@@ -4372,12 +4407,6 @@ static const struct dsa_switch_ops mv88e6xxx_switch_ops = {
.get_sset_count = mv88e6xxx_get_sset_count,
.set_eee = mv88e6xxx_set_eee,
.get_eee = mv88e6xxx_get_eee,
-#ifdef CONFIG_NET_DSA_HWMON
- .get_temp = mv88e6xxx_get_temp,
- .get_temp_limit = mv88e6xxx_get_temp_limit,
- .set_temp_limit = mv88e6xxx_set_temp_limit,
- .get_temp_alarm = mv88e6xxx_get_temp_alarm,
-#endif
.get_eeprom_len = mv88e6xxx_get_eeprom_len,
.get_eeprom = mv88e6xxx_get_eeprom,
.set_eeprom = mv88e6xxx_set_eeprom,
@@ -4407,23 +4436,21 @@ static struct dsa_switch_driver mv88e6xxx_switch_drv = {
.ops = &mv88e6xxx_switch_ops,
};
-static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip,
- struct device_node *np)
+static int mv88e6xxx_register_switch(struct mv88e6xxx_chip *chip)
{
struct device *dev = chip->dev;
struct dsa_switch *ds;
- ds = devm_kzalloc(dev, sizeof(*ds), GFP_KERNEL);
+ ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
if (!ds)
return -ENOMEM;
- ds->dev = dev;
ds->priv = chip;
ds->ops = &mv88e6xxx_switch_ops;
dev_set_drvdata(dev, ds);
- return dsa_register_switch(ds, np);
+ return dsa_register_switch(ds, dev);
}
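
Both conversions of this kind in the patch (here and in qca8k below) share one shape: dsa_switch_alloc() now sizes the ds->ports[] array inside struct dsa_switch, which is what allows the drivers to drop their private per-port bridge_dev tracking. A condensed sketch of the new probe sequence, with placeholder priv/ops names:

static int example_register_switch(struct device *dev, void *priv,
				   const struct dsa_switch_ops *ops)
{
	struct dsa_switch *ds;

	ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
	if (!ds)
		return -ENOMEM;

	ds->priv = priv;
	ds->ops = ops;
	dev_set_drvdata(dev, ds);

	return dsa_register_switch(ds, dev);
}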
static void mv88e6xxx_unregister_switch(struct mv88e6xxx_chip *chip)
@@ -4503,18 +4530,18 @@ static int mv88e6xxx_probe(struct mdio_device *mdiodev)
}
}
- err = mv88e6xxx_mdio_register(chip, np);
+ err = mv88e6xxx_mdios_register(chip, np);
if (err)
goto out_g2_irq;
- err = mv88e6xxx_register_switch(chip, np);
+ err = mv88e6xxx_register_switch(chip);
if (err)
goto out_mdio;
return 0;
out_mdio:
- mv88e6xxx_mdio_unregister(chip);
+ mv88e6xxx_mdios_unregister(chip);
out_g2_irq:
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT) && chip->irq > 0)
mv88e6xxx_g2_irq_free(chip);
@@ -4535,7 +4562,7 @@ static void mv88e6xxx_remove(struct mdio_device *mdiodev)
mv88e6xxx_phy_destroy(chip);
mv88e6xxx_unregister_switch(chip);
- mv88e6xxx_mdio_unregister(chip);
+ mv88e6xxx_mdios_unregister(chip);
if (chip->irq > 0) {
if (mv88e6xxx_has(chip, MV88E6XXX_FLAG_G2_INT))
diff --git a/drivers/net/dsa/mv88e6xxx/global2.c b/drivers/net/dsa/mv88e6xxx/global2.c
index 3e77071..353e26b 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.c
+++ b/drivers/net/dsa/mv88e6xxx/global2.c
@@ -218,7 +218,8 @@ static int mv88e6xxx_g2_clear_pot(struct mv88e6xxx_chip *chip)
}
/* Offset 0x14: EEPROM Command
- * Offset 0x15: EEPROM Data
+ * Offset 0x15: EEPROM Data (for 16-bit data access)
+ * Offset 0x15: EEPROM Addr (for 8-bit data access)
*/
static int mv88e6xxx_g2_eeprom_wait(struct mv88e6xxx_chip *chip)
@@ -239,6 +240,50 @@ static int mv88e6xxx_g2_eeprom_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
return mv88e6xxx_g2_eeprom_wait(chip);
}
+static int mv88e6xxx_g2_eeprom_read8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 *data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_READ;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_eeprom_cmd(chip, cmd);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_read(chip, GLOBAL2_EEPROM_CMD, &cmd);
+ if (err)
+ return err;
+
+ *data = cmd & 0xff;
+
+ return 0;
+}
+
+static int mv88e6xxx_g2_eeprom_write8(struct mv88e6xxx_chip *chip,
+ u16 addr, u8 data)
+{
+ u16 cmd = GLOBAL2_EEPROM_CMD_OP_WRITE | GLOBAL2_EEPROM_CMD_WRITE_EN;
+ int err;
+
+ err = mv88e6xxx_g2_eeprom_wait(chip);
+ if (err)
+ return err;
+
+ err = mv88e6xxx_g2_write(chip, GLOBAL2_EEPROM_ADDR, addr);
+ if (err)
+ return err;
+
+ return mv88e6xxx_g2_eeprom_cmd(chip, cmd | data);
+}
+
static int mv88e6xxx_g2_eeprom_read16(struct mv88e6xxx_chip *chip,
u8 addr, u16 *data)
{
@@ -273,6 +318,52 @@ static int mv88e6xxx_g2_eeprom_write16(struct mv88e6xxx_chip *chip,
return mv88e6xxx_g2_eeprom_cmd(chip, cmd);
}
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_read8(chip, offset, data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data)
+{
+ unsigned int offset = eeprom->offset;
+ unsigned int len = eeprom->len;
+ int err;
+
+ eeprom->len = 0;
+
+ while (len) {
+ err = mv88e6xxx_g2_eeprom_write8(chip, offset, *data);
+ if (err)
+ return err;
+
+ eeprom->len++;
+ offset++;
+ data++;
+ len--;
+ }
+
+ return 0;
+}
+
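
The 8-bit loops mirror the existing 16-bit versions: the caller's struct ethtool_eeprom describes the window, and eeprom->len is rebuilt byte by byte so a partial failure still reports how far the transfer got. A rough usage sketch (the ethtool core normally builds this request from userspace; the offset and length here are made up):

/* Illustrative only: read the first 16 EEPROM bytes via the new
 * 8-bit accessor.
 */
static int example_dump_eeprom8(struct mv88e6xxx_chip *chip, u8 *buf)
{
	struct ethtool_eeprom eeprom = {
		.offset = 0,
		.len = 16,
	};

	return mv88e6xxx_g2_get_eeprom8(chip, &eeprom, buf);
}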
int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data)
{
@@ -410,12 +501,17 @@ static int mv88e6xxx_g2_smi_phy_cmd(struct mv88e6xxx_chip *chip, u16 cmd)
return mv88e6xxx_g2_smi_phy_wait(chip);
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val)
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA | (addr << 5) | reg;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
int err;
+ if (mdio_bus->external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
@@ -427,12 +523,17 @@ int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
return mv88e6xxx_g2_read(chip, GLOBAL2_SMI_PHY_DATA, val);
}
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val)
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val)
{
u16 cmd = GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA | (addr << 5) | reg;
+ struct mv88e6xxx_mdio_bus *mdio_bus = bus->priv;
int err;
+ if (mdio_bus->external)
+ cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;
+
err = mv88e6xxx_g2_smi_phy_wait(chip);
if (err)
return err;
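
Both accessors compose the same clause-22 command word and differ only in the opcode; external PHY support is just one extra bit. A sketch of the layout, inferred from the GLOBAL2_SMI_PHY_* defines added to mv88e6xxx.h below:

/* Sketch, not part of the patch: BUSY is bit 15, the new EXTERNAL
 * selector bit 13, MODE_22 bit 12, the opcode bits 11:10, the PHY
 * address bits 9:5 and the register number bits 4:0.  The OP_22_*
 * defines already include BUSY and MODE_22.
 */
static u16 example_smi_phy_cmd(bool external, bool read, int addr, int reg)
{
	u16 cmd = (read ? GLOBAL2_SMI_PHY_CMD_OP_22_READ_DATA
			: GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA) |
		  (addr << 5) | reg;

	if (external)
		cmd |= GLOBAL2_SMI_PHY_CMD_EXTERNAL;

	return cmd;
}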
diff --git a/drivers/net/dsa/mv88e6xxx/global2.h b/drivers/net/dsa/mv88e6xxx/global2.h
index 9aefb7d..00e6352 100644
--- a/drivers/net/dsa/mv88e6xxx/global2.h
+++ b/drivers/net/dsa/mv88e6xxx/global2.h
@@ -23,15 +23,24 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
return 0;
}
-int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val);
-int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val);
+int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip, u8 *addr);
+
+int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom, u8 *data);
+
int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
int mv88e6xxx_g2_set_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
+
int mv88e6xxx_g2_setup(struct mv88e6xxx_chip *chip);
int mv88e6xxx_g2_irq_setup(struct mv88e6xxx_chip *chip);
void mv88e6xxx_g2_irq_free(struct mv88e6xxx_chip *chip);
@@ -50,12 +59,14 @@ static inline int mv88e6xxx_g2_require(struct mv88e6xxx_chip *chip)
}
static inline int mv88e6xxx_g2_smi_phy_read(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
int addr, int reg, u16 *val)
{
return -EOPNOTSUPP;
}
static inline int mv88e6xxx_g2_smi_phy_write(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
int addr, int reg, u16 val)
{
return -EOPNOTSUPP;
@@ -67,6 +78,20 @@ static inline int mv88e6xxx_g2_set_switch_mac(struct mv88e6xxx_chip *chip,
return -EOPNOTSUPP;
}
+static inline int mv88e6xxx_g2_get_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int mv88e6xxx_g2_set_eeprom8(struct mv88e6xxx_chip *chip,
+ struct ethtool_eeprom *eeprom,
+ u8 *data)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int mv88e6xxx_g2_get_eeprom16(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom,
u8 *data)
diff --git a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
index a224d66..9c5c047 100644
--- a/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
+++ b/drivers/net/dsa/mv88e6xxx/mv88e6xxx.h
@@ -87,6 +87,7 @@
#define PORT_SWITCH_ID_PROD_NUM_6131 0x106
#define PORT_SWITCH_ID_PROD_NUM_6320 0x115
#define PORT_SWITCH_ID_PROD_NUM_6123 0x121
+#define PORT_SWITCH_ID_PROD_NUM_6141 0x340
#define PORT_SWITCH_ID_PROD_NUM_6161 0x161
#define PORT_SWITCH_ID_PROD_NUM_6165 0x165
#define PORT_SWITCH_ID_PROD_NUM_6171 0x171
@@ -100,6 +101,7 @@
#define PORT_SWITCH_ID_PROD_NUM_6240 0x240
#define PORT_SWITCH_ID_PROD_NUM_6290 0x290
#define PORT_SWITCH_ID_PROD_NUM_6321 0x310
+#define PORT_SWITCH_ID_PROD_NUM_6341 0x341
#define PORT_SWITCH_ID_PROD_NUM_6352 0x352
#define PORT_SWITCH_ID_PROD_NUM_6350 0x371
#define PORT_SWITCH_ID_PROD_NUM_6351 0x375
@@ -382,10 +384,12 @@
#define GLOBAL2_EEPROM_CMD_WRITE_EN BIT(10)
#define GLOBAL2_EEPROM_CMD_ADDR_MASK 0xff
#define GLOBAL2_EEPROM_DATA 0x15
+#define GLOBAL2_EEPROM_ADDR 0x15 /* 6390, 6341 */
#define GLOBAL2_PTP_AVB_OP 0x16
#define GLOBAL2_PTP_AVB_DATA 0x17
#define GLOBAL2_SMI_PHY_CMD 0x18
#define GLOBAL2_SMI_PHY_CMD_BUSY BIT(15)
+#define GLOBAL2_SMI_PHY_CMD_EXTERNAL BIT(13)
#define GLOBAL2_SMI_PHY_CMD_MODE_22 BIT(12)
#define GLOBAL2_SMI_PHY_CMD_OP_22_WRITE_DATA ((0x1 << 10) | \
GLOBAL2_SMI_PHY_CMD_MODE_22 | \
@@ -418,6 +422,7 @@ enum mv88e6xxx_model {
MV88E6097,
MV88E6123,
MV88E6131,
+ MV88E6141,
MV88E6161,
MV88E6165,
MV88E6171,
@@ -432,6 +437,7 @@ enum mv88e6xxx_model {
MV88E6290,
MV88E6320,
MV88E6321,
+ MV88E6341,
MV88E6350,
MV88E6351,
MV88E6352,
@@ -447,6 +453,7 @@ enum mv88e6xxx_family {
MV88E6XXX_FAMILY_6165, /* 6123 6161 6165 */
MV88E6XXX_FAMILY_6185, /* 6108 6121 6122 6131 6152 6155 6182 6185 */
MV88E6XXX_FAMILY_6320, /* 6320 6321 */
+ MV88E6XXX_FAMILY_6341, /* 6141 6341 */
MV88E6XXX_FAMILY_6351, /* 6171 6175 6350 6351 */
MV88E6XXX_FAMILY_6352, /* 6172 6176 6240 6352 */
MV88E6XXX_FAMILY_6390, /* 6190 6190X 6191 6290 6390 6390X */
@@ -496,12 +503,6 @@ enum mv88e6xxx_cap {
*/
MV88E6XXX_CAP_STU,
- /* Internal temperature sensor.
- * Available from any enabled port's PHY register 26, page 6.
- */
- MV88E6XXX_CAP_TEMP,
- MV88E6XXX_CAP_TEMP_LIMIT,
-
/* VLAN Table Unit.
* The VTU is used to program 802.1Q VLANs. See GLOBAL_VTU_OP.
*/
@@ -532,8 +533,6 @@ enum mv88e6xxx_cap {
#define MV88E6XXX_FLAG_G2_POT BIT_ULL(MV88E6XXX_CAP_G2_POT)
#define MV88E6XXX_FLAG_STU BIT_ULL(MV88E6XXX_CAP_STU)
-#define MV88E6XXX_FLAG_TEMP BIT_ULL(MV88E6XXX_CAP_TEMP)
-#define MV88E6XXX_FLAG_TEMP_LIMIT BIT_ULL(MV88E6XXX_CAP_TEMP_LIMIT)
#define MV88E6XXX_FLAG_VTU BIT_ULL(MV88E6XXX_CAP_VTU)
/* Ingress Rate Limit unit */
@@ -585,7 +584,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -604,13 +602,25 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_2X | \
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
MV88E6XXX_FLAGS_PVT)
+#define MV88E6XXX_FLAGS_FAMILY_6341 \
+ (MV88E6XXX_FLAG_EEE | \
+ MV88E6XXX_FLAG_G1_ATU_FID | \
+ MV88E6XXX_FLAG_G1_VTU_FID | \
+ MV88E6XXX_FLAG_GLOBAL2 | \
+ MV88E6XXX_FLAG_G2_INT | \
+ MV88E6XXX_FLAG_G2_POT | \
+ MV88E6XXX_FLAG_STU | \
+ MV88E6XXX_FLAG_VTU | \
+ MV88E6XXX_FLAGS_IRL | \
+ MV88E6XXX_FLAGS_MULTI_CHIP | \
+ MV88E6XXX_FLAGS_PVT | \
+ MV88E6XXX_FLAGS_SERDES)
+
#define MV88E6XXX_FLAGS_FAMILY_6351 \
(MV88E6XXX_FLAG_G1_ATU_FID | \
MV88E6XXX_FLAG_G1_VTU_FID | \
@@ -620,7 +630,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -636,8 +645,6 @@ enum mv88e6xxx_cap {
MV88E6XXX_FLAG_G2_MGMT_EN_0X | \
MV88E6XXX_FLAG_G2_POT | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -650,8 +657,6 @@ struct mv88e6xxx_ops;
(MV88E6XXX_FLAG_EEE | \
MV88E6XXX_FLAG_GLOBAL2 | \
MV88E6XXX_FLAG_STU | \
- MV88E6XXX_FLAG_TEMP | \
- MV88E6XXX_FLAG_TEMP_LIMIT | \
MV88E6XXX_FLAG_VTU | \
MV88E6XXX_FLAGS_IRL | \
MV88E6XXX_FLAGS_MULTI_CHIP | \
@@ -690,10 +695,6 @@ struct mv88e6xxx_vtu_entry {
struct mv88e6xxx_bus_ops;
-struct mv88e6xxx_priv_port {
- struct net_device *bridge_dev;
-};
-
struct mv88e6xxx_irq {
u16 masked;
struct irq_chip chip;
@@ -734,8 +735,6 @@ struct mv88e6xxx_chip {
*/
struct mutex stats_mutex;
- struct mv88e6xxx_priv_port ports[DSA_MAX_PORTS];
-
/* A switch may have a GPIO line tied to its reset pin. Parse
* this from the device tree, and use it before performing
* switch soft reset.
@@ -745,11 +744,8 @@ struct mv88e6xxx_chip {
/* set to size of eeprom if supported by the switch */
int eeprom_len;
- /* Device node for the MDIO bus */
- struct device_node *mdio_np;
-
- /* And the MDIO bus itself */
- struct mii_bus *mdio_bus;
+ /* List of mdio busses */
+ struct list_head mdios;
/* There can be two interrupt controllers, which are chained
* off a GPIO as interrupt source
@@ -765,6 +761,13 @@ struct mv88e6xxx_bus_ops {
int (*write)(struct mv88e6xxx_chip *chip, int addr, int reg, u16 val);
};
+struct mv88e6xxx_mdio_bus {
+ struct mii_bus *bus;
+ struct mv88e6xxx_chip *chip;
+ struct list_head list;
+ bool external;
+};
+
struct mv88e6xxx_ops {
int (*get_eeprom)(struct mv88e6xxx_chip *chip,
struct ethtool_eeprom *eeprom, u8 *data);
@@ -773,10 +776,12 @@ struct mv88e6xxx_ops {
int (*set_switch_mac)(struct mv88e6xxx_chip *chip, u8 *addr);
- int (*phy_read)(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 *val);
- int (*phy_write)(struct mv88e6xxx_chip *chip, int addr, int reg,
- u16 val);
+ int (*phy_read)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 *val);
+ int (*phy_write)(struct mv88e6xxx_chip *chip,
+ struct mii_bus *bus,
+ int addr, int reg, u16 val);
/* PHY Polling Unit (PPU) operations */
int (*ppu_enable)(struct mv88e6xxx_chip *chip);
diff --git a/drivers/net/dsa/qca8k.c b/drivers/net/dsa/qca8k.c
index 54d270d..a4fd4cc 100644
--- a/drivers/net/dsa/qca8k.c
+++ b/drivers/net/dsa/qca8k.c
@@ -746,17 +746,14 @@ qca8k_port_stp_state_set(struct dsa_switch *ds, int port, u8 state)
}
static int
-qca8k_port_bridge_join(struct dsa_switch *ds, int port,
- struct net_device *bridge)
+qca8k_port_bridge_join(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int port_mask = BIT(QCA8K_CPU_PORT);
int i;
- priv->port_sts[port].bridge_dev = bridge;
-
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (priv->port_sts[i].bridge_dev != bridge)
+ if (ds->ports[i].bridge_dev != br)
continue;
/* Add this port to the portvlan mask of the other ports
* in the bridge
@@ -775,14 +772,13 @@ qca8k_port_bridge_join(struct dsa_switch *ds, int port,
}
static void
-qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
+qca8k_port_bridge_leave(struct dsa_switch *ds, int port, struct net_device *br)
{
struct qca8k_priv *priv = (struct qca8k_priv *)ds->priv;
int i;
for (i = 1; i < QCA8K_NUM_PORTS; i++) {
- if (priv->port_sts[i].bridge_dev !=
- priv->port_sts[port].bridge_dev)
+ if (ds->ports[i].bridge_dev != br)
continue;
		/* Remove this port from the portvlan mask of the other ports
* in the bridge
@@ -791,7 +787,7 @@ qca8k_port_bridge_leave(struct dsa_switch *ds, int port)
QCA8K_PORT_LOOKUP_CTRL(i),
BIT(port));
}
- priv->port_sts[port].bridge_dev = NULL;
+
/* Set the cpu port to be the only one in the portvlan mask of
* this port
*/
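
The join/leave loops keep port-based VLAN membership symmetric: each current member of the bridge gains or loses a lookup-control bit for this port, and this port's own mask becomes the OR of all members plus the CPU port. A hypothetical helper showing just that mask arithmetic (not part of the driver):

static u32 example_port_vlan_mask(unsigned long bridged_ports, int port)
{
	u32 mask = BIT(QCA8K_CPU_PORT);	/* the CPU port is always a member */
	int i;

	for_each_set_bit(i, &bridged_ports, QCA8K_NUM_PORTS)
		if (i != port)
			mask |= BIT(i);

	return mask;
}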
@@ -954,17 +950,16 @@ qca8k_sw_probe(struct mdio_device *mdiodev)
if (id != QCA8K_ID_QCA8337)
return -ENODEV;
- priv->ds = devm_kzalloc(&mdiodev->dev, sizeof(*priv->ds), GFP_KERNEL);
+ priv->ds = dsa_switch_alloc(&mdiodev->dev, DSA_MAX_PORTS);
if (!priv->ds)
return -ENOMEM;
priv->ds->priv = priv;
- priv->ds->dev = &mdiodev->dev;
priv->ds->ops = &qca8k_switch_ops;
mutex_init(&priv->reg_mutex);
dev_set_drvdata(&mdiodev->dev, priv);
- return dsa_register_switch(priv->ds, priv->ds->dev->of_node);
+ return dsa_register_switch(priv->ds, &mdiodev->dev);
}
static void
diff --git a/drivers/net/dsa/qca8k.h b/drivers/net/dsa/qca8k.h
index 2014647..1ed4fac 100644
--- a/drivers/net/dsa/qca8k.h
+++ b/drivers/net/dsa/qca8k.h
@@ -157,7 +157,6 @@ enum qca8k_fdb_cmd {
struct ar8xxx_port_status {
struct ethtool_eee eee;
- struct net_device *bridge_dev;
int enabled;
};
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 1f2de4e..2c80611 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -41,7 +41,48 @@
#define DRV_NAME "dummy"
#define DRV_VERSION "1.0"
+#undef pr_fmt
+#define pr_fmt(fmt) DRV_NAME ": " fmt
+
static int numdummies = 1;
+static int num_vfs;
+
+struct vf_data_storage {
+ u8 vf_mac[ETH_ALEN];
+ u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+ u16 pf_qos;
+ __be16 vlan_proto;
+ u16 min_tx_rate;
+ u16 max_tx_rate;
+ u8 spoofchk_enabled;
+ bool rss_query_enabled;
+ u8 trusted;
+ int link_state;
+};
+
+struct dummy_priv {
+ struct vf_data_storage *vfinfo;
+};
+
+static int dummy_num_vf(struct device *dev)
+{
+ return num_vfs;
+}
+
+static struct bus_type dummy_bus = {
+ .name = "dummy",
+ .num_vf = dummy_num_vf,
+};
+
+static void release_dummy_parent(struct device *dev)
+{
+}
+
+static struct device dummy_parent = {
+ .init_name = "dummy",
+ .bus = &dummy_bus,
+ .release = release_dummy_parent,
+};
/* fake multicast ability */
static void set_multicast_list(struct net_device *dev)
@@ -90,10 +131,25 @@ static netdev_tx_t dummy_xmit(struct sk_buff *skb, struct net_device *dev)
static int dummy_dev_init(struct net_device *dev)
{
+ struct dummy_priv *priv = netdev_priv(dev);
+
dev->dstats = netdev_alloc_pcpu_stats(struct pcpu_dstats);
if (!dev->dstats)
return -ENOMEM;
+ priv->vfinfo = NULL;
+
+ if (!num_vfs)
+ return 0;
+
+ dev->dev.parent = &dummy_parent;
+ priv->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage),
+ GFP_KERNEL);
+ if (!priv->vfinfo) {
+ free_percpu(dev->dstats);
+ return -ENOMEM;
+ }
+
return 0;
}
@@ -111,6 +167,117 @@ static int dummy_change_carrier(struct net_device *dev, bool new_carrier)
return 0;
}
+static int dummy_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
+ return -EINVAL;
+
+ memcpy(priv->vfinfo[vf].vf_mac, mac, ETH_ALEN);
+
+ return 0;
+}
+
+static int dummy_set_vf_vlan(struct net_device *dev, int vf,
+ u16 vlan, u8 qos, __be16 vlan_proto)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if ((vf >= num_vfs) || (vlan > 4095) || (qos > 7))
+ return -EINVAL;
+
+ priv->vfinfo[vf].pf_vlan = vlan;
+ priv->vfinfo[vf].pf_qos = qos;
+ priv->vfinfo[vf].vlan_proto = vlan_proto;
+
+ return 0;
+}
+
+static int dummy_set_vf_rate(struct net_device *dev, int vf, int min, int max)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].min_tx_rate = min;
+ priv->vfinfo[vf].max_tx_rate = max;
+
+ return 0;
+}
+
+static int dummy_set_vf_spoofchk(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].spoofchk_enabled = val;
+
+ return 0;
+}
+
+static int dummy_set_vf_rss_query_en(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].rss_query_enabled = val;
+
+ return 0;
+}
+
+static int dummy_set_vf_trust(struct net_device *dev, int vf, bool val)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].trusted = val;
+
+ return 0;
+}
+
+static int dummy_get_vf_config(struct net_device *dev,
+ int vf, struct ifla_vf_info *ivi)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ ivi->vf = vf;
+ memcpy(&ivi->mac, priv->vfinfo[vf].vf_mac, ETH_ALEN);
+ ivi->vlan = priv->vfinfo[vf].pf_vlan;
+ ivi->qos = priv->vfinfo[vf].pf_qos;
+ ivi->spoofchk = priv->vfinfo[vf].spoofchk_enabled;
+ ivi->linkstate = priv->vfinfo[vf].link_state;
+ ivi->min_tx_rate = priv->vfinfo[vf].min_tx_rate;
+ ivi->max_tx_rate = priv->vfinfo[vf].max_tx_rate;
+ ivi->rss_query_en = priv->vfinfo[vf].rss_query_enabled;
+ ivi->trusted = priv->vfinfo[vf].trusted;
+ ivi->vlan_proto = priv->vfinfo[vf].vlan_proto;
+
+ return 0;
+}
+
+static int dummy_set_vf_link_state(struct net_device *dev, int vf, int state)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ if (vf >= num_vfs)
+ return -EINVAL;
+
+ priv->vfinfo[vf].link_state = state;
+
+ return 0;
+}
+
static const struct net_device_ops dummy_netdev_ops = {
.ndo_init = dummy_dev_init,
.ndo_uninit = dummy_dev_uninit,
@@ -120,6 +287,14 @@ static const struct net_device_ops dummy_netdev_ops = {
.ndo_set_mac_address = eth_mac_addr,
.ndo_get_stats64 = dummy_get_stats64,
.ndo_change_carrier = dummy_change_carrier,
+ .ndo_set_vf_mac = dummy_set_vf_mac,
+ .ndo_set_vf_vlan = dummy_set_vf_vlan,
+ .ndo_set_vf_rate = dummy_set_vf_rate,
+ .ndo_set_vf_spoofchk = dummy_set_vf_spoofchk,
+ .ndo_set_vf_trust = dummy_set_vf_trust,
+ .ndo_get_vf_config = dummy_get_vf_config,
+ .ndo_set_vf_link_state = dummy_set_vf_link_state,
+ .ndo_set_vf_rss_query_en = dummy_set_vf_rss_query_en,
};
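
These ops are what the rtnetlink core invokes when userspace configures or queries VFs (e.g. "ip link set dummy0 vf 0 mac ..."); since the device is fake, each handler just validates the VF index and stores the value in priv->vfinfo. Roughly how a query reaches the driver, sketched:

/* Illustrative only: the real caller lives in net/core/rtnetlink.c. */
static int example_query_vf(struct net_device *dev, int vf)
{
	struct ifla_vf_info ivi;

	if (!dev->netdev_ops->ndo_get_vf_config)
		return -EOPNOTSUPP;

	return dev->netdev_ops->ndo_get_vf_config(dev, vf, &ivi);
}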
static void dummy_get_drvinfo(struct net_device *dev,
@@ -133,6 +308,14 @@ static const struct ethtool_ops dummy_ethtool_ops = {
.get_drvinfo = dummy_get_drvinfo,
};
+static void dummy_free_netdev(struct net_device *dev)
+{
+ struct dummy_priv *priv = netdev_priv(dev);
+
+ kfree(priv->vfinfo);
+ free_netdev(dev);
+}
+
static void dummy_setup(struct net_device *dev)
{
ether_setup(dev);
@@ -140,7 +323,7 @@ static void dummy_setup(struct net_device *dev)
/* Initialize the device structure. */
dev->netdev_ops = &dummy_netdev_ops;
dev->ethtool_ops = &dummy_ethtool_ops;
- dev->destructor = free_netdev;
+ dev->destructor = dummy_free_netdev;
/* Fill in device structure with ethernet-generic values. */
dev->flags |= IFF_NOARP;
@@ -171,6 +354,7 @@ static int dummy_validate(struct nlattr *tb[], struct nlattr *data[])
static struct rtnl_link_ops dummy_link_ops __read_mostly = {
.kind = DRV_NAME,
+ .priv_size = sizeof(struct dummy_priv),
.setup = dummy_setup,
.validate = dummy_validate,
};
@@ -179,12 +363,16 @@ static struct rtnl_link_ops dummy_link_ops __read_mostly = {
module_param(numdummies, int, 0);
MODULE_PARM_DESC(numdummies, "Number of dummy pseudo devices");
+module_param(num_vfs, int, 0);
+MODULE_PARM_DESC(num_vfs, "Number of dummy VFs per dummy device");
+
static int __init dummy_init_one(void)
{
struct net_device *dev_dummy;
int err;
- dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
+ dev_dummy = alloc_netdev(sizeof(struct dummy_priv),
+ "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
if (!dev_dummy)
return -ENOMEM;
@@ -203,6 +391,21 @@ static int __init dummy_init_module(void)
{
int i, err = 0;
+ if (num_vfs) {
+ err = bus_register(&dummy_bus);
+ if (err < 0) {
+ pr_err("registering dummy bus failed\n");
+ return err;
+ }
+
+ err = device_register(&dummy_parent);
+ if (err < 0) {
+ pr_err("registering dummy parent device failed\n");
+ bus_unregister(&dummy_bus);
+ return err;
+ }
+ }
+
rtnl_lock();
err = __rtnl_link_register(&dummy_link_ops);
if (err < 0)
@@ -218,12 +421,22 @@ static int __init dummy_init_module(void)
out:
rtnl_unlock();
+ if (err && num_vfs) {
+ device_unregister(&dummy_parent);
+ bus_unregister(&dummy_bus);
+ }
+
return err;
}
static void __exit dummy_cleanup_module(void)
{
rtnl_link_unregister(&dummy_link_ops);
+
+ if (num_vfs) {
+ device_unregister(&dummy_parent);
+ bus_unregister(&dummy_bus);
+ }
}
module_init(dummy_init_module);
diff --git a/drivers/net/ethernet/3com/typhoon.c b/drivers/net/ethernet/3com/typhoon.c
index 1986ad1..084a6d5 100644
--- a/drivers/net/ethernet/3com/typhoon.c
+++ b/drivers/net/ethernet/3com/typhoon.c
@@ -1753,7 +1753,7 @@ typhoon_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
iowrite32(TYPHOON_INTR_NONE,
tp->ioaddr + TYPHOON_REG_INTR_MASK);
typhoon_post_pci_writes(tp->ioaddr);
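
The same napi_complete() -> napi_complete_done() conversion repeats in the drivers below (bfin_mac, et131x, altera_tse, xgene): reporting the actual work done lets the core feed that count into interrupt moderation and busy polling rather than assuming the full budget was consumed. The resulting canonical poll shape, as a sketch:

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = 0;

	/* ... process up to @budget packets, advancing work_done ... */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		/* re-enable the device's RX interrupt here */
	}

	return work_done;
}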
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index e4c28fe..8c08f9d 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -29,6 +29,7 @@ source "drivers/net/ethernet/amazon/Kconfig"
source "drivers/net/ethernet/amd/Kconfig"
source "drivers/net/ethernet/apm/Kconfig"
source "drivers/net/ethernet/apple/Kconfig"
+source "drivers/net/ethernet/aquantia/Kconfig"
source "drivers/net/ethernet/arc/Kconfig"
source "drivers/net/ethernet/atheros/Kconfig"
source "drivers/net/ethernet/aurora/Kconfig"
@@ -170,7 +171,6 @@ source "drivers/net/ethernet/sgi/Kconfig"
source "drivers/net/ethernet/smsc/Kconfig"
source "drivers/net/ethernet/stmicro/Kconfig"
source "drivers/net/ethernet/sun/Kconfig"
-source "drivers/net/ethernet/synopsys/Kconfig"
source "drivers/net/ethernet/tehuti/Kconfig"
source "drivers/net/ethernet/ti/Kconfig"
source "drivers/net/ethernet/tile/Kconfig"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index 24330f4..26dce5b 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_NET_VENDOR_AMAZON) += amazon/
obj-$(CONFIG_NET_VENDOR_AMD) += amd/
obj-$(CONFIG_NET_XGENE) += apm/
obj-$(CONFIG_NET_VENDOR_APPLE) += apple/
+obj-$(CONFIG_NET_VENDOR_AQUANTIA) += aquantia/
obj-$(CONFIG_NET_VENDOR_ARC) += arc/
obj-$(CONFIG_NET_VENDOR_ATHEROS) += atheros/
obj-$(CONFIG_NET_VENDOR_AURORA) += aurora/
@@ -81,7 +82,6 @@ obj-$(CONFIG_NET_VENDOR_SGI) += sgi/
obj-$(CONFIG_NET_VENDOR_SMSC) += smsc/
obj-$(CONFIG_NET_VENDOR_STMICRO) += stmicro/
obj-$(CONFIG_NET_VENDOR_SUN) += sun/
-obj-$(CONFIG_NET_VENDOR_SYNOPSYS) += synopsys/
obj-$(CONFIG_NET_VENDOR_TEHUTI) += tehuti/
obj-$(CONFIG_NET_VENDOR_TI) += ti/
obj-$(CONFIG_TILE_NET) += tile/
diff --git a/drivers/net/ethernet/adi/bfin_mac.c b/drivers/net/ethernet/adi/bfin_mac.c
index 8816452..a817313 100644
--- a/drivers/net/ethernet/adi/bfin_mac.c
+++ b/drivers/net/ethernet/adi/bfin_mac.c
@@ -1274,7 +1274,7 @@ static int bfin_mac_poll(struct napi_struct *napi, int budget)
}
if (i < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, i);
if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
enable_irq(IRQ_MAC_RX);
}
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 831bab3..87a11b9 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -3575,7 +3575,7 @@ static int et131x_poll(struct napi_struct *napi, int budget)
et131x_handle_send_pkts(adapter);
if (work_done < budget) {
- napi_complete(&adapter->napi);
+ napi_complete_done(&adapter->napi, work_done);
et131x_enable_interrupts(adapter);
}
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 25864bf..527908c 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -513,7 +513,7 @@ static int tse_poll(struct napi_struct *napi, int budget)
if (rxcomplete < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxcomplete);
netdev_dbg(priv->dev,
"NAPI Complete, did %d packets with budget %d\n",
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-common.h b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
index 5b7ba25..8a280e7 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-common.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-common.h
@@ -891,6 +891,8 @@
#define PCS_V1_WINDOW_SELECT 0x03fc
#define PCS_V2_WINDOW_DEF 0x9060
#define PCS_V2_WINDOW_SELECT 0x9064
+#define PCS_V2_RV_WINDOW_DEF 0x1060
+#define PCS_V2_RV_WINDOW_SELECT 0x1064
/* PCS register entry bit positions and sizes */
#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
index aaf0350..a7d16db 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-dev.c
@@ -1151,7 +1151,7 @@ static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
mmd_data = XPCS16_IOREAD(pdata, offset);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
@@ -1183,7 +1183,7 @@ static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
spin_lock_irqsave(&pdata->xpcs_lock, flags);
- XPCS32_IOWRITE(pdata, PCS_V2_WINDOW_SELECT, index);
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
XPCS16_IOWRITE(pdata, offset, mmd_data);
spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
@@ -3407,8 +3407,10 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
/* Flush Tx queues */
ret = xgbe_flush_tx_queues(pdata);
- if (ret)
+ if (ret) {
+ netdev_err(pdata->netdev, "error flushing TX queues\n");
return ret;
+ }
/*
* Initialize DMA related features
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index f8648e4..3aa457c 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1070,7 +1070,9 @@ static int xgbe_start(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_start\n");
- hw_if->init(pdata);
+ ret = hw_if->init(pdata);
+ if (ret)
+ return ret;
xgbe_napi_enable(pdata, 1);
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
index e76b7f6..c2730f1 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-pci.c
@@ -265,6 +265,7 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
struct xgbe_prv_data *pdata;
struct device *dev = &pdev->dev;
void __iomem * const *iomap_table;
+ struct pci_dev *rdev;
unsigned int ma_lo, ma_hi;
unsigned int reg;
int bar_mask;
@@ -326,8 +327,20 @@ static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
if (netif_msg_probe(pdata))
dev_dbg(dev, "xpcs_regs = %p\n", pdata->xpcs_regs);
+ /* Set the PCS indirect addressing definition registers */
+ rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
+ if (rdev &&
+ (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
+ pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
+ } else {
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ }
+ pci_dev_put(rdev);
+
/* Configure the PCS indirect addressing support */
- reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
pdata->xpcs_window <<= 6;
pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
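
The window pair implements classic indirect addressing: the select register maps a region of the MMD address space into a fixed aperture, and the low bits then index within it; on the Raven (device 0x15d0) root complex the pair simply sits at different offsets, hence the new pdata fields. A condensed sketch of the read path, mirroring xgbe_read_mmd_regs_v2() with the xpcs_lock handling omitted:

static u16 example_xpcs_read(struct xgbe_prv_data *pdata,
			     unsigned int mmd_address)
{
	unsigned int index = mmd_address & ~pdata->xpcs_window_mask;
	unsigned int offset = pdata->xpcs_window +
			      (mmd_address & pdata->xpcs_window_mask);

	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	return XPCS16_IOREAD(pdata, offset);
}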
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe.h b/drivers/net/ethernet/amd/xgbe/xgbe.h
index f52a9bd..0010881 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe.h
+++ b/drivers/net/ethernet/amd/xgbe/xgbe.h
@@ -955,6 +955,8 @@ struct xgbe_prv_data {
/* XPCS indirect addressing lock */
spinlock_t xpcs_lock;
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
unsigned int xpcs_window;
unsigned int xpcs_window_size;
unsigned int xpcs_window_mask;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index de59db6..ab43c5272 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -840,7 +840,7 @@ static int xgene_enet_napi(struct napi_struct *napi, const int budget)
processed = xgene_enet_process_ring(ring, budget);
if (processed != budget) {
- napi_complete(napi);
+ napi_complete_done(napi, processed);
enable_irq(ring->irq);
}
diff --git a/drivers/net/ethernet/aquantia/Kconfig b/drivers/net/ethernet/aquantia/Kconfig
new file mode 100644
index 0000000..cdf78e0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Kconfig
@@ -0,0 +1,24 @@
+#
+# aQuantia device configuration
+#
+
+config NET_VENDOR_AQUANTIA
+ bool "aQuantia devices"
+ default y
+ ---help---
+	  Set this to y if you have an Ethernet network card that uses the aQuantia
+ AQC107/AQC108 chipset.
+
+	  This option does not build any drivers; it causes the aQuantia
+ drivers that can be built to appear in the list of Ethernet drivers.
+
+
+if NET_VENDOR_AQUANTIA
+
+config AQTION
+ tristate "aQuantia AQtion(tm) Support"
+ depends on PCI && X86_64
+ ---help---
+	  This enables support for the aQuantia AQtion(tm) Ethernet card.
+
+endif # NET_VENDOR_AQUANTIA
diff --git a/drivers/net/ethernet/aquantia/Makefile b/drivers/net/ethernet/aquantia/Makefile
new file mode 100644
index 0000000..4f4897b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the aQuantia device drivers.
+#
+
+obj-$(CONFIG_AQTION) += atlantic/
diff --git a/drivers/net/ethernet/aquantia/atlantic/Makefile b/drivers/net/ethernet/aquantia/atlantic/Makefile
new file mode 100644
index 0000000..e4ae696
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/Makefile
@@ -0,0 +1,42 @@
+################################################################################
+#
+# aQuantia Ethernet Controller AQtion Linux Driver
+# Copyright(c) 2014-2017 aQuantia Corporation.
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms and conditions of the GNU General Public License,
+# version 2, as published by the Free Software Foundation.
+#
+# This program is distributed in the hope it will be useful, but WITHOUT
+# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+# more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program. If not, see <http://www.gnu.org/licenses/>.
+#
+# The full GNU General Public License is included in this distribution in
+# the file called "COPYING".
+#
+# Contact Information: <rdc-drv@aquantia.com>
+# aQuantia Corporation, 105 E. Tasman Dr. San Jose, CA 95134, USA
+#
+################################################################################
+
+#
+# Makefile for the AQtion(tm) Ethernet driver
+#
+
+obj-$(CONFIG_AQTION) += atlantic.o
+
+atlantic-objs := aq_main.o \
+ aq_nic.o \
+ aq_pci_func.o \
+ aq_vec.o \
+ aq_ring.o \
+ aq_hw_utils.o \
+ aq_ethtool.o \
+ hw_atl/hw_atl_a0.o \
+ hw_atl/hw_atl_b0.o \
+ hw_atl/hw_atl_utils.o \
+ hw_atl/hw_atl_llh.o
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
new file mode 100644
index 0000000..5f99237
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_cfg.h
@@ -0,0 +1,77 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_cfg.h: Definition of configuration parameters and constants. */
+
+#ifndef AQ_CFG_H
+#define AQ_CFG_H
+
+#define AQ_CFG_VECS_DEF 4U
+#define AQ_CFG_TCS_DEF 1U
+
+#define AQ_CFG_TXDS_DEF 4096U
+#define AQ_CFG_RXDS_DEF 1024U
+
+#define AQ_CFG_IS_POLLING_DEF 0U
+
+#define AQ_CFG_FORCE_LEGACY_INT 0U
+
+#define AQ_CFG_IS_INTERRUPT_MODERATION_DEF 1U
+#define AQ_CFG_INTERRUPT_MODERATION_RATE_DEF 0xFFFFU
+#define AQ_CFG_IRQ_MASK 0x1FFU
+
+#define AQ_CFG_VECS_MAX 8U
+#define AQ_CFG_TCS_MAX 8U
+
+#define AQ_CFG_TX_FRAME_MAX (16U * 1024U)
+#define AQ_CFG_RX_FRAME_MAX (4U * 1024U)
+
+/* LRO */
+#define AQ_CFG_IS_LRO_DEF 1U
+
+/* RSS */
+#define AQ_CFG_RSS_INDIRECTION_TABLE_MAX 128U
+#define AQ_CFG_RSS_HASHKEY_SIZE 320U
+
+#define AQ_CFG_IS_RSS_DEF 1U
+#define AQ_CFG_NUM_RSS_QUEUES_DEF AQ_CFG_VECS_DEF
+#define AQ_CFG_RSS_BASE_CPU_NUM_DEF 0U
+
+#define AQ_CFG_PCI_FUNC_MSIX_IRQS 9U
+#define AQ_CFG_PCI_FUNC_PORTS 2U
+
+#define AQ_CFG_SERVICE_TIMER_INTERVAL (2 * HZ)
+#define AQ_CFG_POLLING_TIMER_INTERVAL ((unsigned int)(2 * HZ))
+
+#define AQ_CFG_SKB_FRAGS_MAX 32U
+
+#define AQ_CFG_NAPI_WEIGHT 64U
+
+#define AQ_CFG_MULTICAST_ADDRESS_MAX 32U
+
+/*#define AQ_CFG_MAC_ADDR_PERMANENT {0x30, 0x0E, 0xE3, 0x12, 0x34, 0x56}*/
+
+#define AQ_CFG_FC_MODE 3U
+
+#define AQ_CFG_SPEED_MSK 0xFFFFU /* 0xFFFFU==auto_neg */
+
+#define AQ_CFG_IS_AUTONEG_DEF 1U
+#define AQ_CFG_MTU_DEF 1514U
+
+#define AQ_CFG_LOCK_TRYS 100U
+
+#define AQ_CFG_DRV_AUTHOR "aQuantia"
+#define AQ_CFG_DRV_DESC "aQuantia Corporation(R) Network Driver"
+#define AQ_CFG_DRV_NAME "aquantia"
+#define AQ_CFG_DRV_VERSION __stringify(NIC_MAJOR_DRIVER_VERSION)"."\
+ __stringify(NIC_MINOR_DRIVER_VERSION)"."\
+ __stringify(NIC_BUILD_DRIVER_VERSION)"."\
+ __stringify(NIC_REVISION_DRIVER_VERSION)
+
+#endif /* AQ_CFG_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_common.h b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
new file mode 100644
index 0000000..9eb5e22
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_common.h
@@ -0,0 +1,23 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_common.h: Basic includes for all files in the project. */
+
+#ifndef AQ_COMMON_H
+#define AQ_COMMON_H
+
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+#include "ver.h"
+#include "aq_nic.h"
+#include "aq_cfg.h"
+#include "aq_utils.h"
+
+#endif /* AQ_COMMON_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
new file mode 100644
index 0000000..a761e91
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -0,0 +1,262 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.c: Definition of ethtool related functions. */
+
+#include "aq_ethtool.h"
+#include "aq_nic.h"
+
+static void aq_ethtool_get_regs(struct net_device *ndev,
+ struct ethtool_regs *regs, void *p)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ memset(p, 0, regs_count * sizeof(u32));
+ aq_nic_get_regs(aq_nic, regs, p);
+}
+
+static int aq_ethtool_get_regs_len(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ return regs_count * sizeof(u32);
+}
+
+static u32 aq_ethtool_get_link(struct net_device *ndev)
+{
+ return ethtool_op_get_link(ndev);
+}
+
+static int aq_ethtool_get_link_ksettings(struct net_device *ndev,
+ struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ aq_nic_get_link_ksettings(aq_nic, cmd);
+ cmd->base.speed = netif_carrier_ok(ndev) ?
+ aq_nic_get_link_speed(aq_nic) : 0U;
+
+ return 0;
+}
+
+static int
+aq_ethtool_set_link_ksettings(struct net_device *ndev,
+ const struct ethtool_link_ksettings *cmd)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ return aq_nic_set_link_ksettings(aq_nic, cmd);
+}
+
+/* Here "5U" is the number of queue[#] stats lines (InPackets+...+InErrors) */
+static const unsigned int aq_ethtool_stat_queue_lines = 5U;
+static const unsigned int aq_ethtool_stat_queue_chars =
+ 5U * ETH_GSTRING_LEN;
+static const char aq_ethtool_stat_names[][ETH_GSTRING_LEN] = {
+ "InPackets",
+ "InUCast",
+ "InMCast",
+ "InBCast",
+ "InErrors",
+ "OutPackets",
+ "OutUCast",
+ "OutMCast",
+ "OutBCast",
+ "InUCastOctects",
+ "OutUCastOctects",
+ "InMCastOctects",
+ "OutMCastOctects",
+ "InBCastOctects",
+ "OutBCastOctects",
+ "InOctects",
+ "OutOctects",
+ "InPacketsDma",
+ "OutPacketsDma",
+ "InOctetsDma",
+ "OutOctetsDma",
+ "InDroppedDma",
+ "Queue[0] InPackets",
+ "Queue[0] OutPackets",
+ "Queue[0] InJumboPackets",
+ "Queue[0] InLroPackets",
+ "Queue[0] InErrors",
+ "Queue[1] InPackets",
+ "Queue[1] OutPackets",
+ "Queue[1] InJumboPackets",
+ "Queue[1] InLroPackets",
+ "Queue[1] InErrors",
+ "Queue[2] InPackets",
+ "Queue[2] OutPackets",
+ "Queue[2] InJumboPackets",
+ "Queue[2] InLroPackets",
+ "Queue[2] InErrors",
+ "Queue[3] InPackets",
+ "Queue[3] OutPackets",
+ "Queue[3] InJumboPackets",
+ "Queue[3] InLroPackets",
+ "Queue[3] InErrors",
+ "Queue[4] InPackets",
+ "Queue[4] OutPackets",
+ "Queue[4] InJumboPackets",
+ "Queue[4] InLroPackets",
+ "Queue[4] InErrors",
+ "Queue[5] InPackets",
+ "Queue[5] OutPackets",
+ "Queue[5] InJumboPackets",
+ "Queue[5] InLroPackets",
+ "Queue[5] InErrors",
+ "Queue[6] InPackets",
+ "Queue[6] OutPackets",
+ "Queue[6] InJumboPackets",
+ "Queue[6] InLroPackets",
+ "Queue[6] InErrors",
+ "Queue[7] InPackets",
+ "Queue[7] OutPackets",
+ "Queue[7] InJumboPackets",
+ "Queue[7] InLroPackets",
+ "Queue[7] InErrors",
+};
+
+static void aq_ethtool_stats(struct net_device *ndev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+/* ASSERT: Need to add lines to aq_ethtool_stat_names if AQ_CFG_VECS_MAX > 8 */
+ BUILD_BUG_ON(AQ_CFG_VECS_MAX > 8);
+ memset(data, 0, ARRAY_SIZE(aq_ethtool_stat_names) * sizeof(u64));
+ aq_nic_get_stats(aq_nic, data);
+}
+
+static void aq_ethtool_get_drvinfo(struct net_device *ndev,
+ struct ethtool_drvinfo *drvinfo)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ struct pci_dev *pdev = to_pci_dev(ndev->dev.parent);
+ u32 firmware_version = aq_nic_get_fw_version(aq_nic);
+ u32 regs_count = aq_nic_get_regs_count(aq_nic);
+
+ strlcat(drvinfo->driver, AQ_CFG_DRV_NAME, sizeof(drvinfo->driver));
+ strlcat(drvinfo->version, AQ_CFG_DRV_VERSION, sizeof(drvinfo->version));
+
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%u.%u.%u", firmware_version >> 24,
+ (firmware_version >> 16) & 0xFFU, firmware_version & 0xFFFFU);
+
+ strlcpy(drvinfo->bus_info, pdev ? pci_name(pdev) : "",
+ sizeof(drvinfo->bus_info));
+ drvinfo->n_stats = ARRAY_SIZE(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) * aq_ethtool_stat_queue_lines;
+ drvinfo->testinfo_len = 0;
+ drvinfo->regdump_len = regs_count;
+ drvinfo->eedump_len = 0;
+}
+
+static void aq_ethtool_get_strings(struct net_device *ndev,
+ u32 stringset, u8 *data)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ if (stringset == ETH_SS_STATS)
+ memcpy(data, *aq_ethtool_stat_names,
+ sizeof(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) *
+ aq_ethtool_stat_queue_chars);
+}
+
+static int aq_ethtool_get_sset_count(struct net_device *ndev, int stringset)
+{
+ int ret = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ switch (stringset) {
+ case ETH_SS_STATS:
+ ret = ARRAY_SIZE(aq_ethtool_stat_names) -
+ (AQ_CFG_VECS_MAX - cfg->vecs) *
+ aq_ethtool_stat_queue_lines;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ }
+ return ret;
+}
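
The string table is sized for the maximum of eight queues, so both get_strings and get_sset_count trim the tail for queues that do not exist in the active configuration. Worked through with the defaults (AQ_CFG_VECS_MAX = 8, cfg->vecs = AQ_CFG_VECS_DEF = 4): 22 device-wide lines plus 8 * 5 per-queue lines give 62 table entries, of which 62 - (8 - 4) * 5 = 42 are reported. The same arithmetic as a standalone check:

/* Standalone check of the count above; values mirror aq_ethtool.c,
 * with the default 4-vector configuration assumed.
 */
#include <assert.h>

#define VECS_MAX	8
#define QUEUE_LINES	5	/* InPackets ... InErrors per queue */
#define GLOBAL_LINES	22	/* InPackets ... InDroppedDma */

int main(void)
{
	int vecs = 4;	/* AQ_CFG_VECS_DEF */
	int total = GLOBAL_LINES + VECS_MAX * QUEUE_LINES;
	int reported = total - (VECS_MAX - vecs) * QUEUE_LINES;

	assert(total == 62 && reported == 42);
	return 0;
}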
+
+static u32 aq_ethtool_get_rss_indir_size(struct net_device *ndev)
+{
+ return AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+}
+
+static u32 aq_ethtool_get_rss_key_size(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+
+ return sizeof(cfg->aq_rss.hash_secret_key);
+}
+
+static int aq_ethtool_get_rss(struct net_device *ndev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ unsigned int i = 0U;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+ if (indir) {
+ for (i = 0; i < AQ_CFG_RSS_INDIRECTION_TABLE_MAX; i++)
+ indir[i] = cfg->aq_rss.indirection_table[i];
+ }
+ if (key)
+ memcpy(key, cfg->aq_rss.hash_secret_key,
+ sizeof(cfg->aq_rss.hash_secret_key));
+ return 0;
+}
+
+static int aq_ethtool_get_rxnfc(struct net_device *ndev,
+ struct ethtool_rxnfc *cmd,
+ u32 *rule_locs)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *cfg = aq_nic_get_cfg(aq_nic);
+ int err = 0;
+
+ switch (cmd->cmd) {
+ case ETHTOOL_GRXRINGS:
+ cmd->data = cfg->vecs;
+ break;
+
+ default:
+ err = -EOPNOTSUPP;
+ break;
+ }
+
+ return err;
+}
+
+const struct ethtool_ops aq_ethtool_ops = {
+ .get_link = aq_ethtool_get_link,
+ .get_regs_len = aq_ethtool_get_regs_len,
+ .get_regs = aq_ethtool_get_regs,
+ .get_drvinfo = aq_ethtool_get_drvinfo,
+ .get_strings = aq_ethtool_get_strings,
+ .get_rxfh_indir_size = aq_ethtool_get_rss_indir_size,
+ .get_rxfh_key_size = aq_ethtool_get_rss_key_size,
+ .get_rxfh = aq_ethtool_get_rss,
+ .get_rxnfc = aq_ethtool_get_rxnfc,
+ .get_sset_count = aq_ethtool_get_sset_count,
+ .get_ethtool_stats = aq_ethtool_stats,
+ .get_link_ksettings = aq_ethtool_get_link_ksettings,
+ .set_link_ksettings = aq_ethtool_set_link_ksettings,
+};
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
new file mode 100644
index 0000000..21c126e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.h
@@ -0,0 +1,19 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ethtool.h: Declaration of ethtool related functions. */
+
+#ifndef AQ_ETHTOOL_H
+#define AQ_ETHTOOL_H
+
+#include "aq_common.h"
+
+extern const struct ethtool_ops aq_ethtool_ops;
+
+#endif /* AQ_ETHTOOL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
new file mode 100644
index 0000000..fce0fd3
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -0,0 +1,177 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw.h: Declaration of abstract interface for NIC hardware specific
+ * functions.
+ */
+
+#ifndef AQ_HW_H
+#define AQ_HW_H
+
+#include "aq_common.h"
+
+/* NIC H/W capabilities */
+struct aq_hw_caps_s {
+ u64 hw_features;
+ u64 link_speed_msk;
+ unsigned int hw_priv_flags;
+ u32 rxds;
+ u32 txds;
+ u32 txhwb_alignment;
+ u32 irq_mask;
+ u32 vecs;
+ u32 mtu;
+ u32 mac_regs_count;
+ u8 ports;
+ u8 msix_irqs;
+ u8 tcs;
+ u8 rxd_alignment;
+ u8 rxd_size;
+ u8 txd_alignment;
+ u8 txd_size;
+ u8 tx_rings;
+ u8 rx_rings;
+ bool flow_control;
+ bool is_64_dma;
+ u32 fw_ver_expected;
+};
+
+struct aq_hw_link_status_s {
+ unsigned int mbps;
+};
+
+#define AQ_HW_IRQ_INVALID 0U
+#define AQ_HW_IRQ_LEGACY 1U
+#define AQ_HW_IRQ_MSI 2U
+#define AQ_HW_IRQ_MSIX 3U
+
+#define AQ_HW_POWER_STATE_D0 0U
+#define AQ_HW_POWER_STATE_D3 3U
+
+#define AQ_HW_FLAG_STARTED 0x00000004U
+#define AQ_HW_FLAG_STOPPING 0x00000008U
+#define AQ_HW_FLAG_RESETTING 0x00000010U
+#define AQ_HW_FLAG_CLOSING 0x00000020U
+#define AQ_HW_LINK_DOWN 0x04000000U
+#define AQ_HW_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_HW_FLAG_ERR_HW 0x80000000U
+
+#define AQ_HW_FLAG_ERRORS (AQ_HW_FLAG_ERR_HW | AQ_HW_FLAG_ERR_UNPLUG)
+
+struct aq_hw_s {
+ struct aq_obj_s header;
+ struct aq_nic_cfg_s *aq_nic_cfg;
+ struct aq_pci_func_s *aq_pci_func;
+ void __iomem *mmio;
+ unsigned int not_ff_addr;
+ struct aq_hw_link_status_s aq_link_status;
+};
+
+struct aq_ring_s;
+struct aq_ring_param_s;
+struct aq_nic_cfg_s;
+struct sk_buff;
+
+struct aq_hw_ops {
+ struct aq_hw_s *(*create)(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port, struct aq_hw_ops *ops);
+
+ void (*destroy)(struct aq_hw_s *self);
+
+ int (*get_hw_caps)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps);
+
+ int (*hw_ring_tx_xmit)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int frags);
+
+ int (*hw_ring_rx_receive)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_fill)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ unsigned int sw_tail_old);
+
+ int (*hw_ring_tx_head_update)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_get_mac_permanent)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac);
+
+ int (*hw_set_mac_address)(struct aq_hw_s *self, u8 *mac_addr);
+
+ int (*hw_get_link_status)(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status);
+
+ int (*hw_set_link_speed)(struct aq_hw_s *self, u32 speed);
+
+ int (*hw_reset)(struct aq_hw_s *self);
+
+ int (*hw_init)(struct aq_hw_s *self, struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr);
+
+ int (*hw_start)(struct aq_hw_s *self);
+
+ int (*hw_stop)(struct aq_hw_s *self);
+
+ int (*hw_ring_tx_init)(struct aq_hw_s *self, struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_tx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_tx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_init)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param);
+
+ int (*hw_ring_rx_start)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_ring_rx_stop)(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring);
+
+ int (*hw_irq_enable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_disable)(struct aq_hw_s *self, u64 mask);
+
+ int (*hw_irq_read)(struct aq_hw_s *self, u64 *mask);
+
+ int (*hw_packet_filter_set)(struct aq_hw_s *self,
+ unsigned int packet_filter);
+
+ int (*hw_multicast_list_set)(struct aq_hw_s *self,
+ u8 ar_mac[AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count);
+
+ int (*hw_interrupt_moderation_set)(struct aq_hw_s *self,
+ bool itr_enabled);
+
+ int (*hw_rss_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_rss_hash_set)(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params);
+
+ int (*hw_get_regs)(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps, u32 *regs_buff);
+
+ int (*hw_get_hw_stats)(struct aq_hw_s *self, u64 *data,
+ unsigned int *p_count);
+
+ int (*hw_get_fw_version)(struct aq_hw_s *self, u32 *fw_version);
+
+ int (*hw_deinit)(struct aq_hw_s *self);
+
+ int (*hw_set_power)(struct aq_hw_s *self, unsigned int power_state);
+};
+
+#endif /* AQ_HW_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
new file mode 100644
index 0000000..5f13465
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.c
@@ -0,0 +1,68 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.c: Definitions of helper functions used across
+ * hardware layer.
+ */
+
+#include "aq_hw_utils.h"
+#include "aq_hw.h"
+
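+/* Read-modify-write helper: when the mask covers only part of the register
+ * (msk != ~0), just the masked field is updated, and the MMIO write is
+ * skipped entirely if the value is unchanged.
+ */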
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val)
+{
+ if (msk != ~0U) {
+ u32 reg_old, reg_new;
+
+ reg_old = aq_hw_read_reg(aq_hw, addr);
+ reg_new = (reg_old & (~msk)) | (val << shift);
+
+ if (reg_old != reg_new)
+ aq_hw_write_reg(aq_hw, addr, reg_new);
+ } else {
+ aq_hw_write_reg(aq_hw, addr, val);
+ }
+}
+
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift)
+{
+ return ((aq_hw_read_reg(aq_hw, addr) & msk) >> shift);
+}
+
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg)
+{
+ u32 value = readl(hw->mmio + reg);
+
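+ /* A read of all-ones can be either a legitimate value or a
+  * surprise-removed device; confirm against a register that must
+  * never read 0xffffffff before flagging an unplug.
+  */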
+ if ((~0U) == value && (~0U) == readl(hw->mmio + hw->not_ff_addr))
+ aq_utils_obj_set(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG);
+
+ return value;
+}
+
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value)
+{
+ writel(value, hw->mmio + reg);
+}
+
+int aq_hw_err_from_flags(struct aq_hw_s *hw)
+{
+ int err = 0;
+
+ if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ if (aq_utils_obj_test(&hw->header.flags, AQ_HW_FLAG_ERR_HW)) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
new file mode 100644
index 0000000..03b72dd
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw_utils.h
@@ -0,0 +1,47 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_hw_utils.h: Declaration of helper functions used across hardware
+ * layer.
+ */
+
+#ifndef AQ_HW_UTILS_H
+#define AQ_HW_UTILS_H
+
+#include "aq_common.h"
+
+#ifndef HIDWORD
+#define LODWORD(_qw) ((u32)(_qw))
+#define HIDWORD(_qw) ((u32)(((_qw) >> 32) & 0xffffffff))
+#endif
+
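+/* Note: despite the _US_ parameter name, the argument is handed to mdelay()
+ * and is therefore interpreted in milliseconds.
+ */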
+#define AQ_HW_SLEEP(_US_) mdelay(_US_)
+
+#define AQ_HW_WAIT_FOR(_B_, _US_, _N_) \
+do { \
+ unsigned int AQ_HW_WAIT_FOR_i; \
+ for (AQ_HW_WAIT_FOR_i = _N_; (!(_B_)) && (AQ_HW_WAIT_FOR_i);\
+ --AQ_HW_WAIT_FOR_i) {\
+ udelay(_US_); \
+ } \
+ if (!AQ_HW_WAIT_FOR_i) {\
+ err = -ETIME; \
+ } \
+} while (0)
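+
+/* A minimal usage sketch (register and bit names here are hypothetical):
+ * the macro assumes a local 'int err' in the calling scope, which it sets
+ * to -ETIME if the condition never becomes true within _N_ polls:
+ *
+ *	int err = 0;
+ *
+ *	AQ_HW_WAIT_FOR(aq_hw_read_reg(hw, HW_RESET_REG) & HW_RESET_DONE,
+ *		       1000U, 10U);
+ *	if (err < 0)
+ *		return err;
+ */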
+
+struct aq_hw_s;
+
+void aq_hw_write_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk,
+ u32 shift, u32 val);
+u32 aq_hw_read_reg_bit(struct aq_hw_s *aq_hw, u32 addr, u32 msk, u32 shift);
+u32 aq_hw_read_reg(struct aq_hw_s *hw, u32 reg);
+void aq_hw_write_reg(struct aq_hw_s *hw, u32 reg, u32 value);
+int aq_hw_err_from_flags(struct aq_hw_s *hw);
+
+#endif /* AQ_HW_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.c b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
new file mode 100644
index 0000000..c17c70a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.c
@@ -0,0 +1,273 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.c: Main file for aQuantia Linux driver. */
+
+#include "aq_main.h"
+#include "aq_nic.h"
+#include "aq_pci_func.h"
+#include "aq_ethtool.h"
+#include "hw_atl/hw_atl_a0.h"
+#include "hw_atl/hw_atl_b0.h"
+
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+static const struct pci_device_id aq_pci_tbl[] = {
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_0001), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D100), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D107), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D108), },
+ { PCI_VDEVICE(AQUANTIA, HW_ATL_DEVICE_ID_D109), },
+ {}
+};
+
+MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
+
+MODULE_LICENSE("GPL v2");
+MODULE_VERSION(AQ_CFG_DRV_VERSION);
+MODULE_AUTHOR(AQ_CFG_DRV_AUTHOR);
+MODULE_DESCRIPTION(AQ_CFG_DRV_DESC);
+
+static struct aq_hw_ops *aq_pci_probe_get_hw_ops_by_id(struct pci_dev *pdev)
+{
+ struct aq_hw_ops *ops = NULL;
+
+ ops = hw_atl_a0_get_ops_by_id(pdev);
+ if (!ops)
+ ops = hw_atl_b0_get_ops_by_id(pdev);
+
+ return ops;
+}
+
+static int aq_ndev_open(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = NULL;
+ int err = 0;
+
+ aq_nic = aq_nic_alloc_hot(ndev);
+ if (!aq_nic) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ err = aq_nic_init(aq_nic);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic_start(aq_nic);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ if (err < 0)
+ aq_nic_deinit(aq_nic);
+ return err;
+}
+
+static int aq_ndev_close(struct net_device *ndev)
+{
+ int err = 0;
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+
+ err = aq_nic_stop(aq_nic);
+ if (err < 0)
+ goto err_exit;
+ aq_nic_deinit(aq_nic);
+ aq_nic_free_hot_resources(aq_nic);
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = aq_nic_xmit(aq_nic, skb);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_change_mtu(struct net_device *ndev, int new_mtu)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ if (new_mtu == ndev->mtu) {
+ err = 0;
+ goto err_exit;
+ }
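+ /* 68 bytes is the minimum MTU an IPv4 host must accept */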
+ if (new_mtu < 68) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = aq_nic_set_mtu(aq_nic, new_mtu + ETH_HLEN);
+ if (err < 0)
+ goto err_exit;
+ ndev->mtu = new_mtu;
+
+ if (netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+
+err_exit:
+ return err;
+}
+
+static int aq_ndev_set_features(struct net_device *ndev,
+ netdev_features_t features)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ struct aq_nic_cfg_s *aq_cfg = aq_nic_get_cfg(aq_nic);
+ bool is_lro = false;
+
+ if (aq_cfg->hw_features & NETIF_F_LRO) {
+ is_lro = features & NETIF_F_LRO;
+
+ if (aq_cfg->is_lro != is_lro) {
+ aq_cfg->is_lro = is_lro;
+
+ if (netif_running(ndev)) {
+ aq_ndev_close(ndev);
+ aq_ndev_open(ndev);
+ }
+ }
+ }
+
+ return 0;
+}
+
+static int aq_ndev_set_mac_address(struct net_device *ndev, void *addr)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = eth_mac_addr(ndev, addr);
+ if (err < 0)
+ goto err_exit;
+ err = aq_nic_set_mac(aq_nic, ndev);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static void aq_ndev_set_multicast_settings(struct net_device *ndev)
+{
+ struct aq_nic_s *aq_nic = netdev_priv(ndev);
+ int err = 0;
+
+ err = aq_nic_set_packet_filter(aq_nic, ndev->flags);
+ if (err < 0)
+ goto err_exit;
+
+ if (netdev_mc_count(ndev)) {
+ err = aq_nic_set_multicast_list(aq_nic, ndev);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:;
+}
+
+static const struct net_device_ops aq_ndev_ops = {
+ .ndo_open = aq_ndev_open,
+ .ndo_stop = aq_ndev_close,
+ .ndo_start_xmit = aq_ndev_start_xmit,
+ .ndo_set_rx_mode = aq_ndev_set_multicast_settings,
+ .ndo_change_mtu = aq_ndev_change_mtu,
+ .ndo_set_mac_address = aq_ndev_set_mac_address,
+ .ndo_set_features = aq_ndev_set_features
+};
+
+static int aq_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *pci_id)
+{
+ struct aq_hw_ops *aq_hw_ops = NULL;
+ struct aq_pci_func_s *aq_pci_func = NULL;
+ int err = 0;
+
+ err = pci_enable_device(pdev);
+ if (err < 0)
+ goto err_exit;
+ aq_hw_ops = aq_pci_probe_get_hw_ops_by_id(pdev);
+ aq_pci_func = aq_pci_func_alloc(aq_hw_ops, pdev,
+ &aq_ndev_ops, &aq_ethtool_ops);
+ if (!aq_pci_func) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ err = aq_pci_func_init(aq_pci_func);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ if (err < 0) {
+ if (aq_pci_func)
+ aq_pci_func_free(aq_pci_func);
+ }
+ return err;
+}
+
+static void aq_pci_remove(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ aq_pci_func_deinit(aq_pci_func);
+ aq_pci_func_free(aq_pci_func);
+}
+
+static int aq_pci_suspend(struct pci_dev *pdev, pm_message_t pm_msg)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static int aq_pci_resume(struct pci_dev *pdev)
+{
+ struct aq_pci_func_s *aq_pci_func = pci_get_drvdata(pdev);
+ pm_message_t pm_msg = PMSG_RESTORE;
+
+ return aq_pci_func_change_pm_state(aq_pci_func, &pm_msg);
+}
+
+static struct pci_driver aq_pci_ops = {
+ .name = AQ_CFG_DRV_NAME,
+ .id_table = aq_pci_tbl,
+ .probe = aq_pci_probe,
+ .remove = aq_pci_remove,
+ .suspend = aq_pci_suspend,
+ .resume = aq_pci_resume,
+};
+
+static int __init aq_module_init(void)
+{
+ int err = 0;
+
+ err = pci_register_driver(&aq_pci_ops);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static void __exit aq_module_exit(void)
+{
+ pci_unregister_driver(&aq_pci_ops);
+}
+
+module_init(aq_module_init);
+module_exit(aq_module_exit);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_main.h b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
new file mode 100644
index 0000000..9748e7e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_main.h
@@ -0,0 +1,17 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_main.h: Main file for aQuantia Linux driver. */
+
+#ifndef AQ_MAIN_H
+#define AQ_MAIN_H
+
+#include "aq_common.h"
+
+#endif /* AQ_MAIN_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
new file mode 100644
index 0000000..bed25ab
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -0,0 +1,952 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.c: Definition of common code for NIC. */
+
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include "aq_pci_func.h"
+#include "aq_nic_internal.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/timer.h>
+#include <linux/cpu.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <net/ip.h>
+
+static void aq_nic_rss_init(struct aq_nic_s *self, unsigned int num_rss_queues)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+ struct aq_rss_parameters *rss_params = &cfg->aq_rss;
+ int i = 0;
+
+ static u8 rss_key[40] = {
+ 0x1e, 0xad, 0x71, 0x87, 0x65, 0xfc, 0x26, 0x7d,
+ 0x0d, 0x45, 0x67, 0x74, 0xcd, 0x06, 0x1a, 0x18,
+ 0xb6, 0xc1, 0xf0, 0xc7, 0xbb, 0x18, 0xbe, 0xf8,
+ 0x19, 0x13, 0x4b, 0xa9, 0xd0, 0x3e, 0xfe, 0x70,
+ 0x25, 0x03, 0xab, 0x50, 0x6a, 0x8b, 0x82, 0x0c
+ };
+
+ rss_params->hash_secret_key_size = sizeof(rss_key);
+ memcpy(rss_params->hash_secret_key, rss_key, sizeof(rss_key));
+ rss_params->indirection_table_size = AQ_CFG_RSS_INDIRECTION_TABLE_MAX;
+
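+ /* Spread table entries evenly across the RSS queues; the mask trick
+  * below relies on num_rss_queues being a power of two.
+  */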
+ for (i = rss_params->indirection_table_size; i--;)
+ rss_params->indirection_table[i] = i & (num_rss_queues - 1);
+}
+
+/* Fills aq_nic_cfg with valid defaults */
+static void aq_nic_cfg_init_defaults(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ cfg->aq_hw_caps = &self->aq_hw_caps;
+
+ cfg->vecs = AQ_CFG_VECS_DEF;
+ cfg->tcs = AQ_CFG_TCS_DEF;
+
+ cfg->rxds = AQ_CFG_RXDS_DEF;
+ cfg->txds = AQ_CFG_TXDS_DEF;
+
+ cfg->is_polling = AQ_CFG_IS_POLLING_DEF;
+
+ cfg->is_interrupt_moderation = AQ_CFG_IS_INTERRUPT_MODERATION_DEF;
+ cfg->itr = cfg->is_interrupt_moderation ?
+ AQ_CFG_INTERRUPT_MODERATION_RATE_DEF : 0U;
+
+ cfg->is_rss = AQ_CFG_IS_RSS_DEF;
+ cfg->num_rss_queues = AQ_CFG_NUM_RSS_QUEUES_DEF;
+ cfg->aq_rss.base_cpu_number = AQ_CFG_RSS_BASE_CPU_NUM_DEF;
+ cfg->flow_control = AQ_CFG_FC_MODE;
+
+ cfg->mtu = AQ_CFG_MTU_DEF;
+ cfg->link_speed_msk = AQ_CFG_SPEED_MSK;
+ cfg->is_autoneg = AQ_CFG_IS_AUTONEG_DEF;
+
+ cfg->is_lro = AQ_CFG_IS_LRO_DEF;
+
+ cfg->vlan_id = 0U;
+
+ aq_nic_rss_init(self, cfg->num_rss_queues);
+}
+
+/* Checks hw_caps and 'corrects' aq_nic_cfg at runtime */
+int aq_nic_cfg_start(struct aq_nic_s *self)
+{
+ struct aq_nic_cfg_s *cfg = &self->aq_nic_cfg;
+
+ /* descriptors */
+ cfg->rxds = min(cfg->rxds, cfg->aq_hw_caps->rxds);
+ cfg->txds = min(cfg->txds, cfg->aq_hw_caps->txds);
+
+ /* RSS rings */
+ cfg->vecs = min(cfg->vecs, cfg->aq_hw_caps->vecs);
+ cfg->vecs = min(cfg->vecs, num_online_cpus());
+ /* cfg->vecs should be a power of 2 for RSS */
+ if (cfg->vecs >= 8U)
+ cfg->vecs = 8U;
+ else if (cfg->vecs >= 4U)
+ cfg->vecs = 4U;
+ else if (cfg->vecs >= 2U)
+ cfg->vecs = 2U;
+ else
+ cfg->vecs = 1U;
+
+ cfg->irq_type = aq_pci_func_get_irq_type(self->aq_pci_func);
+
+ if ((cfg->irq_type == AQ_HW_IRQ_LEGACY) ||
+ (self->aq_hw_caps.vecs == 1U) ||
+ (cfg->vecs == 1U)) {
+ cfg->is_rss = 0U;
+ cfg->vecs = 1U;
+ }
+
+ cfg->link_speed_msk &= self->aq_hw_caps.link_speed_msk;
+ cfg->hw_features = self->aq_hw_caps.hw_features;
+ return 0;
+}
+
+static void aq_nic_service_timer_cb(unsigned long param)
+{
+ struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct net_device *ndev = aq_nic_get_ndev(self);
+ int err = 0;
+ bool is_busy = false;
+ unsigned int i = 0U;
+ struct aq_hw_link_status_s link_status;
+ struct aq_ring_stats_rx_s stats_rx;
+ struct aq_ring_stats_tx_s stats_tx;
+
+ atomic_inc(&self->header.busy_count);
+ is_busy = true;
+ if (aq_utils_obj_test(&self->header.flags, AQ_NIC_FLAGS_IS_NOT_READY))
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_get_link_status(self->aq_hw, &link_status);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+
+ if (memcmp(&link_status, &self->link_status, sizeof(link_status))) {
+ if (link_status.mbps) {
+ aq_utils_obj_set(&self->header.flags,
+ AQ_NIC_FLAG_STARTED);
+ aq_utils_obj_clear(&self->header.flags,
+ AQ_NIC_LINK_DOWN);
+ netif_carrier_on(self->ndev);
+ } else {
+ netif_carrier_off(self->ndev);
+ aq_utils_obj_set(&self->header.flags, AQ_NIC_LINK_DOWN);
+ }
+
+ self->link_status = link_status;
+ }
+
+ memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+ memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+ for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ if (self->aq_vec[i])
+ aq_vec_add_stats(self->aq_vec[i], &stats_rx, &stats_tx);
+ }
+
+ ndev->stats.rx_packets = stats_rx.packets;
+ ndev->stats.rx_bytes = stats_rx.bytes;
+ ndev->stats.rx_errors = stats_rx.errors;
+ ndev->stats.tx_packets = stats_tx.packets;
+ ndev->stats.tx_bytes = stats_tx.bytes;
+ ndev->stats.tx_errors = stats_tx.errors;
+
+err_exit:
+ if (is_busy)
+ atomic_dec(&self->header.busy_count);
+ mod_timer(&self->service_timer,
+ jiffies + AQ_CFG_SERVICE_TIMER_INTERVAL);
+}
+
+static void aq_nic_polling_timer_cb(unsigned long param)
+{
+ struct aq_nic_s *self = (struct aq_nic_s *)param;
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_isr(i, (void *)aq_vec);
+
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+}
+
+static struct net_device *aq_nic_ndev_alloc(void)
+{
+ return alloc_etherdev_mq(sizeof(struct aq_nic_s), AQ_CFG_VECS_MAX);
+}
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *et_ops,
+ struct device *dev,
+ struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ const struct aq_hw_ops *aq_hw_ops)
+{
+ struct net_device *ndev = NULL;
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+
+ ndev = aq_nic_ndev_alloc();
+ self = netdev_priv(ndev);
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ ndev->netdev_ops = ndev_ops;
+ ndev->ethtool_ops = et_ops;
+
+ SET_NETDEV_DEV(ndev, dev);
+
+ ndev->if_port = port;
+ self->ndev = ndev;
+
+ self->aq_pci_func = aq_pci_func;
+
+ self->aq_hw_ops = *aq_hw_ops;
+ self->port = (u8)port;
+
+ self->aq_hw = self->aq_hw_ops.create(aq_pci_func, self->port,
+ &self->aq_hw_ops);
+ err = self->aq_hw_ops.get_hw_caps(self->aq_hw, &self->aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_cfg_init_defaults(self);
+
+err_exit:
+ if (err < 0) {
+ aq_nic_free_hot_resources(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_nic_ndev_register(struct aq_nic_s *self)
+{
+ int err = 0;
+ unsigned int i = 0U;
+
+ if (!self->ndev) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ err = self->aq_hw_ops.hw_get_mac_permanent(self->aq_hw,
+ self->aq_nic_cfg.aq_hw_caps,
+ self->ndev->dev_addr);
+ if (err < 0)
+ goto err_exit;
+
+#if defined(AQ_CFG_MAC_ADDR_PERMANENT)
+ {
+ static u8 mac_addr_permanent[] = AQ_CFG_MAC_ADDR_PERMANENT;
+
+ ether_addr_copy(self->ndev->dev_addr, mac_addr_permanent);
+ }
+#endif
+ err = register_netdev(self->ndev);
+ if (err < 0)
+ goto err_exit;
+
+ self->is_ndev_registered = true;
+ netif_carrier_off(self->ndev);
+
+ for (i = AQ_CFG_VECS_MAX; i--;)
+ aq_nic_ndev_queue_stop(self, i);
+
+err_exit:
+ return err;
+}
+
+int aq_nic_ndev_init(struct aq_nic_s *self)
+{
+ struct aq_hw_caps_s *aq_hw_caps = self->aq_nic_cfg.aq_hw_caps;
+ struct aq_nic_cfg_s *aq_nic_cfg = &self->aq_nic_cfg;
+
+ self->ndev->hw_features |= aq_hw_caps->hw_features;
+ self->ndev->features = aq_hw_caps->hw_features;
+ self->ndev->priv_flags = aq_hw_caps->hw_priv_flags;
+ self->ndev->mtu = aq_nic_cfg->mtu - ETH_HLEN;
+
+ return 0;
+}
+
+void aq_nic_ndev_free(struct aq_nic_s *self)
+{
+ if (!self->ndev)
+ goto err_exit;
+
+ if (self->is_ndev_registered)
+ unregister_netdev(self->ndev);
+
+ if (self->aq_hw)
+ self->aq_hw_ops.destroy(self->aq_hw);
+
+ free_netdev(self->ndev);
+
+err_exit:;
+}
+
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev)
+{
+ struct aq_nic_s *self = NULL;
+ int err = 0;
+
+ if (!ndev) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ self = netdev_priv(ndev);
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ if (netif_running(ndev)) {
+ unsigned int i;
+
+ for (i = AQ_CFG_VECS_MAX; i--;)
+ netif_stop_subqueue(ndev, i);
+ }
+
+ for (self->aq_vecs = 0; self->aq_vecs < self->aq_nic_cfg.vecs;
+ self->aq_vecs++) {
+ self->aq_vec[self->aq_vecs] =
+ aq_vec_alloc(self, self->aq_vecs, &self->aq_nic_cfg);
+ if (!self->aq_vec[self->aq_vecs]) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_nic_free_hot_resources(self);
+ self = NULL;
+ }
+ return self;
+}
+
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring)
+{
+ self->aq_ring_tx[idx] = ring;
+}
+
+struct device *aq_nic_get_dev(struct aq_nic_s *self)
+{
+ return self->ndev->dev.parent;
+}
+
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self)
+{
+ return self->ndev;
+}
+
+int aq_nic_init(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+
+ self->power_state = AQ_HW_POWER_STATE_D0;
+ err = self->aq_hw_ops.hw_reset(self->aq_hw);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_init(self->aq_hw, &self->aq_nic_cfg,
+ aq_nic_get_ndev(self)->dev_addr);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_init(aq_vec, &self->aq_hw_ops, self->aq_hw);
+
+err_exit:
+ return err;
+}
+
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx)
+{
+ netif_start_subqueue(self->ndev, idx);
+}
+
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx)
+{
+ netif_stop_subqueue(self->ndev, idx);
+}
+
+int aq_nic_start(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+
+ err = self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw,
+ self->packet_filter);
+ if (err < 0)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ err = aq_vec_start(aq_vec);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = self->aq_hw_ops.hw_start(self->aq_hw);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops.hw_interrupt_moderation_set(self->aq_hw,
+ self->aq_nic_cfg.is_interrupt_moderation);
+ if (err < 0)
+ goto err_exit;
+ setup_timer(&self->service_timer, &aq_nic_service_timer_cb,
+ (unsigned long)self);
+ mod_timer(&self->service_timer, jiffies +
+ AQ_CFG_SERVICE_TIMER_INTERVAL);
+
+ if (self->aq_nic_cfg.is_polling) {
+ setup_timer(&self->polling_timer, &aq_nic_polling_timer_cb,
+ (unsigned long)self);
+ mod_timer(&self->polling_timer, jiffies +
+ AQ_CFG_POLLING_TIMER_INTERVAL);
+ } else {
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ err = aq_pci_func_alloc_irq(self->aq_pci_func, i,
+ self->ndev->name, aq_vec,
+ aq_vec_get_affinity_mask(aq_vec));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = self->aq_hw_ops.hw_irq_enable(self->aq_hw,
+ AQ_CFG_IRQ_MASK);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_nic_ndev_queue_start(self, i);
+
+ err = netif_set_real_num_tx_queues(self->ndev, self->aq_vecs);
+ if (err < 0)
+ goto err_exit;
+
+ err = netif_set_real_num_rx_queues(self->ndev, self->aq_vecs);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static unsigned int aq_nic_map_skb_frag(struct aq_nic_s *self,
+ struct sk_buff *skb,
+ struct aq_ring_buff_s *dx)
+{
+ unsigned int ret = 0U;
+ unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
+ unsigned int frag_count = 0U;
+
+ dx->flags = 0U;
+ dx->len = skb_headlen(skb);
+ dx->pa = dma_map_single(aq_nic_get_dev(self), skb->data, dx->len,
+ DMA_TO_DEVICE);
+ dx->len_pkt = skb->len;
+ dx->is_sop = 1U;
+ dx->is_mapped = 1U;
+
+ ++ret;
+
+ if (skb->ip_summed == CHECKSUM_PARTIAL) {
+ dx->is_ip_cso = (htons(ETH_P_IP) == skb->protocol) ? 1U : 0U;
+ dx->is_tcp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_TCP) ? 1U : 0U;
+ dx->is_udp_cso =
+ (ip_hdr(skb)->protocol == IPPROTO_UDP) ? 1U : 0U;
+ }
+
+ for (; nr_frags--; ++frag_count) {
+ unsigned int frag_len;
+ dma_addr_t frag_pa;
+ skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_count];
+
+ frag_len = skb_frag_size(frag);
+
+ frag_pa = skb_frag_dma_map(aq_nic_get_dev(self), frag, 0,
+ frag_len, DMA_TO_DEVICE);
+
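+ /* A descriptor carries at most AQ_CFG_TX_FRAME_MAX bytes, so
+  * oversized fragments are split across several descriptors.
+  */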
+ while (frag_len > AQ_CFG_TX_FRAME_MAX) {
+ ++dx;
+ ++ret;
+ dx->flags = 0U;
+ dx->len = AQ_CFG_TX_FRAME_MAX;
+ dx->pa = frag_pa;
+ dx->is_mapped = 1U;
+
+ frag_len -= AQ_CFG_TX_FRAME_MAX;
+ frag_pa += AQ_CFG_TX_FRAME_MAX;
+ }
+
+ ++dx;
+ ++ret;
+
+ dx->flags = 0U;
+ dx->len = frag_len;
+ dx->pa = frag_pa;
+ dx->is_mapped = 1U;
+ }
+
+ dx->is_eop = 1U;
+ dx->skb = skb;
+
+ return ret;
+}
+
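+/* Fills a single Tx context descriptor with the header lengths and MSS the
+ * hardware needs for segmentation offload (LSO); always consumes one slot.
+ */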
+static unsigned int aq_nic_map_skb_lso(struct aq_nic_s *self,
+ struct sk_buff *skb,
+ struct aq_ring_buff_s *dx)
+{
+ dx->flags = 0U;
+ dx->len_pkt = skb->len;
+ dx->len_l2 = ETH_HLEN;
+ dx->len_l3 = ip_hdrlen(skb);
+ dx->len_l4 = tcp_hdrlen(skb);
+ dx->mss = skb_shinfo(skb)->gso_size;
+ dx->is_txc = 1U;
+ return 1U;
+}
+
+static unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
+ struct aq_ring_buff_s *dx)
+{
+ unsigned int ret = 0U;
+
+ if (unlikely(skb_is_gso(skb))) {
+ ret = aq_nic_map_skb_lso(self, skb, dx);
+ ++dx;
+ }
+
+ ret += aq_nic_map_skb_frag(self, skb, dx);
+
+ return ret;
+}
+
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
+__releases(&ring->lock)
+__acquires(&ring->lock)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int frags = 0U;
+ unsigned int vec = skb->queue_mapping % self->aq_nic_cfg.vecs;
+ unsigned int tc = 0U;
+ unsigned int trys = AQ_CFG_LOCK_TRYS;
+ int err = 0;
+ bool is_nic_in_bad_state;
+ bool is_busy = false;
+ struct aq_ring_buff_s buffers[AQ_CFG_SKB_FRAGS_MAX];
+
+ frags = skb_shinfo(skb)->nr_frags + 1;
+
+ ring = self->aq_ring_tx[AQ_NIC_TCVEC2RING(self, tc, vec)];
+
+ atomic_inc(&self->header.busy_count);
+ is_busy = true;
+
+ if (frags > AQ_CFG_SKB_FRAGS_MAX) {
+ dev_kfree_skb_any(skb);
+ goto err_exit;
+ }
+
+ is_nic_in_bad_state = aq_utils_obj_test(&self->header.flags,
+ AQ_NIC_FLAGS_IS_NOT_TX_READY) ||
+ (aq_ring_avail_dx(ring) <
+ AQ_CFG_SKB_FRAGS_MAX);
+
+ if (is_nic_in_bad_state) {
+ aq_nic_ndev_queue_stop(self, ring->idx);
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
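+ /* Take the ring lock opportunistically: retry a bounded number of
+  * times instead of blocking in the hot xmit path.
+  */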
+ do {
+ if (spin_trylock(&ring->header.lock)) {
+ frags = aq_nic_map_skb(self, skb, &buffers[0]);
+
+ aq_ring_tx_append_buffs(ring, &buffers[0], frags);
+
+ err = self->aq_hw_ops.hw_ring_tx_xmit(self->aq_hw,
+ ring, frags);
+ if (err >= 0) {
+ if (aq_ring_avail_dx(ring) <
+ AQ_CFG_SKB_FRAGS_MAX + 1)
+ aq_nic_ndev_queue_stop(self, ring->idx);
+ }
+ spin_unlock(&ring->header.lock);
+
+ if (err >= 0) {
+ ++ring->stats.tx.packets;
+ ring->stats.tx.bytes += skb->len;
+ }
+ break;
+ }
+ } while (--trys);
+
+ if (!trys) {
+ err = NETDEV_TX_BUSY;
+ goto err_exit;
+ }
+
+err_exit:
+ if (is_busy)
+ atomic_dec(&self->header.busy_count);
+ return err;
+}
+
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags)
+{
+ int err = 0;
+
+ err = self->aq_hw_ops.hw_packet_filter_set(self->aq_hw, flags);
+ if (err < 0)
+ goto err_exit;
+
+ self->packet_filter = flags;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev)
+{
+ struct netdev_hw_addr *ha = NULL;
+ unsigned int i = 0U;
+
+ self->mc_list.count = 0U;
+
+ netdev_for_each_mc_addr(ha, ndev) {
+ ether_addr_copy(self->mc_list.ar[i++], ha->addr);
+ ++self->mc_list.count;
+ }
+
+ return self->aq_hw_ops.hw_multicast_list_set(self->aq_hw,
+ self->mc_list.ar,
+ self->mc_list.count);
+}
+
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu)
+{
+ int err = 0;
+
+ if (new_mtu > self->aq_hw_caps.mtu) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ self->aq_nic_cfg.mtu = new_mtu;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev)
+{
+ return self->aq_hw_ops.hw_set_mac_address(self->aq_hw, ndev->dev_addr);
+}
+
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self)
+{
+ return self->link_status.mbps;
+}
+
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p)
+{
+ u32 *regs_buff = p;
+ int err = 0;
+
+ regs->version = 1;
+
+ err = self->aq_hw_ops.hw_get_regs(self->aq_hw,
+ &self->aq_hw_caps, regs_buff);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+int aq_nic_get_regs_count(struct aq_nic_s *self)
+{
+ return self->aq_hw_caps.mac_regs_count;
+}
+
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+ unsigned int count = 0U;
+ int err = 0;
+
+ err = self->aq_hw_ops.hw_get_hw_stats(self->aq_hw, data, &count);
+ if (err < 0)
+ goto err_exit;
+
+ data += count;
+ count = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i]) {
+ data += count;
+ aq_vec_get_sw_stats(aq_vec, data, &count);
+ }
+
+err_exit:;
+ (void)err;
+}
+
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd)
+{
+ u32 supported, advertising;
+
+ cmd->base.port = PORT_TP;
+ /* This driver supports only 10G capable adapters, so DUPLEX_FULL */
+ cmd->base.duplex = DUPLEX_FULL;
+ cmd->base.autoneg = self->aq_nic_cfg.is_autoneg;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
+
+ supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_10G) ?
+ ADVERTISED_10000baseT_Full : 0U;
+ supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_1G) ?
+ ADVERTISED_1000baseT_Full : 0U;
+ supported |= (self->aq_hw_caps.link_speed_msk & AQ_NIC_RATE_100M) ?
+ ADVERTISED_100baseT_Full : 0U;
+ supported |= self->aq_hw_caps.flow_control ? SUPPORTED_Pause : 0;
+ supported |= SUPPORTED_Autoneg;
+ supported |= SUPPORTED_TP;
+
+ advertising = (self->aq_nic_cfg.is_autoneg) ?
+ ADVERTISED_Autoneg : 0U;
+ advertising |=
+ (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_10G) ?
+ ADVERTISED_10000baseT_Full : 0U;
+ advertising |=
+ (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_1G) ?
+ ADVERTISED_1000baseT_Full : 0U;
+
+ advertising |=
+ (self->aq_nic_cfg.link_speed_msk & AQ_NIC_RATE_100M) ?
+ ADVERTISED_100baseT_Full : 0U;
+ advertising |= (self->aq_nic_cfg.flow_control) ?
+ ADVERTISED_Pause : 0U;
+ advertising |= ADVERTISED_TP;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+}
+
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd)
+{
+ u32 speed = 0U;
+ u32 rate = 0U;
+ int err = 0;
+
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ rate = self->aq_hw_caps.link_speed_msk;
+ self->aq_nic_cfg.is_autoneg = true;
+ } else {
+ speed = cmd->base.speed;
+
+ switch (speed) {
+ case SPEED_100:
+ rate = AQ_NIC_RATE_100M;
+ break;
+
+ case SPEED_1000:
+ rate = AQ_NIC_RATE_1G;
+ break;
+
+ case SPEED_2500:
+ rate = AQ_NIC_RATE_2GS;
+ break;
+
+ case SPEED_5000:
+ rate = AQ_NIC_RATE_5G;
+ break;
+
+ case SPEED_10000:
+ rate = AQ_NIC_RATE_10G;
+ break;
+
+ default:
+ err = -EINVAL;
+ goto err_exit;
+ }
+ if (!(self->aq_hw_caps.link_speed_msk & rate)) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+
+ self->aq_nic_cfg.is_autoneg = false;
+ }
+
+ err = self->aq_hw_ops.hw_set_link_speed(self->aq_hw, rate);
+ if (err < 0)
+ goto err_exit;
+
+ self->aq_nic_cfg.link_speed_msk = rate;
+
+err_exit:
+ return err;
+}
+
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self)
+{
+ return &self->aq_nic_cfg;
+}
+
+u32 aq_nic_get_fw_version(struct aq_nic_s *self)
+{
+ u32 fw_version = 0U;
+
+ self->aq_hw_ops.hw_get_fw_version(self->aq_hw, &fw_version);
+
+ return fw_version;
+}
+
+int aq_nic_stop(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_nic_ndev_queue_stop(self, i);
+
+ del_timer_sync(&self->service_timer);
+
+ self->aq_hw_ops.hw_irq_disable(self->aq_hw, AQ_CFG_IRQ_MASK);
+
+ if (self->aq_nic_cfg.is_polling)
+ del_timer_sync(&self->polling_timer);
+ else
+ aq_pci_func_free_irqs(self->aq_pci_func);
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_stop(aq_vec);
+
+ return self->aq_hw_ops.hw_stop(self->aq_hw);
+}
+
+void aq_nic_deinit(struct aq_nic_s *self)
+{
+ struct aq_vec_s *aq_vec = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, aq_vec = self->aq_vec[0];
+ self->aq_vecs > i; ++i, aq_vec = self->aq_vec[i])
+ aq_vec_deinit(aq_vec);
+
+ if (self->power_state == AQ_HW_POWER_STATE_D0) {
+ (void)self->aq_hw_ops.hw_deinit(self->aq_hw);
+ } else {
+ (void)self->aq_hw_ops.hw_set_power(self->aq_hw,
+ self->power_state);
+ }
+
+err_exit:;
+}
+
+void aq_nic_free_hot_resources(struct aq_nic_s *self)
+{
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = AQ_DIMOF(self->aq_vec); i--;) {
+ if (self->aq_vec[i])
+ aq_vec_free(self->aq_vec[i]);
+ }
+
+err_exit:;
+}
+
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg)
+{
+ int err = 0;
+
+ if (!netif_running(self->ndev)) {
+ err = 0;
+ goto err_exit;
+ }
+ rtnl_lock();
+ if (pm_msg->event & PM_EVENT_SLEEP || pm_msg->event & PM_EVENT_FREEZE) {
+ self->power_state = AQ_HW_POWER_STATE_D3;
+ netif_device_detach(self->ndev);
+ netif_tx_stop_all_queues(self->ndev);
+
+ err = aq_nic_stop(self);
+ if (err < 0)
+ goto err_exit;
+
+ aq_nic_deinit(self);
+ } else {
+ err = aq_nic_init(self);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_start(self);
+ if (err < 0)
+ goto err_exit;
+
+ netif_device_attach(self->ndev);
+ netif_tx_start_all_queues(self->ndev);
+ }
+ rtnl_unlock();
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
new file mode 100644
index 0000000..7fc2a5e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.h
@@ -0,0 +1,110 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic.h: Declaration of common code for NIC. */
+
+#ifndef AQ_NIC_H
+#define AQ_NIC_H
+
+#include "aq_common.h"
+#include "aq_rss.h"
+
+struct aq_ring_s;
+struct aq_pci_func_s;
+struct aq_hw_ops;
+
+#define AQ_NIC_FC_OFF 0U
+#define AQ_NIC_FC_TX 1U
+#define AQ_NIC_FC_RX 2U
+#define AQ_NIC_FC_FULL 3U
+#define AQ_NIC_FC_AUTO 4U
+
+#define AQ_NIC_RATE_10G BIT(0)
+#define AQ_NIC_RATE_5G BIT(1)
+#define AQ_NIC_RATE_5GSR BIT(2)
+#define AQ_NIC_RATE_2GS BIT(3)
+#define AQ_NIC_RATE_1G BIT(4)
+#define AQ_NIC_RATE_100M BIT(5)
+
+struct aq_nic_cfg_s {
+ struct aq_hw_caps_s *aq_hw_caps;
+ u64 hw_features;
+ u32 rxds; /* rx ring size, descriptors # */
+ u32 txds; /* tx ring size, descriptors # */
+ u32 vecs; /* vecs==allocated irqs */
+ u32 irq_type;
+ u32 itr;
+ u32 num_rss_queues;
+ u32 mtu;
+ u32 ucp_0x364;
+ u32 flow_control;
+ u32 link_speed_msk;
+ u32 vlan_id;
+ u16 is_mc_list_enabled;
+ u16 mc_list_count;
+ bool is_autoneg;
+ bool is_interrupt_moderation;
+ bool is_polling;
+ bool is_rss;
+ bool is_lro;
+ u8 tcs;
+ struct aq_rss_parameters aq_rss;
+};
+
+#define AQ_NIC_FLAG_STARTED 0x00000004U
+#define AQ_NIC_FLAG_STOPPING 0x00000008U
+#define AQ_NIC_FLAG_RESETTING 0x00000010U
+#define AQ_NIC_FLAG_CLOSING 0x00000020U
+#define AQ_NIC_LINK_DOWN 0x04000000U
+#define AQ_NIC_FLAG_ERR_UNPLUG 0x40000000U
+#define AQ_NIC_FLAG_ERR_HW 0x80000000U
+
+#define AQ_NIC_TCVEC2RING(_NIC_, _TC_, _VEC_) \
+ ((_TC_) * AQ_CFG_TCS_MAX + (_VEC_))
+
+struct aq_nic_s *aq_nic_alloc_cold(const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *et_ops,
+ struct device *dev,
+ struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ const struct aq_hw_ops *aq_hw_ops);
+int aq_nic_ndev_init(struct aq_nic_s *self);
+struct aq_nic_s *aq_nic_alloc_hot(struct net_device *ndev);
+void aq_nic_set_tx_ring(struct aq_nic_s *self, unsigned int idx,
+ struct aq_ring_s *ring);
+struct device *aq_nic_get_dev(struct aq_nic_s *self);
+struct net_device *aq_nic_get_ndev(struct aq_nic_s *self);
+int aq_nic_init(struct aq_nic_s *self);
+int aq_nic_cfg_start(struct aq_nic_s *self);
+int aq_nic_ndev_register(struct aq_nic_s *self);
+void aq_nic_ndev_queue_start(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_queue_stop(struct aq_nic_s *self, unsigned int idx);
+void aq_nic_ndev_free(struct aq_nic_s *self);
+int aq_nic_start(struct aq_nic_s *self);
+int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb);
+int aq_nic_get_regs(struct aq_nic_s *self, struct ethtool_regs *regs, void *p);
+int aq_nic_get_regs_count(struct aq_nic_s *self);
+void aq_nic_get_stats(struct aq_nic_s *self, u64 *data);
+int aq_nic_stop(struct aq_nic_s *self);
+void aq_nic_deinit(struct aq_nic_s *self);
+void aq_nic_free_hot_resources(struct aq_nic_s *self);
+int aq_nic_set_mtu(struct aq_nic_s *self, int new_mtu);
+int aq_nic_set_mac(struct aq_nic_s *self, struct net_device *ndev);
+int aq_nic_set_packet_filter(struct aq_nic_s *self, unsigned int flags);
+int aq_nic_set_multicast_list(struct aq_nic_s *self, struct net_device *ndev);
+unsigned int aq_nic_get_link_speed(struct aq_nic_s *self);
+void aq_nic_get_link_ksettings(struct aq_nic_s *self,
+ struct ethtool_link_ksettings *cmd);
+int aq_nic_set_link_ksettings(struct aq_nic_s *self,
+ const struct ethtool_link_ksettings *cmd);
+struct aq_nic_cfg_s *aq_nic_get_cfg(struct aq_nic_s *self);
+u32 aq_nic_get_fw_version(struct aq_nic_s *self);
+int aq_nic_change_pm_state(struct aq_nic_s *self, pm_message_t *pm_msg);
+
+#endif /* AQ_NIC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
new file mode 100644
index 0000000..f81738a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic_internal.h
@@ -0,0 +1,46 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_nic_internal.h: Definition of private object structure. */
+
+#ifndef AQ_NIC_INTERNAL_H
+#define AQ_NIC_INTERNAL_H
+
+struct aq_nic_s {
+ struct aq_obj_s header;
+ struct aq_vec_s *aq_vec[AQ_CFG_VECS_MAX];
+ struct aq_ring_s *aq_ring_tx[AQ_CFG_VECS_MAX * AQ_CFG_TCS_MAX];
+ struct aq_hw_s *aq_hw;
+ struct net_device *ndev;
+ struct aq_pci_func_s *aq_pci_func;
+ unsigned int aq_vecs;
+ unsigned int packet_filter;
+ unsigned int power_state;
+ bool is_ndev_registered;
+ u8 port;
+ struct aq_hw_ops aq_hw_ops;
+ struct aq_hw_caps_s aq_hw_caps;
+ struct aq_nic_cfg_s aq_nic_cfg;
+ struct timer_list service_timer;
+ struct timer_list polling_timer;
+ struct aq_hw_link_status_s link_status;
+ struct {
+ u32 count;
+ u8 ar[AQ_CFG_MULTICAST_ADDRESS_MAX][ETH_ALEN];
+ } mc_list;
+};
+
+#define AQ_NIC_FLAGS_IS_NOT_READY (AQ_NIC_FLAG_STOPPING | \
+ AQ_NIC_FLAG_RESETTING | AQ_NIC_FLAG_CLOSING | \
+ AQ_NIC_FLAG_ERR_UNPLUG | AQ_NIC_FLAG_ERR_HW)
+
+#define AQ_NIC_FLAGS_IS_NOT_TX_READY (AQ_NIC_FLAGS_IS_NOT_READY | \
+ AQ_NIC_LINK_DOWN)
+
+#endif /* AQ_NIC_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
new file mode 100644
index 0000000..da4bc09
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -0,0 +1,343 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.c: Definition of PCI functions. */
+
+#include "aq_pci_func.h"
+#include "aq_nic.h"
+#include "aq_vec.h"
+#include "aq_hw.h"
+#include <linux/interrupt.h>
+
+struct aq_pci_func_s {
+ struct pci_dev *pdev;
+ struct aq_nic_s *port[AQ_CFG_PCI_FUNC_PORTS];
+ void __iomem *mmio;
+ void *aq_vec[AQ_CFG_PCI_FUNC_MSIX_IRQS];
+ resource_size_t mmio_pa;
+ unsigned int msix_entry_mask;
+ unsigned int irq_type;
+ unsigned int ports;
+ bool is_pci_enabled;
+ bool is_regions;
+ bool is_pci_using_dac;
+ struct aq_hw_caps_s aq_hw_caps;
+ struct msix_entry msix_entry[AQ_CFG_PCI_FUNC_MSIX_IRQS];
+};
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *aq_hw_ops,
+ struct pci_dev *pdev,
+ const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *eth_ops)
+{
+ struct aq_pci_func_s *self = NULL;
+ int err = 0;
+ unsigned int port = 0U;
+
+ if (!aq_hw_ops) {
+ err = -EFAULT;
+ goto err_exit;
+ }
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ pci_set_drvdata(pdev, self);
+ self->pdev = pdev;
+
+ err = aq_hw_ops->get_hw_caps(NULL, &self->aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ self->ports = self->aq_hw_caps.ports;
+
+ for (port = 0; port < self->ports; ++port) {
+ struct aq_nic_s *aq_nic = aq_nic_alloc_cold(ndev_ops, eth_ops,
+ &pdev->dev, self,
+ port, aq_hw_ops);
+
+ if (!aq_nic) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ self->port[port] = aq_nic;
+ }
+
+err_exit:
+ if (err < 0) {
+ if (self)
+ aq_pci_func_free(self);
+ self = NULL;
+ }
+
+ (void)err;
+ return self;
+}
+
+int aq_pci_func_init(struct aq_pci_func_s *self)
+{
+ int err = 0;
+ unsigned int bar = 0U;
+ unsigned int port = 0U;
+ unsigned int i = 0U;
+
+ err = pci_enable_device(self->pdev);
+ if (err < 0)
+ goto err_exit;
+
+ self->is_pci_enabled = true;
+
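+ /* Prefer 64-bit DMA, falling back to a 32-bit mask if the platform
+  * cannot provide it.
+  */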
+ err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ if (!err) {
+ err = pci_set_consistent_dma_mask(self->pdev, DMA_BIT_MASK(64));
+ self->is_pci_using_dac = 1;
+ }
+ if (err) {
+ err = pci_set_dma_mask(self->pdev, DMA_BIT_MASK(32));
+ if (!err)
+ err = pci_set_consistent_dma_mask(self->pdev,
+ DMA_BIT_MASK(32));
+ self->is_pci_using_dac = 0;
+ }
+ if (err != 0) {
+ err = -ENOSR;
+ goto err_exit;
+ }
+
+ err = pci_request_regions(self->pdev, AQ_CFG_DRV_NAME "_mmio");
+ if (err < 0)
+ goto err_exit;
+
+ self->is_regions = true;
+
+ pci_set_master(self->pdev);
+
+ for (bar = 0; bar < 4; ++bar) {
+ if (IORESOURCE_MEM & pci_resource_flags(self->pdev, bar)) {
+ resource_size_t reg_sz;
+
+ self->mmio_pa = pci_resource_start(self->pdev, bar);
+ if (self->mmio_pa == 0U) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ reg_sz = pci_resource_len(self->pdev, bar);
+ if (reg_sz <= 24 /* ATL_REGS_SIZE */) {
+ err = -EIO;
+ goto err_exit;
+ }
+
+ self->mmio = ioremap_nocache(self->mmio_pa, reg_sz);
+ if (!self->mmio) {
+ err = -EIO;
+ goto err_exit;
+ }
+ break;
+ }
+ }
+
+ for (i = 0; i < self->aq_hw_caps.msix_irqs; i++)
+ self->msix_entry[i].entry = i;
+
+ /* enable interrupts */
+#if AQ_CFG_FORCE_LEGACY_INT
+ self->irq_type = AQ_HW_IRQ_LEGACY;
+#else
+ err = pci_enable_msix(self->pdev, self->msix_entry,
+ self->aq_hw_caps.msix_irqs);
+
+ if (err >= 0) {
+ self->irq_type = AQ_HW_IRQ_MSIX;
+ } else {
+ err = pci_enable_msi(self->pdev);
+
+ if (err >= 0) {
+ self->irq_type = AQ_HW_IRQ_MSI;
+ } else {
+ self->irq_type = AQ_HW_IRQ_LEGACY;
+ err = 0;
+ }
+ }
+#endif
+
+ /* net device init */
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ err = aq_nic_cfg_start(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_ndev_init(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_nic_ndev_register(self->port[port]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0)
+ aq_pci_func_deinit(self);
+ return err;
+}
+
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+ char *name, void *aq_vec, cpumask_t *affinity_mask)
+{
+ int err = 0;
+
+ switch (self->irq_type) {
+ case AQ_HW_IRQ_MSIX:
+ err = request_irq(self->msix_entry[i].vector, aq_vec_isr, 0,
+ name, aq_vec);
+ break;
+
+ case AQ_HW_IRQ_MSI:
+ err = request_irq(self->pdev->irq, aq_vec_isr, 0, name, aq_vec);
+ break;
+
+ case AQ_HW_IRQ_LEGACY:
+ err = request_irq(self->pdev->irq, aq_vec_isr_legacy,
+ IRQF_SHARED, name, aq_vec);
+ break;
+
+ default:
+ err = -EFAULT;
+ break;
+ }
+
+ if (err >= 0) {
+ self->msix_entry_mask |= (1 << i);
+ self->aq_vec[i] = aq_vec;
+
+ if (self->irq_type == AQ_HW_IRQ_MSIX)
+ irq_set_affinity_hint(self->msix_entry[i].vector,
+ affinity_mask);
+ }
+
+ return err;
+}
+
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self)
+{
+ unsigned int i = 0U;
+
+ for (i = 32U; i--;) {
+ if (!((1U << i) & self->msix_entry_mask))
+ continue;
+
+ switch (self->irq_type) {
+ case AQ_HW_IRQ_MSIX:
+ irq_set_affinity_hint(self->msix_entry[i].vector, NULL);
+ free_irq(self->msix_entry[i].vector, self->aq_vec[i]);
+ break;
+
+ case AQ_HW_IRQ_MSI:
+ free_irq(self->pdev->irq, self->aq_vec[i]);
+ break;
+
+ case AQ_HW_IRQ_LEGACY:
+ free_irq(self->pdev->irq, self->aq_vec[i]);
+ break;
+
+ default:
+ break;
+ }
+
+ self->msix_entry_mask &= ~(1U << i);
+ }
+}
+
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self)
+{
+ return self->mmio;
+}
+
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self)
+{
+ return self->irq_type;
+}
+
+void aq_pci_func_deinit(struct aq_pci_func_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ aq_pci_func_free_irqs(self);
+
+ switch (self->irq_type) {
+ case AQ_HW_IRQ_MSI:
+ pci_disable_msi(self->pdev);
+ break;
+
+ case AQ_HW_IRQ_MSIX:
+ pci_disable_msix(self->pdev);
+ break;
+
+ case AQ_HW_IRQ_LEGACY:
+ break;
+
+ default:
+ break;
+ }
+
+ if (self->is_regions)
+ pci_release_regions(self->pdev);
+
+ if (self->is_pci_enabled)
+ pci_disable_device(self->pdev);
+
+err_exit:;
+}
+
+void aq_pci_func_free(struct aq_pci_func_s *self)
+{
+ unsigned int port = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ aq_nic_ndev_free(self->port[port]);
+ }
+
+ kfree(self);
+
+err_exit:;
+}
+
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+ pm_message_t *pm_msg)
+{
+ int err = 0;
+ unsigned int port = 0U;
+
+ if (!self) {
+ err = -EFAULT;
+ goto err_exit;
+ }
+ for (port = 0; port < self->ports; ++port) {
+ if (!self->port[port])
+ continue;
+
+ (void)aq_nic_change_pm_state(self->port[port], pm_msg);
+ }
+
+err_exit:
+ return err;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
new file mode 100644
index 0000000..ecb0337
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_pci_func.h: Declaration of PCI functions. */
+
+#ifndef AQ_PCI_FUNC_H
+#define AQ_PCI_FUNC_H
+
+#include "aq_common.h"
+
+struct aq_pci_func_s *aq_pci_func_alloc(struct aq_hw_ops *hw_ops,
+ struct pci_dev *pdev,
+ const struct net_device_ops *ndev_ops,
+ const struct ethtool_ops *eth_ops);
+int aq_pci_func_init(struct aq_pci_func_s *self);
+int aq_pci_func_alloc_irq(struct aq_pci_func_s *self, unsigned int i,
+ char *name, void *aq_vec,
+ cpumask_t *affinity_mask);
+void aq_pci_func_free_irqs(struct aq_pci_func_s *self);
+int aq_pci_func_start(struct aq_pci_func_s *self);
+void __iomem *aq_pci_func_get_mmio(struct aq_pci_func_s *self);
+unsigned int aq_pci_func_get_irq_type(struct aq_pci_func_s *self);
+void aq_pci_func_deinit(struct aq_pci_func_s *self);
+void aq_pci_func_free(struct aq_pci_func_s *self);
+int aq_pci_func_change_pm_state(struct aq_pci_func_s *self,
+ pm_message_t *pm_msg);
+
+#endif /* AQ_PCI_FUNC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
new file mode 100644
index 0000000..dea9e9b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -0,0 +1,375 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.c: Definition of functions for Rx/Tx rings. */
+
+#include "aq_ring.h"
+#include "aq_nic.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+static struct aq_ring_s *aq_ring_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic)
+{
+ int err = 0;
+
+ self->buff_ring =
+ kcalloc(self->size, sizeof(struct aq_ring_buff_s), GFP_KERNEL);
+
+ if (!self->buff_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ self->dx_ring = dma_alloc_coherent(aq_nic_get_dev(aq_nic),
+ self->size * self->dx_size,
+ &self->dx_ring_pa, GFP_KERNEL);
+ if (!self->dx_ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->txds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->txd_size;
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ self->aq_nic = aq_nic;
+ self->idx = idx;
+ self->size = aq_nic_cfg->rxds;
+ self->dx_size = aq_nic_cfg->aq_hw_caps->rxd_size;
+
+ self = aq_ring_alloc(self, aq_nic);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_ring_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_ring_init(struct aq_ring_s *self)
+{
+ self->hw_head = 0;
+ self->sw_head = 0;
+ self->sw_tail = 0;
+ return 0;
+}
+
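+/* Copies a batch of buffer descriptors into the circular ring, splitting the
+ * memcpy in two when the batch wraps past the end of the ring.
+ */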
+void aq_ring_tx_append_buffs(struct aq_ring_s *self,
+ struct aq_ring_buff_s *buffer,
+ unsigned int buffers)
+{
+ if (likely(self->sw_tail + buffers < self->size)) {
+ memcpy(&self->buff_ring[self->sw_tail], buffer,
+ sizeof(buffer[0]) * buffers);
+ } else {
+ unsigned int first_part = self->size - self->sw_tail;
+ unsigned int second_part = buffers - first_part;
+
+ memcpy(&self->buff_ring[self->sw_tail], buffer,
+ sizeof(buffer[0]) * first_part);
+
+ memcpy(&self->buff_ring[0], &buffer[first_part],
+ sizeof(buffer[0]) * second_part);
+ }
+}
+
+int aq_ring_tx_clean(struct aq_ring_s *self)
+{
+ struct device *dev = aq_nic_get_dev(self->aq_nic);
+
+ for (; self->sw_head != self->hw_head;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ if (likely(buff->is_mapped)) {
+ if (unlikely(buff->is_sop))
+ dma_unmap_single(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ else
+ dma_unmap_page(dev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ }
+
+ if (unlikely(buff->is_eop))
+ dev_kfree_skb_any(buff->skb);
+ }
+
+ if (aq_ring_avail_dx(self) > AQ_CFG_SKB_FRAGS_MAX)
+ aq_nic_ndev_queue_start(self->aq_nic, self->idx);
+
+ return 0;
+}
+
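+/* Return true when index @i lies strictly inside the (h, t) interval,
+ * taking ring wrap-around into account: when the interval itself wraps
+ * (h >= t), @i only has to be on either side of the wrap point.
+ */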
+static inline unsigned int aq_ring_dx_in_range(unsigned int h, unsigned int i,
+ unsigned int t)
+{
+ return (h < t) ? ((h < i) && (i < t)) : ((h < i) || (i < t));
+}
+
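+/* build_skb() expects room for struct skb_shared_info at the end of
+ * the data buffer, so single-fragment packets reserve this much extra
+ * space after the received bytes.
+ */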
+#define AQ_SKB_ALIGN SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget)
+{
+ struct net_device *ndev = aq_nic_get_ndev(self->aq_nic);
+ int err = 0;
+ bool is_rsc_completed = true;
+
+ for (; (self->sw_head != self->hw_head) && budget;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head),
+ --budget, ++(*work_done)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct sk_buff *skb = NULL;
+ unsigned int next_ = 0U;
+ unsigned int i = 0U;
+ struct aq_ring_buff_s *buff_ = NULL;
+
+ if (buff->is_error) {
+ __free_pages(buff->page, 0);
+ continue;
+ }
+
+ if (buff->is_cleaned)
+ continue;
+
+ if (!buff->is_eop) {
+ for (next_ = buff->next,
+ buff_ = &self->buff_ring[next_]; true;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_]) {
+ is_rsc_completed =
+ aq_ring_dx_in_range(self->sw_head,
+ next_,
+ self->hw_head);
+
+ if (unlikely(!is_rsc_completed)) {
+ is_rsc_completed = false;
+ break;
+ }
+
+ if (buff_->is_eop)
+ break;
+ }
+
+ if (!is_rsc_completed) {
+ err = 0;
+ goto err_exit;
+ }
+ }
+
+ /* for single fragment packets use build_skb() */
+ if (buff->is_eop) {
+ skb = build_skb(page_address(buff->page),
+ buff->len + AQ_SKB_ALIGN);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ skb->dev = ndev;
+ skb_put(skb, buff->len);
+ } else {
+ skb = netdev_alloc_skb(ndev, ETH_HLEN);
+ if (unlikely(!skb)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+ skb_put(skb, ETH_HLEN);
+ memcpy(skb->data, page_address(buff->page), ETH_HLEN);
+
+ skb_add_rx_frag(skb, 0, buff->page, ETH_HLEN,
+ buff->len - ETH_HLEN,
+ SKB_TRUESIZE(buff->len - ETH_HLEN));
+
+ for (i = 1U, next_ = buff->next,
+ buff_ = &self->buff_ring[next_]; true;
+ next_ = buff_->next,
+ buff_ = &self->buff_ring[next_], ++i) {
+ skb_add_rx_frag(skb, i, buff_->page, 0,
+ buff_->len,
+ SKB_TRUESIZE(buff_->len));
+ buff_->is_cleaned = 1;
+
+ if (buff_->is_eop)
+ break;
+ }
+ }
+
+ skb->protocol = eth_type_trans(skb, ndev);
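+ /* __skb_incr_checksum_unnecessary() is called once for a valid
+ * L3 (IP) checksum and once more for a valid L4 (TCP/UDP)
+ * checksum, raising the skb CHECKSUM_UNNECESSARY level each time.
+ */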
+ if (unlikely(buff->is_cso_err)) {
+ ++self->stats.rx.errors;
+ __skb_mark_checksum_bad(skb);
+ } else {
+ if (buff->is_ip_cso) {
+ __skb_incr_checksum_unnecessary(skb);
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ __skb_incr_checksum_unnecessary(skb);
+ } else {
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+
+ skb_set_hash(skb, buff->rss_hash,
+ buff->is_hash_l4 ? PKT_HASH_TYPE_L4 :
+ PKT_HASH_TYPE_NONE);
+
+ skb_record_rx_queue(skb, self->idx);
+
+ netif_receive_skb(skb);
+
+ ++self->stats.rx.packets;
+ self->stats.rx.bytes += skb->len;
+ }
+
+err_exit:
+ return err;
+}
+
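+/* Post one whole page per free Rx descriptor: each page is allocated,
+ * DMA-mapped towards the device and left for the hardware to fill.
+ */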
+int aq_ring_rx_fill(struct aq_ring_s *self)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ int err = 0;
+ int i = 0;
+
+ for (i = aq_ring_avail_dx(self); i--;
+ self->sw_tail = aq_ring_next_dx(self, self->sw_tail)) {
+ buff = &self->buff_ring[self->sw_tail];
+
+ buff->flags = 0U;
+ buff->len = AQ_CFG_RX_FRAME_MAX;
+
+ buff->page = alloc_pages(GFP_ATOMIC | __GFP_COLD |
+ __GFP_COMP, 0);
+ if (!buff->page) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ buff->pa = dma_map_page(aq_nic_get_dev(self->aq_nic),
+ buff->page, 0,
+ AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+ if (dma_mapping_error(aq_nic_get_dev(self->aq_nic), buff->pa)) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ buff = NULL;
+ }
+
+err_exit:
+ if (err < 0) {
+ if (buff && buff->page)
+ __free_pages(buff->page, 0);
+ }
+
+ return err;
+}
+
+void aq_ring_rx_deinit(struct aq_ring_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ for (; self->sw_head != self->sw_tail;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+
+ dma_unmap_page(aq_nic_get_dev(self->aq_nic), buff->pa,
+ AQ_CFG_RX_FRAME_MAX, DMA_FROM_DEVICE);
+
+ __free_pages(buff->page, 0);
+ }
+
+err_exit:;
+}
+
+void aq_ring_tx_deinit(struct aq_ring_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ for (; self->sw_head != self->sw_tail;
+ self->sw_head = aq_ring_next_dx(self, self->sw_head)) {
+ struct aq_ring_buff_s *buff = &self->buff_ring[self->sw_head];
+ struct device *ndev = aq_nic_get_dev(self->aq_nic);
+
+ if (likely(buff->is_mapped)) {
+ if (unlikely(buff->is_sop)) {
+ dma_unmap_single(ndev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ndev, buff->pa, buff->len,
+ DMA_TO_DEVICE);
+ }
+ }
+
+ if (unlikely(buff->is_eop))
+ dev_kfree_skb_any(buff->skb);
+ }
+err_exit:;
+}
+
+void aq_ring_free(struct aq_ring_s *self)
+{
+ if (!self)
+ goto err_exit;
+
+ kfree(self->buff_ring);
+
+ if (self->dx_ring)
+ dma_free_coherent(aq_nic_get_dev(self->aq_nic),
+ self->size * self->dx_size, self->dx_ring,
+ self->dx_ring_pa);
+
+err_exit:;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
new file mode 100644
index 0000000..0ac3f9e
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -0,0 +1,157 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_ring.h: Declaration of functions for Rx/Tx rings. */
+
+#ifndef AQ_RING_H
+#define AQ_RING_H
+
+#include "aq_common.h"
+
+struct page;
+
+/* TxC SOP DX EOP
+ * +----------+----------+----------+-----------
+ * 8bytes|len l3,l4 | pa | pa | pa
+ * +----------+----------+----------+-----------
+ * 4/8bytes|len pkt |len pkt | | skb
+ * +----------+----------+----------+-----------
+ * 4/8bytes|is_txc |len,flags |len |len,is_eop
+ * +----------+----------+----------+-----------
+ *
+ * This aq_ring_buff_s has no endianness dependency.
+ * It is __packed for cache line optimizations.
+ */
+struct __packed aq_ring_buff_s {
+ union {
+ /* RX */
+ struct {
+ u32 rss_hash;
+ u16 next;
+ u8 is_hash_l4;
+ u8 rsvd1;
+ struct page *page;
+ };
+ /* EOP */
+ struct {
+ dma_addr_t pa_eop;
+ struct sk_buff *skb;
+ };
+ /* DX */
+ struct {
+ dma_addr_t pa;
+ };
+ /* SOP */
+ struct {
+ dma_addr_t pa_sop;
+ u32 len_pkt_sop;
+ };
+ /* TxC */
+ struct {
+ u32 mss;
+ u8 len_l2;
+ u8 len_l3;
+ u8 len_l4;
+ u8 rsvd2;
+ u32 len_pkt;
+ };
+ };
+ union {
+ struct {
+ u32 len:16;
+ u32 is_ip_cso:1;
+ u32 is_udp_cso:1;
+ u32 is_tcp_cso:1;
+ u32 is_cso_err:1;
+ u32 is_sop:1;
+ u32 is_eop:1;
+ u32 is_txc:1;
+ u32 is_mapped:1;
+ u32 is_cleaned:1;
+ u32 is_error:1;
+ u32 rsvd3:6;
+ };
+ u32 flags;
+ };
+};
+
+struct aq_ring_stats_rx_s {
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+ u64 lro_packets;
+ u64 jumbo_packets;
+};
+
+struct aq_ring_stats_tx_s {
+ u64 errors;
+ u64 packets;
+ u64 bytes;
+};
+
+union aq_ring_stats_s {
+ struct aq_ring_stats_rx_s rx;
+ struct aq_ring_stats_tx_s tx;
+};
+
+struct aq_ring_s {
+ struct aq_obj_s header;
+ struct aq_ring_buff_s *buff_ring;
+ u8 *dx_ring; /* descriptors ring, dma shared mem */
+ struct aq_nic_s *aq_nic;
+ unsigned int idx; /* for HW layer register operations */
+ unsigned int hw_head;
+ unsigned int sw_head;
+ unsigned int sw_tail;
+ unsigned int size; /* descriptors number */
+ unsigned int dx_size; /* TX or RX descriptor size, */
+ /* stored here for faster math */
+ union aq_ring_stats_s stats;
+ dma_addr_t dx_ring_pa;
+};
+
+struct aq_ring_param_s {
+ unsigned int vec_idx;
+ unsigned int cpu;
+ cpumask_t affinity_mask;
+};
+
+static inline unsigned int aq_ring_next_dx(struct aq_ring_s *self,
+ unsigned int dx)
+{
+ return (++dx >= self->size) ? 0U : dx;
+}
+
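+/* Number of free descriptors. One slot is always kept empty so that a
+ * full ring (sw_tail just behind sw_head) can be told apart from an
+ * empty one (sw_tail == sw_head).
+ */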
+static inline unsigned int aq_ring_avail_dx(struct aq_ring_s *self)
+{
+ return (((self->sw_tail >= self->sw_head)) ?
+ (self->size - 1) - self->sw_tail + self->sw_head :
+ self->sw_head - self->sw_tail - 1);
+}
+
+struct aq_ring_s *aq_ring_tx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+struct aq_ring_s *aq_ring_rx_alloc(struct aq_ring_s *self,
+ struct aq_nic_s *aq_nic,
+ unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_ring_init(struct aq_ring_s *self);
+void aq_ring_tx_deinit(struct aq_ring_s *self);
+void aq_ring_rx_deinit(struct aq_ring_s *self);
+void aq_ring_free(struct aq_ring_s *self);
+void aq_ring_tx_append_buffs(struct aq_ring_s *ring,
+ struct aq_ring_buff_s *buffer,
+ unsigned int buffers);
+int aq_ring_tx_clean(struct aq_ring_s *self);
+int aq_ring_rx_clean(struct aq_ring_s *self, int *work_done, int budget);
+int aq_ring_rx_fill(struct aq_ring_s *self);
+
+#endif /* AQ_RING_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_rss.h b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
new file mode 100644
index 0000000..1db6eb2
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_rss.h
@@ -0,0 +1,26 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_rss.h: Receive Side Scaling definitions. */
+
+#ifndef AQ_RSS_H
+#define AQ_RSS_H
+
+#include "aq_common.h"
+#include "aq_cfg.h"
+
+struct aq_rss_parameters {
+ u16 base_cpu_number;
+ u16 indirection_table_size;
+ u16 hash_secret_key_size;
+ u32 hash_secret_key[AQ_CFG_RSS_HASHKEY_SIZE / sizeof(u32)];
+ u8 indirection_table[AQ_CFG_RSS_INDIRECTION_TABLE_MAX];
+};
+
+#endif /* AQ_RSS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_utils.h b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
new file mode 100644
index 0000000..4446bd9
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_utils.h
@@ -0,0 +1,50 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_utils.h: Useful macros and structures used in all layers of the driver. */
+
+#ifndef AQ_UTILS_H
+#define AQ_UTILS_H
+
+#include "aq_common.h"
+
+#define AQ_DIMOF(_ARY_) ARRAY_SIZE(_ARY_)
+
+struct aq_obj_s {
+ spinlock_t lock; /* spinlock for nic/rings processing */
+ atomic_t flags;
+ atomic_t busy_count;
+};
+
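+/* Lock-free read-modify-write of an atomic flags word: read the
+ * current value, compute the new one and retry via atomic_cmpxchg()
+ * until no other CPU has changed the word in between.
+ */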
+static inline void aq_utils_obj_set(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old | (mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline void aq_utils_obj_clear(atomic_t *flags, u32 mask)
+{
+ unsigned long flags_old, flags_new;
+
+ do {
+ flags_old = atomic_read(flags);
+ flags_new = flags_old & ~(mask);
+ } while (atomic_cmpxchg(flags, flags_old, flags_new) != flags_old);
+}
+
+static inline bool aq_utils_obj_test(atomic_t *flags, u32 mask)
+{
+ return atomic_read(flags) & mask;
+}
+
+#endif /* AQ_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.c b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
new file mode 100644
index 0000000..cb30a639
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.c
@@ -0,0 +1,392 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.c: Definition of the common structure for a vector of Rx and Tx
+ * rings and of functions for Rx and Tx rings. Helper module for aq_nic.
+ */
+
+#include "aq_vec.h"
+#include "aq_nic.h"
+#include "aq_ring.h"
+#include "aq_hw.h"
+
+#include <linux/netdevice.h>
+
+struct aq_vec_s {
+ struct aq_obj_s header;
+ struct aq_hw_ops *aq_hw_ops;
+ struct aq_hw_s *aq_hw;
+ struct aq_nic_s *aq_nic;
+ unsigned int tx_rings;
+ unsigned int rx_rings;
+ struct aq_ring_param_s aq_ring_param;
+ struct napi_struct napi;
+ struct aq_ring_s ring[AQ_CFG_TCS_MAX][2];
+};
+
+#define AQ_VEC_TX_ID 0
+#define AQ_VEC_RX_ID 1
+
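+/* NAPI poll handler. spin_trylock() is used instead of spin_lock() so
+ * that a poll round is simply skipped whenever the control path
+ * currently holds the vector lock.
+ */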
+static int aq_vec_poll(struct napi_struct *napi, int budget)
+__releases(&self->lock)
+__acquires(&self->lock)
+{
+ struct aq_vec_s *self = container_of(napi, struct aq_vec_s, napi);
+ struct aq_ring_s *ring = NULL;
+ int work_done = 0;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int sw_tail_old = 0U;
+ bool was_tx_cleaned = false;
+
+ if (!self) {
+ err = -EINVAL;
+ } else if (spin_trylock(&self->header.lock)) {
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ if (self->aq_hw_ops->hw_ring_tx_head_update) {
+ err = self->aq_hw_ops->hw_ring_tx_head_update(
+ self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ if (ring[AQ_VEC_TX_ID].sw_head !=
+ ring[AQ_VEC_TX_ID].hw_head) {
+ err = aq_ring_tx_clean(&ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+ was_tx_cleaned = true;
+ }
+
+ err = self->aq_hw_ops->hw_ring_rx_receive(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ if (ring[AQ_VEC_RX_ID].sw_head !=
+ ring[AQ_VEC_RX_ID].hw_head) {
+ err = aq_ring_rx_clean(&ring[AQ_VEC_RX_ID],
+ &work_done,
+ budget - work_done);
+ if (err < 0)
+ goto err_exit;
+
+ sw_tail_old = ring[AQ_VEC_RX_ID].sw_tail;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(
+ self->aq_hw,
+ &ring[AQ_VEC_RX_ID], sw_tail_old);
+ if (err < 0)
+ goto err_exit;
+ }
+ }
+
+ if (was_tx_cleaned)
+ work_done = budget;
+
+ if (work_done < budget) {
+ napi_complete_done(napi, work_done);
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ }
+
+err_exit:
+ spin_unlock(&self->header.lock);
+ }
+
+ return work_done;
+}
+
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ struct aq_vec_s *self = NULL;
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ self->aq_nic = aq_nic;
+ self->aq_ring_param.vec_idx = idx;
+ self->aq_ring_param.cpu =
+ idx + aq_nic_cfg->aq_rss.base_cpu_number;
+
+ cpumask_set_cpu(self->aq_ring_param.cpu,
+ &self->aq_ring_param.affinity_mask);
+
+ self->tx_rings = 0;
+ self->rx_rings = 0;
+
+ netif_napi_add(aq_nic_get_ndev(aq_nic), &self->napi,
+ aq_vec_poll, AQ_CFG_NAPI_WEIGHT);
+
+ for (i = 0; i < aq_nic_cfg->tcs; ++i) {
+ unsigned int idx_ring = AQ_NIC_TCVEC2RING(self->aq_nic,
+ self->tx_rings,
+ self->aq_ring_param.vec_idx);
+
+ ring = aq_ring_tx_alloc(&self->ring[i][AQ_VEC_TX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->tx_rings;
+
+ aq_nic_set_tx_ring(aq_nic, idx_ring, ring);
+
+ ring = aq_ring_rx_alloc(&self->ring[i][AQ_VEC_RX_ID], aq_nic,
+ idx_ring, aq_nic_cfg);
+ if (!ring) {
+ err = -ENOMEM;
+ goto err_exit;
+ }
+
+ ++self->rx_rings;
+ }
+
+err_exit:
+ if (err < 0) {
+ aq_vec_free(self);
+ self = NULL;
+ }
+ return self;
+}
+
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ self->aq_hw_ops = aq_hw_ops;
+ self->aq_hw = aq_hw;
+
+ spin_lock_init(&self->header.lock);
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ err = aq_ring_init(&ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_tx_init(self->aq_hw,
+ &ring[AQ_VEC_TX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_init(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_init(self->aq_hw,
+ &ring[AQ_VEC_RX_ID],
+ &self->aq_ring_param);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_ring_rx_fill(&ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_fill(self->aq_hw,
+ &ring[AQ_VEC_RX_ID], 0U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+err_exit:
+ return err;
+}
+
+int aq_vec_start(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+ int err = 0;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ err = self->aq_hw_ops->hw_ring_tx_start(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+ if (err < 0)
+ goto err_exit;
+
+ err = self->aq_hw_ops->hw_ring_rx_start(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ napi_enable(&self->napi);
+
+err_exit:
+ return err;
+}
+
+void aq_vec_stop(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ self->aq_hw_ops->hw_ring_tx_stop(self->aq_hw,
+ &ring[AQ_VEC_TX_ID]);
+
+ self->aq_hw_ops->hw_ring_rx_stop(self->aq_hw,
+ &ring[AQ_VEC_RX_ID]);
+ }
+
+ napi_disable(&self->napi);
+}
+
+void aq_vec_deinit(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ aq_ring_tx_deinit(&ring[AQ_VEC_TX_ID]);
+ aq_ring_rx_deinit(&ring[AQ_VEC_RX_ID]);
+ }
+err_exit:;
+}
+
+void aq_vec_free(struct aq_vec_s *self)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int i = 0U;
+
+ if (!self)
+ goto err_exit;
+
+ for (i = 0U, ring = self->ring[0];
+ self->tx_rings > i; ++i, ring = self->ring[i]) {
+ aq_ring_free(&ring[AQ_VEC_TX_ID]);
+ aq_ring_free(&ring[AQ_VEC_RX_ID]);
+ }
+
+ netif_napi_del(&self->napi);
+
+ kfree(self);
+
+err_exit:;
+}
+
+irqreturn_t aq_vec_isr(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ int err = 0;
+
+ if (!self) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ napi_schedule(&self->napi);
+
+err_exit:
+ return err >= 0 ? IRQ_HANDLED : IRQ_NONE;
+}
+
+irqreturn_t aq_vec_isr_legacy(int irq, void *private)
+{
+ struct aq_vec_s *self = private;
+ u64 irq_mask = 0U;
+ int err = 0;
+
+ if (!self)
+ return IRQ_NONE;
+
+ err = self->aq_hw_ops->hw_irq_read(self->aq_hw, &irq_mask);
+ if (err < 0)
+ return IRQ_NONE;
+
+ if (irq_mask) {
+ self->aq_hw_ops->hw_irq_disable(self->aq_hw,
+ 1U << self->aq_ring_param.vec_idx);
+ napi_schedule(&self->napi);
+ } else {
+ /* Spurious interrupt: re-enable and report it as not ours */
+ self->aq_hw_ops->hw_irq_enable(self->aq_hw, 1U);
+ return IRQ_NONE;
+ }
+
+ return IRQ_HANDLED;
+}
+
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self)
+{
+ return &self->aq_ring_param.affinity_mask;
+}
+
+void aq_vec_add_stats(struct aq_vec_s *self,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx)
+{
+ struct aq_ring_s *ring = NULL;
+ unsigned int r = 0U;
+
+ for (r = 0U, ring = self->ring[0];
+ self->tx_rings > r; ++r, ring = self->ring[r]) {
+ struct aq_ring_stats_tx_s *tx = &ring[AQ_VEC_TX_ID].stats.tx;
+ struct aq_ring_stats_rx_s *rx = &ring[AQ_VEC_RX_ID].stats.rx;
+
+ stats_rx->packets += rx->packets;
+ stats_rx->bytes += rx->bytes;
+ stats_rx->errors += rx->errors;
+ stats_rx->jumbo_packets += rx->jumbo_packets;
+ stats_rx->lro_packets += rx->lro_packets;
+
+ stats_tx->packets += tx->packets;
+ stats_tx->bytes += tx->bytes;
+ stats_tx->errors += tx->errors;
+ }
+}
+
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data, unsigned int *p_count)
+{
+ unsigned int count = 0U;
+ struct aq_ring_stats_rx_s stats_rx;
+ struct aq_ring_stats_tx_s stats_tx;
+
+ memset(&stats_rx, 0U, sizeof(struct aq_ring_stats_rx_s));
+ memset(&stats_tx, 0U, sizeof(struct aq_ring_stats_tx_s));
+ aq_vec_add_stats(self, &stats_rx, &stats_tx);
+
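+ /* The write order below defines the per-vector stats layout and
+ * has to stay in sync with the ethtool statistics strings.
+ */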
+ data[count] += stats_rx.packets;
+ data[++count] += stats_tx.packets;
+ data[++count] += stats_rx.jumbo_packets;
+ data[++count] += stats_rx.lro_packets;
+ data[++count] += stats_rx.errors;
+
+ if (p_count)
+ *p_count = ++count;
+
+ return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_vec.h b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
new file mode 100644
index 0000000..6c68b18
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_vec.h
@@ -0,0 +1,42 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File aq_vec.h: Definition of common structures for a vector of Rx and Tx rings.
+ * Declaration of functions for Rx and Tx rings.
+ */
+
+#ifndef AQ_VEC_H
+#define AQ_VEC_H
+
+#include "aq_common.h"
+#include <linux/irqreturn.h>
+
+struct aq_hw_s;
+struct aq_hw_ops;
+struct aq_ring_stats_rx_s;
+struct aq_ring_stats_tx_s;
+
+irqreturn_t aq_vec_isr(int irq, void *private);
+irqreturn_t aq_vec_isr_legacy(int irq, void *private);
+struct aq_vec_s *aq_vec_alloc(struct aq_nic_s *aq_nic, unsigned int idx,
+ struct aq_nic_cfg_s *aq_nic_cfg);
+int aq_vec_init(struct aq_vec_s *self, struct aq_hw_ops *aq_hw_ops,
+ struct aq_hw_s *aq_hw);
+void aq_vec_deinit(struct aq_vec_s *self);
+void aq_vec_free(struct aq_vec_s *self);
+int aq_vec_start(struct aq_vec_s *self);
+void aq_vec_stop(struct aq_vec_s *self);
+cpumask_t *aq_vec_get_affinity_mask(struct aq_vec_s *self);
+int aq_vec_get_sw_stats(struct aq_vec_s *self, u64 *data,
+ unsigned int *p_count);
+void aq_vec_add_stats(struct aq_vec_s *self,
+ struct aq_ring_stats_rx_s *stats_rx,
+ struct aq_ring_stats_tx_s *stats_tx);
+
+#endif /* AQ_VEC_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
new file mode 100644
index 0000000..1f38805
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.c
@@ -0,0 +1,905 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.c: Definition of Atlantic hardware-specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_a0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_a0_internal.h"
+
+static int hw_atl_a0_get_hw_caps(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ memcpy(aq_hw_caps, &hw_atl_a0_hw_caps_, sizeof(*aq_hw_caps));
+ return 0;
+}
+
+static struct aq_hw_s *hw_atl_a0_create(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ struct aq_hw_ops *ops)
+{
+ struct hw_atl_s *self = NULL;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self)
+ goto err_exit;
+
+ self->base.aq_pci_func = aq_pci_func;
+
+ self->base.not_ff_addr = 0x10U;
+
+err_exit:
+ return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_a0_destroy(struct aq_hw_s *self)
+{
+ kfree(self);
+}
+
+static int hw_atl_a0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ glb_glb_reg_res_dis_set(self, 1U);
+ pci_pci_reg_res_dis_set(self, 0U);
+ rx_rx_reg_res_dis_set(self, 0U);
+ tx_tx_reg_res_dis_set(self, 0U);
+
+ HW_ATL_FLUSH();
+ glb_soft_res_set(self, 1);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ itr_irq_reg_res_dis_set(self, 0U);
+ itr_res_irq_set(self, 1U);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_qos_set(struct aq_hw_s *self)
+{
+ u32 tc = 0U;
+ u32 buff_size = 0U;
+ unsigned int i_priority = 0U;
+ bool is_rx_flow_control = false;
+
+ /* TPS Descriptor rate init */
+ tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_A0_TXBUF_MAX;
+
+ tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 66U) /
+ 100U, tc);
+ tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ buff_size = HW_ATL_A0_RXBUF_MAX;
+
+ rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int addr = 0U;
+
+ cfg = self->aq_nic_cfg;
+
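+ /* Write the ten 32-bit words of the RSS hash key (byte-swapped,
+ * or zero when RSS is disabled) through the indirect key
+ * registers, waiting for each write to complete.
+ */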
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ rpf_rss_key_wr_data_set(self, key_data);
+ rpf_rss_key_addr_set(self, addr);
+ rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+ u16 bitary[(HW_ATL_A0_RSS_REDIRECTION_MAX *
+ HW_ATL_A0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
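+ /* Pack the redirection table entries, 3 bits each, into the
+ * bitary[] word array, then write it out 16 bits at a time
+ * through the indirect redirection table registers.
+ */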
+ for (i = HW_ATL_A0_RSS_REDIRECTION_MAX; i--; ) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = AQ_DIMOF(bitary); i--;) {
+ rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ rpf_rss_redir_tbl_addr_set(self, i);
+ rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+ int err = 0;
+
+ /* TX checksum offloads */
+ tpo_ipv4header_crc_offload_en_set(self, 1);
+ tpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* RX checksum offloads */
+ rpo_ipv4header_crc_offload_en_set(self, 1);
+ rpo_tcp_udp_crc_offload_en_set(self, 1);
+ if (err < 0)
+ goto err_exit;
+
+ /* LSO offloads */
+ tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U);
+ tdm_tx_dca_en_set(self, 0U);
+ tdm_tx_dca_mode_set(self, 0U);
+
+ tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_A0_MAC_MAX; i--;) {
+ rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ rpf_vlan_outer_etht_set(self, 0x88A8U);
+ rpf_vlan_inner_etht_set(self, 0x8100U);
+ rpf_vlan_prom_mode_en_set(self, 1);
+
+ /* Rx Interrupts */
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ rpfl2broadcast_flr_act_set(self, 1U);
+ rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ rdm_rx_dca_en_set(self, 0U);
+ rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC);
+ rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_A0_MAC);
+ rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_A0_MAC);
+ rpfl2_uc_flr_en_set(self, 1U, HW_ATL_A0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_init(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr)
+{
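+ /* Global interrupt control register values, indexed by the
+ * interrupt type and by whether more than one vector is in use.
+ */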
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+ { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+ { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ };
+
+ int err = 0;
+
+ self->aq_nic_cfg = aq_nic_cfg;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_A0->chip_features);
+
+ hw_atl_a0_hw_init_tx_path(self);
+ hw_atl_a0_hw_init_rx_path(self);
+
+ hw_atl_a0_hw_mac_addr_set(self, mac_addr);
+
+ hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+ reg_tx_dma_debug_ctl_set(self, 0x800000b8U);
+ reg_tx_dma_debug_ctl_set(self, 0x000000b8U);
+
+ hw_atl_a0_hw_qos_set(self);
+ hw_atl_a0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_a0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+ /* Interrupts */
+ reg_gen_irq_map_set(self,
+ ((HW_ATL_A0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_A0_ERR_INT << 0x10) | (1U << 0x17)) |
+ ((HW_ATL_A0_ERR_INT << 8) | (1U << 0xF)) |
+ ((HW_ATL_A0_ERR_INT) | (1U << 0x7)), 0U);
+
+ hw_atl_a0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_tx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_start(struct aq_hw_s *self)
+{
+ tpb_tx_buff_en_set(self, 1);
+ rpb_rx_buff_en_set(self, 1);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ return 0;
+}
+
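+/* Translate software ring buffers into hardware Tx descriptors: an
+ * optional context (TXC) descriptor carries the MSS and header lengths
+ * for LSO, followed by one data (TXD) descriptor per fragment.
+ */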
+static int hw_atl_a0_hw_ring_tx_xmit(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int pkt_len = 0U;
+ unsigned int frag_count = 0U;
+ bool is_gso = false;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_A0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_txc) {
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24) |
+ HW_ATL_A0_TXD_CTL_CMD_TCP |
+ HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl2 |= (buff->mss << 16) |
+ (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ is_gso = true;
+ } else {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_A0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD;
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_LSO;
+ txd->ctl2 |= HW_ATL_A0_TXD_CTL2_CTX_EN;
+ }
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_TUCSO;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_A0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_A0_TXD_CTL_CMD_WB;
+ }
+ }
+
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, ring);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
+
+ rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->idx);
+
+ rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_a0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_A0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ int err = 0;
+ unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_ring_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_A0_RXD_SIZE];
+
+ unsigned int is_err = 1U;
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+
+ if (!(rxd_wb->status & 0x5U)) { /* RxD is not done */
+ if ((1U << 4) &
+ reg_rx_dma_desc_status_get(self, ring->idx)) {
+ rdm_rx_desc_en_set(self, false, ring->idx);
+ rdm_rx_desc_res_set(self, true, ring->idx);
+ rdm_rx_desc_res_set(self, false, ring->idx);
+ rdm_rx_desc_en_set(self, true, ring->idx);
+ }
+
+ if (ring->hw_head ||
+ (rdm_rx_desc_head_ptr_get(self, ring->idx) < 2U)) {
+ break;
+ } else if (!(rxd_wb->status & 0x1U)) {
+ struct hw_atl_rxd_wb_s *rxd_wb1 =
+ (struct hw_atl_rxd_wb_s *)
+ (&ring->dx_ring[(1U) *
+ HW_ATL_A0_RXD_SIZE]);
+
+ if ((rxd_wb1->status & 0x1U)) {
+ rxd_wb->pkt_len = 1514U;
+ rxd_wb->status = 3U;
+ } else {
+ break;
+ }
+ }
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
+ if (0x3U != (rxd_wb->status & 0x3U))
+ rxd_wb->status |= 4;
+
+ is_err = (0x0000001CU & rxd_wb->status);
+ is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
+ if (is_rx_check_sum_enabled) {
+ if (0x0U == (pkt_type & 0x3U))
+ buff->is_ip_cso = (is_err & 0x08U) ? 0 : 1;
+
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = (is_err & 0x10U) ? 0 : 1;
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = (is_err & 0x10U) ? 0 : 1;
+ }
+
+ is_err &= ~0x18U;
+ is_err &= ~0x04U;
+
+ dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+ } else {
+ if (self->aq_nic_cfg->is_rss) {
+ /* last 4 byte */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ if (HW_ATL_A0_RXD_WB_STAT2_EOP & rxd_wb->status) {
+ buff->len = (rxd_wb->pkt_len &
+ (AQ_CFG_RX_FRAME_MAX - 1U));
+ buff->len = buff->len ?
+ buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ /* jumbo */
+ buff->next = aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_setlsw_set(self, LODWORD(mask) |
+ (1U << HW_ATL_A0_ERR_INT));
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ if ((1U << 16) & reg_gen_irq_status_get(self))
+ atomic_inc(&PHAL_ATLANTIC_A0->dpc);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = itr_irq_statuslsw_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_a0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ unsigned int i = 0U;
+
+ rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ rpfl2multicast_flr_en_set(self, IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+ rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+ self->aq_nic_cfg->is_mc_list_enabled =
+ IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_A0_MAC_MIN; i < HW_ATL_A0_MAC_MAX; ++i)
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_a0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ int err = 0;
+
+ if (count > (HW_ATL_A0_MAC_MAX - HW_ATL_A0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
+ for (self->aq_nic_cfg->mc_list_count = 0U;
+ self->aq_nic_cfg->mc_list_count < count;
+ ++self->aq_nic_cfg->mc_list_count) {
+ u32 i = self->aq_nic_cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_A0_MAC_MIN + i);
+
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_A0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_a0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+ bool itr_enabled)
+{
+ unsigned int i = 0U;
+
+ if (itr_enabled && self->aq_nic_cfg->itr) {
+ if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ u32 itr_ = (self->aq_nic_cfg->itr >> 1);
+
+ itr_ = min(AQ_CFG_IRQ_MASK, itr_);
+
+ PHAL_ATLANTIC_A0->itr_rx = 0x80000000U |
+ (itr_ << 0x10);
+ } else {
+ u32 n = 0xFFFFU & aq_hw_read_reg(self, 0x00002A00U);
+
+ if (n < self->aq_link_status.mbps) {
+ PHAL_ATLANTIC_A0->itr_rx = 0U;
+ } else {
+ static unsigned int hw_timers_tbl_[] = {
+ 0x01CU, /* 10Gbit */
+ 0x039U, /* 5Gbit */
+ 0x039U, /* 5Gbit 5GS */
+ 0x073U, /* 2.5Gbit */
+ 0x120U, /* 1Gbit */
+ 0x1FFU, /* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ PHAL_ATLANTIC_A0->itr_rx =
+ 0x80000000U |
+ (hw_timers_tbl_[speed_index] << 0x10U);
+ }
+
+ aq_hw_write_reg(self, 0x00002A00U, 0x40000000U);
+ aq_hw_write_reg(self, 0x00002A00U, 0x8D000000U);
+ }
+ } else {
+ PHAL_ATLANTIC_A0->itr_rx = 0U;
+ }
+
+ for (i = HW_ATL_A0_RINGS_MAX; i--;)
+ reg_irq_thr_set(self, PHAL_ATLANTIC_A0->itr_rx, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_a0_hw_irq_disable(self, HW_ATL_A0_INT_MASK);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_tx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_ring_rx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_a0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+ int err = 0;
+
+ err = hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+ .create = hw_atl_a0_create,
+ .destroy = hw_atl_a0_destroy,
+ .get_hw_caps = hw_atl_a0_get_hw_caps,
+
+ .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .hw_set_mac_address = hw_atl_a0_hw_mac_addr_set,
+ .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
+ .hw_set_link_speed = hw_atl_a0_hw_set_speed,
+ .hw_init = hw_atl_a0_hw_init,
+ .hw_deinit = hw_atl_utils_hw_deinit,
+ .hw_set_power = hw_atl_utils_hw_set_power,
+ .hw_reset = hw_atl_a0_hw_reset,
+ .hw_start = hw_atl_a0_hw_start,
+ .hw_ring_tx_start = hw_atl_a0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_a0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_a0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_a0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_a0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_a0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_a0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_a0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_a0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_a0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_a0_hw_irq_disable,
+ .hw_irq_read = hw_atl_a0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_a0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_a0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_a0_hw_packet_filter_set,
+ .hw_multicast_list_set = hw_atl_a0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_a0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_a0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_a0_hw_rss_hash_set,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev)
+{
+ bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+ bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D109));
+
+ bool is_rev_ok = (pdev->revision == 1U);
+
+ return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
new file mode 100644
index 0000000..6e1d527
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0.h: Declaration of an abstract interface for Atlantic
+ * hardware-specific functions.
+ */
+
+#ifndef HW_ATL_A0_H
+#define HW_ATL_A0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+#define HW_ATL_DEVICE_ID_0001 0x0001
+#define HW_ATL_DEVICE_ID_D100 0xD100
+#define HW_ATL_DEVICE_ID_D107 0xD107
+#define HW_ATL_DEVICE_ID_D108 0xD108
+#define HW_ATL_DEVICE_ID_D109 0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_a0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_A0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
new file mode 100644
index 0000000..1093ea1
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_a0_internal.h
@@ -0,0 +1,155 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_a0_internal.h: Definition of Atlantic A0 chip-specific
+ * constants.
+ */
+
+#ifndef HW_ATL_A0_INTERNAL_H
+#define HW_ATL_A0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_A0_MTU_JUMBO 9014U
+
+#define HW_ATL_A0_TX_RINGS 4U
+#define HW_ATL_A0_RX_RINGS 4U
+
+#define HW_ATL_A0_RINGS_MAX 32U
+#define HW_ATL_A0_TXD_SIZE 16U
+#define HW_ATL_A0_RXD_SIZE 16U
+
+#define HW_ATL_A0_MAC 0U
+#define HW_ATL_A0_MAC_MIN 1U
+#define HW_ATL_A0_MAC_MAX 33U
+
+/* interrupts */
+#define HW_ATL_A0_ERR_INT 8U
+#define HW_ATL_A0_INT_MASK 0xFFFFFFFFU
+
+#define HW_ATL_A0_TXD_CTL2_LEN 0xFFFFC000U
+#define HW_ATL_A0_TXD_CTL2_CTX_EN 0x00002000U
+#define HW_ATL_A0_TXD_CTL2_CTX_IDX 0x00001000U
+
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXD 0x00000001U
+#define HW_ATL_A0_TXD_CTL_DESC_TYPE_TXC 0x00000002U
+#define HW_ATL_A0_TXD_CTL_BLEN 0x000FFFF0U
+#define HW_ATL_A0_TXD_CTL_DD 0x00100000U
+#define HW_ATL_A0_TXD_CTL_EOP 0x00200000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_X 0x3FC00000U
+
+#define HW_ATL_A0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_A0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_A0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_A0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_A0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_A0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_A0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_A0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_A0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_A0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_A0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_A0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_A0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_A0_RATE_10G BIT(0)
+#define HW_ATL_A0_RATE_5G BIT(1)
+#define HW_ATL_A0_RATE_2G5 BIT(3)
+#define HW_ATL_A0_RATE_1G BIT(4)
+#define HW_ATL_A0_RATE_100M BIT(5)
+
+#define HW_ATL_A0_TXBUF_MAX 160U
+#define HW_ATL_A0_RXBUF_MAX 320U
+
+#define HW_ATL_A0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_A0_RSS_REDIRECTION_BITS 3U
+
+#define HW_ATL_A0_TC_MAX 1U
+#define HW_ATL_A0_RSS_MAX 8U
+
+#define HW_ATL_A0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_A0_RXD_DD 0x1U
+#define HW_ATL_A0_RXD_NCEA0 0x1U
+
+#define HW_ATL_A0_RXD_WB_STAT2_EOP 0x0002U
+
+#define HW_ATL_A0_UCP_0X370_REG 0x370U
+
+#define HW_ATL_A0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_a0_hw_caps_ = {
+ .ports = 1U,
+ .is_64_dma = true,
+ .msix_irqs = 4U,
+ .irq_mask = ~0U,
+ .vecs = HW_ATL_A0_RSS_MAX,
+ .tcs = HW_ATL_A0_TC_MAX,
+ .rxd_alignment = 1U,
+ .rxd_size = HW_ATL_A0_RXD_SIZE,
+ .rxds = 248U,
+ .txd_alignment = 1U,
+ .txd_size = HW_ATL_A0_TXD_SIZE,
+ .txds = 8U * 1024U,
+ .txhwb_alignment = 4096U,
+ .tx_rings = HW_ATL_A0_TX_RINGS,
+ .rx_rings = HW_ATL_A0_RX_RINGS,
+ .hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_TSO,
+ .hw_priv_flags = IFF_UNICAST_FLT,
+ .link_speed_msk = (HW_ATL_A0_RATE_10G |
+ HW_ATL_A0_RATE_5G |
+ HW_ATL_A0_RATE_2G5 |
+ HW_ATL_A0_RATE_1G |
+ HW_ATL_A0_RATE_100M),
+ .flow_control = true,
+ .mtu = HW_ATL_A0_MTU_JUMBO,
+ .mac_regs_count = 88,
+ .fw_ver_expected = HW_ATL_A0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_A0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
new file mode 100644
index 0000000..e7e694f
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -0,0 +1,958 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.c: Definition of Atlantic hardware-specific functions. */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_ring.h"
+#include "hw_atl_b0.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+#include "hw_atl_b0_internal.h"
+
+static int hw_atl_b0_get_hw_caps(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ memcpy(aq_hw_caps, &hw_atl_b0_hw_caps_, sizeof(*aq_hw_caps));
+ return 0;
+}
+
+static struct aq_hw_s *hw_atl_b0_create(struct aq_pci_func_s *aq_pci_func,
+ unsigned int port,
+ struct aq_hw_ops *ops)
+{
+ struct hw_atl_s *self = NULL;
+
+ self = kzalloc(sizeof(*self), GFP_KERNEL);
+ if (!self)
+ goto err_exit;
+
+ self->base.aq_pci_func = aq_pci_func;
+
+ self->base.not_ff_addr = 0x10U;
+
+err_exit:
+ return (struct aq_hw_s *)self;
+}
+
+static void hw_atl_b0_destroy(struct aq_hw_s *self)
+{
+ kfree(self);
+}
+
+static int hw_atl_b0_hw_reset(struct aq_hw_s *self)
+{
+ int err = 0;
+
+ glb_glb_reg_res_dis_set(self, 1U);
+ pci_pci_reg_res_dis_set(self, 0U);
+ rx_rx_reg_res_dis_set(self, 0U);
+ tx_tx_reg_res_dis_set(self, 0U);
+
+ HW_ATL_FLUSH();
+ glb_soft_res_set(self, 1);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(glb_soft_res_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ itr_irq_reg_res_dis_set(self, 0U);
+ itr_res_irq_set(self, 1U);
+
+ /* poll every 1 ms, up to 10 times */
+ AQ_HW_WAIT_FOR(itr_res_irq_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+
+ hw_atl_utils_mpi_set(self, MPI_RESET, 0x0U);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_qos_set(struct aq_hw_s *self)
+{
+ u32 tc = 0U;
+ u32 buff_size = 0U;
+ unsigned int i_priority = 0U;
+ bool is_rx_flow_control = false;
+
+ /* TPS Descriptor rate init */
+ tps_tx_pkt_shed_desc_rate_curr_time_res_set(self, 0x0U);
+ tps_tx_pkt_shed_desc_rate_lim_set(self, 0xA);
+
+ /* TPS VM init */
+ tps_tx_pkt_shed_desc_vm_arb_mode_set(self, 0U);
+
+ /* TPS TC credits init */
+ tps_tx_pkt_shed_desc_tc_arb_mode_set(self, 0U);
+ tps_tx_pkt_shed_data_arb_mode_set(self, 0U);
+
+ tps_tx_pkt_shed_tc_data_max_credit_set(self, 0xFFF, 0U);
+ tps_tx_pkt_shed_tc_data_weight_set(self, 0x64, 0U);
+ tps_tx_pkt_shed_desc_tc_max_credit_set(self, 0x50, 0U);
+ tps_tx_pkt_shed_desc_tc_weight_set(self, 0x1E, 0U);
+
+ /* Tx buf size */
+ buff_size = HW_ATL_B0_TXBUF_MAX;
+
+ tpb_tx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ tpb_tx_buff_hi_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 66U) /
+ 100U, tc);
+ tpb_tx_buff_lo_threshold_per_tc_set(self,
+ (buff_size * (1024 / 32U) * 50U) /
+ 100U, tc);
+
+ /* QoS Rx buf size per TC */
+ tc = 0;
+ is_rx_flow_control = (AQ_NIC_FC_RX & self->aq_nic_cfg->flow_control);
+ buff_size = HW_ATL_B0_RXBUF_MAX;
+
+ rpb_rx_pkt_buff_size_per_tc_set(self, buff_size, tc);
+ rpb_rx_buff_hi_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 66U) /
+ 100U, tc);
+ rpb_rx_buff_lo_threshold_per_tc_set(self,
+ (buff_size *
+ (1024U / 32U) * 50U) /
+ 100U, tc);
+ rpb_rx_xoff_en_per_tc_set(self, is_rx_flow_control ? 1U : 0U, tc);
+
+ /* QoS 802.1p priority -> TC mapping */
+ for (i_priority = 8U; i_priority--;)
+ rpf_rpb_user_priority_tc_map_set(self, i_priority, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_rss_hash_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ struct aq_nic_cfg_s *cfg = NULL;
+ int err = 0;
+ unsigned int i = 0U;
+ unsigned int addr = 0U;
+
+ cfg = self->aq_nic_cfg;
+
+ for (i = 10, addr = 0U; i--; ++addr) {
+ u32 key_data = cfg->is_rss ?
+ __swab32(rss_params->hash_secret_key[i]) : 0U;
+ rpf_rss_key_wr_data_set(self, key_data);
+ rpf_rss_key_addr_set(self, addr);
+ rpf_rss_key_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_key_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_rss_set(struct aq_hw_s *self,
+ struct aq_rss_parameters *rss_params)
+{
+ u8 *indirection_table = rss_params->indirection_table;
+ u32 i = 0U;
+ u32 num_rss_queues = max(1U, self->aq_nic_cfg->num_rss_queues);
+ int err = 0;
+ u16 bitary[(HW_ATL_B0_RSS_REDIRECTION_MAX *
+ HW_ATL_B0_RSS_REDIRECTION_BITS / 16U)];
+
+ memset(bitary, 0, sizeof(bitary));
+
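+ /* Pack the redirection table entries, 3 bits each, into the
+ * bitary[] word array, then write it out 16 bits at a time
+ * through the indirect redirection table registers.
+ */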
+ for (i = HW_ATL_B0_RSS_REDIRECTION_MAX; i--;) {
+ (*(u32 *)(bitary + ((i * 3U) / 16U))) |=
+ ((indirection_table[i] % num_rss_queues) <<
+ ((i * 3U) & 0xFU));
+ }
+
+ for (i = AQ_DIMOF(bitary); i--;) {
+ rpf_rss_redir_tbl_wr_data_set(self, bitary[i]);
+ rpf_rss_redir_tbl_addr_set(self, i);
+ rpf_rss_redir_wr_en_set(self, 1U);
+ AQ_HW_WAIT_FOR(rpf_rss_redir_wr_en_get(self) == 0, 1000U, 10U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_offload_set(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg)
+{
+	unsigned int i;
+
+	/* TX checksum offloads */
+	tpo_ipv4header_crc_offload_en_set(self, 1);
+	tpo_tcp_udp_crc_offload_en_set(self, 1);
+
+	/* RX checksum offloads */
+	rpo_ipv4header_crc_offload_en_set(self, 1);
+	rpo_tcp_udp_crc_offload_en_set(self, 1);
+
+	/* LSO offloads */
+	tdm_large_send_offload_en_set(self, 0xFFFFFFFFU);
+
+	/* LRO offloads */
+ {
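+		/* encode HW_ATL_B0_LRO_RXD_MAX into the 2-bit per-ring
+		 * descriptor limit: presumably 0x0 = 2 descriptors, 0x1 = 4,
+		 * 0x2 = 8, 0x3 = 16 or more
+		 */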
+ unsigned int val = (8U < HW_ATL_B0_LRO_RXD_MAX) ? 0x3U :
+ ((4U < HW_ATL_B0_LRO_RXD_MAX) ? 0x2U :
+			((2U < HW_ATL_B0_LRO_RXD_MAX) ? 0x1U : 0x0U));
+
+ for (i = 0; i < HW_ATL_B0_RINGS_MAX; i++)
+ rpo_lro_max_num_of_descriptors_set(self, val, i);
+
+ rpo_lro_time_base_divider_set(self, 0x61AU);
+ rpo_lro_inactive_interval_set(self, 0);
+ rpo_lro_max_coalescing_interval_set(self, 2);
+
+ rpo_lro_qsessions_lim_set(self, 1U);
+
+ rpo_lro_total_desc_lim_set(self, 2U);
+
+ rpo_lro_patch_optimization_en_set(self, 0U);
+
+ rpo_lro_min_pay_of_first_pkt_set(self, 10U);
+
+ rpo_lro_pkt_lim_set(self, 1U);
+
+ rpo_lro_en_set(self, aq_nic_cfg->is_lro ? 0xFFFFFFFFU : 0U);
+ }
+	return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_init_tx_path(struct aq_hw_s *self)
+{
+ thm_lso_tcp_flag_of_first_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_middle_pkt_set(self, 0x0FF6U);
+ thm_lso_tcp_flag_of_last_pkt_set(self, 0x0F7FU);
+
+ /* Tx interrupts */
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00007040U, IS_CHIP_FEATURE(TPO2) ?
+ 0x00010000U : 0x00000000U);
+ tdm_tx_dca_en_set(self, 0U);
+ tdm_tx_dca_mode_set(self, 0U);
+
+ tpb_tx_path_scp_ins_en_set(self, 1U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_init_rx_path(struct aq_hw_s *self)
+{
+ struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
+ int i;
+
+ /* Rx TC/RSS number config */
+ rpb_rpf_rx_traf_class_mode_set(self, 1U);
+
+ /* Rx flow control */
+ rpb_rx_flow_ctl_mode_set(self, 1U);
+
+ /* RSS Ring selection */
+ reg_rx_flr_rss_control1set(self, cfg->is_rss ?
+ 0xB3333333U : 0x00000000U);
+
+ /* Multicast filters */
+ for (i = HW_ATL_B0_MAC_MAX; i--;) {
+ rpfl2_uc_flr_en_set(self, (i == 0U) ? 1U : 0U, i);
+ rpfl2unicast_flr_act_set(self, 1U, i);
+ }
+
+ reg_rx_flr_mcst_flr_msk_set(self, 0x00000000U);
+ reg_rx_flr_mcst_flr_set(self, 0x00010FFFU, 0U);
+
+ /* Vlan filters */
+ rpf_vlan_outer_etht_set(self, 0x88A8U);
+ rpf_vlan_inner_etht_set(self, 0x8100U);
+
+ if (cfg->vlan_id) {
+ rpf_vlan_flr_act_set(self, 1U, 0U);
+ rpf_vlan_id_flr_set(self, 0U, 0U);
+ rpf_vlan_flr_en_set(self, 0U, 0U);
+
+ rpf_vlan_accept_untagged_packets_set(self, 1U);
+ rpf_vlan_untagged_act_set(self, 1U);
+
+ rpf_vlan_flr_act_set(self, 1U, 1U);
+		rpf_vlan_id_flr_set(self, cfg->vlan_id, 1U);
+ rpf_vlan_flr_en_set(self, 1U, 1U);
+ } else {
+ rpf_vlan_prom_mode_en_set(self, 1);
+ }
+
+ /* Rx Interrupts */
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+
+ /* misc */
+ aq_hw_write_reg(self, 0x00005040U,
+ IS_CHIP_FEATURE(RPF2) ? 0x000F0000U : 0x00000000U);
+
+ rpfl2broadcast_flr_act_set(self, 1U);
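+	/* (~0U / 256U) & 0xFFFFU evaluates to 0xFFFFU, i.e. the maximum
+	 * broadcast counter threshold
+	 */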
+ rpfl2broadcast_count_threshold_set(self, 0xFFFFU & (~0U / 256U));
+
+ rdm_rx_dca_en_set(self, 0U);
+ rdm_rx_dca_mode_set(self, 0U);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_mac_addr_set(struct aq_hw_s *self, u8 *mac_addr)
+{
+ int err = 0;
+ unsigned int h = 0U;
+ unsigned int l = 0U;
+
+ if (!mac_addr) {
+ err = -EINVAL;
+ goto err_exit;
+ }
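+	/* split the address into the 16-bit MSW (bytes 0-1) and the 32-bit
+	 * LSW (bytes 2-5) expected by the filter registers
+	 */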
+ h = (mac_addr[0] << 8) | (mac_addr[1]);
+ l = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
+ (mac_addr[4] << 8) | mac_addr[5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC);
+ rpfl2unicast_dest_addresslsw_set(self, l, HW_ATL_B0_MAC);
+ rpfl2unicast_dest_addressmsw_set(self, h, HW_ATL_B0_MAC);
+ rpfl2_uc_flr_en_set(self, 1U, HW_ATL_B0_MAC);
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_init(struct aq_hw_s *self,
+ struct aq_nic_cfg_s *aq_nic_cfg,
+ u8 *mac_addr)
+{
+ static u32 aq_hw_atl_igcr_table_[4][2] = {
+ { 0x20000000U, 0x20000000U }, /* AQ_IRQ_INVALID */
+ { 0x20000080U, 0x20000080U }, /* AQ_IRQ_LEGACY */
+ { 0x20000021U, 0x20000025U }, /* AQ_IRQ_MSI */
+ { 0x20000022U, 0x20000026U } /* AQ_IRQ_MSIX */
+ };
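+	/* column 0 is used with a single interrupt vector, column 1 when
+	 * more than one vector is configured
+	 */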
+
+ int err = 0;
+
+ self->aq_nic_cfg = aq_nic_cfg;
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_B0->chip_features);
+
+ hw_atl_b0_hw_init_tx_path(self);
+ hw_atl_b0_hw_init_rx_path(self);
+
+ hw_atl_b0_hw_mac_addr_set(self, mac_addr);
+
+ hw_atl_utils_mpi_set(self, MPI_INIT, aq_nic_cfg->link_speed_msk);
+
+ hw_atl_b0_hw_qos_set(self);
+ hw_atl_b0_hw_rss_set(self, &aq_nic_cfg->aq_rss);
+ hw_atl_b0_hw_rss_hash_set(self, &aq_nic_cfg->aq_rss);
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
+
+ /* Interrupts */
+ reg_irq_glb_ctl_set(self,
+ aq_hw_atl_igcr_table_[aq_nic_cfg->irq_type]
+ [(aq_nic_cfg->vecs > 1U) ?
+ 1 : 0]);
+
+ itr_irq_auto_masklsw_set(self, aq_nic_cfg->aq_hw_caps->irq_mask);
+
+	/* Error interrupt mapping */
+ reg_gen_irq_map_set(self,
+ ((HW_ATL_B0_ERR_INT << 0x18) | (1U << 0x1F)) |
+ ((HW_ATL_B0_ERR_INT << 0x10) | (1U << 0x17)), 0U);
+
+ hw_atl_b0_hw_offload_set(self, aq_nic_cfg);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_ring_tx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_start(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 1, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_start(struct aq_hw_s *self)
+{
+ tpb_tx_buff_en_set(self, 1);
+ rpb_rx_buff_en_set(self, 1);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_tx_ring_tail_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ reg_tx_dma_desc_tail_ptr_set(self, ring->sw_tail, ring->idx);
+ return 0;
+}
+
+static int hw_atl_b0_hw_ring_tx_xmit(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int frags)
+{
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_txd_s *txd = NULL;
+ unsigned int buff_pa_len = 0U;
+ unsigned int pkt_len = 0U;
+ unsigned int frag_count = 0U;
+ bool is_gso = false;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+ pkt_len = (buff->is_eop && buff->is_sop) ? buff->len : buff->len_pkt;
+
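+	/* one descriptor per fragment: a TXC (context) descriptor carries
+	 * LSO metadata, a TXD (data) descriptor carries the DMA address,
+	 * length and checksum flags
+	 */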
+ for (frag_count = 0; frag_count < frags; frag_count++) {
+ txd = (struct hw_atl_txd_s *)&ring->dx_ring[ring->sw_tail *
+ HW_ATL_B0_TXD_SIZE];
+ txd->ctl = 0;
+ txd->ctl2 = 0;
+ txd->buf_addr = 0;
+
+ buff = &ring->buff_ring[ring->sw_tail];
+
+ if (buff->is_txc) {
+ txd->ctl |= (buff->len_l3 << 31) |
+ (buff->len_l2 << 24) |
+ HW_ATL_B0_TXD_CTL_CMD_TCP |
+ HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC;
+ txd->ctl2 |= (buff->mss << 16) |
+ (buff->len_l4 << 8) |
+ (buff->len_l3 >> 1);
+
+ pkt_len -= (buff->len_l4 +
+ buff->len_l3 +
+ buff->len_l2);
+ is_gso = true;
+ } else {
+ buff_pa_len = buff->len;
+
+ txd->buf_addr = buff->pa;
+ txd->ctl |= (HW_ATL_B0_TXD_CTL_BLEN &
+ ((u32)buff_pa_len << 4));
+ txd->ctl |= HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD;
+ /* PAY_LEN */
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_LEN & (pkt_len << 14);
+
+ if (is_gso) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_LSO;
+ txd->ctl2 |= HW_ATL_B0_TXD_CTL2_CTX_EN;
+ }
+
+ /* Tx checksum offloads */
+ if (buff->is_ip_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_IPCSO;
+
+ if (buff->is_udp_cso || buff->is_tcp_cso)
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_TUCSO;
+
+ if (unlikely(buff->is_eop)) {
+ txd->ctl |= HW_ATL_B0_TXD_CTL_EOP;
+ txd->ctl |= HW_ATL_B0_TXD_CTL_CMD_WB;
+ }
+ }
+
+ ring->sw_tail = aq_ring_next_dx(ring, ring->sw_tail);
+ }
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, ring);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_addr_lsw = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_addr_msw = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ rdm_rx_desc_en_set(self, false, aq_ring->idx);
+
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+
+ reg_rx_dma_desc_base_addresslswset(self, dma_desc_addr_lsw,
+ aq_ring->idx);
+
+ reg_rx_dma_desc_base_addressmswset(self,
+ dma_desc_addr_msw, aq_ring->idx);
+
+ rdm_rx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ rdm_rx_desc_data_buff_size_set(self,
+ AQ_CFG_RX_FRAME_MAX / 1024U,
+ aq_ring->idx);
+
+ rdm_rx_desc_head_buff_size_set(self, 0U, aq_ring->idx);
+ rdm_rx_desc_head_splitting_set(self, 0U, aq_ring->idx);
+ rpo_rx_desc_vlan_stripping_set(self, 0U, aq_ring->idx);
+
+ /* Rx ring set mode */
+
+ /* Mapping interrupt vector */
+ itr_irq_map_rx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_rx_set(self, true, aq_ring->idx);
+
+ rdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ rdm_rx_desc_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_head_dca_en_set(self, 0U, aq_ring->idx);
+ rdm_rx_pld_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_init(struct aq_hw_s *self,
+ struct aq_ring_s *aq_ring,
+ struct aq_ring_param_s *aq_ring_param)
+{
+ u32 dma_desc_lsw_addr = (u32)aq_ring->dx_ring_pa;
+ u32 dma_desc_msw_addr = (u32)(((u64)aq_ring->dx_ring_pa) >> 32);
+
+ reg_tx_dma_desc_base_addresslswset(self, dma_desc_lsw_addr,
+ aq_ring->idx);
+
+ reg_tx_dma_desc_base_addressmswset(self, dma_desc_msw_addr,
+ aq_ring->idx);
+
+ tdm_tx_desc_len_set(self, aq_ring->size / 8U, aq_ring->idx);
+
+ hw_atl_b0_hw_tx_ring_tail_update(self, aq_ring);
+
+ /* Set Tx threshold */
+ tdm_tx_desc_wr_wb_threshold_set(self, 0U, aq_ring->idx);
+
+ /* Mapping interrupt vector */
+ itr_irq_map_tx_set(self, aq_ring_param->vec_idx, aq_ring->idx);
+ itr_irq_map_en_tx_set(self, true, aq_ring->idx);
+
+ tdm_cpu_id_set(self, aq_ring_param->cpu, aq_ring->idx);
+ tdm_tx_desc_dca_en_set(self, 0U, aq_ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_fill(struct aq_hw_s *self,
+ struct aq_ring_s *ring,
+ unsigned int sw_tail_old)
+{
+ for (; sw_tail_old != ring->sw_tail;
+ sw_tail_old = aq_ring_next_dx(ring, sw_tail_old)) {
+ struct hw_atl_rxd_s *rxd =
+ (struct hw_atl_rxd_s *)&ring->dx_ring[sw_tail_old *
+ HW_ATL_B0_RXD_SIZE];
+
+ struct aq_ring_buff_s *buff = &ring->buff_ring[sw_tail_old];
+
+ rxd->buf_addr = buff->pa;
+ rxd->hdr_addr = 0U;
+ }
+
+ reg_rx_dma_desc_tail_ptr_set(self, sw_tail_old, ring->idx);
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_head_update(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ int err = 0;
+ unsigned int hw_head_ = tdm_tx_desc_head_ptr_get(self, ring->idx);
+
+ if (aq_utils_obj_test(&self->header.flags, AQ_HW_FLAG_ERR_UNPLUG)) {
+ err = -ENXIO;
+ goto err_exit;
+ }
+ ring->hw_head = hw_head_;
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ struct device *ndev = aq_nic_get_dev(ring->aq_nic);
+
+ for (; ring->hw_head != ring->sw_tail;
+ ring->hw_head = aq_ring_next_dx(ring, ring->hw_head)) {
+ struct aq_ring_buff_s *buff = NULL;
+ struct hw_atl_rxd_wb_s *rxd_wb = (struct hw_atl_rxd_wb_s *)
+ &ring->dx_ring[ring->hw_head * HW_ATL_B0_RXD_SIZE];
+
+ unsigned int is_err = 1U;
+ unsigned int is_rx_check_sum_enabled = 0U;
+ unsigned int pkt_type = 0U;
+
+ if (!(rxd_wb->status & 0x1U)) { /* RxD is not done */
+ break;
+ }
+
+ buff = &ring->buff_ring[ring->hw_head];
+
+ is_err = (0x0000003CU & rxd_wb->status);
+
+ is_rx_check_sum_enabled = (rxd_wb->type) & (0x3U << 19);
+ is_err &= ~0x20U; /* exclude validity bit */
+
+ pkt_type = 0xFFU & (rxd_wb->type >> 4);
+
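+		/* pkt_type[1:0] appears to encode L3 (0 = IPv4) and
+		 * pkt_type[4:2] L4 (0 = TCP, 1 = UDP), hence the 0x3U and
+		 * 0x1CU masks below
+		 */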
+ if (is_rx_check_sum_enabled) {
+ if (0x0U == (pkt_type & 0x3U))
+ buff->is_ip_cso = (is_err & 0x08U) ? 0U : 1U;
+
+ if (0x4U == (pkt_type & 0x1CU))
+ buff->is_udp_cso = buff->is_cso_err ? 0U : 1U;
+ else if (0x0U == (pkt_type & 0x1CU))
+ buff->is_tcp_cso = buff->is_cso_err ? 0U : 1U;
+ }
+
+ is_err &= ~0x18U;
+
+ dma_unmap_page(ndev, buff->pa, buff->len, DMA_FROM_DEVICE);
+
+ if (is_err || rxd_wb->type & 0x1000U) {
+ /* status error or DMA error */
+ buff->is_error = 1U;
+ } else {
+ if (self->aq_nic_cfg->is_rss) {
+				/* RSS type: low 4 bits of the type field */
+ u16 rss_type = rxd_wb->type & 0xFU;
+
+ if (rss_type && rss_type < 0x8U) {
+ buff->is_hash_l4 = (rss_type == 0x4 ||
+ rss_type == 0x5);
+ buff->rss_hash = rxd_wb->rss_hash;
+ }
+ }
+
+ if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
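+				/* pkt_len is reported modulo
+				 * AQ_CFG_RX_FRAME_MAX; zero means a full
+				 * frame
+				 */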
+ buff->len = (rxd_wb->pkt_len &
+ (AQ_CFG_RX_FRAME_MAX - 1U));
+ buff->len = buff->len ?
+ buff->len : AQ_CFG_RX_FRAME_MAX;
+ buff->next = 0U;
+ buff->is_eop = 1U;
+ } else {
+ if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+ rxd_wb->status) {
+ /* LRO */
+ buff->next = rxd_wb->next_desc_ptr;
+ ++ring->stats.rx.lro_packets;
+ } else {
+ /* jumbo */
+ buff->next =
+ aq_ring_next_dx(ring,
+ ring->hw_head);
+ ++ring->stats.rx.jumbo_packets;
+ }
+ }
+ }
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_enable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_setlsw_set(self, LODWORD(mask));
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_disable(struct aq_hw_s *self, u64 mask)
+{
+ itr_irq_msk_clearlsw_set(self, LODWORD(mask));
+ itr_irq_status_clearlsw_set(self, LODWORD(mask));
+
+ atomic_inc(&PHAL_ATLANTIC_B0->dpc);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_irq_read(struct aq_hw_s *self, u64 *mask)
+{
+ *mask = itr_irq_statuslsw_get(self);
+ return aq_hw_err_from_flags(self);
+}
+
+#define IS_FILTER_ENABLED(_F_) ((packet_filter & (_F_)) ? 1U : 0U)
+
+static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
+ unsigned int packet_filter)
+{
+ unsigned int i = 0U;
+
+ rpfl2promiscuous_mode_en_set(self, IS_FILTER_ENABLED(IFF_PROMISC));
+ rpfl2multicast_flr_en_set(self,
+ IS_FILTER_ENABLED(IFF_MULTICAST), 0);
+
+ rpfl2_accept_all_mc_packets_set(self,
+ IS_FILTER_ENABLED(IFF_ALLMULTI));
+
+ rpfl2broadcast_en_set(self, IS_FILTER_ENABLED(IFF_BROADCAST));
+
+ self->aq_nic_cfg->is_mc_list_enabled = IS_FILTER_ENABLED(IFF_MULTICAST);
+
+ for (i = HW_ATL_B0_MAC_MIN; i < HW_ATL_B0_MAC_MAX; ++i)
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled &&
+ (i <= self->aq_nic_cfg->mc_list_count)) ?
+ 1U : 0U, i);
+
+ return aq_hw_err_from_flags(self);
+}
+
+#undef IS_FILTER_ENABLED
+
+static int hw_atl_b0_hw_multicast_list_set(struct aq_hw_s *self,
+ u8 ar_mac
+ [AQ_CFG_MULTICAST_ADDRESS_MAX]
+ [ETH_ALEN],
+ u32 count)
+{
+ int err = 0;
+
+ if (count > (HW_ATL_B0_MAC_MAX - HW_ATL_B0_MAC_MIN)) {
+ err = -EBADRQC;
+ goto err_exit;
+ }
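+	/* program each address into consecutive unicast filter slots
+	 * starting at HW_ATL_B0_MAC_MIN, disabling each slot while it is
+	 * rewritten
+	 */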
+ for (self->aq_nic_cfg->mc_list_count = 0U;
+ self->aq_nic_cfg->mc_list_count < count;
+ ++self->aq_nic_cfg->mc_list_count) {
+ u32 i = self->aq_nic_cfg->mc_list_count;
+ u32 h = (ar_mac[i][0] << 8) | (ar_mac[i][1]);
+ u32 l = (ar_mac[i][2] << 24) | (ar_mac[i][3] << 16) |
+ (ar_mac[i][4] << 8) | ar_mac[i][5];
+
+ rpfl2_uc_flr_en_set(self, 0U, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addresslsw_set(self,
+ l, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2unicast_dest_addressmsw_set(self,
+ h, HW_ATL_B0_MAC_MIN + i);
+
+ rpfl2_uc_flr_en_set(self,
+ (self->aq_nic_cfg->is_mc_list_enabled),
+ HW_ATL_B0_MAC_MIN + i);
+ }
+
+ err = aq_hw_err_from_flags(self);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_b0_hw_interrupt_moderation_set(struct aq_hw_s *self,
+ bool itr_enabled)
+{
+ unsigned int i = 0U;
+
+ if (itr_enabled && self->aq_nic_cfg->itr) {
+ tdm_tx_desc_wr_wb_irq_en_set(self, 0U);
+ tdm_tdm_intr_moder_en_set(self, 1U);
+ rdm_rx_desc_wr_wb_irq_en_set(self, 0U);
+ rdm_rdm_intr_moder_en_set(self, 1U);
+
+ PHAL_ATLANTIC_B0->itr_tx = 2U;
+ PHAL_ATLANTIC_B0->itr_rx = 2U;
+
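+		/* bit 1 of the moderation control word enables the timers;
+		 * the min timer appears to occupy bits [15:8] and the max
+		 * timer bits [24:16]
+		 */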
+ if (self->aq_nic_cfg->itr != 0xFFFFU) {
+ unsigned int max_timer = self->aq_nic_cfg->itr / 2U;
+ unsigned int min_timer = self->aq_nic_cfg->itr / 32U;
+
+ max_timer = min(0x1FFU, max_timer);
+ min_timer = min(0xFFU, min_timer);
+
+ PHAL_ATLANTIC_B0->itr_tx |= min_timer << 0x8U;
+ PHAL_ATLANTIC_B0->itr_tx |= max_timer << 0x10U;
+ PHAL_ATLANTIC_B0->itr_rx |= min_timer << 0x8U;
+ PHAL_ATLANTIC_B0->itr_rx |= max_timer << 0x10U;
+ } else {
+ static unsigned int hw_atl_b0_timers_table_tx_[][2] = {
+ {0xffU, 0xffU}, /* 10Gbit */
+ {0xffU, 0x1ffU}, /* 5Gbit */
+ {0xffU, 0x1ffU}, /* 5Gbit 5GS */
+ {0xffU, 0x1ffU}, /* 2.5Gbit */
+ {0xffU, 0x1ffU}, /* 1Gbit */
+ {0xffU, 0x1ffU}, /* 100Mbit */
+ };
+
+ static unsigned int hw_atl_b0_timers_table_rx_[][2] = {
+ {0x6U, 0x38U},/* 10Gbit */
+ {0xCU, 0x70U},/* 5Gbit */
+ {0xCU, 0x70U},/* 5Gbit 5GS */
+ {0x18U, 0xE0U},/* 2.5Gbit */
+ {0x30U, 0x80U},/* 1Gbit */
+ {0x4U, 0x50U},/* 100Mbit */
+ };
+
+ unsigned int speed_index =
+ hw_atl_utils_mbps_2_speed_index(
+ self->aq_link_status.mbps);
+
+ PHAL_ATLANTIC_B0->itr_tx |=
+ hw_atl_b0_timers_table_tx_[speed_index]
+ [0] << 0x8U; /* set min timer value */
+ PHAL_ATLANTIC_B0->itr_tx |=
+ hw_atl_b0_timers_table_tx_[speed_index]
+ [1] << 0x10U; /* set max timer value */
+
+ PHAL_ATLANTIC_B0->itr_rx |=
+ hw_atl_b0_timers_table_rx_[speed_index]
+ [0] << 0x8U; /* set min timer value */
+ PHAL_ATLANTIC_B0->itr_rx |=
+ hw_atl_b0_timers_table_rx_[speed_index]
+ [1] << 0x10U; /* set max timer value */
+ }
+ } else {
+ tdm_tx_desc_wr_wb_irq_en_set(self, 1U);
+ tdm_tdm_intr_moder_en_set(self, 0U);
+ rdm_rx_desc_wr_wb_irq_en_set(self, 1U);
+ rdm_rdm_intr_moder_en_set(self, 0U);
+ PHAL_ATLANTIC_B0->itr_tx = 0U;
+ PHAL_ATLANTIC_B0->itr_rx = 0U;
+ }
+
+ for (i = HW_ATL_B0_RINGS_MAX; i--;) {
+ reg_tx_intr_moder_ctrl_set(self,
+ PHAL_ATLANTIC_B0->itr_tx, i);
+ reg_rx_intr_moder_ctrl_set(self,
+ PHAL_ATLANTIC_B0->itr_rx, i);
+ }
+
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_stop(struct aq_hw_s *self)
+{
+ hw_atl_b0_hw_irq_disable(self, HW_ATL_B0_INT_MASK);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_tx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ tdm_tx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_ring_rx_stop(struct aq_hw_s *self,
+ struct aq_ring_s *ring)
+{
+ rdm_rx_desc_en_set(self, 0U, ring->idx);
+ return aq_hw_err_from_flags(self);
+}
+
+static int hw_atl_b0_hw_set_speed(struct aq_hw_s *self, u32 speed)
+{
+	return hw_atl_utils_mpi_set_speed(self, speed, MPI_INIT);
+}
+
+static struct aq_hw_ops hw_atl_ops_ = {
+ .create = hw_atl_b0_create,
+ .destroy = hw_atl_b0_destroy,
+ .get_hw_caps = hw_atl_b0_get_hw_caps,
+
+ .hw_get_mac_permanent = hw_atl_utils_get_mac_permanent,
+ .hw_set_mac_address = hw_atl_b0_hw_mac_addr_set,
+ .hw_get_link_status = hw_atl_utils_mpi_get_link_status,
+ .hw_set_link_speed = hw_atl_b0_hw_set_speed,
+ .hw_init = hw_atl_b0_hw_init,
+ .hw_deinit = hw_atl_utils_hw_deinit,
+ .hw_set_power = hw_atl_utils_hw_set_power,
+ .hw_reset = hw_atl_b0_hw_reset,
+ .hw_start = hw_atl_b0_hw_start,
+ .hw_ring_tx_start = hw_atl_b0_hw_ring_tx_start,
+ .hw_ring_tx_stop = hw_atl_b0_hw_ring_tx_stop,
+ .hw_ring_rx_start = hw_atl_b0_hw_ring_rx_start,
+ .hw_ring_rx_stop = hw_atl_b0_hw_ring_rx_stop,
+ .hw_stop = hw_atl_b0_hw_stop,
+
+ .hw_ring_tx_xmit = hw_atl_b0_hw_ring_tx_xmit,
+ .hw_ring_tx_head_update = hw_atl_b0_hw_ring_tx_head_update,
+
+ .hw_ring_rx_receive = hw_atl_b0_hw_ring_rx_receive,
+ .hw_ring_rx_fill = hw_atl_b0_hw_ring_rx_fill,
+
+ .hw_irq_enable = hw_atl_b0_hw_irq_enable,
+ .hw_irq_disable = hw_atl_b0_hw_irq_disable,
+ .hw_irq_read = hw_atl_b0_hw_irq_read,
+
+ .hw_ring_rx_init = hw_atl_b0_hw_ring_rx_init,
+ .hw_ring_tx_init = hw_atl_b0_hw_ring_tx_init,
+ .hw_packet_filter_set = hw_atl_b0_hw_packet_filter_set,
+ .hw_multicast_list_set = hw_atl_b0_hw_multicast_list_set,
+ .hw_interrupt_moderation_set = hw_atl_b0_hw_interrupt_moderation_set,
+ .hw_rss_set = hw_atl_b0_hw_rss_set,
+ .hw_rss_hash_set = hw_atl_b0_hw_rss_hash_set,
+ .hw_get_regs = hw_atl_utils_hw_get_regs,
+ .hw_get_hw_stats = hw_atl_utils_get_hw_stats,
+ .hw_get_fw_version = hw_atl_utils_get_fw_version,
+};
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev)
+{
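+	/* these ops are keyed to B0 silicon, which apparently reports PCI
+	 * revision 2
+	 */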
+ bool is_vid_ok = (pdev->vendor == PCI_VENDOR_ID_AQUANTIA);
+ bool is_did_ok = ((pdev->device == HW_ATL_DEVICE_ID_0001) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D100) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D107) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D108) ||
+ (pdev->device == HW_ATL_DEVICE_ID_D109));
+
+ bool is_rev_ok = (pdev->revision == 2U);
+
+ return (is_vid_ok && is_did_ok && is_rev_ok) ? &hw_atl_ops_ : NULL;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
new file mode 100644
index 0000000..a1e1bce6
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.h
@@ -0,0 +1,34 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0.h: Declaration of abstract interface for Atlantic hardware
+ * specific functions.
+ */
+
+#ifndef HW_ATL_B0_H
+#define HW_ATL_B0_H
+
+#include "../aq_common.h"
+
+#ifndef PCI_VENDOR_ID_AQUANTIA
+
+#define PCI_VENDOR_ID_AQUANTIA 0x1D6A
+#define HW_ATL_DEVICE_ID_0001 0x0001
+#define HW_ATL_DEVICE_ID_D100 0xD100
+#define HW_ATL_DEVICE_ID_D107 0xD107
+#define HW_ATL_DEVICE_ID_D108 0xD108
+#define HW_ATL_DEVICE_ID_D109 0xD109
+
+#define HW_ATL_NIC_NAME "aQuantia AQtion 5Gbit Network Adapter"
+
+#endif
+
+struct aq_hw_ops *hw_atl_b0_get_ops_by_id(struct pci_dev *pdev);
+
+#endif /* HW_ATL_B0_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
new file mode 100644
index 0000000..8bdee3d
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0_internal.h
@@ -0,0 +1,207 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_b0_internal.h: Definition of Atlantic B0 chip specific
+ * constants.
+ */
+
+#ifndef HW_ATL_B0_INTERNAL_H
+#define HW_ATL_B0_INTERNAL_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_B0_MTU_JUMBO (16000U)
+#define HW_ATL_B0_MTU 1514U
+
+#define HW_ATL_B0_TX_RINGS 4U
+#define HW_ATL_B0_RX_RINGS 4U
+
+#define HW_ATL_B0_RINGS_MAX 32U
+#define HW_ATL_B0_TXD_SIZE (16U)
+#define HW_ATL_B0_RXD_SIZE (16U)
+
+#define HW_ATL_B0_MAC 0U
+#define HW_ATL_B0_MAC_MIN 1U
+#define HW_ATL_B0_MAC_MAX 33U
+
+/* UCAST/MCAST filters */
+#define HW_ATL_B0_UCAST_FILTERS_MAX 38
+#define HW_ATL_B0_MCAST_FILTERS_MAX 8
+
+/* interrupts */
+#define HW_ATL_B0_ERR_INT 8U
+#define HW_ATL_B0_INT_MASK (0xFFFFFFFFU)
+
+#define HW_ATL_B0_TXD_CTL2_LEN (0xFFFFC000)
+#define HW_ATL_B0_TXD_CTL2_CTX_EN (0x00002000)
+#define HW_ATL_B0_TXD_CTL2_CTX_IDX (0x00001000)
+
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXD (0x00000001)
+#define HW_ATL_B0_TXD_CTL_DESC_TYPE_TXC (0x00000002)
+#define HW_ATL_B0_TXD_CTL_BLEN (0x000FFFF0)
+#define HW_ATL_B0_TXD_CTL_DD (0x00100000)
+#define HW_ATL_B0_TXD_CTL_EOP (0x00200000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_X (0x3FC00000)
+
+#define HW_ATL_B0_TXD_CTL_CMD_VLAN BIT(22)
+#define HW_ATL_B0_TXD_CTL_CMD_FCS BIT(23)
+#define HW_ATL_B0_TXD_CTL_CMD_IPCSO BIT(24)
+#define HW_ATL_B0_TXD_CTL_CMD_TUCSO BIT(25)
+#define HW_ATL_B0_TXD_CTL_CMD_LSO BIT(26)
+#define HW_ATL_B0_TXD_CTL_CMD_WB BIT(27)
+#define HW_ATL_B0_TXD_CTL_CMD_VXLAN BIT(28)
+
+#define HW_ATL_B0_TXD_CTL_CMD_IPV6 BIT(21)
+#define HW_ATL_B0_TXD_CTL_CMD_TCP BIT(22)
+
+#define HW_ATL_B0_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_B0_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_B0_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_B0_MPI_SPEED_SHIFT 16U
+
+#define HW_ATL_B0_RATE_10G BIT(0)
+#define HW_ATL_B0_RATE_5G BIT(1)
+#define HW_ATL_B0_RATE_2G5 BIT(3)
+#define HW_ATL_B0_RATE_1G BIT(4)
+#define HW_ATL_B0_RATE_100M BIT(5)
+
+#define HW_ATL_B0_TXBUF_MAX 160U
+#define HW_ATL_B0_RXBUF_MAX 320U
+
+#define HW_ATL_B0_RSS_REDIRECTION_MAX 64U
+#define HW_ATL_B0_RSS_REDIRECTION_BITS 3U
+#define HW_ATL_B0_RSS_HASHKEY_BITS 320U
+
+#define HW_ATL_B0_TCRSS_4_8 1
+#define HW_ATL_B0_TC_MAX 1U
+#define HW_ATL_B0_RSS_MAX 8U
+
+#define HW_ATL_B0_LRO_RXD_MAX 2U
+#define HW_ATL_B0_RS_SLIP_ENABLED 0U
+
+/* (256k - 1 (max pay_len) - 54 (header)) */
+#define HAL_ATL_B0_LSO_MAX_SEGMENT_SIZE 262089U
+
+/* (256k - 1 (max pay_len) - 74 (header)) */
+#define HAL_ATL_B0_LSO_IPV6_MAX_SEGMENT_SIZE 262069U
+
+#define HW_ATL_B0_CHIP_REVISION_B0 0xA0U
+#define HW_ATL_B0_CHIP_REVISION_UNKNOWN 0xFFU
+
+#define HW_ATL_B0_FW_SEMA_RAM 0x2U
+
+#define HW_ATL_B0_TXC_LEN_TUNLEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN_OUTLEN (0xFFFF0000)
+
+#define HW_ATL_B0_TXC_CTL_DESC_TYPE (0x00000007)
+#define HW_ATL_B0_TXC_CTL_CTX_ID (0x00000008)
+#define HW_ATL_B0_TXC_CTL_VLAN (0x000FFFF0)
+#define HW_ATL_B0_TXC_CTL_CMD (0x00F00000)
+#define HW_ATL_B0_TXC_CTL_L2LEN (0x7F000000)
+
+#define HW_ATL_B0_TXC_CTL_L3LEN (0x80000000) /* L3LEN lsb */
+#define HW_ATL_B0_TXC_LEN2_L3LEN (0x000000FF) /* L3LEN upper bits */
+#define HW_ATL_B0_TXC_LEN2_L4LEN (0x0000FF00)
+#define HW_ATL_B0_TXC_LEN2_MSSLEN (0xFFFF0000)
+
+#define HW_ATL_B0_RXD_DD (0x1)
+#define HW_ATL_B0_RXD_NCEA0 (0x1)
+
+#define HW_ATL_B0_RXD_WB_STAT_RSSTYPE (0x0000000F)
+#define HW_ATL_B0_RXD_WB_STAT_PKTTYPE (0x00000FF0)
+#define HW_ATL_B0_RXD_WB_STAT_RXCTRL (0x00180000)
+#define HW_ATL_B0_RXD_WB_STAT_SPLHDR (0x00200000)
+#define HW_ATL_B0_RXD_WB_STAT_HDRLEN (0xFFC00000)
+
+#define HW_ATL_B0_RXD_WB_STAT2_DD (0x0001)
+#define HW_ATL_B0_RXD_WB_STAT2_EOP (0x0002)
+#define HW_ATL_B0_RXD_WB_STAT2_RXSTAT (0x003C)
+#define HW_ATL_B0_RXD_WB_STAT2_MACERR (0x0004)
+#define HW_ATL_B0_RXD_WB_STAT2_IP4ERR (0x0008)
+#define HW_ATL_B0_RXD_WB_STAT2_TCPUPDERR (0x0010)
+#define HW_ATL_B0_RXD_WB_STAT2_RXESTAT (0x0FC0)
+#define HW_ATL_B0_RXD_WB_STAT2_RSCCNT (0xF000)
+
+#define L2_FILTER_ACTION_DISCARD (0x0)
+#define L2_FILTER_ACTION_HOST (0x1)
+
+#define HW_ATL_B0_UCP_0X370_REG (0x370)
+
+#define HW_ATL_B0_FLUSH() AQ_HW_READ_REG(self, 0x10)
+
+#define HW_ATL_B0_FW_VER_EXPECTED 0x01050006U
+
+/* Hardware tx descriptor */
+struct __packed hw_atl_txd_s {
+ u64 buf_addr;
+ u32 ctl;
+ u32 ctl2; /* 63..46 - payload length, 45 - ctx enable, 44 - ctx index */
+};
+
+/* Hardware tx context descriptor */
+struct __packed hw_atl_txc_s {
+ u32 rsvd;
+ u32 len;
+ u32 ctl;
+ u32 len2;
+};
+
+/* Hardware rx descriptor */
+struct __packed hw_atl_rxd_s {
+ u64 buf_addr;
+ u64 hdr_addr;
+};
+
+/* Hardware rx descriptor writeback */
+struct __packed hw_atl_rxd_wb_s {
+ u32 type;
+ u32 rss_hash;
+ u16 status;
+ u16 pkt_len;
+ u16 next_desc_ptr;
+ u16 vlan;
+};
+
+/* HW layer capabilities */
+static struct aq_hw_caps_s hw_atl_b0_hw_caps_ = {
+ .ports = 1U,
+ .is_64_dma = true,
+ .msix_irqs = 4U,
+ .irq_mask = ~0U,
+ .vecs = HW_ATL_B0_RSS_MAX,
+ .tcs = HW_ATL_B0_TC_MAX,
+ .rxd_alignment = 1U,
+ .rxd_size = HW_ATL_B0_RXD_SIZE,
+ .rxds = 8U * 1024U,
+ .txd_alignment = 1U,
+ .txd_size = HW_ATL_B0_TXD_SIZE,
+ .txds = 8U * 1024U,
+ .txhwb_alignment = 4096U,
+ .tx_rings = HW_ATL_B0_TX_RINGS,
+ .rx_rings = HW_ATL_B0_RX_RINGS,
+ .hw_features = NETIF_F_HW_CSUM |
+ NETIF_F_RXHASH |
+ NETIF_F_SG |
+ NETIF_F_TSO |
+ NETIF_F_LRO,
+ .hw_priv_flags = IFF_UNICAST_FLT,
+ .link_speed_msk = (HW_ATL_B0_RATE_10G |
+ HW_ATL_B0_RATE_5G |
+ HW_ATL_B0_RATE_2G5 |
+ HW_ATL_B0_RATE_1G |
+ HW_ATL_B0_RATE_100M),
+ .flow_control = true,
+ .mtu = HW_ATL_B0_MTU_JUMBO,
+ .mac_regs_count = 88,
+ .fw_ver_expected = HW_ATL_B0_FW_VER_EXPECTED,
+};
+
+#endif /* HW_ATL_B0_INTERNAL_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
new file mode 100644
index 0000000..3de651a
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.c
@@ -0,0 +1,1394 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.c: Definitions of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#include "hw_atl_llh.h"
+#include "hw_atl_llh_internal.h"
+#include "../aq_hw_utils.h"
+
+/* global */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem, u32 semaphore)
+{
+ aq_hw_write_reg(aq_hw, glb_cpu_sem_adr(semaphore), glb_cpu_sem);
+}
+
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore)
+{
+ return aq_hw_read_reg(aq_hw, glb_cpu_sem_adr(semaphore));
+}
+
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, glb_reg_res_dis_adr,
+ glb_reg_res_dis_msk,
+ glb_reg_res_dis_shift,
+ glb_reg_res_dis);
+}
+
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res)
+{
+ aq_hw_write_reg_bit(aq_hw, glb_soft_res_adr, glb_soft_res_msk,
+ glb_soft_res_shift, soft_res);
+}
+
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, glb_soft_res_adr,
+ glb_soft_res_msk,
+ glb_soft_res_shift);
+}
+
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, rx_dma_stat_counter7_adr);
+}
+
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, glb_mif_id_adr);
+}
+
+/* stats */
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, rpb_rx_dma_drop_pkt_cnt_adr);
+}
+
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_counterlsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_counterlsw__adr);
+}
+
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_rx_dma_good_pkt_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_octet_countermsw__adr);
+}
+
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, stats_tx_dma_good_pkt_countermsw__adr);
+}
+
+/* interrupt */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw)
+{
+ aq_hw_write_reg(aq_hw, itr_iamrlsw_adr, irq_auto_masklsw);
+}
+
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx)
+{
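+/* each 32-bit map register serves two rings: even ring indices use the
+ * upper bitfield of the pair, odd indices the lower one
+ */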
+/* register address for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_msk[32] = {
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U,
+ 0x00008000U, 0x00000080U, 0x00008000U, 0x00000080U
+ };
+
+/* lower bit position of bitfield imr_rx{r}_en */
+ static u32 itr_imr_rxren_shift[32] = {
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U,
+ 15U, 7U, 15U, 7U, 15U, 7U, 15U, 7U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxren_adr[rx],
+ itr_imr_rxren_msk[rx],
+ itr_imr_rxren_shift[rx],
+ irq_map_en_rx);
+}
+
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_msk[32] = {
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U,
+ 0x80000000U, 0x00800000U, 0x80000000U, 0x00800000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}_en */
+ static u32 itr_imr_txten_shift[32] = {
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U,
+ 31U, 23U, 31U, 23U, 31U, 23U, 31U, 23U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txten_adr[tx],
+ itr_imr_txten_msk[tx],
+ itr_imr_txten_shift[tx],
+ irq_map_en_tx);
+}
+
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx)
+{
+/* register address for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_msk[32] = {
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU,
+ 0x00001f00U, 0x0000001fU, 0x00001f00U, 0x0000001fU
+ };
+
+/* lower bit position of bitfield imr_rx{r}[4:0] */
+ static u32 itr_imr_rxr_shift[32] = {
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U,
+ 8U, 0U, 8U, 0U, 8U, 0U, 8U, 0U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_rxr_adr[rx],
+ itr_imr_rxr_msk[rx],
+ itr_imr_rxr_shift[rx],
+ irq_map_rx);
+}
+
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx)
+{
+/* register address for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_adr[32] = {
+ 0x00002100U, 0x00002100U, 0x00002104U, 0x00002104U,
+ 0x00002108U, 0x00002108U, 0x0000210cU, 0x0000210cU,
+ 0x00002110U, 0x00002110U, 0x00002114U, 0x00002114U,
+ 0x00002118U, 0x00002118U, 0x0000211cU, 0x0000211cU,
+ 0x00002120U, 0x00002120U, 0x00002124U, 0x00002124U,
+ 0x00002128U, 0x00002128U, 0x0000212cU, 0x0000212cU,
+ 0x00002130U, 0x00002130U, 0x00002134U, 0x00002134U,
+ 0x00002138U, 0x00002138U, 0x0000213cU, 0x0000213cU
+ };
+
+/* bitmask for bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_msk[32] = {
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U,
+ 0x1f000000U, 0x001f0000U, 0x1f000000U, 0x001f0000U
+ };
+
+/* lower bit position of bitfield imr_tx{t}[4:0] */
+ static u32 itr_imr_txt_shift[32] = {
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U,
+ 24U, 16U, 24U, 16U, 24U, 16U, 24U, 16U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, itr_imr_txt_adr[tx],
+ itr_imr_txt_msk[tx],
+ itr_imr_txt_shift[tx],
+ irq_map_tx);
+}
+
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_imcrlsw_adr, irq_msk_clearlsw);
+}
+
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_imsrlsw_adr, irq_msk_setlsw);
+}
+
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, itr_reg_res_dsbl_adr,
+ itr_reg_res_dsbl_msk,
+ itr_reg_res_dsbl_shift, irq_reg_res_dis);
+}
+
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw)
+{
+ aq_hw_write_reg(aq_hw, itr_iscrlsw_adr, irq_status_clearlsw);
+}
+
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, itr_isrlsw_adr);
+}
+
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+ itr_res_shift);
+}
+
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq)
+{
+ aq_hw_write_reg_bit(aq_hw, itr_res_adr, itr_res_msk,
+ itr_res_shift, res_irq);
+}
+
+/* rdm */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadcpuid_adr(dca),
+ rdm_dcadcpuid_msk,
+ rdm_dcadcpuid_shift, cpuid);
+}
+
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dca_en_adr, rdm_dca_en_msk,
+ rdm_dca_en_shift, rx_dca_en);
+}
+
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dca_mode_adr, rdm_dca_mode_msk,
+ rdm_dca_mode_shift, rx_dca_mode);
+}
+
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descddata_size_adr(descriptor),
+ rdm_descddata_size_msk,
+ rdm_descddata_size_shift,
+ rx_desc_data_buff_size);
+}
+
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcaddesc_en_adr(dca),
+ rdm_dcaddesc_en_msk,
+ rdm_dcaddesc_en_shift,
+ rx_desc_dca_en);
+}
+
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descden_adr(descriptor),
+ rdm_descden_msk,
+ rdm_descden_shift,
+ rx_desc_en);
+}
+
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_size_adr(descriptor),
+ rdm_descdhdr_size_msk,
+ rdm_descdhdr_size_shift,
+ rx_desc_head_buff_size);
+}
+
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdhdr_split_adr(descriptor),
+ rdm_descdhdr_split_msk,
+ rdm_descdhdr_split_shift,
+ rx_desc_head_splitting);
+}
+
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, rdm_descdhd_adr(descriptor),
+ rdm_descdhd_msk, rdm_descdhd_shift);
+}
+
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdlen_adr(descriptor),
+ rdm_descdlen_msk, rdm_descdlen_shift,
+ rx_desc_len);
+}
+
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_descdreset_adr(descriptor),
+ rdm_descdreset_msk, rdm_descdreset_shift,
+ rx_desc_res);
+}
+
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_int_desc_wrb_en_adr,
+ rdm_int_desc_wrb_en_msk,
+ rdm_int_desc_wrb_en_shift,
+ rx_desc_wr_wb_irq_en);
+}
+
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadhdr_en_adr(dca),
+ rdm_dcadhdr_en_msk,
+ rdm_dcadhdr_en_shift,
+ rx_head_dca_en);
+}
+
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_dcadpay_en_adr(dca),
+ rdm_dcadpay_en_msk, rdm_dcadpay_en_shift,
+ rx_pld_dca_en);
+}
+
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rdm_int_rim_en_adr,
+ rdm_int_rim_en_msk,
+ rdm_int_rim_en_shift,
+ rdm_intr_moder_en);
+}
+
+/* reg */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx)
+{
+ aq_hw_write_reg(aq_hw, gen_intr_map_adr(regidx), gen_intr_map);
+}
+
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, gen_intr_stat_adr);
+}
+
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl)
+{
+ aq_hw_write_reg(aq_hw, intr_glb_ctl_adr, intr_glb_ctl);
+}
+
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle)
+{
+ aq_hw_write_reg(aq_hw, intr_thr_adr(throttle), intr_thr);
+}
+
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrlsw_adr(descriptor),
+ rx_dma_desc_base_addrlsw);
+}
+
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_base_addrmsw_adr(descriptor),
+ rx_dma_desc_base_addrmsw);
+}
+
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg(aq_hw, rx_dma_desc_stat_adr(descriptor));
+}
+
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr, u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, rx_dma_desc_tail_ptr_adr(descriptor),
+ rx_dma_desc_tail_ptr);
+}
+
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr_msk)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_msk_adr, rx_flr_mcst_flr_msk);
+}
+
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_mcst_flr_adr(filter), rx_flr_mcst_flr);
+}
+
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw, u32 rx_flr_rss_control1)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_rss_control1_adr, rx_flr_rss_control1);
+}
+
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_filter_control2)
+{
+ aq_hw_write_reg(aq_hw, rx_flr_control2_adr, rx_filter_control2);
+}
+
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, rx_intr_moderation_ctl_adr(queue),
+ rx_intr_moderation_ctl);
+}
+
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_debug_ctl_adr, tx_dma_debug_ctl);
+}
+
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrlsw_adr(descriptor),
+ tx_dma_desc_base_addrlsw);
+}
+
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_base_addrmsw_adr(descriptor),
+ tx_dma_desc_base_addrmsw);
+}
+
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr, u32 descriptor)
+{
+ aq_hw_write_reg(aq_hw, tx_dma_desc_tail_ptr_adr(descriptor),
+ tx_dma_desc_tail_ptr);
+}
+
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue)
+{
+ aq_hw_write_reg(aq_hw, tx_intr_moderation_ctl_adr(queue),
+ tx_intr_moderation_ctl);
+}
+
+/* RPB: rx packet buffer */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_dma_sys_lbk_adr,
+ rpb_dma_sys_lbk_msk,
+ rpb_dma_sys_lbk_shift, dma_sys_lbk);
+}
+
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rpf_rx_tc_mode_adr,
+ rpb_rpf_rx_tc_mode_msk,
+ rpb_rpf_rx_tc_mode_shift,
+ rx_traf_class_mode);
+}
+
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rx_buf_en_adr, rpb_rx_buf_en_msk,
+ rpb_rx_buf_en_shift, rx_buff_en);
+}
+
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbhi_thresh_adr(buffer),
+ rpb_rxbhi_thresh_msk, rpb_rxbhi_thresh_shift,
+ rx_buff_hi_threshold_per_tc);
+}
+
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxblo_thresh_adr(buffer),
+ rpb_rxblo_thresh_msk,
+ rpb_rxblo_thresh_shift,
+ rx_buff_lo_threshold_per_tc);
+}
+
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rx_fc_mode_adr,
+ rpb_rx_fc_mode_msk,
+ rpb_rx_fc_mode_shift, rx_flow_ctl_mode);
+}
+
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbbuf_size_adr(buffer),
+ rpb_rxbbuf_size_msk, rpb_rxbbuf_size_shift,
+ rx_pkt_buff_size_per_tc);
+}
+
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, rpb_rxbxoff_en_adr(buffer),
+ rpb_rxbxoff_en_msk, rpb_rxbxoff_en_shift,
+ rx_xoff_en_per_tc);
+}
+
+/* rpf */
+
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_thresh_adr,
+ rpfl2bc_thresh_msk,
+ rpfl2bc_thresh_shift,
+ l2broadcast_count_threshold);
+}
+
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_en_adr, rpfl2bc_en_msk,
+ rpfl2bc_en_shift, l2broadcast_en);
+}
+
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2broadcast_flr_act)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2bc_act_adr, rpfl2bc_act_msk,
+ rpfl2bc_act_shift, l2broadcast_flr_act);
+}
+
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2mc_enf_adr(filter),
+ rpfl2mc_enf_msk,
+ rpfl2mc_enf_shift, l2multicast_flr_en);
+}
+
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2promis_mode_adr,
+ rpfl2promis_mode_msk,
+ rpfl2promis_mode_shift,
+ l2promiscuous_mode_en);
+}
+
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_actf_adr(filter),
+ rpfl2uc_actf_msk, rpfl2uc_actf_shift,
+ l2unicast_flr_act);
+}
+
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_enf_adr(filter),
+ rpfl2uc_enf_msk,
+ rpfl2uc_enf_shift, l2unicast_flr_en);
+}
+
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter)
+{
+ aq_hw_write_reg(aq_hw, rpfl2uc_daflsw_adr(filter),
+ l2unicast_dest_addresslsw);
+}
+
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2uc_dafmsw_adr(filter),
+ rpfl2uc_dafmsw_msk, rpfl2uc_dafmsw_shift,
+ l2unicast_dest_addressmsw);
+}
+
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, rpfl2mc_accept_all_adr,
+ rpfl2mc_accept_all_msk,
+ rpfl2mc_accept_all_shift,
+ l2_accept_all_mc_packets);
+}
+
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc)
+{
+/* register address for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_adr[8] = {
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U,
+ 0x000054c4U, 0x000054c4U, 0x000054c4U, 0x000054c4U
+ };
+
+/* bitmask for bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_msk[8] = {
+ 0x00000007U, 0x00000070U, 0x00000700U, 0x00007000U,
+ 0x00070000U, 0x00700000U, 0x07000000U, 0x70000000U
+ };
+
+/* lower bit position of bitfield rx_tc_up{t}[2:0] */
+ static u32 rpf_rpb_rx_tc_upt_shft[8] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpf_rpb_rx_tc_upt_adr[tc],
+ rpf_rpb_rx_tc_upt_msk[tc],
+ rpf_rpb_rx_tc_upt_shft[tc],
+ user_priority_tc_map);
+}
+
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_key_addr_adr,
+ rpf_rss_key_addr_msk,
+ rpf_rss_key_addr_shift,
+ rss_key_addr);
+}
+
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data)
+{
+ aq_hw_write_reg(aq_hw, rpf_rss_key_wr_data_adr,
+ rss_key_wr_data);
+}
+
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk,
+ rpf_rss_key_wr_eni_shift);
+}
+
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_key_wr_eni_adr,
+ rpf_rss_key_wr_eni_msk,
+ rpf_rss_key_wr_eni_shift,
+ rss_key_wr_en);
+}
+
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw, u32 rss_redir_tbl_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_addr_adr,
+ rpf_rss_redir_addr_msk,
+ rpf_rss_redir_addr_shift, rss_redir_tbl_addr);
+}
+
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_data_adr,
+ rpf_rss_redir_wr_data_msk,
+ rpf_rss_redir_wr_data_shift,
+ rss_redir_tbl_wr_data);
+}
+
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk,
+ rpf_rss_redir_wr_eni_shift);
+}
+
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_rss_redir_wr_eni_adr,
+ rpf_rss_redir_wr_eni_msk,
+ rpf_rss_redir_wr_eni_shift, rss_redir_wr_en);
+}
+
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw, u32 tpo_to_rpf_sys_lbk)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_tpo_rpf_sys_lbk_adr,
+ rpf_tpo_rpf_sys_lbk_msk,
+ rpf_tpo_rpf_sys_lbk_shift,
+ tpo_to_rpf_sys_lbk);
+}
+
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_inner_tpid_adr,
+ rpf_vl_inner_tpid_msk,
+ rpf_vl_inner_tpid_shift,
+ vlan_inner_etht);
+}
+
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_outer_tpid_adr,
+ rpf_vl_outer_tpid_msk,
+ rpf_vl_outer_tpid_shift,
+ vlan_outer_etht);
+}
+
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_promis_mode_adr,
+ rpf_vl_promis_mode_msk,
+ rpf_vl_promis_mode_shift,
+ vlan_prom_mode_en);
+}
+
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_accept_untagged_packets)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_accept_untagged_mode_adr,
+ rpf_vl_accept_untagged_mode_msk,
+ rpf_vl_accept_untagged_mode_shift,
+ vlan_accept_untagged_packets);
+}
+
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_untagged_act_adr,
+ rpf_vl_untagged_act_msk,
+ rpf_vl_untagged_act_shift,
+ vlan_untagged_act);
+}
+
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_en_f_adr(filter),
+ rpf_vl_en_f_msk,
+ rpf_vl_en_f_shift,
+ vlan_flr_en);
+}
+
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_flr_act, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_act_f_adr(filter),
+ rpf_vl_act_f_msk,
+ rpf_vl_act_f_shift,
+ vlan_flr_act);
+}
+
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_vl_id_f_adr(filter),
+ rpf_vl_id_f_msk,
+ rpf_vl_id_f_shift,
+ vlan_id_flr);
+}
+
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_enf_adr(filter),
+ rpf_et_enf_msk,
+ rpf_et_enf_shift, etht_flr_en);
+}
+
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_upfen_adr(filter),
+ rpf_et_upfen_msk, rpf_et_upfen_shift,
+ etht_user_priority_en);
+}
+
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_rxqfen_adr(filter),
+ rpf_et_rxqfen_msk, rpf_et_rxqfen_shift,
+ etht_rx_queue_en);
+}
+
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_upf_adr(filter),
+ rpf_et_upf_msk,
+ rpf_et_upf_shift, etht_user_priority);
+}
+
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_rxqf_adr(filter),
+ rpf_et_rxqf_msk,
+ rpf_et_rxqf_shift, etht_rx_queue);
+}
+
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_mng_rxqf_adr(filter),
+ rpf_et_mng_rxqf_msk, rpf_et_mng_rxqf_shift,
+ etht_mgt_queue);
+}
+
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_actf_adr(filter),
+ rpf_et_actf_msk,
+ rpf_et_actf_shift, etht_flr_act);
+}
+
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter)
+{
+ aq_hw_write_reg_bit(aq_hw, rpf_et_valf_adr(filter),
+ rpf_et_valf_msk,
+ rpf_et_valf_shift, etht_flr);
+}
+
+/* RPO: rx packet offload */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_ipv4chk_en_adr,
+ rpo_ipv4chk_en_msk,
+ rpo_ipv4chk_en_shift,
+ ipv4header_crc_offload_en);
+}
+
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_descdvl_strip_adr(descriptor),
+ rpo_descdvl_strip_msk,
+ rpo_descdvl_strip_shift,
+ rx_desc_vlan_stripping);
+}
+
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpol4chk_en_adr, rpol4chk_en_msk,
+ rpol4chk_en_shift, tcp_udp_crc_offload_en);
+}
+
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en)
+{
+ aq_hw_write_reg(aq_hw, rpo_lro_en_adr, lro_en);
+}
+
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ptopt_en_adr,
+ rpo_lro_ptopt_en_msk,
+ rpo_lro_ptopt_en_shift,
+ lro_patch_optimization_en);
+}
+
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw,
+ u32 lro_qsessions_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_qses_lmt_adr,
+ rpo_lro_qses_lmt_msk,
+ rpo_lro_qses_lmt_shift,
+ lro_qsessions_lim);
+}
+
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_tot_dsc_lmt_adr,
+ rpo_lro_tot_dsc_lmt_msk,
+ rpo_lro_tot_dsc_lmt_shift,
+ lro_total_desc_lim);
+}
+
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_pkt_min_adr,
+ rpo_lro_pkt_min_msk,
+ rpo_lro_pkt_min_shift,
+ lro_min_pld_of_first_pkt);
+}
+
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim)
+{
+ aq_hw_write_reg(aq_hw, rpo_lro_rsc_max_adr, lro_pkt_lim);
+}
+
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors,
+ u32 lro)
+{
+/* Register address for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_adr[32] = {
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A0U, 0x000055A0U, 0x000055A0U, 0x000055A0U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A4U, 0x000055A4U, 0x000055A4U, 0x000055A4U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055A8U, 0x000055A8U, 0x000055A8U, 0x000055A8U,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU,
+ 0x000055ACU, 0x000055ACU, 0x000055ACU, 0x000055ACU
+ };
+
+/* Bitmask for bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_msk[32] = {
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U,
+ 0x00000003U, 0x00000030U, 0x00000300U, 0x00003000U,
+ 0x00030000U, 0x00300000U, 0x03000000U, 0x30000000U
+ };
+
+/* Lower bit position of bitfield lro{L}_des_max[1:0] */
+ static u32 rpo_lro_ldes_max_shift[32] = {
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U,
+ 0U, 4U, 8U, 12U, 16U, 20U, 24U, 28U
+ };
+
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ldes_max_adr[lro],
+ rpo_lro_ldes_max_msk[lro],
+ rpo_lro_ldes_max_shift[lro],
+ lro_max_number_of_descriptors);
+}
+
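The three lookup tables above encode a regular layout: eight 2-bit lro{L}_des_max fields per 32-bit register, one nibble apart, across four consecutive registers. A sketch of the equivalent arithmetic (illustrative only; the tables are what the patch actually uses):

        static inline u32 lro_ldes_max_adr(u32 lro)
        {
                return 0x000055A0U + (lro / 8U) * 4U; /* 8 fields/register */
        }

        static inline u32 lro_ldes_max_shift(u32 lro)
        {
                return (lro % 8U) * 4U;        /* fields on nibble bounds */
        }

        static inline u32 lro_ldes_max_msk(u32 lro)
        {
                return 0x3U << lro_ldes_max_shift(lro); /* 2-bit field */
        }
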
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_tb_div_adr,
+ rpo_lro_tb_div_msk,
+ rpo_lro_tb_div_shift,
+ lro_time_base_divider);
+}
+
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_ina_ival_adr,
+ rpo_lro_ina_ival_msk,
+ rpo_lro_ina_ival_shift,
+ lro_inactive_interval);
+}
+
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coalescing_interval)
+{
+ aq_hw_write_reg_bit(aq_hw, rpo_lro_max_ival_adr,
+ rpo_lro_max_ival_msk,
+ rpo_lro_max_ival_shift,
+ lro_max_coalescing_interval);
+}
+
+/* rx */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, rx_reg_res_dsbl_adr,
+ rx_reg_res_dsbl_msk,
+ rx_reg_res_dsbl_shift,
+ rx_reg_res_dis);
+}
+
+/* tdm */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dcadcpuid_adr(dca),
+ tdm_dcadcpuid_msk,
+ tdm_dcadcpuid_shift, cpuid);
+}
+
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en)
+{
+ aq_hw_write_reg(aq_hw, tdm_lso_en_adr, large_send_offload_en);
+}
+
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dca_en_adr, tdm_dca_en_msk,
+ tdm_dca_en_shift, tx_dca_en);
+}
+
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dca_mode_adr, tdm_dca_mode_msk,
+ tdm_dca_mode_shift, tx_dca_mode);
+}
+
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_dcaddesc_en_adr(dca),
+ tdm_dcaddesc_en_msk, tdm_dcaddesc_en_shift,
+ tx_desc_dca_en);
+}
+
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descden_adr(descriptor),
+ tdm_descden_msk,
+ tdm_descden_shift,
+ tx_desc_en);
+}
+
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor)
+{
+ return aq_hw_read_reg_bit(aq_hw, tdm_descdhd_adr(descriptor),
+ tdm_descdhd_msk, tdm_descdhd_shift);
+}
+
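A hypothetical use of the head-pointer read above: combined with the software tail pointer it yields the ring occupancy. tx_ring_busy(), sw_tail and ring_size are illustrative names, not part of this patch:

        static u32 tx_ring_busy(struct aq_hw_s *aq_hw, u32 ring, u32 sw_tail,
                                u32 ring_size)
        {
                u32 hw_head = tdm_tx_desc_head_ptr_get(aq_hw, ring);

                /* descriptors submitted but not yet processed by hardware */
                return (sw_tail >= hw_head) ? sw_tail - hw_head
                                            : ring_size - hw_head + sw_tail;
        }
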
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descdlen_adr(descriptor),
+ tdm_descdlen_msk,
+ tdm_descdlen_shift,
+ tx_desc_len);
+}
+
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_int_desc_wrb_en_adr,
+ tdm_int_desc_wrb_en_msk,
+ tdm_int_desc_wrb_en_shift,
+ tx_desc_wr_wb_irq_en);
+}
+
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_descdwrb_thresh_adr(descriptor),
+ tdm_descdwrb_thresh_msk,
+ tdm_descdwrb_thresh_shift,
+ tx_desc_wr_wb_threshold);
+}
+
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tdm_int_mod_en_adr,
+ tdm_int_mod_en_msk,
+ tdm_int_mod_en_shift,
+ tdm_irq_moderation_en);
+}
+
+/* thm */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_first_adr,
+ thm_lso_tcp_flag_first_msk,
+ thm_lso_tcp_flag_first_shift,
+ lso_tcp_flag_of_first_pkt);
+}
+
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_last_adr,
+ thm_lso_tcp_flag_last_msk,
+ thm_lso_tcp_flag_last_shift,
+ lso_tcp_flag_of_last_pkt);
+}
+
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt)
+{
+ aq_hw_write_reg_bit(aq_hw, thm_lso_tcp_flag_mid_adr,
+ thm_lso_tcp_flag_mid_msk,
+ thm_lso_tcp_flag_mid_shift,
+ lso_tcp_flag_of_middle_pkt);
+}
+
+/* TPB: tx packet buffer */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_tx_buf_en_adr, tpb_tx_buf_en_msk,
+ tpb_tx_buf_en_shift, tx_buff_en);
+}
+
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txbhi_thresh_adr(buffer),
+ tpb_txbhi_thresh_msk, tpb_txbhi_thresh_shift,
+ tx_buff_hi_threshold_per_tc);
+}
+
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txblo_thresh_adr(buffer),
+ tpb_txblo_thresh_msk, tpb_txblo_thresh_shift,
+ tx_buff_lo_threshold_per_tc);
+}
+
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_dma_sys_lbk_adr,
+ tpb_dma_sys_lbk_msk,
+ tpb_dma_sys_lbk_shift,
+ tx_dma_sys_lbk_en);
+}
+
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_txbbuf_size_adr(buffer),
+ tpb_txbbuf_size_msk,
+ tpb_txbbuf_size_shift,
+ tx_pkt_buff_size_per_tc);
+}
+
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpb_tx_scp_ins_en_adr,
+ tpb_tx_scp_ins_en_msk,
+ tpb_tx_scp_ins_en_shift,
+ tx_path_scp_ins_en);
+}
+
+/* TPO: tx packet offload */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpo_ipv4chk_en_adr,
+ tpo_ipv4chk_en_msk,
+ tpo_ipv4chk_en_shift,
+ ipv4header_crc_offload_en);
+}
+
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpol4chk_en_adr,
+ tpol4chk_en_msk,
+ tpol4chk_en_shift,
+ tcp_udp_crc_offload_en);
+}
+
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en)
+{
+ aq_hw_write_reg_bit(aq_hw, tpo_pkt_sys_lbk_adr,
+ tpo_pkt_sys_lbk_msk,
+ tpo_pkt_sys_lbk_shift,
+ tx_pkt_sys_lbk_en);
+}
+
+/* TPS: tx packet scheduler */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tc_arb_mode_adr,
+ tps_data_tc_arb_mode_msk,
+ tps_data_tc_arb_mode_shift,
+ tx_pkt_shed_data_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_rate_ta_rst_adr,
+ tps_desc_rate_ta_rst_msk,
+ tps_desc_rate_ta_rst_shift,
+ curr_time_res);
+}
+
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_rate_lim_adr,
+ tps_desc_rate_lim_msk,
+ tps_desc_rate_lim_shift,
+ tx_pkt_shed_desc_rate_lim);
+}
+
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tc_arb_mode_adr,
+ tps_desc_tc_arb_mode_msk,
+ tps_desc_tc_arb_mode_shift,
+ tx_pkt_shed_desc_tc_arb_mode);
+}
+
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tctcredit_max_adr(tc),
+ tps_desc_tctcredit_max_msk,
+ tps_desc_tctcredit_max_shift,
+ tx_pkt_shed_desc_tc_max_credit);
+}
+
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight, u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_tctweight_adr(tc),
+ tps_desc_tctweight_msk,
+ tps_desc_tctweight_shift,
+ tx_pkt_shed_desc_tc_weight);
+}
+
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_vm_arb_mode)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_desc_vm_arb_mode_adr,
+ tps_desc_vm_arb_mode_msk,
+ tps_desc_vm_arb_mode_shift,
+ tx_pkt_shed_desc_vm_arb_mode);
+}
+
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_max_credit,
+ u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tctcredit_max_adr(tc),
+ tps_data_tctcredit_max_msk,
+ tps_data_tctcredit_max_shift,
+ tx_pkt_shed_tc_data_max_credit);
+}
+
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight, u32 tc)
+{
+ aq_hw_write_reg_bit(aq_hw, tps_data_tctweight_adr(tc),
+ tps_data_tctweight_msk,
+ tps_data_tctweight_shift,
+ tx_pkt_shed_tc_data_weight);
+}
+
+/* tx */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, tx_reg_res_dsbl_adr,
+ tx_reg_res_dsbl_msk,
+ tx_reg_res_dsbl_shift, tx_reg_res_dis);
+}
+
+/* msm */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg_bit(aq_hw, msm_reg_access_busy_adr,
+ msm_reg_access_busy_msk,
+ msm_reg_access_busy_shift);
+}
+
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_addr_adr,
+ msm_reg_addr_msk,
+ msm_reg_addr_shift,
+ reg_addr_for_indirect_addr);
+}
+
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_rd_strobe_adr,
+ msm_reg_rd_strobe_msk,
+ msm_reg_rd_strobe_shift,
+ reg_rd_strobe);
+}
+
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw)
+{
+ return aq_hw_read_reg(aq_hw, msm_reg_rd_data_adr);
+}
+
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data)
+{
+ aq_hw_write_reg(aq_hw, msm_reg_wr_data_adr, reg_wr_data);
+}
+
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe)
+{
+ aq_hw_write_reg_bit(aq_hw, msm_reg_wr_strobe_adr,
+ msm_reg_wr_strobe_msk,
+ msm_reg_wr_strobe_shift,
+ reg_wr_strobe);
+}
+
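Taken together, the MSM accessors above form an indirect register window: latch an address, strobe a read or write, poll the busy flag, then collect the data. One plausible read sequence (a sketch only; the retry budget and delay are placeholders, and the driver's actual polling policy lives elsewhere):

        /* needs <linux/delay.h> for udelay() and <linux/errno.h> for ETIME */
        static int msm_read_example(struct aq_hw_s *aq_hw, u32 addr, u32 *val)
        {
                int budget = 100;

                msm_reg_addr_for_indirect_addr_set(aq_hw, addr); /* latch */
                msm_reg_rd_strobe_set(aq_hw, 1U);        /* start the read */

                while (msm_reg_access_status_get(aq_hw)) { /* wait !busy */
                        if (--budget == 0)
                                return -ETIME;
                        udelay(10);
                }

                *val = msm_reg_rd_data_get(aq_hw);
                return 0;
        }
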
+/* pci */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis)
+{
+ aq_hw_write_reg_bit(aq_hw, pci_reg_res_dsbl_adr,
+ pci_reg_res_dsbl_msk,
+ pci_reg_res_dsbl_shift,
+ pci_reg_res_dis);
+}
+
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw, u32 glb_cpu_scratch_scp,
+ u32 scratch_scp)
+{
+ aq_hw_write_reg(aq_hw, glb_cpu_scratch_scp_adr(scratch_scp),
+ glb_cpu_scratch_scp);
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
new file mode 100644
index 0000000..ed1085b
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh.h
@@ -0,0 +1,677 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh.h: Declarations of bitfield and register access functions for
+ * Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_H
+#define HW_ATL_LLH_H
+
+#include <linux/types.h>
+
+struct aq_hw_s;
+
+/* global */
+
+/* set global microprocessor semaphore */
+void reg_glb_cpu_sem_set(struct aq_hw_s *aq_hw, u32 glb_cpu_sem,
+ u32 semaphore);
+
+/* get global microprocessor semaphore */
+u32 reg_glb_cpu_sem_get(struct aq_hw_s *aq_hw, u32 semaphore);
+
+/* set global register reset disable */
+void glb_glb_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 glb_reg_res_dis);
+
+/* set soft reset */
+void glb_soft_res_set(struct aq_hw_s *aq_hw, u32 soft_res);
+
+/* get soft reset */
+u32 glb_soft_res_get(struct aq_hw_s *aq_hw);
+
+/* stats */
+
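+/* get rx dma drop packet counter */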
+u32 rpb_rx_dma_drop_pkt_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter lsw */
+u32 stats_rx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter lsw */
+u32 stats_rx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter lsw */
+u32 stats_tx_dma_good_octet_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter lsw */
+u32 stats_tx_dma_good_pkt_counterlsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good octet counter msw */
+u32 stats_rx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get rx dma good packet counter msw */
+u32 stats_rx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good octet counter msw */
+u32 stats_tx_dma_good_octet_countermsw_get(struct aq_hw_s *aq_hw);
+
+/* get tx dma good packet counter msw */
+u32 stats_tx_dma_good_pkt_countermsw_get(struct aq_hw_s *aq_hw);
+
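The lsw/msw pairs above are halves of 64-bit hardware counters. A sketch of the usual composition, re-reading the msw to guard against a carry between the two reads (the helper name is illustrative):

        static u64 rx_good_octets_read(struct aq_hw_s *aq_hw)
        {
                u32 msw, lsw;

                do {
                        msw = stats_rx_dma_good_octet_countermsw_get(aq_hw);
                        lsw = stats_rx_dma_good_octet_counterlsw_get(aq_hw);
                } while (msw != stats_rx_dma_good_octet_countermsw_get(aq_hw));

                return ((u64)msw << 32) | lsw;
        }
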
+/* get msm rx errors counter register */
+u32 reg_mac_msm_rx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast frames counter register */
+u32 reg_mac_msm_rx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx multicast frames counter register */
+u32 reg_mac_msm_rx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast frames counter register */
+u32 reg_mac_msm_rx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm rx broadcast octets counter register 1 */
+u32 reg_mac_msm_rx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm rx unicast octets counter register 0 */
+u32 reg_mac_msm_rx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get rx dma statistics counter 7 */
+u32 reg_rx_dma_stat_counter7get(struct aq_hw_s *aq_hw);
+
+/* get msm tx errors counter register */
+u32 reg_mac_msm_tx_errs_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast frames counter register */
+u32 reg_mac_msm_tx_ucst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast frames counter register */
+u32 reg_mac_msm_tx_mcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast frames counter register */
+u32 reg_mac_msm_tx_bcst_frm_cnt_get(struct aq_hw_s *aq_hw);
+
+/* get msm tx multicast octets counter register 1 */
+u32 reg_mac_msm_tx_mcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx broadcast octets counter register 1 */
+u32 reg_mac_msm_tx_bcst_octets_counter1get(struct aq_hw_s *aq_hw);
+
+/* get msm tx unicast octets counter register 0 */
+u32 reg_mac_msm_tx_ucst_octets_counter0get(struct aq_hw_s *aq_hw);
+
+/* get global mif identification */
+u32 reg_glb_mif_id_get(struct aq_hw_s *aq_hw);
+
+/* interrupt */
+
+/* set interrupt auto mask lsw */
+void itr_irq_auto_masklsw_set(struct aq_hw_s *aq_hw, u32 irq_auto_masklsw);
+
+/* set interrupt mapping enable rx */
+void itr_irq_map_en_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_rx, u32 rx);
+
+/* set interrupt mapping enable tx */
+void itr_irq_map_en_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_en_tx, u32 tx);
+
+/* set interrupt mapping rx */
+void itr_irq_map_rx_set(struct aq_hw_s *aq_hw, u32 irq_map_rx, u32 rx);
+
+/* set interrupt mapping tx */
+void itr_irq_map_tx_set(struct aq_hw_s *aq_hw, u32 irq_map_tx, u32 tx);
+
+/* set interrupt mask clear lsw */
+void itr_irq_msk_clearlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_clearlsw);
+
+/* set interrupt mask set lsw */
+void itr_irq_msk_setlsw_set(struct aq_hw_s *aq_hw, u32 irq_msk_setlsw);
+
+/* set interrupt register reset disable */
+void itr_irq_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 irq_reg_res_dis);
+
+/* set interrupt status clear lsw */
+void itr_irq_status_clearlsw_set(struct aq_hw_s *aq_hw,
+ u32 irq_status_clearlsw);
+
+/* get interrupt status lsw */
+u32 itr_irq_statuslsw_get(struct aq_hw_s *aq_hw);
+
+/* get reset interrupt */
+u32 itr_res_irq_get(struct aq_hw_s *aq_hw);
+
+/* set reset interrupt */
+void itr_res_irq_set(struct aq_hw_s *aq_hw, u32 res_irq);
+
+/* rdm */
+
+/* set cpu id */
+void rdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set rx dca enable */
+void rdm_rx_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_dca_en);
+
+/* set rx dca mode */
+void rdm_rx_dca_mode_set(struct aq_hw_s *aq_hw, u32 rx_dca_mode);
+
+/* set rx descriptor data buffer size */
+void rdm_rx_desc_data_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_data_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor dca enable */
+void rdm_rx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_dca_en,
+ u32 dca);
+
+/* set rx descriptor enable */
+void rdm_rx_desc_en_set(struct aq_hw_s *aq_hw, u32 rx_desc_en,
+ u32 descriptor);
+
+/* set rx descriptor header splitting */
+void rdm_rx_desc_head_splitting_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_splitting,
+ u32 descriptor);
+
+/* get rx descriptor head pointer */
+u32 rdm_rx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx descriptor length */
+void rdm_rx_desc_len_set(struct aq_hw_s *aq_hw, u32 rx_desc_len,
+ u32 descriptor);
+
+/* set rx descriptor write-back interrupt enable */
+void rdm_rx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_wr_wb_irq_en);
+
+/* set rx header dca enable */
+void rdm_rx_head_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_head_dca_en,
+ u32 dca);
+
+/* set rx payload dca enable */
+void rdm_rx_pld_dca_en_set(struct aq_hw_s *aq_hw, u32 rx_pld_dca_en, u32 dca);
+
+/* set rx descriptor header buffer size */
+void rdm_rx_desc_head_buff_size_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_head_buff_size,
+ u32 descriptor);
+
+/* set rx descriptor reset */
+void rdm_rx_desc_res_set(struct aq_hw_s *aq_hw, u32 rx_desc_res,
+ u32 descriptor);
+
+/* Set RDM Interrupt Moderation Enable */
+void rdm_rdm_intr_moder_en_set(struct aq_hw_s *aq_hw, u32 rdm_intr_moder_en);
+
+/* reg */
+
+/* set general interrupt mapping register */
+void reg_gen_irq_map_set(struct aq_hw_s *aq_hw, u32 gen_intr_map, u32 regidx);
+
+/* get general interrupt status register */
+u32 reg_gen_irq_status_get(struct aq_hw_s *aq_hw);
+
+/* set interrupt global control register */
+void reg_irq_glb_ctl_set(struct aq_hw_s *aq_hw, u32 intr_glb_ctl);
+
+/* set interrupt throttle register */
+void reg_irq_thr_set(struct aq_hw_s *aq_hw, u32 intr_thr, u32 throttle);
+
+/* set rx dma descriptor base address lsw */
+void reg_rx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set rx dma descriptor base address msw */
+void reg_rx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* get rx dma descriptor status register */
+u32 reg_rx_dma_desc_status_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set rx dma descriptor tail pointer register */
+void reg_rx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 rx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* set rx filter multicast filter mask register */
+void reg_rx_flr_mcst_flr_msk_set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_mcst_flr_msk);
+
+/* set rx filter multicast filter register */
+void reg_rx_flr_mcst_flr_set(struct aq_hw_s *aq_hw, u32 rx_flr_mcst_flr,
+ u32 filter);
+
+/* set rx filter rss control register 1 */
+void reg_rx_flr_rss_control1set(struct aq_hw_s *aq_hw,
+ u32 rx_flr_rss_control1);
+
+/* Set RX Filter Control Register 2 */
+void reg_rx_flr_control2_set(struct aq_hw_s *aq_hw, u32 rx_flr_control2);
+
+/* Set RX Interrupt Moderation Control Register */
+void reg_rx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 rx_intr_moderation_ctl,
+ u32 queue);
+
+/* set tx dma debug control */
+void reg_tx_dma_debug_ctl_set(struct aq_hw_s *aq_hw, u32 tx_dma_debug_ctl);
+
+/* set tx dma descriptor base address lsw */
+void reg_tx_dma_desc_base_addresslswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrlsw,
+ u32 descriptor);
+
+/* set tx dma descriptor base address msw */
+void reg_tx_dma_desc_base_addressmswset(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_base_addrmsw,
+ u32 descriptor);
+
+/* set tx dma descriptor tail pointer register */
+void reg_tx_dma_desc_tail_ptr_set(struct aq_hw_s *aq_hw,
+ u32 tx_dma_desc_tail_ptr,
+ u32 descriptor);
+
+/* Set TX Interrupt Moderation Control Register */
+void reg_tx_intr_moder_ctrl_set(struct aq_hw_s *aq_hw,
+ u32 tx_intr_moderation_ctl,
+ u32 queue);
+
+/* set global microprocessor scratch pad */
+void reg_glb_cpu_scratch_scp_set(struct aq_hw_s *aq_hw,
+ u32 glb_cpu_scratch_scp, u32 scratch_scp);
+
+/* rpb */
+
+/* set dma system loopback */
+void rpb_dma_sys_lbk_set(struct aq_hw_s *aq_hw, u32 dma_sys_lbk);
+
+/* set rx traffic class mode */
+void rpb_rpf_rx_traf_class_mode_set(struct aq_hw_s *aq_hw,
+ u32 rx_traf_class_mode);
+
+/* set rx buffer enable */
+void rpb_rx_buff_en_set(struct aq_hw_s *aq_hw, u32 rx_buff_en);
+
+/* set rx buffer high threshold (per tc) */
+void rpb_rx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set rx buffer low threshold (per tc) */
+void rpb_rx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set rx flow control mode */
+void rpb_rx_flow_ctl_mode_set(struct aq_hw_s *aq_hw, u32 rx_flow_ctl_mode);
+
+/* set rx packet buffer size (per tc) */
+void rpb_rx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 rx_pkt_buff_size_per_tc,
+ u32 buffer);
+
+/* set rx xoff enable (per tc) */
+void rpb_rx_xoff_en_per_tc_set(struct aq_hw_s *aq_hw, u32 rx_xoff_en_per_tc,
+ u32 buffer);
+
+/* rpf */
+
+/* set l2 broadcast count threshold */
+void rpfl2broadcast_count_threshold_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_count_threshold);
+
+/* set l2 broadcast enable */
+void rpfl2broadcast_en_set(struct aq_hw_s *aq_hw, u32 l2broadcast_en);
+
+/* set l2 broadcast filter action */
+void rpfl2broadcast_flr_act_set(struct aq_hw_s *aq_hw,
+ u32 l2broadcast_flr_act);
+
+/* set l2 multicast filter enable */
+void rpfl2multicast_flr_en_set(struct aq_hw_s *aq_hw, u32 l2multicast_flr_en,
+ u32 filter);
+
+/* set l2 promiscuous mode enable */
+void rpfl2promiscuous_mode_en_set(struct aq_hw_s *aq_hw,
+ u32 l2promiscuous_mode_en);
+
+/* set l2 unicast filter action */
+void rpfl2unicast_flr_act_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_act,
+ u32 filter);
+
+/* set l2 unicast filter enable */
+void rpfl2_uc_flr_en_set(struct aq_hw_s *aq_hw, u32 l2unicast_flr_en,
+ u32 filter);
+
+/* set l2 unicast destination address lsw */
+void rpfl2unicast_dest_addresslsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addresslsw,
+ u32 filter);
+
+/* set l2 unicast destination address msw */
+void rpfl2unicast_dest_addressmsw_set(struct aq_hw_s *aq_hw,
+ u32 l2unicast_dest_addressmsw,
+ u32 filter);
+
+/* Set L2 Accept all Multicast packets */
+void rpfl2_accept_all_mc_packets_set(struct aq_hw_s *aq_hw,
+ u32 l2_accept_all_mc_packets);
+
+/* set user-priority tc mapping */
+void rpf_rpb_user_priority_tc_map_set(struct aq_hw_s *aq_hw,
+ u32 user_priority_tc_map, u32 tc);
+
+/* set rss key address */
+void rpf_rss_key_addr_set(struct aq_hw_s *aq_hw, u32 rss_key_addr);
+
+/* set rss key write data */
+void rpf_rss_key_wr_data_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_data);
+
+/* get rss key write enable */
+u32 rpf_rss_key_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss key write enable */
+void rpf_rss_key_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_key_wr_en);
+
+/* set rss redirection table address */
+void rpf_rss_redir_tbl_addr_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_addr);
+
+/* set rss redirection table write data */
+void rpf_rss_redir_tbl_wr_data_set(struct aq_hw_s *aq_hw,
+ u32 rss_redir_tbl_wr_data);
+
+/* get rss redirection write enable */
+u32 rpf_rss_redir_wr_en_get(struct aq_hw_s *aq_hw);
+
+/* set rss redirection write enable */
+void rpf_rss_redir_wr_en_set(struct aq_hw_s *aq_hw, u32 rss_redir_wr_en);
+
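The RSS key accessors above suggest a write-and-poll idiom. A hypothetical helper for one 32-bit key word, assuming wr_en is set by software and cleared by hardware once the write lands (that self-clearing behavior is an assumption here, not stated by the patch):

        /* needs <linux/delay.h> and <linux/errno.h> */
        static int rss_key_word_write(struct aq_hw_s *aq_hw, u32 addr, u32 word)
        {
                int budget = 1000;

                rpf_rss_key_wr_data_set(aq_hw, word);
                rpf_rss_key_addr_set(aq_hw, addr);
                rpf_rss_key_wr_en_set(aq_hw, 1U);       /* start the write */

                while (rpf_rss_key_wr_en_get(aq_hw)) {  /* wait self-clear */
                        if (--budget == 0)
                                return -ETIME;
                        udelay(10);
                }
                return 0;
        }
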
+/* set tpo to rpf system loopback */
+void rpf_tpo_to_rpf_sys_lbk_set(struct aq_hw_s *aq_hw,
+ u32 tpo_to_rpf_sys_lbk);
+
+/* set vlan inner ethertype */
+void rpf_vlan_inner_etht_set(struct aq_hw_s *aq_hw, u32 vlan_inner_etht);
+
+/* set vlan outer ethertype */
+void rpf_vlan_outer_etht_set(struct aq_hw_s *aq_hw, u32 vlan_outer_etht);
+
+/* set vlan promiscuous mode enable */
+void rpf_vlan_prom_mode_en_set(struct aq_hw_s *aq_hw, u32 vlan_prom_mode_en);
+
+/* Set VLAN untagged action */
+void rpf_vlan_untagged_act_set(struct aq_hw_s *aq_hw, u32 vlan_untagged_act);
+
+/* Set VLAN accept untagged packets */
+void rpf_vlan_accept_untagged_packets_set(struct aq_hw_s *aq_hw,
+ u32 vlan_accept_untagged_packets);
+
+/* Set VLAN filter enable */
+void rpf_vlan_flr_en_set(struct aq_hw_s *aq_hw, u32 vlan_flr_en, u32 filter);
+
+/* Set VLAN Filter Action */
+void rpf_vlan_flr_act_set(struct aq_hw_s *aq_hw, u32 vlan_filter_act,
+ u32 filter);
+
+/* Set VLAN ID Filter */
+void rpf_vlan_id_flr_set(struct aq_hw_s *aq_hw, u32 vlan_id_flr, u32 filter);
+
+/* set ethertype filter enable */
+void rpf_etht_flr_en_set(struct aq_hw_s *aq_hw, u32 etht_flr_en, u32 filter);
+
+/* set ethertype user-priority enable */
+void rpf_etht_user_priority_en_set(struct aq_hw_s *aq_hw,
+ u32 etht_user_priority_en, u32 filter);
+
+/* set ethertype rx queue enable */
+void rpf_etht_rx_queue_en_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue_en,
+ u32 filter);
+
+/* set ethertype rx queue */
+void rpf_etht_rx_queue_set(struct aq_hw_s *aq_hw, u32 etht_rx_queue,
+ u32 filter);
+
+/* set ethertype user-priority */
+void rpf_etht_user_priority_set(struct aq_hw_s *aq_hw, u32 etht_user_priority,
+ u32 filter);
+
+/* set ethertype management queue */
+void rpf_etht_mgt_queue_set(struct aq_hw_s *aq_hw, u32 etht_mgt_queue,
+ u32 filter);
+
+/* set ethertype filter action */
+void rpf_etht_flr_act_set(struct aq_hw_s *aq_hw, u32 etht_flr_act,
+ u32 filter);
+
+/* set ethertype filter */
+void rpf_etht_flr_set(struct aq_hw_s *aq_hw, u32 etht_flr, u32 filter);
+
+/* rpo */
+
+/* set ipv4 header checksum offload enable */
+void rpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set rx descriptor vlan stripping */
+void rpo_rx_desc_vlan_stripping_set(struct aq_hw_s *aq_hw,
+ u32 rx_desc_vlan_stripping,
+ u32 descriptor);
+
+/* set tcp/udp checksum offload enable */
+void rpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* Set LRO Patch Optimization Enable. */
+void rpo_lro_patch_optimization_en_set(struct aq_hw_s *aq_hw,
+ u32 lro_patch_optimization_en);
+
+/* Set Large Receive Offload Enable */
+void rpo_lro_en_set(struct aq_hw_s *aq_hw, u32 lro_en);
+
+/* Set LRO Q Sessions Limit */
+void rpo_lro_qsessions_lim_set(struct aq_hw_s *aq_hw, u32 lro_qsessions_lim);
+
+/* Set LRO Total Descriptor Limit */
+void rpo_lro_total_desc_lim_set(struct aq_hw_s *aq_hw, u32 lro_total_desc_lim);
+
+/* Set LRO Min Payload of First Packet */
+void rpo_lro_min_pay_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lro_min_pld_of_first_pkt);
+
+/* Set LRO Packet Limit */
+void rpo_lro_pkt_lim_set(struct aq_hw_s *aq_hw, u32 lro_pkt_lim);
+
+/* Set LRO Max Number of Descriptors */
+void rpo_lro_max_num_of_descriptors_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_number_of_descriptors, u32 lro);
+
+/* Set LRO Time Base Divider */
+void rpo_lro_time_base_divider_set(struct aq_hw_s *aq_hw,
+ u32 lro_time_base_divider);
+
+/* Set LRO Inactive Interval */
+void rpo_lro_inactive_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_inactive_interval);
+
+/* Set LRO Max Coalescing Interval */
+void rpo_lro_max_coalescing_interval_set(struct aq_hw_s *aq_hw,
+ u32 lro_max_coalescing_interval);
+
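One plausible order for bringing up LRO with the setters above. All values are placeholders for illustration, not recommended hardware settings, and treating the rpo_lro_en_set() argument as a per-ring enable mask is an assumption:

        static void lro_init_example(struct aq_hw_s *aq_hw)
        {
                u32 ring;

                rpo_lro_time_base_divider_set(aq_hw, 0x61AU); /* placeholder */
                rpo_lro_inactive_interval_set(aq_hw, 0U);
                rpo_lro_max_coalescing_interval_set(aq_hw, 2U);
                rpo_lro_qsessions_lim_set(aq_hw, 1U);
                rpo_lro_total_desc_lim_set(aq_hw, 2U);
                rpo_lro_min_pay_of_first_pkt_set(aq_hw, 10U);
                rpo_lro_pkt_lim_set(aq_hw, 1U);

                for (ring = 0U; ring < 32U; ++ring)
                        rpo_lro_max_num_of_descriptors_set(aq_hw, 3U, ring);

                rpo_lro_en_set(aq_hw, 0xFFFFFFFFU); /* assumed ring mask */
        }
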
+/* rx */
+
+/* set rx register reset disable */
+void rx_rx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 rx_reg_res_dis);
+
+/* tdm */
+
+/* set cpu id */
+void tdm_cpu_id_set(struct aq_hw_s *aq_hw, u32 cpuid, u32 dca);
+
+/* set large send offload enable */
+void tdm_large_send_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 large_send_offload_en);
+
+/* set tx descriptor enable */
+void tdm_tx_desc_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_en, u32 descriptor);
+
+/* set tx dca enable */
+void tdm_tx_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_dca_en);
+
+/* set tx dca mode */
+void tdm_tx_dca_mode_set(struct aq_hw_s *aq_hw, u32 tx_dca_mode);
+
+/* set tx descriptor dca enable */
+void tdm_tx_desc_dca_en_set(struct aq_hw_s *aq_hw, u32 tx_desc_dca_en, u32 dca);
+
+/* get tx descriptor head pointer */
+u32 tdm_tx_desc_head_ptr_get(struct aq_hw_s *aq_hw, u32 descriptor);
+
+/* set tx descriptor length */
+void tdm_tx_desc_len_set(struct aq_hw_s *aq_hw, u32 tx_desc_len,
+ u32 descriptor);
+
+/* set tx descriptor write-back interrupt enable */
+void tdm_tx_desc_wr_wb_irq_en_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_irq_en);
+
+/* set tx descriptor write-back threshold */
+void tdm_tx_desc_wr_wb_threshold_set(struct aq_hw_s *aq_hw,
+ u32 tx_desc_wr_wb_threshold,
+ u32 descriptor);
+
+/* Set TDM Interrupt Moderation Enable */
+void tdm_tdm_intr_moder_en_set(struct aq_hw_s *aq_hw,
+ u32 tdm_irq_moderation_en);
+
+/* thm */
+
+/* set lso tcp flag of first packet */
+void thm_lso_tcp_flag_of_first_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_first_pkt);
+
+/* set lso tcp flag of last packet */
+void thm_lso_tcp_flag_of_last_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_last_pkt);
+
+/* set lso tcp flag of middle packet */
+void thm_lso_tcp_flag_of_middle_pkt_set(struct aq_hw_s *aq_hw,
+ u32 lso_tcp_flag_of_middle_pkt);
+
+/* tpb */
+
+/* set tx buffer enable */
+void tpb_tx_buff_en_set(struct aq_hw_s *aq_hw, u32 tx_buff_en);
+
+/* set tx buffer high threshold (per tc) */
+void tpb_tx_buff_hi_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_hi_threshold_per_tc,
+ u32 buffer);
+
+/* set tx buffer low threshold (per tc) */
+void tpb_tx_buff_lo_threshold_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_buff_lo_threshold_per_tc,
+ u32 buffer);
+
+/* set tx dma system loopback enable */
+void tpb_tx_dma_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_dma_sys_lbk_en);
+
+/* set tx packet buffer size (per tc) */
+void tpb_tx_pkt_buff_size_per_tc_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_buff_size_per_tc, u32 buffer);
+
+/* set tx path pad insert enable */
+void tpb_tx_path_scp_ins_en_set(struct aq_hw_s *aq_hw, u32 tx_path_scp_ins_en);
+
+/* tpo */
+
+/* set ipv4 header checksum offload enable */
+void tpo_ipv4header_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 ipv4header_crc_offload_en);
+
+/* set tcp/udp checksum offload enable */
+void tpo_tcp_udp_crc_offload_en_set(struct aq_hw_s *aq_hw,
+ u32 tcp_udp_crc_offload_en);
+
+/* set tx pkt system loopback enable */
+void tpo_tx_pkt_sys_lbk_en_set(struct aq_hw_s *aq_hw, u32 tx_pkt_sys_lbk_en);
+
+/* tps */
+
+/* set tx packet scheduler data arbitration mode */
+void tps_tx_pkt_shed_data_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_data_arb_mode);
+
+/* set tx packet scheduler descriptor rate current time reset */
+void tps_tx_pkt_shed_desc_rate_curr_time_res_set(struct aq_hw_s *aq_hw,
+ u32 curr_time_res);
+
+/* set tx packet scheduler descriptor rate limit */
+void tps_tx_pkt_shed_desc_rate_lim_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_rate_lim);
+
+/* set tx packet scheduler descriptor tc arbitration mode */
+void tps_tx_pkt_shed_desc_tc_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_arb_mode);
+
+/* set tx packet scheduler descriptor tc max credit */
+void tps_tx_pkt_shed_desc_tc_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_max_credit,
+ u32 tc);
+
+/* set tx packet scheduler descriptor tc weight */
+void tps_tx_pkt_shed_desc_tc_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_tc_weight,
+ u32 tc);
+
+/* set tx packet scheduler descriptor vm arbitration mode */
+void tps_tx_pkt_shed_desc_vm_arb_mode_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_desc_vm_arb_mode);
+
+/* set tx packet scheduler tc data max credit */
+void tps_tx_pkt_shed_tc_data_max_credit_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_max_credit,
+ u32 tc);
+
+/* set tx packet scheduler tc data weight */
+void tps_tx_pkt_shed_tc_data_weight_set(struct aq_hw_s *aq_hw,
+ u32 tx_pkt_shed_tc_data_weight,
+ u32 tc);
+
+/* tx */
+
+/* set tx register reset disable */
+void tx_tx_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 tx_reg_res_dis);
+
+/* msm */
+
+/* get register access status */
+u32 msm_reg_access_status_get(struct aq_hw_s *aq_hw);
+
+/* set register address for indirect address */
+void msm_reg_addr_for_indirect_addr_set(struct aq_hw_s *aq_hw,
+ u32 reg_addr_for_indirect_addr);
+
+/* set register read strobe */
+void msm_reg_rd_strobe_set(struct aq_hw_s *aq_hw, u32 reg_rd_strobe);
+
+/* get register read data */
+u32 msm_reg_rd_data_get(struct aq_hw_s *aq_hw);
+
+/* set register write data */
+void msm_reg_wr_data_set(struct aq_hw_s *aq_hw, u32 reg_wr_data);
+
+/* set register write strobe */
+void msm_reg_wr_strobe_set(struct aq_hw_s *aq_hw, u32 reg_wr_strobe);
+
+/* pci */
+
+/* set pci register reset disable */
+void pci_pci_reg_res_dis_set(struct aq_hw_s *aq_hw, u32 pci_reg_res_dis);
+
+#endif /* HW_ATL_LLH_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
new file mode 100644
index 0000000..5527fc0
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_llh_internal.h
@@ -0,0 +1,2375 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_llh_internal.h: Preprocessor definitions
+ * for Atlantic registers.
+ */
+
+#ifndef HW_ATL_LLH_INTERNAL_H
+#define HW_ATL_LLH_INTERNAL_H
+
+/* global microprocessor semaphore definitions
+ * base address: 0x000003a0
+ * parameter: semaphore {s} | stride size 0x4 | range [0, 15]
+ */
+#define glb_cpu_sem_adr(semaphore) (0x000003a0u + (semaphore) * 0x4)
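
Indexed registers throughout this file follow the same convention: the parameterized macro expands to a base address plus index times stride, e.g.:

        u32 sem0 = glb_cpu_sem_adr(0); /* 0x000003a0 */
        u32 sem3 = glb_cpu_sem_adr(3); /* 0x000003a0 + 3 * 0x4 = 0x000003ac */
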
+/* register address for bitfield rx dma good octet counter lsw [1f:0] */
+#define stats_rx_dma_good_octet_counterlsw__adr 0x00006808
+/* register address for bitfield rx dma good packet counter lsw [1f:0] */
+#define stats_rx_dma_good_pkt_counterlsw__adr 0x00006800
+/* register address for bitfield tx dma good octet counter lsw [1f:0] */
+#define stats_tx_dma_good_octet_counterlsw__adr 0x00008808
+/* register address for bitfield tx dma good packet counter lsw [1f:0] */
+#define stats_tx_dma_good_pkt_counterlsw__adr 0x00008800
+
+/* register address for bitfield rx dma good octet counter msw [3f:20] */
+#define stats_rx_dma_good_octet_countermsw__adr 0x0000680c
+/* register address for bitfield rx dma good packet counter msw [3f:20] */
+#define stats_rx_dma_good_pkt_countermsw__adr 0x00006804
+/* register address for bitfield tx dma good octet counter msw [3f:20] */
+#define stats_tx_dma_good_octet_countermsw__adr 0x0000880c
+/* register address for bitfield tx dma good packet counter msw [3f:20] */
+#define stats_tx_dma_good_pkt_countermsw__adr 0x00008804
+
+/* preprocessor definitions for msm rx errors counter register */
+#define mac_msm_rx_errs_cnt_adr 0x00000120u
+
+/* preprocessor definitions for msm rx unicast frames counter register */
+#define mac_msm_rx_ucst_frm_cnt_adr 0x000000e0u
+
+/* preprocessor definitions for msm rx multicast frames counter register */
+#define mac_msm_rx_mcst_frm_cnt_adr 0x000000e8u
+
+/* preprocessor definitions for msm rx broadcast frames counter register */
+#define mac_msm_rx_bcst_frm_cnt_adr 0x000000f0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 1 */
+#define mac_msm_rx_bcst_octets_counter1_adr 0x000001b0u
+
+/* preprocessor definitions for msm rx broadcast octets counter register 2 */
+#define mac_msm_rx_bcst_octets_counter2_adr 0x000001b4u
+
+/* preprocessor definitions for msm rx unicast octets counter register 0 */
+#define mac_msm_rx_ucst_octets_counter0_adr 0x000001b8u
+
+/* preprocessor definitions for rx dma statistics counter 7 */
+#define rx_dma_stat_counter7_adr 0x00006818u
+
+/* preprocessor definitions for msm tx unicast frames counter register */
+#define mac_msm_tx_ucst_frm_cnt_adr 0x00000108u
+
+/* preprocessor definitions for msm tx multicast frames counter register */
+#define mac_msm_tx_mcst_frm_cnt_adr 0x00000110u
+
+/* preprocessor definitions for global mif identification */
+#define glb_mif_id_adr 0x0000001cu
+
+/* register address for bitfield iamr_lsw[1f:0] */
+#define itr_iamrlsw_adr 0x00002090
+/* register address for bitfield rx dma drop packet counter [1f:0] */
+#define rpb_rx_dma_drop_pkt_cnt_adr 0x00006818
+
+/* register address for bitfield imcr_lsw[1f:0] */
+#define itr_imcrlsw_adr 0x00002070
+/* register address for bitfield imsr_lsw[1f:0] */
+#define itr_imsrlsw_adr 0x00002060
+/* register address for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_adr 0x00002300
+/* bitmask for bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_msk 0x20000000
+/* lower bit position of bitfield itr_reg_res_dsbl */
+#define itr_reg_res_dsbl_shift 29
+/* register address for bitfield iscr_lsw[1f:0] */
+#define itr_iscrlsw_adr 0x00002050
+/* register address for bitfield isr_lsw[1f:0] */
+#define itr_isrlsw_adr 0x00002000
+/* register address for bitfield itr_reset */
+#define itr_res_adr 0x00002300
+/* bitmask for bitfield itr_reset */
+#define itr_res_msk 0x80000000
+/* lower bit position of bitfield itr_reset */
+#define itr_res_shift 31
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_msk 0x000000ff
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define rdm_dcadcpuid_shift 0
+
+/* rx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_rdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define rdm_dca_en_adr 0x00006180
+/* bitmask for bitfield dca_en */
+#define rdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define rdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define rdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define rdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define rdm_dca_en_default 0x1
+
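Each bitfield contributes the same family of constants (adr, msk, mskn, shift, width, default), which are mutually redundant. A sketch of the invariants they satisfy, usable as a compile-time sanity check (FIELD_MSK and the C11 _Static_assert are illustrative, not part of the patch):

        /* needs <linux/types.h> for u32 */
        #define FIELD_MSK(shift, width) \
                ((((u32)1 << (width)) - 1U) << (shift))

        _Static_assert(rdm_dca_en_msk ==
                       FIELD_MSK(rdm_dca_en_shift, rdm_dca_en_width),
                       "dca_en: msk must match shift/width");
        _Static_assert(rdm_dca_en_mskn == (u32)~rdm_dca_en_msk,
                       "dca_en: mskn must be the inverted msk");
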
+/* rx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_rdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_adr 0x00006180
+/* bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define rdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define rdm_dca_mode_default 0x0
+
+/* rx desc{d}_data_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_data_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_data_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_msk 0x0000001f
+/* inverted bitmask for bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_mskn 0xffffffe0
+/* lower bit position of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_shift 0
+/* width of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_width 5
+/* default value of bitfield desc{d}_data_size[4:0] */
+#define rdm_descddata_size_default 0x0
+
+/* rx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define rdm_dcaddesc_en_default 0x0
+
+/* rx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define rdm_descden_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_en */
+#define rdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define rdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define rdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define rdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define rdm_descden_default 0x0
+
+/* rx desc{d}_hdr_size[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_size[4:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_hdr_size_i[4:0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_adr(descriptor) (0x00005b18 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_msk 0x00001f00
+/* inverted bitmask for bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_mskn 0xffffe0ff
+/* lower bit position of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_shift 8
+/* width of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_width 5
+/* default value of bitfield desc{d}_hdr_size[4:0] */
+#define rdm_descdhdr_size_default 0x0
+
+/* rx desc{d}_hdr_split bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hdr_split".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc_hdr_split_i[0]"
+ */
+
+/* register address for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_msk 0x10000000
+/* inverted bitmask for bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_mskn 0xefffffff
+/* lower bit position of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_shift 28
+/* width of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_width 1
+/* default value of bitfield desc{d}_hdr_split */
+#define rdm_descdhdr_split_default 0x0
+
+/* rx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="rdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_adr(descriptor) (0x00005b0c + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define rdm_descdhd_width 13
+
+/* rx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define rdm_descdlen_default 0x0
+
+/* rx desc{d}_reset bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_reset".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rdm_q_pf_res_i[0]"
+ */
+
+/* register address for bitfield desc{d}_reset */
+#define rdm_descdreset_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_msk 0x02000000
+/* inverted bitmask for bitfield desc{d}_reset */
+#define rdm_descdreset_mskn 0xfdffffff
+/* lower bit position of bitfield desc{d}_reset */
+#define rdm_descdreset_shift 25
+/* width of bitfield desc{d}_reset */
+#define rdm_descdreset_width 1
+/* default value of bitfield desc{d}_reset */
+#define rdm_descdreset_default 0x0
+
+/* rx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_rdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_adr 0x00005a30
+/* bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_msk 0x00000004
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_mskn 0xfffffffb
+/* lower bit position of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_shift 2
+/* width of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define rdm_int_desc_wrb_en_default 0x0
+
+/* rx dca{d}_hdr_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_hdr_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_hdr_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_msk 0x40000000
+/* inverted bitmask for bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_mskn 0xbfffffff
+/* lower bit position of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_shift 30
+/* width of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_width 1
+/* default value of bitfield dca{d}_hdr_en */
+#define rdm_dcadhdr_en_default 0x0
+
+/* rx dca{d}_pay_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_pay_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_rdm_dca_pay_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_adr(dca) (0x00006100 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_msk 0x20000000
+/* inverted bitmask for bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_mskn 0xdfffffff
+/* lower bit position of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_shift 29
+/* width of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_width 1
+/* default value of bitfield dca{d}_pay_en */
+#define rdm_dcadpay_en_default 0x0
+
+/* RX rdm_int_rim_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "rdm_int_rim_en".
+ * PORT="pif_rdm_int_rim_en_i"
+ */
+
+/* Register address for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_adr 0x00005A30
+/* Bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_msk 0x00000008
+/* Inverted bitmask for bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_mskn 0xFFFFFFF7
+/* Lower bit position of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_shift 3
+/* Width of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_width 1
+/* Default value of bitfield rdm_int_rim_en */
+#define rdm_int_rim_en_default 0x0
+
+/* general interrupt mapping register definitions
+ * preprocessor definitions for general interrupt mapping register
+ * base address: 0x00002180
+ * parameter: regidx {f} | stride size 0x4 | range [0, 3]
+ */
+#define gen_intr_map_adr(regidx) (0x00002180u + (regidx) * 0x4)
+
+/* general interrupt status register definitions
+ * preprocessor definitions for general interrupt status register
+ * address: 0x000021A4
+ */
+
+#define gen_intr_stat_adr 0x000021A4U
+
+/* interrupt global control register definitions
+ * preprocessor definitions for interrupt global control register
+ * address: 0x00002300
+ */
+#define intr_glb_ctl_adr 0x00002300u
+
+/* interrupt throttle register definitions
+ * preprocessor definitions for interrupt throttle register
+ * base address: 0x00002800
+ * parameter: throttle {t} | stride size 0x4 | range [0, 31]
+ */
+#define intr_thr_adr(throttle) (0x00002800u + (throttle) * 0x4)
+
+/* rx dma descriptor base address lsw definitions
+ * preprocessor definitions for rx dma descriptor base address lsw
+ * base address: 0x00005b00
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrlsw_adr(descriptor) \
+(0x00005b00u + (descriptor) * 0x20)
+
+/* rx dma descriptor base address msw definitions
+ * preprocessor definitions for rx dma descriptor base address msw
+ * base address: 0x00005b04
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_base_addrmsw_adr(descriptor) \
+(0x00005b04u + (descriptor) * 0x20)
+
+/* rx dma descriptor status register definitions
+ * preprocessor definitions for rx dma descriptor status register
+ * base address: 0x00005b14
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_stat_adr(descriptor) (0x00005b14u + (descriptor) * 0x20)
+
+/* rx dma descriptor tail pointer register definitions
+ * preprocessor definitions for rx dma descriptor tail pointer register
+ * base address: 0x00005b10
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ */
+#define rx_dma_desc_tail_ptr_adr(descriptor) (0x00005b10u + (descriptor) * 0x20)
+
+/* rx interrupt moderation control register definitions
+ * Preprocessor definitions for RX Interrupt Moderation Control Register
+ * Base Address: 0x00005A40
+ * Parameter: RIM {R} | stride size 0x4 | range [0, 31]
+ */
+#define rx_intr_moderation_ctl_adr(rim) (0x00005A40u + (rim) * 0x4)
+
+/* rx filter multicast filter mask register definitions
+ * preprocessor definitions for rx filter multicast filter mask register
+ * address: 0x00005270
+ */
+#define rx_flr_mcst_flr_msk_adr 0x00005270u
+
+/* rx filter multicast filter register definitions
+ * preprocessor definitions for rx filter multicast filter register
+ * base address: 0x00005250
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ */
+#define rx_flr_mcst_flr_adr(filter) (0x00005250u + (filter) * 0x4)
+
+/* RX Filter RSS Control Register 1 Definitions
+ * Preprocessor definitions for RX Filter RSS Control Register 1
+ * Address: 0x000054C0
+ */
+#define rx_flr_rss_control1_adr 0x000054C0u
+
+/* RX Filter Control Register 2 Definitions
+ * Preprocessor definitions for RX Filter Control Register 2
+ * Address: 0x00005104
+ */
+#define rx_flr_control2_adr 0x00005104u
+
+/* tx tx dma debug control [1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx dma debug control [1f:0]".
+ * port="pif_tdm_debug_cntl_i[31:0]"
+ */
+
+/* register address for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_adr 0x00008920
+/* bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_msk 0xffffffff
+/* inverted bitmask for bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_mskn 0x00000000
+/* lower bit position of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_shift 0
+/* width of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_width 32
+/* default value of bitfield tx dma debug control [1f:0] */
+#define tdm_tx_dma_debug_ctl_default 0x0
+
+/* tx dma descriptor base address lsw definitions
+ * preprocessor definitions for tx dma descriptor base address lsw
+ * base address: 0x00007c00
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_base_addrlsw_adr(descriptor) \
+ (0x00007c00u + (descriptor) * 0x40)
+
+/* tx dma descriptor tail pointer register definitions
+ * preprocessor definitions for tx dma descriptor tail pointer register
+ * base address: 0x00007c10
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ */
+#define tx_dma_desc_tail_ptr_adr(descriptor) (0x00007c10u + (descriptor) * 0x40)
+
+/* rx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_rpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_adr 0x00005000
+/* bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define rpb_dma_sys_lbk_default 0x0
+
+/* rx rx_tc_mode bitfield definitions
+ * preprocessor definitions for the bitfield "rx_tc_mode".
+ * port="pif_rpb_rx_tc_mode_i,pif_rpf_rx_tc_mode_i"
+ */
+
+/* register address for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_adr 0x00005700
+/* bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_msk 0x00000100
+/* inverted bitmask for bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_mskn 0xfffffeff
+/* lower bit position of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_shift 8
+/* width of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_width 1
+/* default value of bitfield rx_tc_mode */
+#define rpb_rpf_rx_tc_mode_default 0x0
+
+/* rx rx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx_buf_en".
+ * port="pif_rpb_rx_buf_en_i"
+ */
+
+/* register address for bitfield rx_buf_en */
+#define rpb_rx_buf_en_adr 0x00005700
+/* bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield rx_buf_en */
+#define rpb_rx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield rx_buf_en */
+#define rpb_rx_buf_en_shift 0
+/* width of bitfield rx_buf_en */
+#define rpb_rx_buf_en_width 1
+/* default value of bitfield rx_buf_en */
+#define rpb_rx_buf_en_default 0x0
+
+/* rx rx{b}_hi_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_hi_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_hi_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_msk 0x3fff0000
+/* inverted bitmask for bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_mskn 0xc000ffff
+/* lower bit position of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_shift 16
+/* width of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_width 14
+/* default value of bitfield rx{b}_hi_thresh[d:0] */
+#define rpb_rxbhi_thresh_default 0x0
+
+/* rx rx{b}_lo_thresh[d:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_lo_thresh[d:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_lo_thresh_i[13:0]"
+ */
+
+/* register address for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_msk 0x00003fff
+/* inverted bitmask for bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_mskn 0xffffc000
+/* lower bit position of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_shift 0
+/* width of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_width 14
+/* default value of bitfield rx{b}_lo_thresh[d:0] */
+#define rpb_rxblo_thresh_default 0x0
+
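+/* Sketch: parameterized fields expand per instance via their stride macros;
+ * buffer 3's watermark register, for example, is 0x5714 + 3 * 0x10 = 0x5744.
+ * With hi/lo the desired watermarks (hypothetical variables), both fields of
+ * one buffer can be updated in a single read-modify-write:
+ *
+ *	u32 thr = aq_hw_read_reg(hw, rpb_rxbhi_thresh_adr(3));
+ *
+ *	thr &= rpb_rxbhi_thresh_mskn & rpb_rxblo_thresh_mskn;
+ *	thr |= (hi << rpb_rxbhi_thresh_shift) & rpb_rxbhi_thresh_msk;
+ *	thr |= (lo << rpb_rxblo_thresh_shift) & rpb_rxblo_thresh_msk;
+ *	aq_hw_write_reg(hw, rpb_rxbhi_thresh_adr(3), thr);
+ */
+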
+/* rx rx_fc_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx_fc_mode[1:0]".
+ * port="pif_rpb_rx_fc_mode_i[1:0]"
+ */
+
+/* register address for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_adr 0x00005700
+/* bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_msk 0x00000030
+/* inverted bitmask for bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_mskn 0xffffffcf
+/* lower bit position of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_shift 4
+/* width of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_width 2
+/* default value of bitfield rx_fc_mode[1:0] */
+#define rpb_rx_fc_mode_default 0x0
+
+/* rx rx{b}_buf_size[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_buf_size[8:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx0_buf_size_i[8:0]"
+ */
+
+/* register address for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_adr(buffer) (0x00005710 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_msk 0x000001ff
+/* inverted bitmask for bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_mskn 0xfffffe00
+/* lower bit position of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_shift 0
+/* width of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_width 9
+/* default value of bitfield rx{b}_buf_size[8:0] */
+#define rpb_rxbbuf_size_default 0x0
+
+/* rx rx{b}_xoff_en bitfield definitions
+ * preprocessor definitions for the bitfield "rx{b}_xoff_en".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_rpb_rx_xoff_en_i[0]"
+ */
+
+/* register address for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_adr(buffer) (0x00005714 + (buffer) * 0x10)
+/* bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_msk 0x80000000
+/* inverted bitmask for bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_mskn 0x7fffffff
+/* lower bit position of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_shift 31
+/* width of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_width 1
+/* default value of bitfield rx{b}_xoff_en */
+#define rpb_rxbxoff_en_default 0x0
+
+/* rx l2_bc_thresh[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_thresh[f:0]".
+ * port="pif_rpf_l2_bc_thresh_i[15:0]"
+ */
+
+/* register address for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_adr 0x00005100
+/* bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_msk 0xffff0000
+/* inverted bitmask for bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_mskn 0x0000ffff
+/* lower bit position of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_shift 16
+/* width of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_width 16
+/* default value of bitfield l2_bc_thresh[f:0] */
+#define rpfl2bc_thresh_default 0x0
+
+/* rx l2_bc_en bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_en".
+ * port="pif_rpf_l2_bc_en_i"
+ */
+
+/* register address for bitfield l2_bc_en */
+#define rpfl2bc_en_adr 0x00005100
+/* bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_msk 0x00000001
+/* inverted bitmask for bitfield l2_bc_en */
+#define rpfl2bc_en_mskn 0xfffffffe
+/* lower bit position of bitfield l2_bc_en */
+#define rpfl2bc_en_shift 0
+/* width of bitfield l2_bc_en */
+#define rpfl2bc_en_width 1
+/* default value of bitfield l2_bc_en */
+#define rpfl2bc_en_default 0x0
+
+/* rx l2_bc_act[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_bc_act[2:0]".
+ * port="pif_rpf_l2_bc_act_i[2:0]"
+ */
+
+/* register address for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_adr 0x00005100
+/* bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_msk 0x00007000
+/* inverted bitmask for bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_mskn 0xffff8fff
+/* lower bit position of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_shift 12
+/* width of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_width 3
+/* default value of bitfield l2_bc_act[2:0] */
+#define rpfl2bc_act_default 0x0
+
+/* rx l2_mc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_mc_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 7]
+ * port="pif_rpf_l2_mc_en_i[0]"
+ */
+
+/* register address for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_adr(filter) (0x00005250 + (filter) * 0x4)
+/* bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_shift 31
+/* width of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_width 1
+/* default value of bitfield l2_mc_en{f} */
+#define rpfl2mc_enf_default 0x0
+
+/* rx l2_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "l2_promis_mode".
+ * port="pif_rpf_l2_promis_mode_i"
+ */
+
+/* register address for bitfield l2_promis_mode */
+#define rpfl2promis_mode_adr 0x00005100
+/* bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_msk 0x00000008
+/* inverted bitmask for bitfield l2_promis_mode */
+#define rpfl2promis_mode_mskn 0xfffffff7
+/* lower bit position of bitfield l2_promis_mode */
+#define rpfl2promis_mode_shift 3
+/* width of bitfield l2_promis_mode */
+#define rpfl2promis_mode_width 1
+/* default value of bitfield l2_promis_mode */
+#define rpfl2promis_mode_default 0x0
+
+/* rx l2_uc_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_act0_i[2:0]"
+ */
+
+/* register address for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_msk 0x00070000
+/* inverted bitmask for bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_shift 16
+/* width of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_width 3
+/* default value of bitfield l2_uc_act{f}[2:0] */
+#define rpfl2uc_actf_default 0x0
+
+/* rx l2_uc_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "l2_uc_en{f}".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ * port="pif_rpf_l2_uc_en_i[0]"
+ */
+
+/* register address for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_msk 0x80000000
+/* inverted bitmask for bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_mskn 0x7fffffff
+/* lower bit position of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_shift 31
+/* width of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_width 1
+/* default value of bitfield l2_uc_en{f} */
+#define rpfl2uc_enf_default 0x0
+
+/* rx l2_uc_da{f} bitfield definitions
+ * preprocessor definitions for the bitfields "l2_uc_da{f}_lsw[1f:0]" and
+ * "l2_uc_da{f}_msw[f:0]".
+ * parameter: filter {f} | stride size 0x8 | range [0, 37]
+ */
+
+/* register address for bitfield l2_uc_da{f}_lsw[1f:0] */
+#define rpfl2uc_daflsw_adr(filter) (0x00005110 + (filter) * 0x8)
+/* register address for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_adr(filter) (0x00005114 + (filter) * 0x8)
+/* bitmask for bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_msk 0x0000ffff
+/* lower bit position of bitfield l2_uc_da{f}_msw[f:0] */
+#define rpfl2uc_dafmsw_shift 0
+
+/* rx l2_mc_accept_all bitfield definitions
+ * Preprocessor definitions for the bitfield "l2_mc_accept_all".
+ * PORT="pif_rpf_l2_mc_all_accept_i"
+ */
+
+/* Register address for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_adr 0x00005270
+/* Bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_msk 0x00004000
+/* Inverted bitmask for bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_mskn 0xFFFFBFFF
+/* Lower bit position of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_shift 14
+/* Width of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_width 1
+/* Default value of bitfield l2_mc_accept_all */
+#define rpfl2mc_accept_all_default 0x0
+
+/* width of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_width 3
+/* default value of bitfield rx_tc_up{t}[2:0] */
+#define rpf_rpb_rx_tc_upt_default 0x0
+
+/* rx rss_key_addr[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_addr[4:0]".
+ * port="pif_rpf_rss_key_addr_i[4:0]"
+ */
+
+/* register address for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_adr 0x000054d0
+/* bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_msk 0x0000001f
+/* inverted bitmask for bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_mskn 0xffffffe0
+/* lower bit position of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_shift 0
+/* width of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_width 5
+/* default value of bitfield rss_key_addr[4:0] */
+#define rpf_rss_key_addr_default 0x0
+
+/* rx rss_key_wr_data[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_data[1f:0]".
+ * port="pif_rpf_rss_key_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_adr 0x000054d4
+/* bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_mskn 0x00000000
+/* lower bit position of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_shift 0
+/* width of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_width 32
+/* default value of bitfield rss_key_wr_data[1f:0] */
+#define rpf_rss_key_wr_data_default 0x0
+
+/* rx rss_key_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_key_wr_en_i".
+ * port="pif_rpf_rss_key_wr_en_i"
+ */
+
+/* register address for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_adr 0x000054d0
+/* bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_msk 0x00000020
+/* inverted bitmask for bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_mskn 0xffffffdf
+/* lower bit position of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_shift 5
+/* width of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_width 1
+/* default value of bitfield rss_key_wr_en_i */
+#define rpf_rss_key_wr_eni_default 0x0
+
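+/* Sketch: the rss_key_* fields describe an indirect write port. A plausible
+ * sequence (ordering and any completion polling are assumptions) loads the
+ * data word, then writes the word index together with the write-enable bit;
+ * key_word and idx are hypothetical variables:
+ *
+ *	aq_hw_write_reg(hw, rpf_rss_key_wr_data_adr, key_word);
+ *	aq_hw_write_reg(hw, rpf_rss_key_addr_adr,
+ *			((idx << rpf_rss_key_addr_shift) &
+ *			 rpf_rss_key_addr_msk) | rpf_rss_key_wr_eni_msk);
+ */
+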
+/* rx rss_redir_addr[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_addr[3:0]".
+ * port="pif_rpf_rss_redir_addr_i[3:0]"
+ */
+
+/* register address for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_adr 0x000054e0
+/* bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_msk 0x0000000f
+/* inverted bitmask for bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_mskn 0xfffffff0
+/* lower bit position of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_shift 0
+/* width of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_width 4
+/* default value of bitfield rss_redir_addr[3:0] */
+#define rpf_rss_redir_addr_default 0x0
+
+/* rx rss_redir_wr_data[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_data[f:0]".
+ * port="pif_rpf_rss_redir_wr_data_i[15:0]"
+ */
+
+/* register address for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_adr 0x000054e4
+/* bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_msk 0x0000ffff
+/* inverted bitmask for bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_mskn 0xffff0000
+/* lower bit position of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_shift 0
+/* width of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_width 16
+/* default value of bitfield rss_redir_wr_data[f:0] */
+#define rpf_rss_redir_wr_data_default 0x0
+
+/* rx rss_redir_wr_en_i bitfield definitions
+ * preprocessor definitions for the bitfield "rss_redir_wr_en_i".
+ * port="pif_rpf_rss_redir_wr_en_i"
+ */
+
+/* register address for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_adr 0x000054e0
+/* bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_msk 0x00000010
+/* inverted bitmask for bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_mskn 0xffffffef
+/* lower bit position of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_shift 4
+/* width of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_width 1
+/* default value of bitfield rss_redir_wr_en_i */
+#define rpf_rss_redir_wr_eni_default 0x0
+
+/* rx tpo_rpf_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "tpo_rpf_sys_loopback".
+ * port="pif_rpf_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_adr 0x00005000
+/* bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_msk 0x00000100
+/* inverted bitmask for bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_mskn 0xfffffeff
+/* lower bit position of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_shift 8
+/* width of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_width 1
+/* default value of bitfield tpo_rpf_sys_loopback */
+#define rpf_tpo_rpf_sys_lbk_default 0x0
+
+/* rx vl_inner_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_inner_tpid[f:0]".
+ * port="pif_rpf_vl_inner_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_adr 0x00005284
+/* bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_msk 0x0000ffff
+/* inverted bitmask for bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_mskn 0xffff0000
+/* lower bit position of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_shift 0
+/* width of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_width 16
+/* default value of bitfield vl_inner_tpid[f:0] */
+#define rpf_vl_inner_tpid_default 0x8100
+
+/* rx vl_outer_tpid[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "vl_outer_tpid[f:0]".
+ * port="pif_rpf_vl_outer_tpid_i[15:0]"
+ */
+
+/* register address for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_adr 0x00005284
+/* bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_msk 0xffff0000
+/* inverted bitmask for bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_mskn 0x0000ffff
+/* lower bit position of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_shift 16
+/* width of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_width 16
+/* default value of bitfield vl_outer_tpid[f:0] */
+#define rpf_vl_outer_tpid_default 0x88a8
+
+/* rx vl_promis_mode bitfield definitions
+ * preprocessor definitions for the bitfield "vl_promis_mode".
+ * port="pif_rpf_vl_promis_mode_i"
+ */
+
+/* register address for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_adr 0x00005280
+/* bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_msk 0x00000002
+/* inverted bitmask for bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_mskn 0xfffffffd
+/* lower bit position of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_shift 1
+/* width of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_width 1
+/* default value of bitfield vl_promis_mode */
+#define rpf_vl_promis_mode_default 0x0
+
+/* RX vl_accept_untagged_mode Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_accept_untagged_mode".
+ * PORT="pif_rpf_vl_accept_untagged_i"
+ */
+
+/* Register address for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_adr 0x00005280
+/* Bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_msk 0x00000004
+/* Inverted bitmask for bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_mskn 0xFFFFFFFB
+/* Lower bit position of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_shift 2
+/* Width of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_width 1
+/* Default value of bitfield vl_accept_untagged_mode */
+#define rpf_vl_accept_untagged_mode_default 0x0
+
+/* RX vl_untagged_act[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_untagged_act[2:0]".
+ * PORT="pif_rpf_vl_untagged_act_i[2:0]"
+ */
+
+/* Register address for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_adr 0x00005280
+/* Bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_msk 0x00000038
+/* Inverted bitmask for bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_mskn 0xFFFFFFC7
+/* Lower bit position of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_shift 3
+/* Width of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_width 3
+/* Default value of bitfield vl_untagged_act[2:0] */
+#define rpf_vl_untagged_act_default 0x0
+
+/* RX vl_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_en_i[0]"
+ */
+
+/* Register address for bitfield vl_en{F} */
+#define rpf_vl_en_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield vl_en{F} */
+#define rpf_vl_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield vl_en{F} */
+#define rpf_vl_en_f_shift 31
+/* Width of bitfield vl_en{F} */
+#define rpf_vl_en_f_width 1
+/* Default value of bitfield vl_en{F} */
+#define rpf_vl_en_f_default 0x0
+
+/* RX vl_act{F}[2:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_act{F}[2:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_act0_i[2:0]"
+ */
+
+/* Register address for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_msk 0x00070000
+/* Inverted bitmask for bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_mskn 0xFFF8FFFF
+/* Lower bit position of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_shift 16
+/* Width of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_width 3
+/* Default value of bitfield vl_act{F}[2:0] */
+#define rpf_vl_act_f_default 0x0
+
+/* RX vl_id{F}[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "vl_id{F}[B:0]".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_vl_id0_i[11:0]"
+ */
+
+/* Register address for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_adr(filter) (0x00005290 + (filter) * 0x4)
+/* Bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_msk 0x00000FFF
+/* Inverted bitmask for bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_mskn 0xFFFFF000
+/* Lower bit position of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_shift 0
+/* Width of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_width 12
+/* Default value of bitfield vl_id{F}[B:0] */
+#define rpf_vl_id_f_default 0x0
+
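+/* Sketch: vl_en{F}, vl_act{F} and vl_id{F} share one stride-0x4 register at
+ * 0x5290, so enabling VLAN filter slot f (hypothetical index) for VID 100
+ * is a single read-modify-write:
+ *
+ *	u32 v = aq_hw_read_reg(hw, rpf_vl_id_f_adr(f));
+ *
+ *	v &= rpf_vl_id_f_mskn;
+ *	v |= (100U << rpf_vl_id_f_shift) & rpf_vl_id_f_msk;
+ *	v |= rpf_vl_en_f_msk;
+ *	aq_hw_write_reg(hw, rpf_vl_id_f_adr(f), v);
+ */
+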
+/* RX et_en{F} Bitfield Definitions
+ * Preprocessor definitions for the bitfield "et_en{F}".
+ * Parameter: filter {F} | stride size 0x4 | range [0, 15]
+ * PORT="pif_rpf_et_en_i[0]"
+ */
+
+/* Register address for bitfield et_en{F} */
+#define rpf_et_en_f_adr(filter) (0x00005300 + (filter) * 0x4)
+/* Bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_msk 0x80000000
+/* Inverted bitmask for bitfield et_en{F} */
+#define rpf_et_en_f_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield et_en{F} */
+#define rpf_et_en_f_shift 31
+/* Width of bitfield et_en{F} */
+#define rpf_et_en_f_width 1
+/* Default value of bitfield et_en{F} */
+#define rpf_et_en_f_default 0x0
+
+/* rx et_en{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_en{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_en_i[0]"
+ */
+
+/* register address for bitfield et_en{f} */
+#define rpf_et_enf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_en{f} */
+#define rpf_et_enf_msk 0x80000000
+/* inverted bitmask for bitfield et_en{f} */
+#define rpf_et_enf_mskn 0x7fffffff
+/* lower bit position of bitfield et_en{f} */
+#define rpf_et_enf_shift 31
+/* width of bitfield et_en{f} */
+#define rpf_et_enf_width 1
+/* default value of bitfield et_en{f} */
+#define rpf_et_enf_default 0x0
+
+/* rx et_up{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up_en_i[0]"
+ */
+
+/* register address for bitfield et_up{f}_en */
+#define rpf_et_upfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_msk 0x40000000
+/* inverted bitmask for bitfield et_up{f}_en */
+#define rpf_et_upfen_mskn 0xbfffffff
+/* lower bit position of bitfield et_up{f}_en */
+#define rpf_et_upfen_shift 30
+/* width of bitfield et_up{f}_en */
+#define rpf_et_upfen_width 1
+/* default value of bitfield et_up{f}_en */
+#define rpf_et_upfen_default 0x0
+
+/* rx et_rxq{f}_en bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}_en".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq_en_i[0]"
+ */
+
+/* register address for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_msk 0x20000000
+/* inverted bitmask for bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_mskn 0xdfffffff
+/* lower bit position of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_shift 29
+/* width of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_width 1
+/* default value of bitfield et_rxq{f}_en */
+#define rpf_et_rxqfen_default 0x0
+
+/* rx et_up{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_up{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_up0_i[2:0]"
+ */
+
+/* register address for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_msk 0x1c000000
+/* inverted bitmask for bitfield et_up{f}[2:0] */
+#define rpf_et_upf_mskn 0xe3ffffff
+/* lower bit position of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_shift 26
+/* width of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_width 3
+/* default value of bitfield et_up{f}[2:0] */
+#define rpf_et_upf_default 0x0
+
+/* rx et_rxq{f}[4:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_rxq{f}[4:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_rxq0_i[4:0]"
+ */
+
+/* register address for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_msk 0x01f00000
+/* inverted bitmask for bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_mskn 0xfe0fffff
+/* lower bit position of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_shift 20
+/* width of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_width 5
+/* default value of bitfield et_rxq{f}[4:0] */
+#define rpf_et_rxqf_default 0x0
+
+/* rx et_mng_rxq{f} bitfield definitions
+ * preprocessor definitions for the bitfield "et_mng_rxq{f}".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_mng_rxq_i[0]"
+ */
+
+/* register address for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_msk 0x00080000
+/* inverted bitmask for bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_mskn 0xfff7ffff
+/* lower bit position of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_shift 19
+/* width of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_width 1
+/* default value of bitfield et_mng_rxq{f} */
+#define rpf_et_mng_rxqf_default 0x0
+
+/* rx et_act{f}[2:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_act{f}[2:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_act0_i[2:0]"
+ */
+
+/* register address for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_msk 0x00070000
+/* inverted bitmask for bitfield et_act{f}[2:0] */
+#define rpf_et_actf_mskn 0xfff8ffff
+/* lower bit position of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_shift 16
+/* width of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_width 3
+/* default value of bitfield et_act{f}[2:0] */
+#define rpf_et_actf_default 0x0
+
+/* rx et_val{f}[f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "et_val{f}[f:0]".
+ * parameter: filter {f} | stride size 0x4 | range [0, 15]
+ * port="pif_rpf_et_val0_i[15:0]"
+ */
+
+/* register address for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_adr(filter) (0x00005300 + (filter) * 0x4)
+/* bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_msk 0x0000ffff
+/* inverted bitmask for bitfield et_val{f}[f:0] */
+#define rpf_et_valf_mskn 0xffff0000
+/* lower bit position of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_shift 0
+/* width of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_width 16
+/* default value of bitfield et_val{f}[f:0] */
+#define rpf_et_valf_default 0x0
+
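+/* Sketch: all et_* fields of one EtherType filter live in a single register
+ * (0x5300 + f * 0x4). Steering EtherType 0x88F7 (example value) to RX queue
+ * q (hypothetical variable) can therefore be one write:
+ *
+ *	u32 et = rpf_et_enf_msk | rpf_et_rxqfen_msk;
+ *
+ *	et |= ((u32)q << rpf_et_rxqf_shift) & rpf_et_rxqf_msk;
+ *	et |= (0x88F7U << rpf_et_valf_shift) & rpf_et_valf_msk;
+ *	aq_hw_write_reg(hw, rpf_et_enf_adr(f), et);
+ */
+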
+/* rx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_rpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_adr 0x00005580
+/* bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define rpo_ipv4chk_en_default 0x0
+
+/* rx desc{d}_vl_strip bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_vl_strip".
+ * parameter: descriptor {d} | stride size 0x20 | range [0, 31]
+ * port="pif_rpo_desc_vl_strip_i[0]"
+ */
+
+/* register address for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_adr(descriptor) (0x00005b08 + (descriptor) * 0x20)
+/* bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_msk 0x20000000
+/* inverted bitmask for bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_mskn 0xdfffffff
+/* lower bit position of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_shift 29
+/* width of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_width 1
+/* default value of bitfield desc{d}_vl_strip */
+#define rpo_descdvl_strip_default 0x0
+
+/* rx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_rpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define rpol4chk_en_adr 0x00005580
+/* bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define rpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define rpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define rpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define rpol4chk_en_default 0x0
+
+/* rx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_rx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_adr 0x00005000
+/* bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define rx_reg_res_dsbl_default 0x1
+
+/* tx dca{d}_cpuid[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_cpuid[7:0]".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* register address for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_msk 0x000000ff
+/* inverted bitmask for bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_mskn 0xffffff00
+/* lower bit position of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_shift 0
+/* width of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_width 8
+/* default value of bitfield dca{d}_cpuid[7:0] */
+#define tdm_dcadcpuid_default 0x0
+
+/* tx lso_en[1f:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_en[1f:0]".
+ * port="pif_tdm_lso_en_i[31:0]"
+ */
+
+/* register address for bitfield lso_en[1f:0] */
+#define tdm_lso_en_adr 0x00007810
+/* bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_msk 0xffffffff
+/* inverted bitmask for bitfield lso_en[1f:0] */
+#define tdm_lso_en_mskn 0x00000000
+/* lower bit position of bitfield lso_en[1f:0] */
+#define tdm_lso_en_shift 0
+/* width of bitfield lso_en[1f:0] */
+#define tdm_lso_en_width 32
+/* default value of bitfield lso_en[1f:0] */
+#define tdm_lso_en_default 0x0
+
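+/* Sketch: lso_en is a 32-bit enable bitmap, presumably one bit per TX
+ * descriptor queue (an assumption based on the [31:0] port width), so
+ * enabling LSO on queue q would be:
+ *
+ *	u32 lso = aq_hw_read_reg(hw, tdm_lso_en_adr);
+ *
+ *	lso |= 1U << q;
+ *	aq_hw_write_reg(hw, tdm_lso_en_adr, lso);
+ */
+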
+/* tx dca_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca_en".
+ * port="pif_tdm_dca_en_i"
+ */
+
+/* register address for bitfield dca_en */
+#define tdm_dca_en_adr 0x00008480
+/* bitmask for bitfield dca_en */
+#define tdm_dca_en_msk 0x80000000
+/* inverted bitmask for bitfield dca_en */
+#define tdm_dca_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca_en */
+#define tdm_dca_en_shift 31
+/* width of bitfield dca_en */
+#define tdm_dca_en_width 1
+/* default value of bitfield dca_en */
+#define tdm_dca_en_default 0x1
+
+/* tx dca_mode[3:0] bitfield definitions
+ * preprocessor definitions for the bitfield "dca_mode[3:0]".
+ * port="pif_tdm_dca_mode_i[3:0]"
+ */
+
+/* register address for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_adr 0x00008480
+/* bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_msk 0x0000000f
+/* inverted bitmask for bitfield dca_mode[3:0] */
+#define tdm_dca_mode_mskn 0xfffffff0
+/* lower bit position of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_shift 0
+/* width of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_width 4
+/* default value of bitfield dca_mode[3:0] */
+#define tdm_dca_mode_default 0x0
+
+/* tx dca{d}_desc_en bitfield definitions
+ * preprocessor definitions for the bitfield "dca{d}_desc_en".
+ * parameter: dca {d} | stride size 0x4 | range [0, 31]
+ * port="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* register address for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_msk 0x80000000
+/* inverted bitmask for bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_mskn 0x7fffffff
+/* lower bit position of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_shift 31
+/* width of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_width 1
+/* default value of bitfield dca{d}_desc_en */
+#define tdm_dcaddesc_en_default 0x0
+
+/* tx desc{d}_en bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_en".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc_en_i[0]"
+ */
+
+/* register address for bitfield desc{d}_en */
+#define tdm_descden_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_en */
+#define tdm_descden_msk 0x80000000
+/* inverted bitmask for bitfield desc{d}_en */
+#define tdm_descden_mskn 0x7fffffff
+/* lower bit position of bitfield desc{d}_en */
+#define tdm_descden_shift 31
+/* width of bitfield desc{d}_en */
+#define tdm_descden_width 1
+/* default value of bitfield desc{d}_en */
+#define tdm_descden_default 0x0
+
+/* tx desc{d}_hd[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_hd[c:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* register address for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_adr(descriptor) (0x00007c0c + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_msk 0x00001fff
+/* inverted bitmask for bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_mskn 0xffffe000
+/* lower bit position of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_shift 0
+/* width of bitfield desc{d}_hd[c:0] */
+#define tdm_descdhd_width 13
+
+/* tx desc{d}_len[9:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_len[9:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* register address for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_adr(descriptor) (0x00007c08 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_msk 0x00001ff8
+/* inverted bitmask for bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_mskn 0xffffe007
+/* lower bit position of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_shift 3
+/* width of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_width 10
+/* default value of bitfield desc{d}_len[9:0] */
+#define tdm_descdlen_default 0x0
+
+/* tx int_desc_wrb_en bitfield definitions
+ * preprocessor definitions for the bitfield "int_desc_wrb_en".
+ * port="pif_tdm_int_desc_wrb_en_i"
+ */
+
+/* register address for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_adr 0x00007b40
+/* bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_msk 0x00000002
+/* inverted bitmask for bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_mskn 0xfffffffd
+/* lower bit position of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_shift 1
+/* width of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_width 1
+/* default value of bitfield int_desc_wrb_en */
+#define tdm_int_desc_wrb_en_default 0x0
+
+/* tx desc{d}_wrb_thresh[6:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc{d}_wrb_thresh[6:0]".
+ * parameter: descriptor {d} | stride size 0x40 | range [0, 31]
+ * port="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* register address for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_adr(descriptor) (0x00007c18 + (descriptor) * 0x40)
+/* bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_msk 0x00007f00
+/* inverted bitmask for bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_mskn 0xffff80ff
+/* lower bit position of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_shift 8
+/* width of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_width 7
+/* default value of bitfield desc{d}_wrb_thresh[6:0] */
+#define tdm_descdwrb_thresh_default 0x0
+
+/* tx lso_tcp_flag_first[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_first[b:0]".
+ * port="pif_thm_lso_tcp_flag_first_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_shift 0
+/* width of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_width 12
+/* default value of bitfield lso_tcp_flag_first[b:0] */
+#define thm_lso_tcp_flag_first_default 0x0
+
+/* tx lso_tcp_flag_last[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "lso_tcp_flag_last[b:0]".
+ * port="pif_thm_lso_tcp_flag_last_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_adr 0x00007824
+/* bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_msk 0x00000fff
+/* inverted bitmask for bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_mskn 0xfffff000
+/* lower bit position of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_shift 0
+/* width of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_width 12
+/* default value of bitfield lso_tcp_flag_last[b:0] */
+#define thm_lso_tcp_flag_last_default 0x0
+
+/* RX lro_rsc_max[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_rsc_max[1F:0]".
+ */
+
+/* Register address for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_adr 0x00005598
+/* Bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_mskn 0x00000000
+/* Lower bit position of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_shift 0
+/* Width of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_width 32
+/* Default value of bitfield lro_rsc_max[1F:0] */
+#define rpo_lro_rsc_max_default 0x0
+
+/* RX lro_en[1F:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_en[1F:0]".
+ * PORT="pif_rpo_lro_en_i[31:0]"
+ */
+
+/* Register address for bitfield lro_en[1F:0] */
+#define rpo_lro_en_adr 0x00005590
+/* Bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_msk 0xFFFFFFFF
+/* Inverted bitmask for bitfield lro_en[1F:0] */
+#define rpo_lro_en_mskn 0x00000000
+/* Lower bit position of bitfield lro_en[1F:0] */
+#define rpo_lro_en_shift 0
+/* Width of bitfield lro_en[1F:0] */
+#define rpo_lro_en_width 32
+/* Default value of bitfield lro_en[1F:0] */
+#define rpo_lro_en_default 0x0
+
+/* RX lro_ptopt_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ptopt_en".
+ * PORT="pif_rpo_lro_ptopt_en_i"
+ */
+
+/* Register address for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_adr 0x00005594
+/* Bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_msk 0x00008000
+/* Inverted bitmask for bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_mskn 0xFFFF7FFF
+/* Lower bit position of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_shift 15
+/* Width of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_width 1
+/* Default value of bitfield lro_ptopt_en */
+#define rpo_lro_ptopt_en_default 0x1
+
+/* RX lro_q_ses_lmt Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_q_ses_lmt".
+ * PORT="pif_rpo_lro_q_ses_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_msk 0x00003000
+/* Inverted bitmask for bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_mskn 0xFFFFCFFF
+/* Lower bit position of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_shift 12
+/* Width of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_width 2
+/* Default value of bitfield lro_q_ses_lmt */
+#define rpo_lro_qses_lmt_default 0x1
+
+/* RX lro_tot_dsc_lmt[1:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tot_dsc_lmt[1:0]".
+ * PORT="pif_rpo_lro_tot_dsc_lmt_i[1:0]"
+ */
+
+/* Register address for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_adr 0x00005594
+/* Bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_msk 0x00000060
+/* Inverted bitmask for bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_mskn 0xFFFFFF9F
+/* Lower bit position of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_shift 5
+/* Width of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_width 2
+/* Default value of bitfield lro_tot_dsc_lmt[1:0] */
+#define rpo_lro_tot_dsc_lmt_default 0x1
+
+/* RX lro_pkt_min[4:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_pkt_min[4:0]".
+ * PORT="pif_rpo_lro_pkt_min_i[4:0]"
+ */
+
+/* Register address for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_adr 0x00005594
+/* Bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_msk 0x0000001F
+/* Inverted bitmask for bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_mskn 0xFFFFFFE0
+/* Lower bit position of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_shift 0
+/* Width of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_width 5
+/* Default value of bitfield lro_pkt_min[4:0] */
+#define rpo_lro_pkt_min_default 0x8
+
+/* Width of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_width 2
+/* Default value of bitfield lro{L}_des_max[1:0] */
+#define rpo_lro_ldes_max_default 0x0
+
+/* RX lro_tb_div[11:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_tb_div[11:0]".
+ * PORT="pif_rpo_lro_tb_div_i[11:0]"
+ */
+
+/* Register address for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_adr 0x00005620
+/* Bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_msk 0xFFF00000
+/* Inverted bitmask for bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_mskn 0x000FFFFF
+/* Lower bit position of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_shift 20
+/* Width of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_width 12
+/* Default value of bitfield lro_tb_div[11:0] */
+#define rpo_lro_tb_div_default 0xC35
+
+/* RX lro_ina_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_ina_ival[9:0]".
+ * PORT="pif_rpo_lro_ina_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_adr 0x00005620
+/* Bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_msk 0x000FFC00
+/* Inverted bitmask for bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_mskn 0xFFF003FF
+/* Lower bit position of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_shift 10
+/* Width of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_width 10
+/* Default value of bitfield lro_ina_ival[9:0] */
+#define rpo_lro_ina_ival_default 0xA
+
+/* RX lro_max_ival[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lro_max_ival[9:0]".
+ * PORT="pif_rpo_lro_max_ival_i[9:0]"
+ */
+
+/* Register address for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_adr 0x00005620
+/* Bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_msk 0x000003FF
+/* Inverted bitmask for bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_mskn 0xFFFFFC00
+/* Lower bit position of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_shift 0
+/* Width of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_width 10
+/* Default value of bitfield lro_max_ival[9:0] */
+#define rpo_lro_max_ival_default 0x19
+
+/* TX dca{D}_cpuid[7:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_cpuid[7:0]".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca0_cpuid_i[7:0]"
+ */
+
+/* Register address for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_msk 0x000000FF
+/* Inverted bitmask for bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_mskn 0xFFFFFF00
+/* Lower bit position of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_shift 0
+/* Width of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_width 8
+/* Default value of bitfield dca{D}_cpuid[7:0] */
+#define tdm_dca_dcpuid_default 0x0
+
+/* TX dca{D}_desc_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "dca{D}_desc_en".
+ * Parameter: DCA {D} | stride size 0x4 | range [0, 31]
+ * PORT="pif_tdm_dca_desc_en_i[0]"
+ */
+
+/* Register address for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_adr(dca) (0x00008400 + (dca) * 0x4)
+/* Bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_msk 0x80000000
+/* Inverted bitmask for bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_shift 31
+/* Width of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_width 1
+/* Default value of bitfield dca{D}_desc_en */
+#define tdm_dca_ddesc_en_default 0x0
+
+/* TX desc{D}_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_en".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc_en_i[0]"
+ */
+
+/* Register address for bitfield desc{D}_en */
+#define tdm_desc_den_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_msk 0x80000000
+/* Inverted bitmask for bitfield desc{D}_en */
+#define tdm_desc_den_mskn 0x7FFFFFFF
+/* Lower bit position of bitfield desc{D}_en */
+#define tdm_desc_den_shift 31
+/* Width of bitfield desc{D}_en */
+#define tdm_desc_den_width 1
+/* Default value of bitfield desc{D}_en */
+#define tdm_desc_den_default 0x0
+
+/* TX desc{D}_hd[C:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_hd[C:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="tdm_pif_desc0_hd_o[12:0]"
+ */
+
+/* Register address for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_adr(descriptor) (0x00007C0C + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_msk 0x00001FFF
+/* Inverted bitmask for bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_mskn 0xFFFFE000
+/* Lower bit position of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_shift 0
+/* Width of bitfield desc{D}_hd[C:0] */
+#define tdm_desc_dhd_width 13
+
+/* TX desc{D}_len[9:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_len[9:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_len_i[9:0]"
+ */
+
+/* Register address for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_adr(descriptor) (0x00007C08 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_msk 0x00001FF8
+/* Inverted bitmask for bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_mskn 0xFFFFE007
+/* Lower bit position of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_shift 3
+/* Width of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_width 10
+/* Default value of bitfield desc{D}_len[9:0] */
+#define tdm_desc_dlen_default 0x0
+
+/* TX desc{D}_wrb_thresh[6:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "desc{D}_wrb_thresh[6:0]".
+ * Parameter: descriptor {D} | stride size 0x40 | range [0, 31]
+ * PORT="pif_tdm_desc0_wrb_thresh_i[6:0]"
+ */
+
+/* Register address for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_adr(descriptor) \
+ (0x00007C18 + (descriptor) * 0x40)
+/* Bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_msk 0x00007F00
+/* Inverted bitmask for bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_mskn 0xFFFF80FF
+/* Lower bit position of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_shift 8
+/* Width of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_width 7
+/* Default value of bitfield desc{D}_wrb_thresh[6:0] */
+#define tdm_desc_dwrb_thresh_default 0x0
+
+/* TX tdm_int_mod_en Bitfield Definitions
+ * Preprocessor definitions for the bitfield "tdm_int_mod_en".
+ * PORT="pif_tdm_int_mod_en_i"
+ */
+
+/* Register address for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_adr 0x00007B40
+/* Bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_msk 0x00000010
+/* Inverted bitmask for bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_mskn 0xFFFFFFEF
+/* Lower bit position of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_shift 4
+/* Width of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_width 1
+/* Default value of bitfield tdm_int_mod_en */
+#define tdm_int_mod_en_default 0x0
+
+/* TX lso_tcp_flag_mid[B:0] Bitfield Definitions
+ * Preprocessor definitions for the bitfield "lso_tcp_flag_mid[B:0]".
+ * PORT="pif_thm_lso_tcp_flag_mid_i[11:0]"
+ */
+
+/* register address for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_adr 0x00007820
+/* bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_msk 0x0fff0000
+/* inverted bitmask for bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_mskn 0xf000ffff
+/* lower bit position of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_shift 16
+/* width of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_width 12
+/* default value of bitfield lso_tcp_flag_mid[b:0] */
+#define thm_lso_tcp_flag_mid_default 0x0
+
+/* tx tx_buf_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_buf_en".
+ * port="pif_tpb_tx_buf_en_i"
+ */
+
+/* register address for bitfield tx_buf_en */
+#define tpb_tx_buf_en_adr 0x00007900
+/* bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_msk 0x00000001
+/* inverted bitmask for bitfield tx_buf_en */
+#define tpb_tx_buf_en_mskn 0xfffffffe
+/* lower bit position of bitfield tx_buf_en */
+#define tpb_tx_buf_en_shift 0
+/* width of bitfield tx_buf_en */
+#define tpb_tx_buf_en_width 1
+/* default value of bitfield tx_buf_en */
+#define tpb_tx_buf_en_default 0x0
+
+/* tx tx{b}_hi_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_hi_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_hi_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_msk 0x1fff0000
+/* inverted bitmask for bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_mskn 0xe000ffff
+/* lower bit position of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_shift 16
+/* width of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_width 13
+/* default value of bitfield tx{b}_hi_thresh[c:0] */
+#define tpb_txbhi_thresh_default 0x0
+
+/* tx tx{b}_lo_thresh[c:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_lo_thresh[c:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_lo_thresh_i[12:0]"
+ */
+
+/* register address for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_adr(buffer) (0x00007914 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_msk 0x00001fff
+/* inverted bitmask for bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_mskn 0xffffe000
+/* lower bit position of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_shift 0
+/* width of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_width 13
+/* default value of bitfield tx{b}_lo_thresh[c:0] */
+#define tpb_txblo_thresh_default 0x0
+
+/* tx dma_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "dma_sys_loopback".
+ * port="pif_tpb_dma_sys_lbk_i"
+ */
+
+/* register address for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_adr 0x00007000
+/* bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_msk 0x00000040
+/* inverted bitmask for bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_mskn 0xffffffbf
+/* lower bit position of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_shift 6
+/* width of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_width 1
+/* default value of bitfield dma_sys_loopback */
+#define tpb_dma_sys_lbk_default 0x0
+
+/* tx tx{b}_buf_size[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "tx{b}_buf_size[7:0]".
+ * parameter: buffer {b} | stride size 0x10 | range [0, 7]
+ * port="pif_tpb_tx0_buf_size_i[7:0]"
+ */
+
+/* register address for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_adr(buffer) (0x00007910 + (buffer) * 0x10)
+/* bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_msk 0x000000ff
+/* inverted bitmask for bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_mskn 0xffffff00
+/* lower bit position of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_shift 0
+/* width of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_width 8
+/* default value of bitfield tx{b}_buf_size[7:0] */
+#define tpb_txbbuf_size_default 0x0
+
+/* tx tx_scp_ins_en bitfield definitions
+ * preprocessor definitions for the bitfield "tx_scp_ins_en".
+ * port="pif_tpb_scp_ins_en_i"
+ */
+
+/* register address for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_adr 0x00007900
+/* bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_msk 0x00000004
+/* inverted bitmask for bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_mskn 0xfffffffb
+/* lower bit position of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_shift 2
+/* width of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_width 1
+/* default value of bitfield tx_scp_ins_en */
+#define tpb_tx_scp_ins_en_default 0x0
+
+/* tx ipv4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "ipv4_chk_en".
+ * port="pif_tpo_ipv4_chk_en_i"
+ */
+
+/* register address for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_adr 0x00007800
+/* bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_msk 0x00000002
+/* inverted bitmask for bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_mskn 0xfffffffd
+/* lower bit position of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_shift 1
+/* width of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_width 1
+/* default value of bitfield ipv4_chk_en */
+#define tpo_ipv4chk_en_default 0x0
+
+/* tx l4_chk_en bitfield definitions
+ * preprocessor definitions for the bitfield "l4_chk_en".
+ * port="pif_tpo_l4_chk_en_i"
+ */
+
+/* register address for bitfield l4_chk_en */
+#define tpol4chk_en_adr 0x00007800
+/* bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_msk 0x00000001
+/* inverted bitmask for bitfield l4_chk_en */
+#define tpol4chk_en_mskn 0xfffffffe
+/* lower bit position of bitfield l4_chk_en */
+#define tpol4chk_en_shift 0
+/* width of bitfield l4_chk_en */
+#define tpol4chk_en_width 1
+/* default value of bitfield l4_chk_en */
+#define tpol4chk_en_default 0x0
+
+/* tx pkt_sys_loopback bitfield definitions
+ * preprocessor definitions for the bitfield "pkt_sys_loopback".
+ * port="pif_tpo_pkt_sys_lbk_i"
+ */
+
+/* register address for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_adr 0x00007000
+/* bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_msk 0x00000080
+/* inverted bitmask for bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_mskn 0xffffff7f
+/* lower bit position of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_shift 7
+/* width of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_width 1
+/* default value of bitfield pkt_sys_loopback */
+#define tpo_pkt_sys_lbk_default 0x0
+
+/* tx data_tc_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc_arb_mode".
+ * port="pif_tps_data_tc_arb_mode_i"
+ */
+
+/* register address for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_adr 0x00007100
+/* bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_shift 0
+/* width of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_width 1
+/* default value of bitfield data_tc_arb_mode */
+#define tps_data_tc_arb_mode_default 0x0
+
+/* tx desc_rate_ta_rst bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_ta_rst".
+ * port="pif_tps_desc_rate_ta_rst_i"
+ */
+
+/* register address for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_adr 0x00007310
+/* bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_msk 0x80000000
+/* inverted bitmask for bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_mskn 0x7fffffff
+/* lower bit position of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_shift 31
+/* width of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_width 1
+/* default value of bitfield desc_rate_ta_rst */
+#define tps_desc_rate_ta_rst_default 0x0
+
+/* tx desc_rate_limit[a:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_rate_limit[a:0]".
+ * port="pif_tps_desc_rate_lim_i[10:0]"
+ */
+
+/* register address for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_adr 0x00007310
+/* bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_msk 0x000007ff
+/* inverted bitmask for bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_mskn 0xfffff800
+/* lower bit position of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_shift 0
+/* width of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_width 11
+/* default value of bitfield desc_rate_limit[a:0] */
+#define tps_desc_rate_lim_default 0x0
+
+/* tx desc_tc_arb_mode[1:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc_arb_mode[1:0]".
+ * port="pif_tps_desc_tc_arb_mode_i[1:0]"
+ */
+
+/* register address for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_adr 0x00007200
+/* bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_msk 0x00000003
+/* inverted bitmask for bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_mskn 0xfffffffc
+/* lower bit position of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_shift 0
+/* width of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_width 2
+/* default value of bitfield desc_tc_arb_mode[1:0] */
+#define tps_desc_tc_arb_mode_default 0x0
+
+/* tx desc_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_shift 16
+/* width of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_width 12
+/* default value of bitfield desc_tc{t}_credit_max[b:0] */
+#define tps_desc_tctcredit_max_default 0x0
+
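A quick worked example of the {t}-parameterized stride macro above: for
traffic class 3 the address resolves to 0x00007210 + 3 * 0x4 = 0x0000721c,
i.e.

	u32 addr = tps_desc_tctcredit_max_adr(3);	/* 0x0000721cU */
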
+/* tx desc_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "desc_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_desc_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_adr(tc) (0x00007210 + (tc) * 0x4)
+/* bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_shift 0
+/* width of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_width 9
+/* default value of bitfield desc_tc{t}_weight[8:0] */
+#define tps_desc_tctweight_default 0x0
+
+/* tx desc_vm_arb_mode bitfield definitions
+ * preprocessor definitions for the bitfield "desc_vm_arb_mode".
+ * port="pif_tps_desc_vm_arb_mode_i"
+ */
+
+/* register address for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_adr 0x00007300
+/* bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_msk 0x00000001
+/* inverted bitmask for bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_mskn 0xfffffffe
+/* lower bit position of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_shift 0
+/* width of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_width 1
+/* default value of bitfield desc_vm_arb_mode */
+#define tps_desc_vm_arb_mode_default 0x0
+
+/* tx data_tc{t}_credit_max[b:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_credit_max[b:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_credit_max_i[11:0]"
+ */
+
+/* register address for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_msk 0x0fff0000
+/* inverted bitmask for bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_mskn 0xf000ffff
+/* lower bit position of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_shift 16
+/* width of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_width 12
+/* default value of bitfield data_tc{t}_credit_max[b:0] */
+#define tps_data_tctcredit_max_default 0x0
+
+/* tx data_tc{t}_weight[8:0] bitfield definitions
+ * preprocessor definitions for the bitfield "data_tc{t}_weight[8:0]".
+ * parameter: tc {t} | stride size 0x4 | range [0, 7]
+ * port="pif_tps_data_tc0_weight_i[8:0]"
+ */
+
+/* register address for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_adr(tc) (0x00007110 + (tc) * 0x4)
+/* bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_msk 0x000001ff
+/* inverted bitmask for bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_mskn 0xfffffe00
+/* lower bit position of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_shift 0
+/* width of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_width 9
+/* default value of bitfield data_tc{t}_weight[8:0] */
+#define tps_data_tctweight_default 0x0
+
+/* tx reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_tx_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_adr 0x00007000
+/* bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define tx_reg_res_dsbl_default 0x1
+
+/* mac_phy register access busy bitfield definitions
+ * preprocessor definitions for the bitfield "register access busy".
+ * port="msm_pif_reg_busy_o"
+ */
+
+/* register address for bitfield register access busy */
+#define msm_reg_access_busy_adr 0x00004400
+/* bitmask for bitfield register access busy */
+#define msm_reg_access_busy_msk 0x00001000
+/* inverted bitmask for bitfield register access busy */
+#define msm_reg_access_busy_mskn 0xffffefff
+/* lower bit position of bitfield register access busy */
+#define msm_reg_access_busy_shift 12
+/* width of bitfield register access busy */
+#define msm_reg_access_busy_width 1
+
+/* mac_phy msm register address[7:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register address[7:0]".
+ * port="pif_msm_reg_addr_i[7:0]"
+ */
+
+/* register address for bitfield msm register address[7:0] */
+#define msm_reg_addr_adr 0x00004400
+/* bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_msk 0x000000ff
+/* inverted bitmask for bitfield msm register address[7:0] */
+#define msm_reg_addr_mskn 0xffffff00
+/* lower bit position of bitfield msm register address[7:0] */
+#define msm_reg_addr_shift 0
+/* width of bitfield msm register address[7:0] */
+#define msm_reg_addr_width 8
+/* default value of bitfield msm register address[7:0] */
+#define msm_reg_addr_default 0x0
+
+/* mac_phy register read strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register read strobe".
+ * port="pif_msm_reg_rden_i"
+ */
+
+/* register address for bitfield register read strobe */
+#define msm_reg_rd_strobe_adr 0x00004400
+/* bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_msk 0x00000200
+/* inverted bitmask for bitfield register read strobe */
+#define msm_reg_rd_strobe_mskn 0xfffffdff
+/* lower bit position of bitfield register read strobe */
+#define msm_reg_rd_strobe_shift 9
+/* width of bitfield register read strobe */
+#define msm_reg_rd_strobe_width 1
+/* default value of bitfield register read strobe */
+#define msm_reg_rd_strobe_default 0x0
+
+/* mac_phy msm register read data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register read data[31:0]".
+ * port="msm_pif_reg_rd_data_o[31:0]"
+ */
+
+/* register address for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_adr 0x00004408
+/* bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_mskn 0x00000000
+/* lower bit position of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_shift 0
+/* width of bitfield msm register read data[31:0] */
+#define msm_reg_rd_data_width 32
+
+/* mac_phy msm register write data[31:0] bitfield definitions
+ * preprocessor definitions for the bitfield "msm register write data[31:0]".
+ * port="pif_msm_reg_wr_data_i[31:0]"
+ */
+
+/* register address for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_adr 0x00004404
+/* bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_msk 0xffffffff
+/* inverted bitmask for bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_mskn 0x00000000
+/* lower bit position of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_shift 0
+/* width of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_width 32
+/* default value of bitfield msm register write data[31:0] */
+#define msm_reg_wr_data_default 0x0
+
+/* mac_phy register write strobe bitfield definitions
+ * preprocessor definitions for the bitfield "register write strobe".
+ * port="pif_msm_reg_wren_i"
+ */
+
+/* register address for bitfield register write strobe */
+#define msm_reg_wr_strobe_adr 0x00004400
+/* bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_msk 0x00000100
+/* inverted bitmask for bitfield register write strobe */
+#define msm_reg_wr_strobe_mskn 0xfffffeff
+/* lower bit position of bitfield register write strobe */
+#define msm_reg_wr_strobe_shift 8
+/* width of bitfield register write strobe */
+#define msm_reg_wr_strobe_width 1
+/* default value of bitfield register write strobe */
+#define msm_reg_wr_strobe_default 0x0
+
+/* mif soft reset bitfield definitions
+ * preprocessor definitions for the bitfield "soft reset".
+ * port="pif_glb_res_i"
+ */
+
+/* register address for bitfield soft reset */
+#define glb_soft_res_adr 0x00000000
+/* bitmask for bitfield soft reset */
+#define glb_soft_res_msk 0x00008000
+/* inverted bitmask for bitfield soft reset */
+#define glb_soft_res_mskn 0xffff7fff
+/* lower bit position of bitfield soft reset */
+#define glb_soft_res_shift 15
+/* width of bitfield soft reset */
+#define glb_soft_res_width 1
+/* default value of bitfield soft reset */
+#define glb_soft_res_default 0x0
+
+/* mif register reset disable bitfield definitions
+ * preprocessor definitions for the bitfield "register reset disable".
+ * port="pif_glb_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield register reset disable */
+#define glb_reg_res_dis_adr 0x00000000
+/* bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_msk 0x00004000
+/* inverted bitmask for bitfield register reset disable */
+#define glb_reg_res_dis_mskn 0xffffbfff
+/* lower bit position of bitfield register reset disable */
+#define glb_reg_res_dis_shift 14
+/* width of bitfield register reset disable */
+#define glb_reg_res_dis_width 1
+/* default value of bitfield register reset disable */
+#define glb_reg_res_dis_default 0x1
+
+/* tx dma debug control definitions */
+#define tx_dma_debug_ctl_adr 0x00008920u
+
+/* tx dma descriptor base address msw definitions */
+#define tx_dma_desc_base_addrmsw_adr(descriptor) \
+ (0x00007c04u + (descriptor) * 0x40)
+
+/* tx interrupt moderation control register definitions
+ * Preprocessor definitions for TX Interrupt Moderation Control Register
+ * Base Address: 0x00008980
+ * Parameter: queue {Q} | stride size 0x4 | range [0, 31]
+ */
+
+#define tx_intr_moderation_ctl_adr(queue) (0x00008980u + (queue) * 0x4)
+
+/* pcie reg_res_dsbl bitfield definitions
+ * preprocessor definitions for the bitfield "reg_res_dsbl".
+ * port="pif_pci_reg_res_dsbl_i"
+ */
+
+/* register address for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_adr 0x00001000
+/* bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_msk 0x20000000
+/* inverted bitmask for bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_mskn 0xdfffffff
+/* lower bit position of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_shift 29
+/* width of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_width 1
+/* default value of bitfield reg_res_dsbl */
+#define pci_reg_res_dsbl_default 0x1
+
+/* global microprocessor scratch pad definitions */
+#define glb_cpu_scratch_scp_adr(scratch_scp) (0x00000300u + (scratch_scp) * 0x4)
+
+#endif /* HW_ATL_LLH_INTERNAL_H */
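
Every bitfield above follows the same adr/msk/mskn/shift/width pattern,
which supports read-modify-write access. A minimal sketch of the intended
use, taking ipv4_chk_en as an example (the helper name is hypothetical;
the aq_hw_read_reg()/aq_hw_write_reg() accessors are the ones used in
hw_atl_utils.c below):

	static inline void tpo_ipv4chk_en_set(struct aq_hw_s *self, u32 val)
	{
		u32 reg = aq_hw_read_reg(self, tpo_ipv4chk_en_adr);

		/* clear the field, then insert the new value */
		reg &= tpo_ipv4chk_en_mskn;
		reg |= (val << tpo_ipv4chk_en_shift) & tpo_ipv4chk_en_msk;
		aq_hw_write_reg(self, tpo_ipv4chk_en_adr, reg);
	}

Note that mskn is by construction the bitwise inverse of msk, so the two
masks always satisfy msk | mskn == 0xffffffff.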
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
new file mode 100644
index 0000000..8d6d8f5
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -0,0 +1,570 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.c: Definition of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#include "../aq_hw.h"
+#include "../aq_hw_utils.h"
+#include "../aq_pci_func.h"
+#include "../aq_ring.h"
+#include "../aq_vec.h"
+#include "hw_atl_utils.h"
+#include "hw_atl_llh.h"
+
+#include <linux/random.h>
+
+#define HW_ATL_UCP_0X370_REG 0x0370U
+
+#define HW_ATL_FW_SM_RAM 0x2U
+#define HW_ATL_MPI_CONTROL_ADR 0x0368U
+#define HW_ATL_MPI_STATE_ADR 0x036CU
+
+#define HW_ATL_MPI_STATE_MSK 0x00FFU
+#define HW_ATL_MPI_STATE_SHIFT 0U
+#define HW_ATL_MPI_SPEED_MSK 0xFFFFU
+#define HW_ATL_MPI_SPEED_SHIFT 16U
+
+static int hw_atl_utils_fw_downld_dwords(struct aq_hw_s *self, u32 a,
+ u32 *p, u32 cnt)
+{
+ int err = 0;
+
+ AQ_HW_WAIT_FOR(reg_glb_cpu_sem_get(self,
+ HW_ATL_FW_SM_RAM) == 1U,
+ 1U, 10000U);
+
+ if (err < 0) {
+ bool is_locked;
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+ is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+ }
+
+ aq_hw_write_reg(self, 0x00000208U, a);
+
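+	/* transfer 'cnt' dwords: kick each read via register 0x200 and
+	 * poll its bit 8 before reading the data word from 0x20C
+	 */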
+ for (++cnt; --cnt;) {
+ u32 i = 0U;
+
+ aq_hw_write_reg(self, 0x00000200U, 0x00008000U);
+
+ for (i = 1024U;
+ (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+ }
+
+ *(p++) = aq_hw_read_reg(self, 0x0000020CU);
+ }
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
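
AQ_HW_WAIT_FOR() is declared in one of the aq_hw headers included above,
not in this file; inferred from its call sites here (and the "check 10
times by 1ms" comment further down), it polls a condition a bounded number
of times with a fixed microsecond delay and reports a timeout through the
caller's local 'err'. A rough sketch of that shape, under those
assumptions and using udelay() from linux/delay.h:

	/* poll _EXP_ up to _N_ times, _US_ microseconds apart; on timeout
	 * set the caller's local 'err' variable to -ETIME
	 */
	#define AQ_HW_WAIT_FOR_SKETCH(_EXP_, _US_, _N_)		\
	do {							\
		unsigned int i__;				\
		for (i__ = _N_; !(_EXP_) && i__; --i__)		\
			udelay(_US_);				\
		if (!i__)					\
			err = -ETIME;				\
	} while (0)
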
+
+static int hw_atl_utils_fw_upload_dwords(struct aq_hw_s *self, u32 a, u32 *p,
+ u32 cnt)
+{
+ int err = 0;
+ bool is_locked;
+
+ is_locked = reg_glb_cpu_sem_get(self, HW_ATL_FW_SM_RAM);
+ if (!is_locked) {
+ err = -ETIME;
+ goto err_exit;
+ }
+
+ aq_hw_write_reg(self, 0x00000208U, a);
+
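+	/* push 'cnt' dwords: write each data word to register 0x20C, then
+	 * kick the write via register 0x200 and poll its bit 8
+	 */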
+ for (++cnt; --cnt;) {
+ u32 i = 0U;
+
+ aq_hw_write_reg(self, 0x0000020CU, *(p++));
+ aq_hw_write_reg(self, 0x00000200U, 0xC000U);
+
+ for (i = 1024U;
+ (0x100U & aq_hw_read_reg(self, 0x00000200U)) && --i;) {
+ }
+ }
+
+ reg_glb_cpu_sem_set(self, 1U, HW_ATL_FW_SM_RAM);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual)
+{
+ int err = 0;
+ const u32 dw_major_mask = 0xff000000U;
+ const u32 dw_minor_mask = 0x00ffffffU;
+
+ err = (dw_major_mask & (ver_expected ^ ver_actual)) ? -EOPNOTSUPP : 0;
+ if (err < 0)
+ goto err_exit;
+ err = ((dw_minor_mask & ver_expected) > (dw_minor_mask & ver_actual)) ?
+ -EOPNOTSUPP : 0;
+err_exit:
+ return err;
+}
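
The check above requires an exact match on the major byte and an actual
minor/build value that is at least the expected one. A few illustrative
calls (the version values are hypothetical):

	hw_atl_utils_ver_match(0x01050000U, 0x0105002aU); /* 0: newer minor ok */
	hw_atl_utils_ver_match(0x01050000U, 0x01040000U); /* -EOPNOTSUPP: older minor */
	hw_atl_utils_ver_match(0x01000000U, 0x02000000U); /* -EOPNOTSUPP: major differs */
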
+
+static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ int err = 0;
+
+ if (!aq_hw_read_reg(self, 0x370U)) {
+ unsigned int rnd = 0U;
+ unsigned int ucp_0x370 = 0U;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202U | (0xFEFEFEFEU & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ reg_glb_cpu_scratch_scp_set(self, 0x00000000U, 25U);
+
+	/* poll up to 10 times, 1 ms apart */
+ AQ_HW_WAIT_FOR(0U != (PHAL_ATLANTIC_A0->mbox_addr =
+ aq_hw_read_reg(self, 0x360U)), 1000U, 10U);
+
+ err = hw_atl_utils_ver_match(aq_hw_caps->fw_ver_expected,
+ aq_hw_read_reg(self, 0x18U));
+ return err;
+}
+
+#define HW_ATL_RPC_CONTROL_ADR 0x0338U
+#define HW_ATL_RPC_STATE_ADR 0x033CU
+
+struct aq_hw_atl_utils_fw_rpc_tid_s {
+ union {
+ u32 val;
+ struct {
+ u16 tid;
+ u16 len;
+ };
+ };
+};
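
On a little-endian host the anonymous struct overlays 'val' so that the
transaction id occupies bits [15:0] and the length bits [31:16] of the RPC
control word:

	struct aq_hw_atl_utils_fw_rpc_tid_s sw;

	sw.tid = 0x0001U;
	sw.len = 0x0040U;
	/* little-endian: sw.val == 0x00400001U */
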
+
+#define hw_atl_utils_fw_rpc_init(_H_) hw_atl_utils_fw_rpc_wait(_H_, NULL)
+
+static int hw_atl_utils_fw_rpc_call(struct aq_hw_s *self, unsigned int rpc_size)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+
+ if (!IS_CHIP_FEATURE(MIPS)) {
+ err = -1;
+ goto err_exit;
+ }
+ err = hw_atl_utils_fw_upload_dwords(self, PHAL_ATLANTIC->rpc_addr,
+ (u32 *)(void *)&PHAL_ATLANTIC->rpc,
+ (rpc_size + sizeof(u32) -
+ sizeof(u8)) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ sw.tid = 0xFFFFU & (++PHAL_ATLANTIC->rpc_tid);
+ sw.len = (u16)rpc_size;
+ aq_hw_write_reg(self, HW_ATL_RPC_CONTROL_ADR, sw.val);
+
+err_exit:
+ return err;
+}
+
+static int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_fw_rpc **rpc)
+{
+ int err = 0;
+ struct aq_hw_atl_utils_fw_rpc_tid_s sw;
+ struct aq_hw_atl_utils_fw_rpc_tid_s fw;
+
+ do {
+ sw.val = aq_hw_read_reg(self, HW_ATL_RPC_CONTROL_ADR);
+
+ PHAL_ATLANTIC->rpc_tid = sw.tid;
+
+ AQ_HW_WAIT_FOR(sw.tid ==
+ (fw.val =
+ aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR),
+ fw.tid), 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+
+ if (fw.len == 0xFFFFU) {
+ err = hw_atl_utils_fw_rpc_call(self, sw.len);
+ if (err < 0)
+ goto err_exit;
+ }
+ } while (sw.tid != fw.tid || 0xFFFFU == fw.len);
+ if (err < 0)
+ goto err_exit;
+
+ if (rpc) {
+ if (fw.len) {
+ err =
+ hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->rpc_addr,
+ (u32 *)(void *)
+ &PHAL_ATLANTIC->rpc,
+ (fw.len + sizeof(u32) -
+ sizeof(u8)) /
+ sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+ }
+
+ *rpc = &PHAL_ATLANTIC->rpc;
+ }
+
+err_exit:
+ return err;
+}
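
Condensed, the mailbox RPC handshake implemented by the two functions
above looks like this (informal view, derived only from this code):

	/*
	 *   driver                              firmware
	 *   ------                              --------
	 *   upload request dwords to rpc_addr
	 *   RPC_CONTROL <- { ++tid, len }
	 *   poll RPC_STATE ...........          echoes tid when done
	 *   fw.len == 0xFFFF?  resend           (retransmit request)
	 *   else download fw.len dwords of the reply from rpc_addr
	 */
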
+
+static int hw_atl_utils_mpi_create(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps)
+{
+ int err = 0;
+
+ err = hw_atl_utils_init_ucp(self, aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ err = hw_atl_utils_fw_rpc_init(self);
+ if (err < 0)
+ goto err_exit;
+
+err_exit:
+ return err;
+}
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox)
+{
+ int err = 0;
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ PHAL_ATLANTIC->mbox_addr,
+ (u32 *)(void *)pmbox,
+ sizeof(*pmbox) / sizeof(u32));
+ if (err < 0)
+ goto err_exit;
+
+ if (pmbox != &PHAL_ATLANTIC->mbox)
+ memcpy(pmbox, &PHAL_ATLANTIC->mbox, sizeof(*pmbox));
+
+ if (IS_CHIP_FEATURE(REVISION_A0)) {
+ unsigned int mtu = self->aq_nic_cfg ?
+ self->aq_nic_cfg->mtu : 1514U;
+ pmbox->stats.ubrc = pmbox->stats.uprc * mtu;
+ pmbox->stats.ubtc = pmbox->stats.uptc * mtu;
+ pmbox->stats.dpc = atomic_read(&PHAL_ATLANTIC_A0->dpc);
+ } else {
+ pmbox->stats.dpc = reg_rx_dma_stat_counter7get(self);
+ }
+
+err_exit:;
+}
+
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+ enum hal_atl_utils_fw_state_e state)
+{
+ u32 ucp_0x368 = 0;
+
+ ucp_0x368 = (speed << HW_ATL_MPI_SPEED_SHIFT) | state;
+ aq_hw_write_reg(self, HW_ATL_MPI_CONTROL_ADR, ucp_0x368);
+
+ return 0;
+}
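
The MPI control word thus packs the requested state into its low byte and
the rate bitmask into the upper half. A worked example using the
definitions from hw_atl_utils.h: requesting MPI_INIT at 10G writes
(HAL_ATLANTIC_RATE_10G << HW_ATL_MPI_SPEED_SHIFT) | MPI_INIT ==
(0x1 << 16) | 0x2 == 0x00010002 into register 0x0368:

	hw_atl_utils_mpi_set_speed(self, HAL_ATLANTIC_RATE_10G, MPI_INIT);
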
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state, u32 speed)
+{
+ int err = 0;
+ u32 transaction_id = 0;
+
+ if (state == MPI_RESET) {
+ hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+ transaction_id = PHAL_ATLANTIC->mbox.transaction_id;
+
+ AQ_HW_WAIT_FOR(transaction_id !=
+ (hw_atl_utils_mpi_read_stats
+ (self, &PHAL_ATLANTIC->mbox),
+ PHAL_ATLANTIC->mbox.transaction_id),
+ 1000U, 100U);
+ if (err < 0)
+ goto err_exit;
+ }
+
+ err = hw_atl_utils_mpi_set_speed(self, speed, state);
+
+err_exit:;
+}
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status)
+{
+ u32 cp0x036C = aq_hw_read_reg(self, HW_ATL_MPI_STATE_ADR);
+ u32 link_speed_mask = cp0x036C >> HW_ATL_MPI_SPEED_SHIFT;
+
+ if (!link_speed_mask) {
+ link_status->mbps = 0U;
+ } else {
+ switch (link_speed_mask) {
+ case HAL_ATLANTIC_RATE_10G:
+ link_status->mbps = 10000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_5G:
+ case HAL_ATLANTIC_RATE_5GSR:
+ link_status->mbps = 5000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_2GS:
+ link_status->mbps = 2500U;
+ break;
+
+ case HAL_ATLANTIC_RATE_1G:
+ link_status->mbps = 1000U;
+ break;
+
+ case HAL_ATLANTIC_RATE_100M:
+ link_status->mbps = 100U;
+ break;
+
+ default:
+ link_status->mbps = 0U;
+ break;
+ }
+ }
+
+ return 0;
+}
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac)
+{
+ int err = 0;
+ u32 h = 0U;
+ u32 l = 0U;
+ u32 mac_addr[2];
+
+ self->mmio = aq_pci_func_get_mmio(self->aq_pci_func);
+
+ hw_atl_utils_hw_chip_features_init(self,
+ &PHAL_ATLANTIC_A0->chip_features);
+
+ err = hw_atl_utils_mpi_create(self, aq_hw_caps);
+ if (err < 0)
+ goto err_exit;
+
+ if (!aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG)) {
+ unsigned int rnd = 0;
+ unsigned int ucp_0x370 = 0;
+
+ get_random_bytes(&rnd, sizeof(unsigned int));
+
+ ucp_0x370 = 0x02020202 | (0xFEFEFEFE & rnd);
+ aq_hw_write_reg(self, HW_ATL_UCP_0X370_REG, ucp_0x370);
+ }
+
+ err = hw_atl_utils_fw_downld_dwords(self,
+ aq_hw_read_reg(self, 0x00000374U) +
+ (40U * 4U),
+ mac_addr,
+ AQ_DIMOF(mac_addr));
+ if (err < 0) {
+ mac_addr[0] = 0U;
+ mac_addr[1] = 0U;
+ err = 0;
+ } else {
+ mac_addr[0] = __swab32(mac_addr[0]);
+ mac_addr[1] = __swab32(mac_addr[1]);
+ }
+
+ ether_addr_copy(mac, (u8 *)mac_addr);
+
+ if ((mac[0] & 0x01U) || ((mac[0] | mac[1] | mac[2]) == 0x00U)) {
+		/* no valid MAC read back: synthesize a fallback address
+		 * seeded from the (randomized) register 0x370
+		 */
+ l = 0xE3000000U
+ | (0xFFFFU & aq_hw_read_reg(self, HW_ATL_UCP_0X370_REG))
+ | (0x00 << 16);
+ h = 0x8001300EU;
+
+ mac[5] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[4] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[3] = (u8)(0xFFU & l);
+ l >>= 8;
+ mac[2] = (u8)(0xFFU & l);
+ mac[1] = (u8)(0xFFU & h);
+ h >>= 8;
+ mac[0] = (u8)(0xFFU & h);
+ }
+
+err_exit:
+ return err;
+}
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps)
+{
+ unsigned int ret = 0U;
+
+ switch (mbps) {
+ case 100U:
+ ret = 5U;
+ break;
+
+ case 1000U:
+ ret = 4U;
+ break;
+
+ case 2500U:
+ ret = 3U;
+ break;
+
+ case 5000U:
+ ret = 1U;
+ break;
+
+ case 10000U:
+ ret = 0U;
+ break;
+
+ default:
+ break;
+ }
+ return ret;
+}
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p)
+{
+ u32 chip_features = 0U;
+ u32 val = reg_glb_mif_id_get(self);
+ u32 mif_rev = val & 0xFFU;
+
+ if ((3U & mif_rev) == 1U) {
+ chip_features |=
+ HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS;
+ } else if ((3U & mif_rev) == 2U) {
+ chip_features |=
+ HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 |
+ HAL_ATLANTIC_UTILS_CHIP_MPI_AQ |
+ HAL_ATLANTIC_UTILS_CHIP_MIPS |
+ HAL_ATLANTIC_UTILS_CHIP_TPO2 |
+ HAL_ATLANTIC_UTILS_CHIP_RPF2;
+ }
+
+ *p = chip_features;
+}
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self)
+{
+ hw_atl_utils_mpi_set(self, MPI_DEINIT, 0x0U);
+ return 0;
+}
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state)
+{
+ hw_atl_utils_mpi_set(self, MPI_POWER, 0x0U);
+ return 0;
+}
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+ u64 *data, unsigned int *p_count)
+{
+ struct hw_atl_stats_s *stats = NULL;
+ int i = 0;
+
+ hw_atl_utils_mpi_read_stats(self, &PHAL_ATLANTIC->mbox);
+
+ stats = &PHAL_ATLANTIC->mbox.stats;
+
+ data[i] = stats->uprc + stats->mprc + stats->bprc;
+ data[++i] = stats->uprc;
+ data[++i] = stats->mprc;
+ data[++i] = stats->bprc;
+ data[++i] = stats->erpt;
+ data[++i] = stats->uptc + stats->mptc + stats->bptc;
+ data[++i] = stats->uptc;
+ data[++i] = stats->mptc;
+ data[++i] = stats->bptc;
+ data[++i] = stats->ubrc;
+ data[++i] = stats->ubtc;
+ data[++i] = stats->mbrc;
+ data[++i] = stats->mbtc;
+ data[++i] = stats->bbrc;
+ data[++i] = stats->bbtc;
+ data[++i] = stats->ubrc + stats->mbrc + stats->bbrc;
+ data[++i] = stats->ubtc + stats->mbtc + stats->bbtc;
+ data[++i] = stats_rx_dma_good_pkt_counterlsw_get(self);
+ data[++i] = stats_tx_dma_good_pkt_counterlsw_get(self);
+ data[++i] = stats_rx_dma_good_octet_counterlsw_get(self);
+ data[++i] = stats_tx_dma_good_octet_counterlsw_get(self);
+ data[++i] = stats->dpc;
+
+ if (p_count)
+ *p_count = ++i;
+
+ return 0;
+}
+
+static const u32 hw_atl_utils_hw_mac_regs[] = {
+ 0x00005580U, 0x00005590U, 0x000055B0U, 0x000055B4U,
+ 0x000055C0U, 0x00005B00U, 0x00005B04U, 0x00005B08U,
+ 0x00005B0CU, 0x00005B10U, 0x00005B14U, 0x00005B18U,
+ 0x00005B1CU, 0x00005B20U, 0x00005B24U, 0x00005B28U,
+ 0x00005B2CU, 0x00005B30U, 0x00005B34U, 0x00005B38U,
+ 0x00005B3CU, 0x00005B40U, 0x00005B44U, 0x00005B48U,
+ 0x00005B4CU, 0x00005B50U, 0x00005B54U, 0x00005B58U,
+ 0x00005B5CU, 0x00005B60U, 0x00005B64U, 0x00005B68U,
+ 0x00005B6CU, 0x00005B70U, 0x00005B74U, 0x00005B78U,
+ 0x00005B7CU, 0x00007C00U, 0x00007C04U, 0x00007C08U,
+ 0x00007C0CU, 0x00007C10U, 0x00007C14U, 0x00007C18U,
+ 0x00007C1CU, 0x00007C20U, 0x00007C40U, 0x00007C44U,
+ 0x00007C48U, 0x00007C4CU, 0x00007C50U, 0x00007C54U,
+ 0x00007C58U, 0x00007C5CU, 0x00007C60U, 0x00007C80U,
+ 0x00007C84U, 0x00007C88U, 0x00007C8CU, 0x00007C90U,
+ 0x00007C94U, 0x00007C98U, 0x00007C9CU, 0x00007CA0U,
+ 0x00007CC0U, 0x00007CC4U, 0x00007CC8U, 0x00007CCCU,
+ 0x00007CD0U, 0x00007CD4U, 0x00007CD8U, 0x00007CDCU,
+ 0x00007CE0U, 0x00000300U, 0x00000304U, 0x00000308U,
+ 0x0000030cU, 0x00000310U, 0x00000314U, 0x00000318U,
+ 0x0000031cU, 0x00000360U, 0x00000364U, 0x00000368U,
+ 0x0000036cU, 0x00000370U, 0x00000374U, 0x00006900U,
+};
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff)
+{
+ unsigned int i = 0U;
+
+ for (i = 0; i < aq_hw_caps->mac_regs_count; i++)
+ regs_buff[i] = aq_hw_read_reg(self,
+ hw_atl_utils_hw_mac_regs[i]);
+ return 0;
+}
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version)
+{
+ *fw_version = aq_hw_read_reg(self, 0x18U);
+ return 0;
+}
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
new file mode 100644
index 0000000..b8e3d88
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.h
@@ -0,0 +1,210 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+/* File hw_atl_utils.h: Declaration of common functions for Atlantic hardware
+ * abstraction layer.
+ */
+
+#ifndef HW_ATL_UTILS_H
+#define HW_ATL_UTILS_H
+
+#include "../aq_common.h"
+
+#define HW_ATL_FLUSH() { (void)aq_hw_read_reg(self, 0x10); }
+
+struct __packed hw_atl_stats_s {
+ u32 uprc;
+ u32 mprc;
+ u32 bprc;
+ u32 erpt;
+ u32 uptc;
+ u32 mptc;
+ u32 bptc;
+ u32 erpr;
+ u32 mbtc;
+ u32 bbtc;
+ u32 mbrc;
+ u32 bbrc;
+ u32 ubrc;
+ u32 ubtc;
+ u32 dpc;
+};
+
+union __packed ip_addr {
+ struct {
+ u8 addr[16];
+ } v6;
+ struct {
+ u8 padding[12];
+ u8 addr[4];
+ } v4;
+};
+
+struct __packed hw_aq_atl_utils_fw_rpc {
+ u32 msg_id;
+
+ union {
+ struct {
+ u32 pong;
+ } msg_ping;
+
+ struct {
+ u8 mac_addr[6];
+ u32 ip_addr_cnt;
+
+ struct {
+ union ip_addr addr;
+ union ip_addr mask;
+ } ip[1];
+ } msg_arp;
+
+ struct {
+ u32 len;
+ u8 packet[1514U];
+ } msg_inject;
+
+ struct {
+ u32 priority;
+ u32 wol_packet_type;
+ u16 friendly_name_len;
+ u16 friendly_name[65];
+ u32 pattern_id;
+ u32 next_wol_pattern_offset;
+
+ union {
+ struct {
+ u32 flags;
+ u8 ipv4_source_address[4];
+ u8 ipv4_dest_address[4];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv4_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ u8 ipv6_source_address[16];
+ u8 ipv6_dest_address[16];
+ u16 tcp_source_port_number;
+ u16 tcp_dest_port_number;
+ } ipv6_tcp_syn_parameters;
+
+ struct {
+ u32 flags;
+ } eapol_request_id_message_parameters;
+
+ struct {
+ u32 flags;
+ u32 mask_offset;
+ u32 mask_size;
+ u32 pattern_offset;
+ u32 pattern_size;
+ } wol_bit_map_pattern;
+ } wol_pattern;
+ } msg_wol;
+
+ struct {
+ u32 is_wake_on_link_down;
+ u32 is_wake_on_link_up;
+ } msg_wolink;
+ };
+};
+
+struct __packed hw_aq_atl_utils_mbox {
+ u32 version;
+ u32 transaction_id;
+ int error;
+ struct hw_atl_stats_s stats;
+};
+
+struct __packed hw_atl_s {
+ struct aq_hw_s base;
+ struct hw_aq_atl_utils_mbox mbox;
+ u64 speed;
+ u32 itr_tx;
+ u32 itr_rx;
+ unsigned int chip_features;
+ u32 fw_ver_actual;
+ atomic_t dpc;
+ u32 mbox_addr;
+ u32 rpc_addr;
+ u32 rpc_tid;
+ struct hw_aq_atl_utils_fw_rpc rpc;
+};
+
+#define SELF ((struct hw_atl_s *)self)
+
+#define PHAL_ATLANTIC ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_A0 ((struct hw_atl_s *)((void *)(self)))
+#define PHAL_ATLANTIC_B0 ((struct hw_atl_s *)((void *)(self)))
+
+#define HAL_ATLANTIC_UTILS_CHIP_MIPS 0x00000001U
+#define HAL_ATLANTIC_UTILS_CHIP_TPO2 0x00000002U
+#define HAL_ATLANTIC_UTILS_CHIP_RPF2 0x00000004U
+#define HAL_ATLANTIC_UTILS_CHIP_MPI_AQ 0x00000010U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_A0 0x01000000U
+#define HAL_ATLANTIC_UTILS_CHIP_REVISION_B0 0x02000000U
+
+#define IS_CHIP_FEATURE(_F_) (HAL_ATLANTIC_UTILS_CHIP_##_F_ & \
+ PHAL_ATLANTIC->chip_features)
+
+enum hal_atl_utils_fw_state_e {
+ MPI_DEINIT = 0,
+ MPI_RESET = 1,
+ MPI_INIT = 2,
+ MPI_POWER = 4,
+};
+
+#define HAL_ATLANTIC_RATE_10G BIT(0)
+#define HAL_ATLANTIC_RATE_5G BIT(1)
+#define HAL_ATLANTIC_RATE_5GSR BIT(2)
+#define HAL_ATLANTIC_RATE_2GS BIT(3)
+#define HAL_ATLANTIC_RATE_1G BIT(4)
+#define HAL_ATLANTIC_RATE_100M BIT(5)
+#define HAL_ATLANTIC_RATE_INVALID BIT(6)
+
+void hw_atl_utils_hw_chip_features_init(struct aq_hw_s *self, u32 *p);
+
+void hw_atl_utils_mpi_read_stats(struct aq_hw_s *self,
+ struct hw_aq_atl_utils_mbox *pmbox);
+
+void hw_atl_utils_mpi_set(struct aq_hw_s *self,
+ enum hal_atl_utils_fw_state_e state,
+ u32 speed);
+
+int hw_atl_utils_mpi_set_speed(struct aq_hw_s *self, u32 speed,
+ enum hal_atl_utils_fw_state_e state);
+
+int hw_atl_utils_mpi_get_link_status(struct aq_hw_s *self,
+ struct aq_hw_link_status_s *link_status);
+
+int hw_atl_utils_get_mac_permanent(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u8 *mac);
+
+unsigned int hw_atl_utils_mbps_2_speed_index(unsigned int mbps);
+
+int hw_atl_utils_hw_get_regs(struct aq_hw_s *self,
+ struct aq_hw_caps_s *aq_hw_caps,
+ u32 *regs_buff);
+
+int hw_atl_utils_hw_get_settings(struct aq_hw_s *self,
+ struct ethtool_cmd *cmd);
+
+int hw_atl_utils_hw_set_power(struct aq_hw_s *self,
+ unsigned int power_state);
+
+int hw_atl_utils_hw_deinit(struct aq_hw_s *self);
+
+int hw_atl_utils_get_fw_version(struct aq_hw_s *self, u32 *fw_version);
+
+int hw_atl_utils_get_hw_stats(struct aq_hw_s *self,
+ u64 *data,
+ unsigned int *p_count);
+
+#endif /* HW_ATL_UTILS_H */
diff --git a/drivers/net/ethernet/aquantia/atlantic/ver.h b/drivers/net/ethernet/aquantia/atlantic/ver.h
new file mode 100644
index 0000000..0de858d
--- /dev/null
+++ b/drivers/net/ethernet/aquantia/atlantic/ver.h
@@ -0,0 +1,18 @@
+/*
+ * aQuantia Corporation Network Driver
+ * Copyright (C) 2014-2017 aQuantia Corporation. All rights reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#ifndef VER_H
+#define VER_H
+
+#define NIC_MAJOR_DRIVER_VERSION 1
+#define NIC_MINOR_DRIVER_VERSION 5
+#define NIC_BUILD_DRIVER_VERSION 345
+#define NIC_REVISION_DRIVER_VERSION 0
+
+#endif /* VER_H */
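
A hypothetical sketch (not part of this patch) of how the four components
could combine into a printable driver version string:

	char ver[32];

	snprintf(ver, sizeof(ver), "%d.%d.%d.%d",
		 NIC_MAJOR_DRIVER_VERSION, NIC_MINOR_DRIVER_VERSION,
		 NIC_BUILD_DRIVER_VERSION, NIC_REVISION_DRIVER_VERSION);
	/* -> "1.5.345.0" */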
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abc9f2a..2387339 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -275,7 +275,7 @@ static int arc_emac_poll(struct napi_struct *napi, int budget)
work_done = arc_emac_rx(ndev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
arc_reg_or(priv, R_ENABLE, RXINT_MASK | TXINT_MASK);
}
diff --git a/drivers/net/ethernet/atheros/alx/main.c b/drivers/net/ethernet/atheros/alx/main.c
index 4c80e06..6a27c26 100644
--- a/drivers/net/ethernet/atheros/alx/main.c
+++ b/drivers/net/ethernet/atheros/alx/main.c
@@ -311,7 +311,7 @@ static int alx_poll(struct napi_struct *napi, int budget)
if (!tx_complete || work == budget)
return budget;
- napi_complete(&np->napi);
+ napi_complete_done(&np->napi, work);
/* enable interrupt */
if (alx->flags & ALX_FLAG_USING_MSIX) {
@@ -685,8 +685,6 @@ static int alx_alloc_rings(struct alx_priv *alx)
return -ENOMEM;
}
- alx_reinit_rings(alx);
-
return 0;
}
@@ -703,7 +701,7 @@ static void alx_free_rings(struct alx_priv *alx)
if (alx->qnapi[0] && alx->qnapi[0]->rxq)
kfree(alx->qnapi[0]->rxq->bufs);
- if (!alx->descmem.virt)
+ if (alx->descmem.virt)
dma_free_coherent(&alx->hw.pdev->dev,
alx->descmem.size,
alx->descmem.virt,
@@ -984,6 +982,7 @@ static int alx_realloc_resources(struct alx_priv *alx)
alx_free_rings(alx);
alx_free_napis(alx);
alx_disable_advanced_intr(alx);
+ alx_init_intr(alx, false);
err = alx_alloc_napis(alx);
if (err)
@@ -1241,6 +1240,12 @@ static int __alx_open(struct alx_priv *alx, bool resume)
if (err)
goto out_free_rings;
+	/* must be called after alx_request_irq because the chip stops working
+	 * if we copy the dma addresses in alx_init_ring_ptrs twice when
+	 * the msi-x interrupt request fails
+	 */
+ alx_reinit_rings(alx);
+
netif_set_real_num_tx_queues(alx->dev, alx->num_txq);
netif_set_real_num_rx_queues(alx->dev, alx->num_rxq);
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 773d3b7..7e913d8 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -1892,7 +1892,7 @@ static int atl1c_clean(struct napi_struct *napi, int budget)
if (work_done < budget) {
quit_polling:
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
adapter->hw.intr_mask |= ISR_RX_PKT;
AT_WRITE_REG(&adapter->hw, REG_IMR, adapter->hw.intr_mask);
}
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
index e96091b6..ef003c5 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_main.c
@@ -1526,7 +1526,7 @@ static int atl1e_clean(struct napi_struct *napi, int budget)
/* If no Tx and not enough Rx work done, exit the polling mode */
if (work_done < budget) {
quit_polling:
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
imr_data = AT_READ_REG(&adapter->hw, REG_IMR);
AT_WRITE_REG(&adapter->hw, REG_IMR, imr_data | ISR_RX_EVENT);
/* test debug */
diff --git a/drivers/net/ethernet/atheros/atlx/atl1.c b/drivers/net/ethernet/atheros/atlx/atl1.c
index 7dad8e4..022772e 100644
--- a/drivers/net/ethernet/atheros/atlx/atl1.c
+++ b/drivers/net/ethernet/atheros/atlx/atl1.c
@@ -2457,7 +2457,7 @@ static int atl1_rings_clean(struct napi_struct *napi, int budget)
if (work_done >= budget)
return work_done;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* re-enable Interrupt */
if (likely(adapter->int_enabled))
atlx_imr_set(adapter, IMR_NORMAL_MASK);
diff --git a/drivers/net/ethernet/broadcom/b44.c b/drivers/net/ethernet/broadcom/b44.c
index 7aef70f..5b95bb4 100644
--- a/drivers/net/ethernet/broadcom/b44.c
+++ b/drivers/net/ethernet/broadcom/b44.c
@@ -902,7 +902,7 @@ static int b44_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
b44_enable_ints(bp);
}
diff --git a/drivers/net/ethernet/broadcom/bcm63xx_enet.c b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
index 3b14d51..0ee6e20 100644
--- a/drivers/net/ethernet/broadcom/bcm63xx_enet.c
+++ b/drivers/net/ethernet/broadcom/bcm63xx_enet.c
@@ -511,7 +511,7 @@ static int bcm_enet_poll(struct napi_struct *napi, int budget)
/* no more packet in rx/tx queue, remove device from poll
* queue */
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
/* restore rx/tx interrupt */
enet_dmac_writel(priv, priv->dma_chan_int_mask,
@@ -913,6 +913,8 @@ static int bcm_enet_open(struct net_device *dev)
priv->old_link = 0;
priv->old_duplex = -1;
priv->old_pause = -1;
+ } else {
+ phydev = NULL;
}
/* mask all interrupts and request them */
@@ -1083,7 +1085,7 @@ static int bcm_enet_open(struct net_device *dev)
enet_dmac_writel(priv, priv->dma_chan_int_mask,
ENETDMAC_IRMASK, priv->tx_chan);
- if (priv->has_phy)
+ if (phydev)
phy_start(phydev);
else
bcm_enet_adjust_link(dev);
@@ -1126,7 +1128,7 @@ out_freeirq:
free_irq(dev->irq, dev);
out_phy_disconnect:
- if (priv->has_phy)
+ if (phydev)
phy_disconnect(phydev);
return ret;
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 7e8cf21..a68d4889 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -43,14 +43,43 @@ static inline void name##_writel(struct bcm_sysport_priv *priv, \
BCM_SYSPORT_IO_MACRO(intrl2_0, SYS_PORT_INTRL2_0_OFFSET);
BCM_SYSPORT_IO_MACRO(intrl2_1, SYS_PORT_INTRL2_1_OFFSET);
BCM_SYSPORT_IO_MACRO(umac, SYS_PORT_UMAC_OFFSET);
+BCM_SYSPORT_IO_MACRO(gib, SYS_PORT_GIB_OFFSET);
BCM_SYSPORT_IO_MACRO(tdma, SYS_PORT_TDMA_OFFSET);
-BCM_SYSPORT_IO_MACRO(rdma, SYS_PORT_RDMA_OFFSET);
BCM_SYSPORT_IO_MACRO(rxchk, SYS_PORT_RXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(txchk, SYS_PORT_TXCHK_OFFSET);
BCM_SYSPORT_IO_MACRO(rbuf, SYS_PORT_RBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(tbuf, SYS_PORT_TBUF_OFFSET);
BCM_SYSPORT_IO_MACRO(topctrl, SYS_PORT_TOPCTRL_OFFSET);
+/* On SYSTEMPORT Lite, any register after RDMA_STATUS has the exact
+ * same layout, except it has been moved up by 4 bytes, *sigh*
+ */
+static inline u32 rdma_readl(struct bcm_sysport_priv *priv, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ return __raw_readl(priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline void rdma_writel(struct bcm_sysport_priv *priv, u32 val, u32 off)
+{
+ if (priv->is_lite && off >= RDMA_STATUS)
+ off += 4;
+ __raw_writel(val, priv->base + SYS_PORT_RDMA_OFFSET + off);
+}
+
+static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
+{
+ if (!priv->is_lite) {
+ return BIT(bit);
+ } else {
+ if (bit >= ACB_ALGO)
+ return BIT(bit + 1);
+ else
+ return BIT(bit);
+ }
+}
+
/* L2-interrupt masking/unmasking helpers, does automatic saving of the applied
* mask in a software copy to avoid CPU_MASK_STATUS reads in hot-paths.
*/
@@ -143,9 +172,9 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
priv->tsb_en = !!(wanted & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM));
reg = tdma_readl(priv, TDMA_CONTROL);
if (priv->tsb_en)
- reg |= TSB_EN;
+ reg |= tdma_control_bit(priv, TSB_EN);
else
- reg &= ~TSB_EN;
+ reg &= ~tdma_control_bit(priv, TSB_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
return 0;
@@ -281,11 +310,35 @@ static void bcm_sysport_set_msglvl(struct net_device *dev, u32 enable)
priv->msg_enable = enable;
}
+static inline bool bcm_sysport_lite_stat_valid(enum bcm_sysport_stat_type type)
+{
+ switch (type) {
+ case BCM_SYSPORT_STAT_NETDEV:
+ case BCM_SYSPORT_STAT_RXCHK:
+ case BCM_SYSPORT_STAT_RBUF:
+ case BCM_SYSPORT_STAT_SOFT:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
{
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ unsigned int i, j;
+
switch (string_set) {
case ETH_SS_STATS:
- return BCM_SYSPORT_STATS_LEN;
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+ j++;
+ }
+ return j;
default:
return -EOPNOTSUPP;
}
@@ -294,14 +347,21 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
static void bcm_sysport_get_strings(struct net_device *dev,
u32 stringset, u8 *data)
{
- int i;
+ struct bcm_sysport_priv *priv = netdev_priv(dev);
+ const struct bcm_sysport_stats *s;
+ int i, j;
switch (stringset) {
case ETH_SS_STATS:
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
- memcpy(data + i * ETH_GSTRING_LEN,
- bcm_sysport_gstrings_stats[i].stat_string,
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ s = &bcm_sysport_gstrings_stats[i];
+ if (priv->is_lite &&
+ !bcm_sysport_lite_stat_valid(s->type))
+ continue;
+
+ memcpy(data + j * ETH_GSTRING_LEN, s->stat_string,
ETH_GSTRING_LEN);
+ j++;
}
break;
default:
@@ -327,6 +387,9 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
case BCM_SYSPORT_STAT_MIB_RX:
case BCM_SYSPORT_STAT_MIB_TX:
case BCM_SYSPORT_STAT_RUNT:
+ if (priv->is_lite)
+ continue;
+
if (s->type != BCM_SYSPORT_STAT_MIB_RX)
offset = UMAC_MIB_STAT_OFFSET;
val = umac_readl(priv, UMAC_MIB_START + j + offset);
@@ -355,12 +418,12 @@ static void bcm_sysport_get_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
- int i;
+ int i, j;
if (netif_running(dev))
bcm_sysport_update_mib_counters(priv);
- for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
+ for (i = 0, j = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
const struct bcm_sysport_stats *s;
char *p;
@@ -370,7 +433,8 @@ static void bcm_sysport_get_stats(struct net_device *dev,
else
p = (char *)priv;
p += s->stat_offset;
- data[i] = *(unsigned long *)p;
+ data[j] = *(unsigned long *)p;
+ j++;
}
}
@@ -573,8 +637,14 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
u16 len, status;
struct bcm_rsb *rsb;
- /* Determine how much we should process since last call */
- p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+	/* Determine how much we should process since last call; SYSTEMPORT
+	 * Lite groups the producer and consumer indexes into the same 32-bit
+	 * register, which we access using RDMA_CONS_INDEX
+	 */
+ if (!priv->is_lite)
+ p_index = rdma_readl(priv, RDMA_PROD_INDEX);
+ else
+ p_index = rdma_readl(priv, RDMA_CONS_INDEX);
p_index &= RDMA_PROD_INDEX_MASK;
if (p_index < priv->rx_c_index)
@@ -710,11 +780,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
unsigned int c_index, last_c_index, last_tx_cn, num_tx_cbs;
unsigned int pkts_compl = 0, bytes_compl = 0;
struct bcm_sysport_cb *cb;
- struct netdev_queue *txq;
u32 hw_ind;
- txq = netdev_get_tx_queue(ndev, ring->index);
-
/* Compute how many descriptors have been processed since last call */
hw_ind = tdma_readl(priv, TDMA_DESC_RING_PROD_CONS_INDEX(ring->index));
c_index = (hw_ind >> RING_CONS_INDEX_SHIFT) & RING_CONS_INDEX_MASK;
@@ -745,9 +812,6 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
ring->c_index = c_index;
- if (netif_tx_queue_stopped(txq) && pkts_compl)
- netif_tx_wake_queue(txq);
-
netif_dbg(priv, tx_done, ndev,
"ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
ring->index, ring->c_index, pkts_compl, bytes_compl);
@@ -759,16 +823,33 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
static unsigned int bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
struct bcm_sysport_tx_ring *ring)
{
+ struct netdev_queue *txq;
unsigned int released;
unsigned long flags;
+ txq = netdev_get_tx_queue(priv->netdev, ring->index);
+
spin_lock_irqsave(&ring->lock, flags);
released = __bcm_sysport_tx_reclaim(priv, ring);
+ if (released)
+ netif_tx_wake_queue(txq);
+
spin_unlock_irqrestore(&ring->lock, flags);
return released;
}
+/* Locked version of the per-ring TX reclaim, but does not wake the queue */
+static void bcm_sysport_tx_clean(struct bcm_sysport_priv *priv,
+ struct bcm_sysport_tx_ring *ring)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ __bcm_sysport_tx_reclaim(priv, ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+}
+
static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
{
struct bcm_sysport_tx_ring *ring =
@@ -780,7 +861,11 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
if (work_done == 0) {
napi_complete(napi);
/* re-enable TX interrupt */
- intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ if (!ring->priv->is_lite)
+ intrl2_1_mask_clear(ring->priv, BIT(ring->index));
+ else
+ intrl2_0_mask_clear(ring->priv, BIT(ring->index +
+ INTRL2_0_TDMA_MBDONE_SHIFT));
return 0;
}
@@ -806,7 +891,15 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
priv->rx_c_index += work_done;
priv->rx_c_index &= RDMA_CONS_INDEX_MASK;
- rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+
+	/* SYSTEMPORT Lite groups the producer/consumer indexes; the producer
+	 * is maintained by HW, but writes to it will be ignored while RDMA
+	 * is active
+	 */
+ if (!priv->is_lite)
+ rdma_writel(priv, priv->rx_c_index, RDMA_CONS_INDEX);
+ else
+ rdma_writel(priv, priv->rx_c_index << 16, RDMA_CONS_INDEX);
if (work_done < budget) {
napi_complete_done(napi, work_done);
@@ -837,6 +930,8 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
{
struct net_device *dev = dev_id;
struct bcm_sysport_priv *priv = netdev_priv(dev);
+ struct bcm_sysport_tx_ring *txr;
+ unsigned int ring, ring_bit;
priv->irq0_stat = intrl2_0_readl(priv, INTRL2_CPU_STATUS) &
~intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
@@ -866,6 +961,22 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
bcm_sysport_resume_from_wol(priv);
}
+ if (!priv->is_lite)
+ goto out;
+
+ for (ring = 0; ring < dev->num_tx_queues; ring++) {
+ ring_bit = BIT(ring + INTRL2_0_TDMA_MBDONE_SHIFT);
+ if (!(priv->irq0_stat & ring_bit))
+ continue;
+
+ txr = &priv->tx_rings[ring];
+
+ if (likely(napi_schedule_prep(&txr->napi))) {
+ intrl2_0_mask_set(priv, ring_bit);
+ __napi_schedule(&txr->napi);
+ }
+ }
+out:
return IRQ_HANDLED;
}
@@ -919,9 +1030,11 @@ static void bcm_sysport_poll_controller(struct net_device *dev)
bcm_sysport_rx_isr(priv->irq0, priv);
enable_irq(priv->irq0);
- disable_irq(priv->irq1);
- bcm_sysport_tx_isr(priv->irq1, priv);
- enable_irq(priv->irq1);
+ if (!priv->is_lite) {
+ disable_irq(priv->irq1);
+ bcm_sysport_tx_isr(priv->irq1, priv);
+ enable_irq(priv->irq1);
+ }
}
#endif
@@ -1118,6 +1231,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
priv->old_duplex = phydev->duplex;
}
+ if (priv->is_lite)
+ goto out;
+
switch (phydev->speed) {
case SPEED_2500:
cmd_bits = CMD_SPEED_2500;
@@ -1158,8 +1274,9 @@ static void bcm_sysport_adj_link(struct net_device *dev)
reg |= cmd_bits;
umac_writel(priv, reg, UMAC_CMD);
}
-
- phy_print_status(phydev);
+out:
+ if (changed)
+ phy_print_status(phydev);
}
static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
@@ -1252,7 +1369,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
napi_disable(&ring->napi);
netif_napi_del(&ring->napi);
- bcm_sysport_tx_reclaim(priv, ring);
+ bcm_sysport_tx_clean(priv, ring);
kfree(ring->cbs);
ring->cbs = NULL;
@@ -1304,9 +1421,9 @@ static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
reg = tdma_readl(priv, TDMA_CONTROL);
if (enable)
- reg |= TDMA_EN;
+ reg |= tdma_control_bit(priv, TDMA_EN);
else
- reg &= ~TDMA_EN;
+ reg &= ~tdma_control_bit(priv, TDMA_EN);
tdma_writel(priv, reg, TDMA_CONTROL);
/* Poll for TMDA disabling completion */
@@ -1331,7 +1448,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
int i;
/* Initialize SW view of the RX ring */
- priv->num_rx_bds = NUM_RX_DESC;
+ priv->num_rx_bds = priv->num_rx_desc_words / WORDS_PER_DESC;
priv->rx_bds = priv->base + SYS_PORT_RDMA_OFFSET;
priv->rx_c_index = 0;
priv->rx_read_ptr = 0;
@@ -1368,7 +1485,7 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
rdma_writel(priv, 0, RDMA_START_ADDR_HI);
rdma_writel(priv, 0, RDMA_START_ADDR_LO);
rdma_writel(priv, 0, RDMA_END_ADDR_HI);
- rdma_writel(priv, NUM_HW_RX_DESC_WORDS - 1, RDMA_END_ADDR_LO);
+ rdma_writel(priv, priv->num_rx_desc_words - 1, RDMA_END_ADDR_LO);
rdma_writel(priv, 1, RDMA_MBDONE_INTR);
@@ -1410,6 +1527,9 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
struct bcm_sysport_priv *priv = netdev_priv(dev);
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
if (dev->flags & IFF_PROMISC)
reg |= CMD_PROMISC;
@@ -1427,12 +1547,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
{
u32 reg;
- reg = umac_readl(priv, UMAC_CMD);
- if (enable)
- reg |= mask;
- else
- reg &= ~mask;
- umac_writel(priv, reg, UMAC_CMD);
+ if (!priv->is_lite) {
+ reg = umac_readl(priv, UMAC_CMD);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ umac_writel(priv, reg, UMAC_CMD);
+ } else {
+ reg = gib_readl(priv, GIB_CONTROL);
+ if (enable)
+ reg |= mask;
+ else
+ reg &= ~mask;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
/* UniMAC stops on a packet boundary, wait for a full-sized packet
* to be processed (1 msec).
@@ -1445,6 +1574,9 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
{
u32 reg;
+ if (priv->is_lite)
+ return;
+
reg = umac_readl(priv, UMAC_CMD);
reg |= CMD_SW_RESET;
umac_writel(priv, reg, UMAC_CMD);
@@ -1457,9 +1589,17 @@ static inline void umac_reset(struct bcm_sysport_priv *priv)
static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
unsigned char *addr)
{
- umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
- (addr[2] << 8) | addr[3], UMAC_MAC0);
- umac_writel(priv, (addr[4] << 8) | addr[5], UMAC_MAC1);
+ u32 mac0 = (addr[0] << 24) | (addr[1] << 16) | (addr[2] << 8) |
+ addr[3];
+ u32 mac1 = (addr[4] << 8) | addr[5];
+
+ if (!priv->is_lite) {
+ umac_writel(priv, mac0, UMAC_MAC0);
+ umac_writel(priv, mac1, UMAC_MAC1);
+ } else {
+ gib_writel(priv, mac0, GIB_MAC0);
+ gib_writel(priv, mac1, GIB_MAC1);
+ }
}
static void topctrl_flush(struct bcm_sysport_priv *priv)
@@ -1504,8 +1644,11 @@ static void bcm_sysport_netif_start(struct net_device *dev)
phy_start(dev->phydev);
- /* Enable TX interrupts for the 32 TXQs */
- intrl2_1_mask_clear(priv, 0xffffffff);
+ /* Enable TX interrupts for the TXQs */
+ if (!priv->is_lite)
+ intrl2_1_mask_clear(priv, 0xffffffff);
+ else
+ intrl2_0_mask_clear(priv, INTRL2_0_TDMA_MBDONE_MASK);
/* Last call before we start the real business */
netif_tx_start_all_queues(dev);
@@ -1517,9 +1660,37 @@ static void rbuf_init(struct bcm_sysport_priv *priv)
reg = rbuf_readl(priv, RBUF_CONTROL);
reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+ /* Set a correct RSB format on SYSTEMPORT Lite */
+ if (priv->is_lite) {
+ reg &= ~RBUF_RSB_SWAP1;
+ reg |= RBUF_RSB_SWAP0;
+ }
rbuf_writel(priv, reg, RBUF_CONTROL);
}
+static inline void bcm_sysport_mask_all_intrs(struct bcm_sysport_priv *priv)
+{
+ intrl2_0_mask_set(priv, 0xffffffff);
+ intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ if (!priv->is_lite) {
+ intrl2_1_mask_set(priv, 0xffffffff);
+ intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ }
+}
+
+static inline void gib_set_pad_extension(struct bcm_sysport_priv *priv)
+{
+ u32 __maybe_unused reg;
+
+ /* Include Broadcom tag in pad extension */
+ if (netdev_uses_dsa(priv->netdev)) {
+ reg = gib_readl(priv, GIB_CONTROL);
+ reg &= ~(GIB_PAD_EXTENSION_MASK << GIB_PAD_EXTENSION_SHIFT);
+ reg |= ENET_BRCM_TAG_LEN << GIB_PAD_EXTENSION_SHIFT;
+ gib_writel(priv, reg, GIB_CONTROL);
+ }
+}
+
static int bcm_sysport_open(struct net_device *dev)
{
struct bcm_sysport_priv *priv = netdev_priv(dev);
@@ -1540,13 +1711,20 @@ static int bcm_sysport_open(struct net_device *dev)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
/* Read CRC forward */
- priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ if (!priv->is_lite)
+ priv->crc_fwd = !!(umac_readl(priv, UMAC_CMD) & CMD_CRC_FWD);
+ else
+ priv->crc_fwd = !!(gib_readl(priv, GIB_CONTROL) &
+ GIB_FCS_STRIP);
phydev = of_phy_connect(dev, priv->phy_dn, bcm_sysport_adj_link,
0, priv->phy_interface);
@@ -1561,12 +1739,7 @@ static int bcm_sysport_open(struct net_device *dev)
priv->old_pause = -1;
/* mask all interrupts and request them */
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
ret = request_irq(priv->irq0, bcm_sysport_rx_isr, 0, dev->name, dev);
if (ret) {
@@ -1574,10 +1747,13 @@ static int bcm_sysport_open(struct net_device *dev)
goto out_phy_disconnect;
}
- ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0, dev->name, dev);
- if (ret) {
- netdev_err(dev, "failed to request TX interrupt\n");
- goto out_free_irq0;
+ if (!priv->is_lite) {
+ ret = request_irq(priv->irq1, bcm_sysport_tx_isr, 0,
+ dev->name, dev);
+ if (ret) {
+ netdev_err(dev, "failed to request TX interrupt\n");
+ goto out_free_irq0;
+ }
}
/* Initialize both hardware and software ring */
@@ -1624,7 +1800,8 @@ out_free_rx_ring:
out_free_tx_ring:
for (i = 0; i < dev->num_tx_queues; i++)
bcm_sysport_fini_tx_ring(priv, i);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
out_free_irq0:
free_irq(priv->irq0, dev);
out_phy_disconnect:
@@ -1642,10 +1819,7 @@ static void bcm_sysport_netif_stop(struct net_device *dev)
phy_stop(dev->phydev);
/* mask all interrupts */
- intrl2_0_mask_set(priv, 0xffffffff);
- intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
- intrl2_1_mask_set(priv, 0xffffffff);
- intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+ bcm_sysport_mask_all_intrs(priv);
}
static int bcm_sysport_stop(struct net_device *dev)
@@ -1683,7 +1857,8 @@ static int bcm_sysport_stop(struct net_device *dev)
bcm_sysport_fini_rx_ring(priv);
free_irq(priv->irq0, dev);
- free_irq(priv->irq1, dev);
+ if (!priv->is_lite)
+ free_irq(priv->irq1, dev);
/* Disconnect from PHY */
phy_disconnect(dev->phydev);
@@ -1722,8 +1897,32 @@ static const struct net_device_ops bcm_sysport_netdev_ops = {
#define REV_FMT "v%2x.%02x"
+static const struct bcm_sysport_hw_params bcm_sysport_params[] = {
+ [SYSTEMPORT] = {
+ .is_lite = false,
+ .num_rx_desc_words = SP_NUM_HW_RX_DESC_WORDS,
+ },
+ [SYSTEMPORT_LITE] = {
+ .is_lite = true,
+ .num_rx_desc_words = SP_LT_NUM_HW_RX_DESC_WORDS,
+ },
+};
+
+static const struct of_device_id bcm_sysport_of_match[] = {
+ { .compatible = "brcm,systemportlite-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT_LITE] },
+ { .compatible = "brcm,systemport-v1.00",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { .compatible = "brcm,systemport",
+ .data = &bcm_sysport_params[SYSTEMPORT] },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
+
static int bcm_sysport_probe(struct platform_device *pdev)
{
+ const struct bcm_sysport_hw_params *params;
+ const struct of_device_id *of_id = NULL;
struct bcm_sysport_priv *priv;
struct device_node *dn;
struct net_device *dev;
@@ -1734,6 +1933,12 @@ static int bcm_sysport_probe(struct platform_device *pdev)
dn = pdev->dev.of_node;
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ of_id = of_match_node(bcm_sysport_of_match, dn);
+ if (!of_id || !of_id->data)
+ return -EINVAL;
+
+ /* Fairly quickly we need to know the type of adapter we have */
+ params = of_id->data;
/* Read the Transmit/Receive Queue properties */
if (of_property_read_u32(dn, "systemport,num-txq", &txq))
@@ -1741,6 +1946,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
if (of_property_read_u32(dn, "systemport,num-rxq", &rxq))
rxq = 1;
+ /* Sanity check the number of transmit queues */
+ if (!txq || txq > TDMA_NUM_RINGS)
+ return -EINVAL;
+
dev = alloc_etherdev_mqs(sizeof(*priv), txq, rxq);
if (!dev)
return -ENOMEM;
@@ -1748,10 +1957,21 @@ static int bcm_sysport_probe(struct platform_device *pdev)
/* Initialize private members */
priv = netdev_priv(dev);
+ /* Allocate number of TX rings */
+ priv->tx_rings = devm_kcalloc(&pdev->dev, txq,
+ sizeof(struct bcm_sysport_tx_ring),
+ GFP_KERNEL);
+ if (!priv->tx_rings)
+ return -ENOMEM;
+
+ priv->is_lite = params->is_lite;
+ priv->num_rx_desc_words = params->num_rx_desc_words;
+
priv->irq0 = platform_get_irq(pdev, 0);
- priv->irq1 = platform_get_irq(pdev, 1);
+ if (!priv->is_lite)
+ priv->irq1 = platform_get_irq(pdev, 1);
priv->wol_irq = platform_get_irq(pdev, 2);
- if (priv->irq0 <= 0 || priv->irq1 <= 0) {
+ if (priv->irq0 <= 0 || (priv->irq1 <= 0 && !priv->is_lite)) {
dev_err(&pdev->dev, "invalid interrupts\n");
ret = -EINVAL;
goto err_free_netdev;
@@ -1825,8 +2045,9 @@ static int bcm_sysport_probe(struct platform_device *pdev)
priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
dev_info(&pdev->dev,
- "Broadcom SYSTEMPORT" REV_FMT
+ "Broadcom SYSTEMPORT%s" REV_FMT
" at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+ priv->is_lite ? " Lite" : "",
(priv->rev >> 8) & 0xff, priv->rev & 0xff,
priv->base, priv->irq0, priv->irq1, txq, rxq);
@@ -2022,7 +2243,10 @@ static int bcm_sysport_resume(struct device *d)
rbuf_init(priv);
/* Set maximum frame length */
- umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ if (!priv->is_lite)
+ umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+ else
+ gib_set_pad_extension(priv);
/* Set MAC address */
umac_set_hw_addr(priv, dev->dev_addr);
@@ -2058,13 +2282,6 @@ out_free_tx_rings:
static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
bcm_sysport_suspend, bcm_sysport_resume);
-static const struct of_device_id bcm_sysport_of_match[] = {
- { .compatible = "brcm,systemport-v1.00" },
- { .compatible = "brcm,systemport" },
- { /* sentinel */ }
-};
-MODULE_DEVICE_TABLE(of, bcm_sysport_of_match);
-
static struct platform_driver bcm_sysport_driver = {
.probe = bcm_sysport_probe,
.remove = bcm_sysport_remove,
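SYSTEMPORT Lite exposes a single interrupt line, which is why every request_irq()/free_irq() on irq1 above is now gated on !priv->is_lite; as the header hunk below notes, the Lite variant folds per-queue TX completions into interrupt instance 0 as TDMA_MBDONE bits. A hedged, illustrative sketch of dispatching those grouped bits (not the patch's exact ISR):

/* Illustrative only: walk the grouped MBDONE bits on instance 0
 * (SYSTEMPORT Lite) and kick the matching TX ring.
 */
static void foo_handle_tx_lite(struct bcm_sysport_priv *priv, u32 status)
{
	u32 ring_bits = (status & INTRL2_0_TDMA_MBDONE_MASK) >>
			INTRL2_0_TDMA_MBDONE_SHIFT;
	unsigned int ring;

	for (ring = 0; ring_bits; ring++, ring_bits >>= 1) {
		if (ring_bits & 1)
			napi_schedule(&priv->tx_rings[ring].napi);
	}
}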
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 1c82e3d..863ddd7 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -127,6 +127,10 @@ struct bcm_rsb {
#define INTRL2_0_DESC_ALLOC_ERR (1 << 10)
#define INTRL2_0_UNEXP_PKTSIZE_ACK (1 << 11)
+/* SYSTEMPORT Lite groups the TX queue interrupts on instance 0 */
+#define INTRL2_0_TDMA_MBDONE_SHIFT 12
+#define INTRL2_0_TDMA_MBDONE_MASK (0xffff << INTRL2_0_TDMA_MBDONE_SHIFT)
+
/* RXCHK offset and defines */
#define SYS_PORT_RXCHK_OFFSET 0x300
@@ -176,7 +180,9 @@ struct bcm_rsb {
#define RBUF_OK_TO_SEND_MASK 0xff
#define RBUF_CRC_REPLACE (1 << 20)
#define RBUF_OK_TO_SEND_MODE (1 << 21)
-#define RBUF_RSB_SWAP (1 << 22)
+/* SYSTEMPORT Lite uses two bits here */
+#define RBUF_RSB_SWAP0 (1 << 22)
+#define RBUF_RSB_SWAP1 (1 << 23)
#define RBUF_ACPI_EN (1 << 23)
#define RBUF_PKT_RDY_THRESH 0x04
@@ -247,6 +253,7 @@ struct bcm_rsb {
#define MIB_RUNT_CNT_RST (1 << 1)
#define MIB_TX_CNT_RST (1 << 2)
+/* These offsets are valid for SYSTEMPORT and SYSTEMPORT Lite */
#define UMAC_MPD_CTRL 0x620
#define MPD_EN (1 << 0)
#define MSEQ_LEN_SHIFT 16
@@ -258,6 +265,34 @@ struct bcm_rsb {
#define UMAC_MDF_CTRL 0x650
#define UMAC_MDF_ADDR 0x654
+/* Only valid on SYSTEMPORT Lite */
+#define SYS_PORT_GIB_OFFSET 0x1000
+
+#define GIB_CONTROL 0x00
+#define GIB_TX_EN (1 << 0)
+#define GIB_RX_EN (1 << 1)
+#define GIB_TX_FLUSH (1 << 2)
+#define GIB_RX_FLUSH (1 << 3)
+#define GIB_GTX_CLK_SEL_SHIFT 4
+#define GIB_GTX_CLK_EXT_CLK (0 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_125MHZ (1 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_GTX_CLK_250MHZ (2 << GIB_GTX_CLK_SEL_SHIFT)
+#define GIB_FCS_STRIP (1 << 6)
+#define GIB_LCL_LOOP_EN (1 << 7)
+#define GIB_LCL_LOOP_TXEN (1 << 8)
+#define GIB_RMT_LOOP_EN (1 << 9)
+#define GIB_RMT_LOOP_RXEN (1 << 10)
+#define GIB_RX_PAUSE_EN (1 << 11)
+#define GIB_PREAMBLE_LEN_SHIFT 12
+#define GIB_PREAMBLE_LEN_MASK 0xf
+#define GIB_IPG_LEN_SHIFT 16
+#define GIB_IPG_LEN_MASK 0x3f
+#define GIB_PAD_EXTENSION_SHIFT 22
+#define GIB_PAD_EXTENSION_MASK 0x3f
+
+#define GIB_MAC1 0x08
+#define GIB_MAC0 0x0c
+
/* Receive DMA offset and defines */
#define SYS_PORT_RDMA_OFFSET 0x2000
@@ -409,16 +444,19 @@ struct bcm_rsb {
RING_PCP_DEI_VID)
#define TDMA_CONTROL 0x600
-#define TDMA_EN (1 << 0)
-#define TSB_EN (1 << 1)
-#define TSB_SWAP (1 << 2)
-#define ACB_ALGO (1 << 3)
+#define TDMA_EN 0
+#define TSB_EN 1
+/* Uses 2 bits on SYSTEMPORT Lite and shifts everything by 1 bit; we
+ * keep the SYSTEMPORT layout here and adjust with tdma_control_bit()
+ */
+#define TSB_SWAP 2
+#define ACB_ALGO 3
#define BUF_DATA_OFFSET_SHIFT 4
#define BUF_DATA_OFFSET_MASK 0x3ff
-#define VLAN_EN (1 << 14)
-#define SW_BRCM_TAG (1 << 15)
-#define WNC_KPT_SIZE_UPDATE (1 << 16)
-#define SYNC_PKT_SIZE (1 << 17)
+#define VLAN_EN 14
+#define SW_BRCM_TAG 15
+#define WNC_KPT_SIZE_UPDATE 16
+#define SYNC_PKT_SIZE 17
#define ACH_TXDONE_DELAY_SHIFT 18
#define ACH_TXDONE_DELAY_MASK 0xff
@@ -475,12 +513,12 @@ struct dma_desc {
};
/* Number of Receive hardware descriptor words */
-#define NUM_HW_RX_DESC_WORDS 1024
-/* Real number of usable descriptors */
-#define NUM_RX_DESC (NUM_HW_RX_DESC_WORDS / WORDS_PER_DESC)
+#define SP_NUM_HW_RX_DESC_WORDS 1024
+#define SP_LT_NUM_HW_RX_DESC_WORDS 256
-/* Internal linked-list RAM has up to 1536 entries */
-#define NUM_TX_DESC 1536
+/* Internal linked-list RAM size */
+#define SP_NUM_TX_DESC 1536
+#define SP_LT_NUM_TX_DESC 256
#define WORDS_PER_DESC (sizeof(struct dma_desc) / sizeof(u32))
@@ -627,6 +665,16 @@ struct bcm_sysport_cb {
DEFINE_DMA_UNMAP_LEN(dma_len);
};
+enum bcm_sysport_type {
+ SYSTEMPORT = 0,
+ SYSTEMPORT_LITE,
+};
+
+struct bcm_sysport_hw_params {
+ bool is_lite;
+ unsigned int num_rx_desc_words;
+};
+
/* Software view of the TX ring */
struct bcm_sysport_tx_ring {
spinlock_t lock; /* Ring lock for tx reclaim/xmit */
@@ -651,6 +699,8 @@ struct bcm_sysport_priv {
u32 irq0_mask;
u32 irq1_stat;
u32 irq1_mask;
+ bool is_lite;
+ unsigned int num_rx_desc_words;
struct napi_struct napi ____cacheline_aligned;
struct net_device *netdev;
struct platform_device *pdev;
@@ -659,7 +709,7 @@ struct bcm_sysport_priv {
int wol_irq;
/* Transmit rings */
- struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
+ struct bcm_sysport_tx_ring *tx_rings;
/* Receive queue */
void __iomem *rx_bds;
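The TSB_SWAP comment above says TDMA_CONTROL keeps the SYSTEMPORT bit layout and remaps positions through tdma_control_bit(); that helper is not part of this hunk, but given that Lite widens TSB_SWAP to two bits, one plausible shape for it (a sketch under that assumption, not the patch's verbatim code) is:

/* Sketch: on Lite, bits at or above ACB_ALGO sit one position higher */
static inline u32 tdma_control_bit(struct bcm_sysport_priv *priv, u32 bit)
{
	if (!priv->is_lite)
		return BIT(bit);

	return bit >= ACB_ALGO ? BIT(bit + 1) : BIT(bit);
}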
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
index 7c19c8e..6ce80cb 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma-mdio.c
@@ -12,11 +12,6 @@
#include <linux/brcmphy.h>
#include "bgmac.h"
-struct bcma_mdio {
- struct bcma_device *core;
- u8 phyaddr;
-};
-
static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
u32 value, int timeout)
{
@@ -37,7 +32,7 @@ static bool bcma_mdio_wait_value(struct bcma_device *core, u16 reg, u32 mask,
* PHY ops
**************************************************/
-static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
+static u16 bcma_mdio_phy_read(struct bgmac *bgmac, u8 phyaddr, u8 reg)
{
struct bcma_device *core;
u16 phy_access_addr;
@@ -56,12 +51,12 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
BUILD_BUG_ON(BGMAC_PC_MCT_SHIFT != BCMA_GMAC_CMN_PC_MCT_SHIFT);
BUILD_BUG_ON(BGMAC_PC_MTE != BCMA_GMAC_CMN_PC_MTE);
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -87,7 +82,7 @@ static u16 bcma_mdio_phy_read(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg)
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphywr */
-static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
+static int bcma_mdio_phy_write(struct bgmac *bgmac, u8 phyaddr, u8 reg,
u16 value)
{
struct bcma_device *core;
@@ -95,12 +90,12 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
u16 phy_ctl_addr;
u32 tmp;
- if (bcma_mdio->core->id.id == BCMA_CORE_4706_MAC_GBIT) {
- core = bcma_mdio->core->bus->drv_gmac_cmn.core;
+ if (bgmac->bcma.core->id.id == BCMA_CORE_4706_MAC_GBIT) {
+ core = bgmac->bcma.core->bus->drv_gmac_cmn.core;
phy_access_addr = BCMA_GMAC_CMN_PHY_ACCESS;
phy_ctl_addr = BCMA_GMAC_CMN_PHY_CTL;
} else {
- core = bcma_mdio->core;
+ core = bgmac->bcma.core;
phy_access_addr = BGMAC_PHY_ACCESS;
phy_ctl_addr = BGMAC_PHY_CNTL;
}
@@ -110,8 +105,8 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
tmp |= phyaddr;
bcma_write32(core, phy_ctl_addr, tmp);
- bcma_write32(bcma_mdio->core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
- if (bcma_read32(bcma_mdio->core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
+ bcma_write32(bgmac->bcma.core, BGMAC_INT_STATUS, BGMAC_IS_MDIO);
+ if (bcma_read32(bgmac->bcma.core, BGMAC_INT_STATUS) & BGMAC_IS_MDIO)
dev_warn(&core->dev, "Error setting MDIO int\n");
tmp = BGMAC_PA_START;
@@ -132,57 +127,67 @@ static int bcma_mdio_phy_write(struct bcma_mdio *bcma_mdio, u8 phyaddr, u8 reg,
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyinit */
-static void bcma_mdio_phy_init(struct bcma_mdio *bcma_mdio)
+static void bcma_mdio_phy_init(struct bgmac *bgmac)
{
- struct bcma_chipinfo *ci = &bcma_mdio->core->bus->chipinfo;
+ struct bcma_chipinfo *ci = &bgmac->bcma.core->bus->chipinfo;
u8 i;
+ /* For some legacy hardware we do chipset-based PHY initialization here
+ * without even detecting the PHY ID. It's hacky and should be cleaned
+ * up as soon as someone can test it.
+ */
if (ci->id == BCMA_CHIP_ID_BCM5356) {
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x008b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x15, 0x0100);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x12, 0x2aaa);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x008b);
+ bcma_mdio_phy_write(bgmac, i, 0x15, 0x0100);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x12, 0x2aaa);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
if ((ci->id == BCMA_CHIP_ID_BCM5357 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg != 10) ||
(ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg != 9)) {
- struct bcma_drv_cc *cc = &bcma_mdio->core->bus->drv_cc;
+ struct bcma_drv_cc *cc = &bgmac->bcma.core->bus->drv_cc;
bcma_chipco_chipctl_maskset(cc, 2, ~0xc0000000, 0);
bcma_chipco_chipctl_maskset(cc, 4, ~0x80000000, 0);
for (i = 0; i < 5; i++) {
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5284);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x0010);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000f);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x5296);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x1073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9073);
- bcma_mdio_phy_write(bcma_mdio, i, 0x16, 0x52b6);
- bcma_mdio_phy_write(bcma_mdio, i, 0x17, 0x9273);
- bcma_mdio_phy_write(bcma_mdio, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5284);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x0010);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000f);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x5296);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x1073);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9073);
+ bcma_mdio_phy_write(bgmac, i, 0x16, 0x52b6);
+ bcma_mdio_phy_write(bgmac, i, 0x17, 0x9273);
+ bcma_mdio_phy_write(bgmac, i, 0x1f, 0x000b);
}
+ return;
}
+
+ /* For all other hw do initialization using PHY subsystem. */
+ if (bgmac->net_dev && bgmac->net_dev->phydev)
+ phy_init_hw(bgmac->net_dev->phydev);
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipphyreset */
static int bcma_mdio_phy_reset(struct mii_bus *bus)
{
- struct bcma_mdio *bcma_mdio = bus->priv;
- u8 phyaddr = bcma_mdio->phyaddr;
+ struct bgmac *bgmac = bus->priv;
+ u8 phyaddr = bgmac->phyaddr;
- if (bcma_mdio->phyaddr == BGMAC_PHY_NOREGS)
+ if (phyaddr == BGMAC_PHY_NOREGS)
return 0;
- bcma_mdio_phy_write(bcma_mdio, phyaddr, MII_BMCR, BMCR_RESET);
+ bcma_mdio_phy_write(bgmac, phyaddr, MII_BMCR, BMCR_RESET);
udelay(100);
- if (bcma_mdio_phy_read(bcma_mdio, phyaddr, MII_BMCR) & BMCR_RESET)
- dev_err(&bcma_mdio->core->dev, "PHY reset failed\n");
- bcma_mdio_phy_init(bcma_mdio);
+ if (bcma_mdio_phy_read(bgmac, phyaddr, MII_BMCR) & BMCR_RESET)
+ dev_err(bgmac->dev, "PHY reset failed\n");
+ bcma_mdio_phy_init(bgmac);
return 0;
}
@@ -202,16 +207,12 @@ static int bcma_mdio_mii_write(struct mii_bus *bus, int mii_id, int regnum,
return bcma_mdio_phy_write(bus->priv, mii_id, regnum, value);
}
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac)
{
- struct bcma_mdio *bcma_mdio;
+ struct bcma_device *core = bgmac->bcma.core;
struct mii_bus *mii_bus;
int err;
- bcma_mdio = kzalloc(sizeof(*bcma_mdio), GFP_KERNEL);
- if (!bcma_mdio)
- return ERR_PTR(-ENOMEM);
-
mii_bus = mdiobus_alloc();
if (!mii_bus) {
err = -ENOMEM;
@@ -221,15 +222,12 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
mii_bus->name = "bcma_mdio mii bus";
sprintf(mii_bus->id, "%s-%d-%d", "bcma_mdio", core->bus->num,
core->core_unit);
- mii_bus->priv = bcma_mdio;
+ mii_bus->priv = bgmac;
mii_bus->read = bcma_mdio_mii_read;
mii_bus->write = bcma_mdio_mii_write;
mii_bus->reset = bcma_mdio_phy_reset;
mii_bus->parent = &core->dev;
- mii_bus->phy_mask = ~(1 << phyaddr);
-
- bcma_mdio->core = core;
- bcma_mdio->phyaddr = phyaddr;
+ mii_bus->phy_mask = ~(1 << bgmac->phyaddr);
err = mdiobus_register(mii_bus);
if (err) {
@@ -242,23 +240,17 @@ struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr)
err_free_bus:
mdiobus_free(mii_bus);
err:
- kfree(bcma_mdio);
return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_register);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus)
{
- struct bcma_mdio *bcma_mdio;
-
if (!mii_bus)
return;
- bcma_mdio = mii_bus->priv;
-
mdiobus_unregister(mii_bus);
mdiobus_free(mii_bus);
- kfree(bcma_mdio);
}
EXPORT_SYMBOL_GPL(bcma_mdio_mii_unregister);
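With the intermediate struct bcma_mdio removed, mii_bus->priv now carries the bgmac itself, so each MDIO callback recovers full driver state in one dereference and the separate kzalloc()/kfree() lifetime disappears. The read side (mirroring the write callback shown above) reduces to something like this sketch:

static int bcma_mdio_mii_read(struct mii_bus *bus, int mii_id, int regnum)
{
	struct bgmac *bgmac = bus->priv;	/* priv is the bgmac itself now */

	return bcma_mdio_phy_read(bgmac, mii_id, regnum);
}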
diff --git a/drivers/net/ethernet/broadcom/bgmac-bcma.c b/drivers/net/ethernet/broadcom/bgmac-bcma.c
index 4a4ffc0..5ef60d4 100644
--- a/drivers/net/ethernet/broadcom/bgmac-bcma.c
+++ b/drivers/net/ethernet/broadcom/bgmac-bcma.c
@@ -117,12 +117,11 @@ static int bgmac_probe(struct bcma_device *core)
u8 *mac;
int err;
- bgmac = kzalloc(sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&core->dev);
if (!bgmac)
return -ENOMEM;
bgmac->bcma.core = core;
- bgmac->dev = &core->dev;
bgmac->dma_dev = core->dma_dev;
bgmac->irq = core->irq;
@@ -178,7 +177,7 @@ static int bgmac_probe(struct bcma_device *core)
if (!bgmac_is_bcm4707_family(core) &&
!(ci->id == BCMA_CHIP_ID_BCM53573 && core->core_unit == 1)) {
- mii_bus = bcma_mdio_mii_register(core, bgmac->phyaddr);
+ mii_bus = bcma_mdio_mii_register(bgmac);
if (IS_ERR(mii_bus)) {
err = PTR_ERR(mii_bus);
goto err;
@@ -307,7 +306,6 @@ static int bgmac_probe(struct bcma_device *core)
err1:
bcma_mdio_mii_unregister(bgmac->mii_bus);
err:
- kfree(bgmac);
bcma_set_drvdata(core, NULL);
return err;
diff --git a/drivers/net/ethernet/broadcom/bgmac-platform.c b/drivers/net/ethernet/broadcom/bgmac-platform.c
index 6f736c1..805e6ed 100644
--- a/drivers/net/ethernet/broadcom/bgmac-platform.c
+++ b/drivers/net/ethernet/broadcom/bgmac-platform.c
@@ -151,7 +151,7 @@ static int bgmac_probe(struct platform_device *pdev)
struct resource *regs;
const u8 *mac_addr;
- bgmac = devm_kzalloc(&pdev->dev, sizeof(*bgmac), GFP_KERNEL);
+ bgmac = bgmac_alloc(&pdev->dev);
if (!bgmac)
return -ENOMEM;
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index 0e066dc6..fe88126 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1148,7 +1148,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
return weight;
if (handled < weight) {
- napi_complete(napi);
+ napi_complete_done(napi, handled);
bgmac_chip_intrs_on(bgmac);
}
@@ -1446,22 +1446,32 @@ int bgmac_phy_connect_direct(struct bgmac *bgmac)
}
EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct);
-int bgmac_enet_probe(struct bgmac *info)
+struct bgmac *bgmac_alloc(struct device *dev)
{
struct net_device *net_dev;
struct bgmac *bgmac;
- int err;
/* Allocation and references */
- net_dev = alloc_etherdev(sizeof(*bgmac));
+ net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac));
if (!net_dev)
- return -ENOMEM;
+ return NULL;
net_dev->netdev_ops = &bgmac_netdev_ops;
net_dev->ethtool_ops = &bgmac_ethtool_ops;
+
bgmac = netdev_priv(net_dev);
- memcpy(bgmac, info, sizeof(*bgmac));
+ bgmac->dev = dev;
bgmac->net_dev = net_dev;
+
+ return bgmac;
+}
+EXPORT_SYMBOL_GPL(bgmac_alloc);
+
+int bgmac_enet_probe(struct bgmac *bgmac)
+{
+ struct net_device *net_dev = bgmac->net_dev;
+ int err;
+
net_dev->irq = bgmac->irq;
SET_NETDEV_DEV(net_dev, bgmac->dev);
@@ -1488,7 +1498,7 @@ int bgmac_enet_probe(struct bgmac *info)
err = bgmac_dma_alloc(bgmac);
if (err) {
dev_err(bgmac->dev, "Unable to alloc memory for DMA\n");
- goto err_netdev_free;
+ goto err_out;
}
bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK;
@@ -1521,8 +1531,7 @@ err_phy_disconnect:
phy_disconnect(net_dev->phydev);
err_dma_free:
bgmac_dma_free(bgmac);
-err_netdev_free:
- free_netdev(net_dev);
+err_out:
return err;
}
diff --git a/drivers/net/ethernet/broadcom/bgmac.h b/drivers/net/ethernet/broadcom/bgmac.h
index 71f493f..ab2db76 100644
--- a/drivers/net/ethernet/broadcom/bgmac.h
+++ b/drivers/net/ethernet/broadcom/bgmac.h
@@ -517,12 +517,13 @@ struct bgmac {
int (*phy_connect)(struct bgmac *bgmac);
};
-int bgmac_enet_probe(struct bgmac *info);
+struct bgmac *bgmac_alloc(struct device *dev);
+int bgmac_enet_probe(struct bgmac *bgmac);
void bgmac_enet_remove(struct bgmac *bgmac);
void bgmac_adjust_link(struct net_device *net_dev);
int bgmac_phy_connect_direct(struct bgmac *bgmac);
-struct mii_bus *bcma_mdio_mii_register(struct bcma_device *core, u8 phyaddr);
+struct mii_bus *bcma_mdio_mii_register(struct bgmac *bgmac);
void bcma_mdio_mii_unregister(struct mii_bus *mii_bus);
static inline u32 bgmac_read(struct bgmac *bgmac, u16 offset)
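The bgmac core is now split into a two-phase API: bus glue (bcma or platform) first calls bgmac_alloc() to get a device-managed netdev with an embedded bgmac, fills in the bus-specific fields and callbacks, then hands the result to bgmac_enet_probe(). This removes the old memcpy() of a separately allocated template and the manual free_netdev() on the error path. A condensed caller, following the bcma glue above:

static int example_bgmac_probe(struct bcma_device *core)
{
	struct bgmac *bgmac;

	bgmac = bgmac_alloc(&core->dev);	/* devm_alloc_etherdev() inside */
	if (!bgmac)
		return -ENOMEM;

	bgmac->bcma.core = core;
	bgmac->dma_dev = core->dma_dev;
	bgmac->irq = core->irq;
	/* ... install read/write and phy_connect callbacks ... */

	return bgmac_enet_probe(bgmac);		/* no kfree() needed on error */
}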
diff --git a/drivers/net/ethernet/broadcom/bnx2.c b/drivers/net/ethernet/broadcom/bnx2.c
index de1d07c..e3af1f3 100644
--- a/drivers/net/ethernet/broadcom/bnx2.c
+++ b/drivers/net/ethernet/broadcom/bnx2.c
@@ -3515,7 +3515,7 @@ static int bnx2_poll_msix(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_fast_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
bnapi->last_status_idx);
@@ -3552,7 +3552,7 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
rmb();
if (likely(!bnx2_has_work(bnapi))) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
BNX2_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
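This series also converts several poll handlers from napi_complete() to napi_complete_done(), reporting how much work the poll actually did; the core uses that figure when deciding whether to flush GRO state and, on newer kernels, whether to keep interrupts deferred. The resulting canonical poll shape, as a generic sketch:

static int foo_poll(struct napi_struct *napi, int budget)
{
	int work_done = foo_process_rx(napi, budget);	/* hypothetical */

	if (work_done < budget) {
		/* Report the real amount of work so the stack can make
		 * better polling decisions than a bare napi_complete().
		 */
		napi_complete_done(napi, work_done);
		foo_enable_irqs(napi);			/* hypothetical */
	}
	return work_done;
}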
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 3e199d3..9e8c061 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -549,14 +549,7 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
struct bnx2x_alloc_pool *pool = &fp->page_pool;
dma_addr_t mapping;
- if (!pool->page || (PAGE_SIZE - pool->offset) < SGE_PAGE_SIZE) {
-
- /* put page reference used by the memory pool, since we
- * won't be using this page as the mempool anymore.
- */
- if (pool->page)
- put_page(pool->page);
-
+ if (!pool->page) {
pool->page = alloc_pages(gfp_mask, PAGES_PER_SGE_SHIFT);
if (unlikely(!pool->page))
return -ENOMEM;
@@ -571,7 +564,6 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
return -ENOMEM;
}
- get_page(pool->page);
sw_buf->page = pool->page;
sw_buf->offset = pool->offset;
@@ -581,7 +573,10 @@ static int bnx2x_alloc_rx_sge(struct bnx2x *bp, struct bnx2x_fastpath *fp,
sge->addr_lo = cpu_to_le32(U64_LO(mapping));
pool->offset += SGE_PAGE_SIZE;
-
+ if (PAGE_SIZE - pool->offset >= SGE_PAGE_SIZE)
+ get_page(pool->page);
+ else
+ pool->page = NULL;
return 0;
}
@@ -3229,7 +3224,7 @@ static int bnx2x_poll(struct napi_struct *napi, int budget)
* has been updated when NAPI was scheduled.
*/
if (IS_FCOE_FP(fp)) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
} else {
bnx2x_update_fpsb_idx(fp);
/* bnx2x_has_rx_work() reads the status block,
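The bnx2x page-pool rework above inverts the reference-counting scheme: instead of taking an extra page reference up front and dropping it when the pool retires the page, the pool now takes a reference only while the page still has room for another SGE_PAGE_SIZE slice, and simply forgets the page (pool->page = NULL) once the last slice is handed out. Assuming 4 KiB SGE slices on a 64 KiB page, the page backs 16 buffers: alloc_pages() supplies the first reference, get_page() adds one for each of the next 15 slices, and each consumer's eventual put_page() releases exactly one, so the final put frees the page with no pool-side bookkeeping left over.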
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 5f19427..4342374 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -216,165 +216,184 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
return port_type;
}
-static int bnx2x_get_vf_settings(struct net_device *dev,
- struct ethtool_cmd *cmd)
+static int bnx2x_get_vf_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
+ u32 supported, advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (bp->state == BNX2X_STATE_OPEN) {
if (test_bit(BNX2X_LINK_REPORT_FD,
&bp->vf_link_vars.link_report_flags))
- cmd->duplex = DUPLEX_FULL;
+ cmd->base.duplex = DUPLEX_FULL;
else
- cmd->duplex = DUPLEX_HALF;
+ cmd->base.duplex = DUPLEX_HALF;
- ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+ cmd->base.speed = bp->vf_link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = PORT_OTHER;
- cmd->phy_address = 0;
- cmd->transceiver = XCVR_INTERNAL;
- cmd->autoneg = AUTONEG_DISABLE;
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ cmd->base.port = PORT_OTHER;
+ cmd->base.phy_address = 0;
+ cmd->base.autoneg = AUTONEG_DISABLE;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
int cfg_idx = bnx2x_get_link_cfg_idx(bp);
u32 media_type;
+ u32 supported, advertising, lp_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&lp_advertising,
+ cmd->link_modes.lp_advertising);
/* Dual Media boards present all available port types */
- cmd->supported = bp->port.supported[cfg_idx] |
+ supported = bp->port.supported[cfg_idx] |
(bp->port.supported[cfg_idx ^ 1] &
(SUPPORTED_TP | SUPPORTED_FIBRE));
- cmd->advertising = bp->port.advertising[cfg_idx];
+ advertising = bp->port.advertising[cfg_idx];
media_type = bp->link_params.phy[bnx2x_get_cur_phy_idx(bp)].media_type;
if (media_type == ETH_PHY_SFP_1G_FIBER) {
- cmd->supported &= ~(SUPPORTED_10000baseT_Full);
- cmd->advertising &= ~(ADVERTISED_10000baseT_Full);
+ supported &= ~(SUPPORTED_10000baseT_Full);
+ advertising &= ~(ADVERTISED_10000baseT_Full);
}
if ((bp->state == BNX2X_STATE_OPEN) && bp->link_vars.link_up &&
!(bp->flags & MF_FUNC_DIS)) {
- cmd->duplex = bp->link_vars.duplex;
+ cmd->base.duplex = bp->link_vars.duplex;
if (IS_MF(bp) && !BP_NOMCP(bp))
- ethtool_cmd_speed_set(cmd, bnx2x_get_mf_speed(bp));
+ cmd->base.speed = bnx2x_get_mf_speed(bp);
else
- ethtool_cmd_speed_set(cmd, bp->link_vars.line_speed);
+ cmd->base.speed = bp->link_vars.line_speed;
} else {
- cmd->duplex = DUPLEX_UNKNOWN;
- ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+ cmd->base.duplex = DUPLEX_UNKNOWN;
+ cmd->base.speed = SPEED_UNKNOWN;
}
- cmd->port = bnx2x_get_port_type(bp);
+ cmd->base.port = bnx2x_get_port_type(bp);
- cmd->phy_address = bp->mdio.prtad;
- cmd->transceiver = XCVR_INTERNAL;
+ cmd->base.phy_address = bp->mdio.prtad;
if (bp->link_params.req_line_speed[cfg_idx] == SPEED_AUTO_NEG)
- cmd->autoneg = AUTONEG_ENABLE;
+ cmd->base.autoneg = AUTONEG_ENABLE;
else
- cmd->autoneg = AUTONEG_DISABLE;
+ cmd->base.autoneg = AUTONEG_DISABLE;
/* Publish LP advertised speeds and FC */
if (bp->link_vars.link_status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE) {
u32 status = bp->link_vars.link_status;
- cmd->lp_advertising |= ADVERTISED_Autoneg;
+ lp_advertising |= ADVERTISED_Autoneg;
if (status & LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Pause;
+ lp_advertising |= ADVERTISED_Pause;
if (status & LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE)
- cmd->lp_advertising |= ADVERTISED_Asym_Pause;
+ lp_advertising |= ADVERTISED_Asym_Pause;
if (status & LINK_STATUS_LINK_PARTNER_10THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Half;
+ lp_advertising |= ADVERTISED_10baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_10TFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_10baseT_Full;
+ lp_advertising |= ADVERTISED_10baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_100TXHD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Half;
+ lp_advertising |= ADVERTISED_100baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_100TXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_100baseT_Full;
+ lp_advertising |= ADVERTISED_100baseT_Full;
if (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_1000baseT_Half;
+ lp_advertising |= ADVERTISED_1000baseT_Half;
if (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseKX_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_1000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_2500XFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_2500baseX_Full;
+ lp_advertising |= ADVERTISED_2500baseX_Full;
if (status & LINK_STATUS_LINK_PARTNER_10GXFD_CAPABLE) {
if (media_type == ETH_PHY_KR) {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseKR_Full;
} else {
- cmd->lp_advertising |=
+ lp_advertising |=
ADVERTISED_10000baseT_Full;
}
}
if (status & LINK_STATUS_LINK_PARTNER_20GXFD_CAPABLE)
- cmd->lp_advertising |= ADVERTISED_20000baseKR2_Full;
+ lp_advertising |= ADVERTISED_20000baseKR2_Full;
}
- cmd->maxtxpkt = 0;
- cmd->maxrxpkt = 0;
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.lp_advertising,
+ lp_advertising);
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
return 0;
}
-static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int bnx2x_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct bnx2x *bp = netdev_priv(dev);
u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
u32 speed, phy_idx;
+ u32 supported;
+ u8 duplex = cmd->base.duplex;
+
+ ethtool_convert_link_mode_to_legacy_u32(&supported,
+ cmd->link_modes.supported);
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
if (IS_MF_SD(bp))
return 0;
DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
" supported 0x%x advertising 0x%x speed %u\n"
- " duplex %d port %d phy_address %d transceiver %d\n"
- " autoneg %d maxtxpkt %d maxrxpkt %d\n",
- cmd->cmd, cmd->supported, cmd->advertising,
- ethtool_cmd_speed(cmd),
- cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
- cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+ " duplex %d port %d phy_address %d\n"
+ " autoneg %d\n",
+ cmd->base.cmd, supported, advertising,
+ cmd->base.speed,
+ cmd->base.duplex, cmd->base.port, cmd->base.phy_address,
+ cmd->base.autoneg);
- speed = ethtool_cmd_speed(cmd);
+ speed = cmd->base.speed;
/* If we received a request for an unknown duplex, assume full */
- if (cmd->duplex == DUPLEX_UNKNOWN)
- cmd->duplex = DUPLEX_FULL;
+ if (duplex == DUPLEX_UNKNOWN)
+ duplex = DUPLEX_FULL;
if (IS_MF_SI(bp)) {
u32 part;
@@ -410,8 +429,8 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
cfg_idx = bnx2x_get_link_cfg_idx(bp);
old_multi_phy_config = bp->link_params.multi_phy_config;
- if (cmd->port != bnx2x_get_port_type(bp)) {
- switch (cmd->port) {
+ if (cmd->base.port != bnx2x_get_port_type(bp)) {
+ switch (cmd->base.port) {
case PORT_TP:
if (!(bp->port.supported[0] & SUPPORTED_TP ||
bp->port.supported[1] & SUPPORTED_TP)) {
@@ -461,7 +480,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
bp->link_params.multi_phy_config = old_multi_phy_config;
DP(BNX2X_MSG_ETHTOOL, "cfg_idx = %x\n", cfg_idx);
- if (cmd->autoneg == AUTONEG_ENABLE) {
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
u32 an_supported_speed = bp->port.supported[cfg_idx];
if (bp->link_params.phy[EXT_PHY1].type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833)
@@ -473,51 +492,51 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
/* advertise the requested speed and duplex if supported */
- if (cmd->advertising & ~an_supported_speed) {
+ if (advertising & ~an_supported_speed) {
DP(BNX2X_MSG_ETHTOOL,
"Advertisement parameters are not supported\n");
return -EINVAL;
}
bp->link_params.req_line_speed[cfg_idx] = SPEED_AUTO_NEG;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = (ADVERTISED_Autoneg |
- cmd->advertising);
- if (cmd->advertising) {
+ advertising);
+ if (advertising) {
bp->link_params.speed_cap_mask[cfg_idx] = 0;
- if (cmd->advertising & ADVERTISED_10baseT_Half) {
+ if (advertising & ADVERTISED_10baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF;
}
- if (cmd->advertising & ADVERTISED_10baseT_Full)
+ if (advertising & ADVERTISED_10baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Full)
+ if (advertising & ADVERTISED_100baseT_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL;
- if (cmd->advertising & ADVERTISED_100baseT_Half) {
+ if (advertising & ADVERTISED_100baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF;
}
- if (cmd->advertising & ADVERTISED_1000baseT_Half) {
+ if (advertising & ADVERTISED_1000baseT_Half) {
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
}
- if (cmd->advertising & (ADVERTISED_1000baseT_Full |
+ if (advertising & (ADVERTISED_1000baseT_Full |
ADVERTISED_1000baseKX_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_1G;
- if (cmd->advertising & (ADVERTISED_10000baseT_Full |
+ if (advertising & (ADVERTISED_10000baseT_Full |
ADVERTISED_10000baseKX4_Full |
ADVERTISED_10000baseKR_Full))
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_10G;
- if (cmd->advertising & ADVERTISED_20000baseKR2_Full)
+ if (advertising & ADVERTISED_20000baseKR2_Full)
bp->link_params.speed_cap_mask[cfg_idx] |=
PORT_HW_CFG_SPEED_CAPABILITY_D0_20G;
}
@@ -525,7 +544,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
/* advertise the requested speed and duplex if supported */
switch (speed) {
case SPEED_10:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_10baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -549,7 +568,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_100:
- if (cmd->duplex == DUPLEX_FULL) {
+ if (duplex == DUPLEX_FULL) {
if (!(bp->port.supported[cfg_idx] &
SUPPORTED_100baseT_Full)) {
DP(BNX2X_MSG_ETHTOOL,
@@ -573,7 +592,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_1000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"1G half not supported\n");
return -EINVAL;
@@ -596,7 +615,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_2500:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"2.5G half not supported\n");
return -EINVAL;
@@ -614,7 +633,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
break;
case SPEED_10000:
- if (cmd->duplex != DUPLEX_FULL) {
+ if (duplex != DUPLEX_FULL) {
DP(BNX2X_MSG_ETHTOOL,
"10G half not supported\n");
return -EINVAL;
@@ -644,7 +663,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
}
bp->link_params.req_line_speed[cfg_idx] = speed;
- bp->link_params.req_duplex[cfg_idx] = cmd->duplex;
+ bp->link_params.req_duplex[cfg_idx] = duplex;
bp->port.advertising[cfg_idx] = advertising;
}
@@ -3605,8 +3624,6 @@ static int bnx2x_get_ts_info(struct net_device *dev,
}
static const struct ethtool_ops bnx2x_ethtool_ops = {
- .get_settings = bnx2x_get_settings,
- .set_settings = bnx2x_set_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_regs_len = bnx2x_get_regs_len,
.get_regs = bnx2x_get_regs,
@@ -3646,10 +3663,11 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
.get_eee = bnx2x_get_eee,
.set_eee = bnx2x_set_eee,
.get_ts_info = bnx2x_get_ts_info,
+ .get_link_ksettings = bnx2x_get_link_ksettings,
+ .set_link_ksettings = bnx2x_set_link_ksettings,
};
static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
- .get_settings = bnx2x_get_vf_settings,
.get_drvinfo = bnx2x_get_drvinfo,
.get_msglevel = bnx2x_get_msglevel,
.set_msglevel = bnx2x_set_msglevel,
@@ -3667,6 +3685,7 @@ static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
.set_rxfh = bnx2x_set_rxfh,
.get_channels = bnx2x_get_channels,
.set_channels = bnx2x_set_channels,
+ .get_link_ksettings = bnx2x_get_vf_link_ksettings,
};
void bnx2x_set_ethtool_ops(struct bnx2x *bp, struct net_device *netdev)
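The get_settings/set_settings pair is retired in favor of the link_ksettings API, where link modes are arbitrary-length bitmaps instead of a single u32; drivers like bnx2x that still track modes as legacy SUPPORTED_*/ADVERTISED_* masks convert at the boundary with the ethtool helpers, exactly as above. In miniature (a sketch with made-up fixed modes):

static int foo_get_link_ksettings(struct net_device *dev,
				  struct ethtool_link_ksettings *cmd)
{
	u32 supported = SUPPORTED_1000baseT_Full | SUPPORTED_Autoneg;
	u32 advertising = ADVERTISED_1000baseT_Full | ADVERTISED_Autoneg;

	cmd->base.speed = SPEED_1000;
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.autoneg = AUTONEG_ENABLE;

	/* legacy u32 masks -> ETHTOOL_LINK_MODE_* bitmaps */
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);
	return 0;
}

Note that the transceiver, maxtxpkt and maxrxpkt fields have no equivalent in the new base struct, which is why their assignments and debug prints are dropped above.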
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 7bd2a85..aff3dc1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1096,7 +1096,7 @@ static struct sk_buff *bnxt_gro_func_5730x(struct bnxt_tpa_info *tpa_info,
{
#ifdef CONFIG_INET
struct tcphdr *th;
- int len, nw_off, tcp_opt_len;
+ int len, nw_off, tcp_opt_len = 0;
if (tcp_ts)
tcp_opt_len = 12;
@@ -1759,7 +1759,7 @@ static int bnxt_poll_nitroa0(struct napi_struct *napi, int budget)
}
if (!bnxt_has_work(bp, cpr) && rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_pkts);
BNXT_CP_DB_REARM(cpr->cp_doorbell, cpr->cp_raw_cons);
}
return rx_pkts;
@@ -2467,6 +2467,8 @@ static int bnxt_calc_nr_ring_pages(u32 ring_size, int desc_per_pg)
static void bnxt_set_tpa_flags(struct bnxt *bp)
{
bp->flags &= ~BNXT_FLAG_TPA;
+ if (bp->flags & BNXT_FLAG_NO_AGG_RINGS)
+ return;
if (bp->dev->features & NETIF_F_LRO)
bp->flags |= BNXT_FLAG_LRO;
if (bp->dev->features & NETIF_F_GRO)
@@ -4944,6 +4946,7 @@ static int bnxt_setup_int_mode(struct bnxt *bp)
return rc;
}
+#ifdef CONFIG_RFS_ACCEL
static unsigned int bnxt_get_max_func_rss_ctxs(struct bnxt *bp)
{
#if defined(CONFIG_BNXT_SRIOV)
@@ -4961,6 +4964,7 @@ static unsigned int bnxt_get_max_func_vnics(struct bnxt *bp)
#endif
return bp->pf.max_vnics;
}
+#endif
unsigned int bnxt_get_max_func_stat_ctxs(struct bnxt *bp)
{
@@ -5437,17 +5441,12 @@ static int bnxt_update_link(struct bnxt *bp, bool chng_link_state)
if ((link_info->support_auto_speeds | diff) !=
link_info->support_auto_speeds) {
/* An advertised speed is no longer supported, so we need to
- * update the advertisement settings. See bnxt_reset() for
- * comments about the rtnl_lock() sequence below.
+ * update the advertisement settings. Caller holds RTNL
+ * so we can modify link settings.
*/
- clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_lock();
link_info->advertising = link_info->support_auto_speeds;
- if (test_bit(BNXT_STATE_OPEN, &bp->state) &&
- (link_info->autoneg & BNXT_AUTONEG_SPEED))
+ if (link_info->autoneg & BNXT_AUTONEG_SPEED)
bnxt_hwrm_set_link_setting(bp, true, false);
- set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
- rtnl_unlock();
}
return 0;
}
@@ -5617,6 +5616,45 @@ static int bnxt_hwrm_shutdown_link(struct bnxt *bp)
return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
+static int bnxt_hwrm_port_led_qcaps(struct bnxt *bp)
+{
+ struct hwrm_port_led_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
+ struct hwrm_port_led_qcaps_input req = {0};
+ struct bnxt_pf_info *pf = &bp->pf;
+ int rc;
+
+ if (BNXT_VF(bp) || bp->hwrm_spec_code < 0x10601)
+ return 0;
+
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_QCAPS, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ mutex_lock(&bp->hwrm_cmd_lock);
+ rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc) {
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return rc;
+ }
+ if (resp->num_leds > 0 && resp->num_leds < BNXT_MAX_LED) {
+ int i;
+
+ bp->num_leds = resp->num_leds;
+ memcpy(bp->leds, &resp->led0_id, sizeof(bp->leds[0]) *
+ bp->num_leds);
+ for (i = 0; i < bp->num_leds; i++) {
+ struct bnxt_led_info *led = &bp->leds[i];
+ __le16 caps = led->led_state_caps;
+
+ if (!led->led_group_id ||
+ !BNXT_LED_ALT_BLINK_CAP(caps)) {
+ bp->num_leds = 0;
+ break;
+ }
+ }
+ }
+ mutex_unlock(&bp->hwrm_cmd_lock);
+ return 0;
+}
+
static bool bnxt_eee_config_ok(struct bnxt *bp)
{
struct ethtool_eee *eee = &bp->eee;
@@ -6324,29 +6362,37 @@ bnxt_restart_timer:
mod_timer(&bp->timer, jiffies + bp->current_interval);
}
-/* Only called from bnxt_sp_task() */
-static void bnxt_reset(struct bnxt *bp, bool silent)
+static void bnxt_rtnl_lock_sp(struct bnxt *bp)
{
- /* bnxt_reset_task() calls bnxt_close_nic() which waits
- * for BNXT_STATE_IN_SP_TASK to clear.
- * If there is a parallel dev_close(), bnxt_close() may be holding
+ /* We are called from bnxt_sp_task which has BNXT_STATE_IN_SP_TASK
+ * set. If the device is being closed, bnxt_close() may be holding
* rtnl() and waiting for BNXT_STATE_IN_SP_TASK to clear. So we
* must clear BNXT_STATE_IN_SP_TASK before holding rtnl().
*/
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_lock();
- if (test_bit(BNXT_STATE_OPEN, &bp->state))
- bnxt_reset_task(bp, silent);
+}
+
+static void bnxt_rtnl_unlock_sp(struct bnxt *bp)
+{
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
rtnl_unlock();
}
+/* Only called from bnxt_sp_task() */
+static void bnxt_reset(struct bnxt *bp, bool silent)
+{
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_reset_task(bp, silent);
+ bnxt_rtnl_unlock_sp(bp);
+}
+
static void bnxt_cfg_ntp_filters(struct bnxt *);
static void bnxt_sp_task(struct work_struct *work)
{
struct bnxt *bp = container_of(work, struct bnxt, sp_task);
- int rc;
set_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
smp_mb__after_atomic();
@@ -6360,16 +6406,6 @@ static void bnxt_sp_task(struct work_struct *work)
if (test_and_clear_bit(BNXT_RX_NTP_FLTR_SP_EVENT, &bp->sp_event))
bnxt_cfg_ntp_filters(bp);
- if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
- if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
- &bp->sp_event))
- bnxt_hwrm_phy_qcaps(bp);
-
- rc = bnxt_update_link(bp, true);
- if (rc)
- netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
- rc);
- }
if (test_and_clear_bit(BNXT_HWRM_EXEC_FWD_REQ_SP_EVENT, &bp->sp_event))
bnxt_hwrm_exec_fwd_req(bp);
if (test_and_clear_bit(BNXT_VXLAN_ADD_PORT_SP_EVENT, &bp->sp_event)) {
@@ -6390,18 +6426,39 @@ static void bnxt_sp_task(struct work_struct *work)
bnxt_hwrm_tunnel_dst_port_free(
bp, TUNNEL_DST_PORT_FREE_REQ_TUNNEL_TYPE_GENEVE);
}
+ if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
+ bnxt_hwrm_port_qstats(bp);
+
+ /* The functions below will temporarily clear BNXT_STATE_IN_SP_TASK,
+ * so they must be the last handlers to run before exiting.
+ */
+ if (test_and_clear_bit(BNXT_LINK_CHNG_SP_EVENT, &bp->sp_event)) {
+ int rc = 0;
+
+ if (test_and_clear_bit(BNXT_LINK_SPEED_CHNG_SP_EVENT,
+ &bp->sp_event))
+ bnxt_hwrm_phy_qcaps(bp);
+
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ rc = bnxt_update_link(bp, true);
+ bnxt_rtnl_unlock_sp(bp);
+ if (rc)
+ netdev_err(bp->dev, "SP task can't update link (rc: %x)\n",
+ rc);
+ }
+ if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event)) {
+ bnxt_rtnl_lock_sp(bp);
+ if (test_bit(BNXT_STATE_OPEN, &bp->state))
+ bnxt_get_port_module_status(bp);
+ bnxt_rtnl_unlock_sp(bp);
+ }
if (test_and_clear_bit(BNXT_RESET_TASK_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, false);
if (test_and_clear_bit(BNXT_RESET_TASK_SILENT_SP_EVENT, &bp->sp_event))
bnxt_reset(bp, true);
- if (test_and_clear_bit(BNXT_HWRM_PORT_MODULE_SP_EVENT, &bp->sp_event))
- bnxt_get_port_module_status(bp);
-
- if (test_and_clear_bit(BNXT_PERIODIC_STATS_SP_EVENT, &bp->sp_event))
- bnxt_hwrm_port_qstats(bp);
-
smp_mb__before_atomic();
clear_bit(BNXT_STATE_IN_SP_TASK, &bp->state);
}
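The reshuffling of bnxt_sp_task() above is the heart of this fix: bnxt_rtnl_lock_sp() must clear BNXT_STATE_IN_SP_TASK before calling rtnl_lock(), because bnxt_close() takes rtnl first and then waits for that bit to drop, so acquiring them in the opposite order would deadlock. Because the bit is momentarily cleared, a racing close can slip in, which is why each handler re-checks BNXT_STATE_OPEN under rtnl before touching link or ring state, and why the handlers that use this helper pair are moved to the end of the task: once the bit has been released, no later handler may assume the device is still open.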
@@ -7240,6 +7297,7 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
}
bnxt_hwrm_func_qcfg(bp);
+ bnxt_hwrm_port_led_qcaps(bp);
bnxt_set_tpa_flags(bp);
bnxt_set_ring_params(bp);
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index f6b9b1c..52a1cc0 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -868,6 +868,20 @@ struct bnxt_queue_info {
u8 queue_profile;
};
+#define BNXT_MAX_LED 4
+
+struct bnxt_led_info {
+ u8 led_id;
+ u8 led_type;
+ u8 led_group_id;
+ u8 unused;
+ __le16 led_state_caps;
+#define BNXT_LED_ALT_BLINK_CAP(x) ((x) & \
+ cpu_to_le16(PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED))
+
+ __le16 led_color_caps;
+};
+
#define BNXT_GRCPF_REG_WINDOW_BASE_OUT 0x400
#define BNXT_CAG_REG_LEGACY_INT_STATUS 0x4014
#define BNXT_CAG_REG_BASE 0x300000
@@ -1123,6 +1137,9 @@ struct bnxt {
struct ethtool_eee eee;
u32 lpi_tmr_lo;
u32 lpi_tmr_hi;
+
+ u8 num_leds;
+ struct bnxt_led_info leds[BNXT_MAX_LED];
};
#define BNXT_RX_STATS_OFFSET(counter) \
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index dd21be4..24818e1 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2080,6 +2080,47 @@ static int bnxt_nway_reset(struct net_device *dev)
return rc;
}
+static int bnxt_set_phys_id(struct net_device *dev,
+ enum ethtool_phys_id_state state)
+{
+ struct hwrm_port_led_cfg_input req = {0};
+ struct bnxt *bp = netdev_priv(dev);
+ struct bnxt_pf_info *pf = &bp->pf;
+ struct bnxt_led_cfg *led_cfg;
+ u8 led_state;
+ __le16 duration;
+ int i, rc;
+
+ if (!bp->num_leds || BNXT_VF(bp))
+ return -EOPNOTSUPP;
+
+ if (state == ETHTOOL_ID_ACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
+ duration = cpu_to_le16(500);
+ } else if (state == ETHTOOL_ID_INACTIVE) {
+ led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
+ duration = cpu_to_le16(0);
+ } else {
+ return -EINVAL;
+ }
+ bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
+ req.port_id = cpu_to_le16(pf->port_id);
+ req.num_leds = bp->num_leds;
+ led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
+ for (i = 0; i < bp->num_leds; i++, led_cfg++) {
+ req.enables |= BNXT_LED_DFLT_ENABLES(i);
+ led_cfg->led_id = bp->leds[i].led_id;
+ led_cfg->led_state = led_state;
+ led_cfg->led_blink_on = duration;
+ led_cfg->led_blink_off = duration;
+ led_cfg->led_group_id = bp->leds[i].led_group_id;
+ }
+ rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
+ if (rc)
+ rc = -EIO;
+ return rc;
+}
+
const struct ethtool_ops bnxt_ethtool_ops = {
.get_link_ksettings = bnxt_get_link_ksettings,
.set_link_ksettings = bnxt_set_link_ksettings,
@@ -2111,5 +2152,6 @@ const struct ethtool_ops bnxt_ethtool_ops = {
.set_eee = bnxt_set_eee,
.get_module_info = bnxt_get_module_info,
.get_module_eeprom = bnxt_get_module_eeprom,
- .nway_reset = bnxt_nway_reset
+ .nway_reset = bnxt_nway_reset,
+ .set_phys_id = bnxt_set_phys_id,
};
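The new set_phys_id hook backs port identification from userspace, e.g. "ethtool -p eth0 10" to blink the port LEDs for ten seconds. On ETHTOOL_ID_ACTIVE every discovered LED is programmed to the alternating-blink state with a 500 ms on/off period driven by the firmware, and returning 0 tells the ethtool core that the device blinks on its own, so no periodic ETHTOOL_ID_ON/ETHTOOL_ID_OFF callbacks are scheduled. ETHTOOL_ID_INACTIVE restores the default LED behavior, and the earlier qcaps probe ensures the hook only reports support when every LED has a group ID and alternate-blink capability.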
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
index 3abc03b..ed1e555 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.h
@@ -10,6 +10,29 @@
#ifndef BNXT_ETHTOOL_H
#define BNXT_ETHTOOL_H
+struct bnxt_led_cfg {
+ u8 led_id;
+ u8 led_state;
+ u8 led_color;
+ u8 unused;
+ __le16 led_blink_on;
+ __le16 led_blink_off;
+ u8 led_group_id;
+ u8 rsvd;
+};
+
+#define BNXT_LED_DFLT_ENA \
+ (PORT_LED_CFG_REQ_ENABLES_LED0_ID | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_STATE | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF | \
+ PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID)
+
+#define BNXT_LED_DFLT_ENA_SHIFT 6
+
+#define BNXT_LED_DFLT_ENABLES(x) \
+ cpu_to_le32(BNXT_LED_DFLT_ENA << (BNXT_LED_DFLT_ENA_SHIFT * (x)))
+
extern const struct ethtool_ops bnxt_ethtool_ops;
u32 _bnxt_fw_to_ethtool_adv_spds(u16, u8);
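The shift arithmetic here follows from the enables layout in the bnxt_hsi.h hunk below: each LED owns six consecutive enable bits (ID, STATE, COLOR, BLINK_ON, BLINK_OFF, GROUP_ID), hence the BNXT_LED_DFLT_ENA_SHIFT of 6 per LED index. BNXT_LED_DFLT_ENA itself is 0x3b, covering five of the six bits and deliberately leaving COLOR at its default; for LED 1, BNXT_LED_DFLT_ENABLES(1) yields cpu_to_le32(0x3b << 6) = 0xec0, which is exactly PORT_LED_CFG_REQ_ENABLES_LED1_ID | _STATE | _BLINK_ON | _BLINK_OFF | _GROUP_ID.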
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
index d0d49ed..5df32ab 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_hsi.h
@@ -1,7 +1,7 @@
/* Broadcom NetXtreme-C/E network driver.
*
* Copyright (c) 2014-2016 Broadcom Corporation
- * Copyright (c) 2016 Broadcom Limited
+ * Copyright (c) 2016-2017 Broadcom Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -11,12 +11,12 @@
#ifndef BNXT_HSI_H
#define BNXT_HSI_H
-/* HSI and HWRM Specification 1.6.0 */
+/* HSI and HWRM Specification 1.6.1 */
#define HWRM_VERSION_MAJOR 1
#define HWRM_VERSION_MINOR 6
-#define HWRM_VERSION_UPDATE 0
+#define HWRM_VERSION_UPDATE 1
-#define HWRM_VERSION_STR "1.6.0"
+#define HWRM_VERSION_STR "1.6.1"
/*
* Following is the signature for HWRM message field that indicates not
* applicable (All F's). Need to cast it to the size of the field if needed.
@@ -549,6 +549,8 @@ struct hwrm_ver_get_output {
__le32 dev_caps_cfg;
#define VER_GET_RESP_DEV_CAPS_CFG_SECURE_FW_UPD_SUPPORTED 0x1UL
#define VER_GET_RESP_DEV_CAPS_CFG_FW_DCBX_AGENT_SUPPORTED 0x2UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_SUPPORTED 0x4UL
+ #define VER_GET_RESP_DEV_CAPS_CFG_SHORT_CMD_REQUIRED 0x8UL
u8 roce_fw_maj;
u8 roce_fw_min;
u8 roce_fw_bld;
@@ -1919,6 +1921,219 @@ struct hwrm_port_phy_i2c_read_output {
u8 valid;
};
+/* hwrm_port_led_cfg */
+/* Input (64 bytes) */
+struct hwrm_port_led_cfg_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le32 enables;
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_ID 0x1UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_STATE 0x2UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_COLOR 0x4UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_ON 0x8UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_BLINK_OFF 0x10UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED0_GROUP_ID 0x20UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_ID 0x40UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_STATE 0x80UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_COLOR 0x100UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_ON 0x200UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_BLINK_OFF 0x400UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED1_GROUP_ID 0x800UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_ID 0x1000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_STATE 0x2000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_COLOR 0x4000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_ON 0x8000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_BLINK_OFF 0x10000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED2_GROUP_ID 0x20000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_ID 0x40000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_STATE 0x80000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_COLOR 0x100000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_ON 0x200000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_BLINK_OFF 0x400000UL
+ #define PORT_LED_CFG_REQ_ENABLES_LED3_GROUP_ID 0x800000UL
+ __le16 port_id;
+ u8 num_leds;
+ u8 rsvd;
+ u8 led0_id;
+ u8 led0_state;
+ #define PORT_LED_CFG_REQ_LED0_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED0_STATE_BLINKALT 0x4UL
+ u8 led0_color;
+ #define PORT_LED_CFG_REQ_LED0_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED0_COLOR_GREENAMBER 0x3UL
+ u8 unused_0;
+ __le16 led0_blink_on;
+ __le16 led0_blink_off;
+ u8 led0_group_id;
+ u8 rsvd0;
+ u8 led1_id;
+ u8 led1_state;
+ #define PORT_LED_CFG_REQ_LED1_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED1_STATE_BLINKALT 0x4UL
+ u8 led1_color;
+ #define PORT_LED_CFG_REQ_LED1_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED1_COLOR_GREENAMBER 0x3UL
+ u8 unused_1;
+ __le16 led1_blink_on;
+ __le16 led1_blink_off;
+ u8 led1_group_id;
+ u8 rsvd1;
+ u8 led2_id;
+ u8 led2_state;
+ #define PORT_LED_CFG_REQ_LED2_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED2_STATE_BLINKALT 0x4UL
+ u8 led2_color;
+ #define PORT_LED_CFG_REQ_LED2_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED2_COLOR_GREENAMBER 0x3UL
+ u8 unused_2;
+ __le16 led2_blink_on;
+ __le16 led2_blink_off;
+ u8 led2_group_id;
+ u8 rsvd2;
+ u8 led3_id;
+ u8 led3_state;
+ #define PORT_LED_CFG_REQ_LED3_STATE_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_OFF 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_ON 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINK 0x3UL
+ #define PORT_LED_CFG_REQ_LED3_STATE_BLINKALT 0x4UL
+ u8 led3_color;
+ #define PORT_LED_CFG_REQ_LED3_COLOR_DEFAULT 0x0UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_AMBER 0x1UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREEN 0x2UL
+ #define PORT_LED_CFG_REQ_LED3_COLOR_GREENAMBER 0x3UL
+ u8 unused_3;
+ __le16 led3_blink_on;
+ __le16 led3_blink_off;
+ u8 led3_group_id;
+ u8 rsvd3;
+};
+
+/* Output (16 bytes) */
+struct hwrm_port_led_cfg_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ __le32 unused_0;
+ u8 unused_1;
+ u8 unused_2;
+ u8 unused_3;
+ u8 valid;
+};
+
+/* hwrm_port_led_qcaps */
+/* Input (24 bytes) */
+struct hwrm_port_led_qcaps_input {
+ __le16 req_type;
+ __le16 cmpl_ring;
+ __le16 seq_id;
+ __le16 target_id;
+ __le64 resp_addr;
+ __le16 port_id;
+ __le16 unused_0[3];
+};
+
+/* Output (48 bytes) */
+struct hwrm_port_led_qcaps_output {
+ __le16 error_code;
+ __le16 req_type;
+ __le16 seq_id;
+ __le16 resp_len;
+ u8 num_leds;
+ u8 unused_0[3];
+ u8 led0_id;
+ u8 led0_type;
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_TYPE_INVALID 0xffUL
+ u8 led0_group_id;
+ u8 unused_1;
+ __le16 led0_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED0_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led0_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED0_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led1_id;
+ u8 led1_type;
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_TYPE_INVALID 0xffUL
+ u8 led1_group_id;
+ u8 unused_2;
+ __le16 led1_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED1_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led1_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED1_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led2_id;
+ u8 led2_type;
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_TYPE_INVALID 0xffUL
+ u8 led2_group_id;
+ u8 unused_3;
+ __le16 led2_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED2_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led2_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED2_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 led3_id;
+ u8 led3_type;
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_SPEED 0x0UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_ACTIVITY 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_TYPE_INVALID 0xffUL
+ u8 led3_group_id;
+ u8 unused_4;
+ __le16 led3_state_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ENABLED 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_OFF_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_ON_SUPPORTED 0x4UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_SUPPORTED 0x8UL
+ #define PORT_LED_QCAPS_RESP_LED3_STATE_CAPS_BLINK_ALT_SUPPORTED 0x10UL
+ __le16 led3_color_caps;
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_RSVD 0x1UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_AMBER_SUPPORTED 0x2UL
+ #define PORT_LED_QCAPS_RESP_LED3_COLOR_CAPS_GREEN_SUPPORTED 0x4UL
+ u8 unused_5;
+ u8 unused_6;
+ u8 unused_7;
+ u8 valid;
+};
+
/* hwrm_queue_qportcfg */
/* Input (24 bytes) */
struct hwrm_queue_qportcfg_input {
@@ -4092,9 +4307,7 @@ struct hwrm_fw_set_structured_data_input {
__le64 src_data_addr;
__le16 data_len;
u8 hdr_cnt;
- u8 unused_0;
- __le16 port_id;
- __le16 unused_1;
+ u8 unused_0[5];
};
/* Output (16 bytes) */
@@ -4111,7 +4324,7 @@ struct hwrm_fw_set_structured_data_output {
};
/* hwrm_fw_get_structured_data */
-/* Input (40 bytes) */
+/* Input (32 bytes) */
struct hwrm_fw_get_structured_data_input {
__le16 req_type;
__le16 cmpl_ring;
@@ -4131,8 +4344,6 @@ struct hwrm_fw_get_structured_data_input {
#define FW_GET_STRUCTURED_DATA_REQ_SUBTYPE_NON_TPMR_OPERATIONAL 0x202UL
u8 count;
u8 unused_0;
- __le16 port_id;
- __le16 unused_1[3];
};
/* Output (16 bytes) */
@@ -4616,7 +4827,8 @@ struct hwrm_nvm_install_update_input {
__le32 install_type;
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_NORMAL 0x0UL
#define NVM_INSTALL_UPDATE_REQ_INSTALL_TYPE_ALL 0xffffffffUL
- __le32 unused_0;
+ __le16 flags;
+ __le16 unused_0;
};
/* Output (24 bytes) */
@@ -4973,12 +5185,13 @@ struct ctx_hw_stats {
struct hwrm_struct_hdr {
__le16 struct_id;
#define STRUCT_HDR_STRUCT_ID_LLDP_CFG 0x41bUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_ETS_CFG 0x41dUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_PFC_CFG 0x41fUL
- #define STRUCT_HDR_STRUCT_ID_DCBX_APP_CFG 0x421UL
- #define STRUCT_HDR_STRUCT_ID_DCBX_STATE_CFG 0x422UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC_CFG 0x424UL
- #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE_CFG 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_ETS 0x41dUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_PFC 0x41fUL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_APP 0x421UL
+ #define STRUCT_HDR_STRUCT_ID_DCBX_FEATURE_STATE 0x422UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_GENERIC 0x424UL
+ #define STRUCT_HDR_STRUCT_ID_LLDP_DEVICE 0x426UL
+ #define STRUCT_HDR_STRUCT_ID_PORT_DESCRIPTION 0xaUL
__le16 len;
u8 version;
u8 count;
@@ -4988,14 +5201,14 @@ struct hwrm_struct_hdr {
__le16 unused_0[3];
};
-/* DCBX Application configuration structure (8 bytes) */
-struct hwrm_struct_data_dcbx_app_cfg {
- __le16 protocol_id;
+/* DCBX Application configuration structure (1057) (8 bytes) */
+struct hwrm_struct_data_dcbx_app {
+ __be16 protocol_id;
u8 protocol_selector;
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
- #define STRUCT_DATA_DCBX_APP_CFG_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE 0x1UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_PORT 0x2UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_UDP_PORT 0x3UL
+ #define STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_TCP_UDP_PORT 0x4UL
u8 priority;
u8 valid;
u8 unused_0[3];
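Note: protocol_id moves from __le16 to __be16 because DCBX application priority TLVs carry the protocol identifier big-endian on the wire. A hedged sketch of filling the structure (values illustrative only):

	struct hwrm_struct_data_dcbx_app app = {};

	/* big-endian on the wire, hence the __be16 type */
	app.protocol_id = cpu_to_be16(0x8906);	/* e.g. FCoE ethertype */
	app.protocol_selector =
		STRUCT_DATA_DCBX_APP_PROTOCOL_SELECTOR_ETHER_TYPE;
	app.priority = 3;
	app.valid = 1;
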
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
index 64ef0e5..0b8cd74 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_sriov.c
@@ -15,6 +15,7 @@
#include <linux/etherdevice.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
+#include "bnxt_ulp.h"
#include "bnxt_sriov.h"
#include "bnxt_ethtool.h"
@@ -555,6 +556,8 @@ static int bnxt_sriov_enable(struct bnxt *bp, int *num_vfs)
if (rc)
goto err_out2;
+ bnxt_ulp_sriov_cfg(bp, *num_vfs);
+
rc = pci_enable_sriov(bp->pdev, *num_vfs);
if (rc)
goto err_out2;
@@ -596,6 +599,8 @@ void bnxt_sriov_disable(struct bnxt *bp)
rtnl_lock();
bnxt_restore_pf_fw_resources(bp);
rtnl_unlock();
+
+ bnxt_ulp_sriov_cfg(bp, 0);
}
int bnxt_sriov_configure(struct pci_dev *pdev, int num_vfs)
diff --git a/drivers/net/ethernet/broadcom/sb1250-mac.c b/drivers/net/ethernet/broadcom/sb1250-mac.c
index 435a2e4..89d4feb 100644
--- a/drivers/net/ethernet/broadcom/sb1250-mac.c
+++ b/drivers/net/ethernet/broadcom/sb1250-mac.c
@@ -2537,7 +2537,7 @@ static int sbmac_poll(struct napi_struct *napi, int budget)
sbdma_tx_process(sc, &(sc->sbm_txdma), 1);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
#ifdef CONFIG_SBMAC_COALESCE
__raw_writeq(((M_MAC_INT_EOP_COUNT | M_MAC_INT_EOP_TIMER) << S_MAC_TX_CH0) |
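Note: this napi_complete() -> napi_complete_done() conversion recurs throughout the series. Reporting the actual work_done lets the core make better busy-poll and interrupt-moderation decisions than the zero that a bare napi_complete() implies. The canonical poll-handler shape, sketched with hypothetical foo_* helpers:

static int foo_poll(struct napi_struct *napi, int budget)
{
	/* foo_rx_clean() is a hypothetical per-driver RX processor */
	int work_done = foo_rx_clean(napi, budget);

	if (work_done < budget) {
		/* report how much we did so the stack can tune polling */
		napi_complete_done(napi, work_done);
		foo_enable_irqs(napi);	/* re-arm device interrupts */
	}
	return work_done;
}
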
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 73a9411..6e13c93 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -1881,7 +1881,7 @@ bnad_napi_poll_rx(struct napi_struct *napi, int budget)
return rcvd;
poll_exit:
- napi_complete(napi);
+ napi_complete_done(napi, rcvd);
rx_ctrl->rx_complete++;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index c0fb80a..d7d135c 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -1090,7 +1090,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
work_done = bp->macbgem_ops.mog_rx(bp, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Packets received while interrupts were disabled */
status = macb_readl(bp, RSR);
@@ -2085,6 +2085,9 @@ static int macb_open(struct net_device *dev)
netif_tx_start_all_queues(dev);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_init(dev);
+
return 0;
}
@@ -2106,6 +2109,9 @@ static int macb_close(struct net_device *dev)
macb_free_consistent(bp);
+ if (bp->ptp_info)
+ bp->ptp_info->ptp_remove(dev);
+
return 0;
}
@@ -2379,6 +2385,17 @@ static int macb_set_ringparam(struct net_device *netdev,
return 0;
}
+static int macb_get_ts_info(struct net_device *netdev,
+ struct ethtool_ts_info *info)
+{
+ struct macb *bp = netdev_priv(netdev);
+
+ if (bp->ptp_info)
+ return bp->ptp_info->get_ts_info(netdev, info);
+
+ return ethtool_op_get_ts_info(netdev, info);
+}
+
static const struct ethtool_ops macb_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
@@ -2396,7 +2413,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs,
.get_link = ethtool_op_get_link,
- .get_ts_info = ethtool_op_get_ts_info,
+ .get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats,
.get_strings = gem_get_ethtool_strings,
.get_sset_count = gem_get_sset_count,
@@ -2409,6 +2426,7 @@ static const struct ethtool_ops gem_ethtool_ops = {
static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
struct phy_device *phydev = dev->phydev;
+ struct macb *bp = netdev_priv(dev);
if (!netif_running(dev))
return -EINVAL;
@@ -2416,7 +2434,17 @@ static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
if (!phydev)
return -ENODEV;
- return phy_mii_ioctl(phydev, rq, cmd);
+ if (!bp->ptp_info)
+ return phy_mii_ioctl(phydev, rq, cmd);
+
+ switch (cmd) {
+ case SIOCSHWTSTAMP:
+ return bp->ptp_info->set_hwtst(dev, rq, cmd);
+ case SIOCGHWTSTAMP:
+ return bp->ptp_info->get_hwtst(dev, rq);
+ default:
+ return phy_mii_ioctl(phydev, rq, cmd);
+ }
}
static int macb_set_features(struct net_device *netdev,
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d67adad..94ddedd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -131,6 +131,20 @@
#define GEM_RXIPCCNT 0x01a8 /* IP header Checksum Error Counter */
#define GEM_RXTCPCCNT 0x01ac /* TCP Checksum Error Counter */
#define GEM_RXUDPCCNT 0x01b0 /* UDP Checksum Error Counter */
+#define GEM_TISUBN 0x01bc /* 1588 Timer Increment Sub-ns */
+#define GEM_TSH 0x01c0 /* 1588 Timer Seconds High */
+#define GEM_TSL 0x01d0 /* 1588 Timer Seconds Low */
+#define GEM_TN 0x01d4 /* 1588 Timer Nanoseconds */
+#define GEM_TA 0x01d8 /* 1588 Timer Adjust */
+#define GEM_TI 0x01dc /* 1588 Timer Increment */
+#define GEM_EFTSL 0x01e0 /* PTP Event Frame Tx Seconds Low */
+#define GEM_EFTN 0x01e4 /* PTP Event Frame Tx Nanoseconds */
+#define GEM_EFRSL 0x01e8 /* PTP Event Frame Rx Seconds Low */
+#define GEM_EFRN 0x01ec /* PTP Event Frame Rx Nanoseconds */
+#define GEM_PEFTSL 0x01f0 /* PTP Peer Event Frame Tx Secs Low */
+#define GEM_PEFTN 0x01f4 /* PTP Peer Event Frame Tx Ns */
+#define GEM_PEFRSL 0x01f8 /* PTP Peer Event Frame Rx Sec Low */
+#define GEM_PEFRN 0x01fc /* PTP Peer Event Frame Rx Ns */
#define GEM_DCFG1 0x0280 /* Design Config 1 */
#define GEM_DCFG2 0x0284 /* Design Config 2 */
#define GEM_DCFG3 0x0288 /* Design Config 3 */
@@ -174,6 +188,7 @@
#define MACB_NCR_TPF_SIZE 1
#define MACB_TZQ_OFFSET 12 /* Transmit zero quantum pause frame */
#define MACB_TZQ_SIZE 1
+#define MACB_SRTSM_OFFSET 15
/* Bitfields in NCFGR */
#define MACB_SPD_OFFSET 0 /* Speed */
@@ -319,6 +334,32 @@
#define MACB_PTZ_SIZE 1
#define MACB_WOL_OFFSET 14 /* Enable wake-on-lan interrupt */
#define MACB_WOL_SIZE 1
+#define MACB_DRQFR_OFFSET 18 /* PTP Delay Request Frame Received */
+#define MACB_DRQFR_SIZE 1
+#define MACB_SFR_OFFSET 19 /* PTP Sync Frame Received */
+#define MACB_SFR_SIZE 1
+#define MACB_DRQFT_OFFSET 20 /* PTP Delay Request Frame Transmitted */
+#define MACB_DRQFT_SIZE 1
+#define MACB_SFT_OFFSET 21 /* PTP Sync Frame Transmitted */
+#define MACB_SFT_SIZE 1
+#define MACB_PDRQFR_OFFSET 22 /* PDelay Request Frame Received */
+#define MACB_PDRQFR_SIZE 1
+#define MACB_PDRSFR_OFFSET 23 /* PDelay Response Frame Received */
+#define MACB_PDRSFR_SIZE 1
+#define MACB_PDRQFT_OFFSET 24 /* PDelay Request Frame Transmitted */
+#define MACB_PDRQFT_SIZE 1
+#define MACB_PDRSFT_OFFSET 25 /* PDelay Response Frame Transmitted */
+#define MACB_PDRSFT_SIZE 1
+#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */
+#define MACB_SRI_SIZE 1
+
+/* Timer increment fields */
+#define MACB_TI_CNS_OFFSET 0
+#define MACB_TI_CNS_SIZE 8
+#define MACB_TI_ACNS_OFFSET 8
+#define MACB_TI_ACNS_SIZE 8
+#define MACB_TI_NIT_OFFSET 16
+#define MACB_TI_NIT_SIZE 8
/* Bitfields in MAN */
#define MACB_DATA_OFFSET 0 /* data */
@@ -386,6 +427,17 @@
#define GEM_PBUF_LSO_OFFSET 27
#define GEM_PBUF_LSO_SIZE 1
+/* Bitfields in TISUBN */
+#define GEM_SUBNSINCR_OFFSET 0
+#define GEM_SUBNSINCR_SIZE 16
+
+/* Bitfields in TI */
+#define GEM_NSINCR_OFFSET 0
+#define GEM_NSINCR_SIZE 8
+
+/* Bitfields in ADJ */
+#define GEM_ADDSUB_OFFSET 31
+#define GEM_ADDSUB_SIZE 1
/* Constants for CLK */
#define MACB_CLK_DIV8 0
#define MACB_CLK_DIV16 1
@@ -413,6 +465,7 @@
#define MACB_CAPS_NO_GIGABIT_HALF 0x00000008
#define MACB_CAPS_USRIO_DISABLED 0x00000010
#define MACB_CAPS_JUMBO 0x00000020
+#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -782,6 +835,20 @@ struct macb_or_gem_ops {
int (*mog_rx)(struct macb *bp, int budget);
};
+/* MACB-PTP interface: adapt to platform needs. */
+struct macb_ptp_info {
+ void (*ptp_init)(struct net_device *ndev);
+ void (*ptp_remove)(struct net_device *ndev);
+ s32 (*get_ptp_max_adj)(void);
+ unsigned int (*get_tsu_rate)(struct macb *bp);
+ int (*get_ts_info)(struct net_device *dev,
+ struct ethtool_ts_info *info);
+ int (*get_hwtst)(struct net_device *netdev,
+ struct ifreq *ifr);
+ int (*set_hwtst)(struct net_device *netdev,
+ struct ifreq *ifr, int cmd);
+};
+
struct macb_config {
u32 caps;
unsigned int dma_burst_length;
@@ -874,6 +941,8 @@ struct macb {
unsigned int jumbo_max_len;
u32 wol;
+
+ struct macb_ptp_info *ptp_info; /* macb-ptp interface */
};
static inline bool macb_is_gem(struct macb *bp)
@@ -881,4 +950,9 @@ static inline bool macb_is_gem(struct macb *bp)
return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
}
+static inline bool gem_has_ptp(struct macb *bp)
+{
+ return !!(bp->caps & MACB_CAPS_GEM_HAS_PTP);
+}
+
#endif /* _MACB_H */
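Note: a platform PTP backend hooks in by pointing bp->ptp_info at a filled-in ops table before the interface is opened; macb_open()/macb_close() and the ioctl path then dispatch through it. A hedged sketch of the registration (all gem_ptp_* names are hypothetical):

static struct macb_ptp_info gem_ptp_info = {
	.ptp_init	= gem_ptp_init,		/* hypothetical */
	.ptp_remove	= gem_ptp_remove,
	.get_ts_info	= gem_get_ts_info,
	.get_hwtst	= gem_get_hwtst,
	.set_hwtst	= gem_set_hwtst,
};

	/* in probe, only when the GEM variant actually has a TSU */
	if (gem_has_ptp(bp))
		bp->ptp_info = &gem_ptp_info;
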
diff --git a/drivers/net/ethernet/calxeda/xgmac.c b/drivers/net/ethernet/calxeda/xgmac.c
index b054065..2bd7c63 100644
--- a/drivers/net/ethernet/calxeda/xgmac.c
+++ b/drivers/net/ethernet/calxeda/xgmac.c
@@ -1247,7 +1247,7 @@ static int xgmac_poll(struct napi_struct *napi, int budget)
work_done = xgmac_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
__raw_writel(DMA_INTR_DEFAULT_MASK, priv->base + XGMAC_DMA_INTR_ENA);
}
return work_done;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index b8b579d..c12cfa4 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -2223,25 +2223,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv __attribute__((unused)),
- select_queue_fallback_t fallback __attribute__((unused)))
-{
- u32 qindex = 0;
- struct lio *lio;
-
- lio = GET_LIO(dev);
- qindex = skb_tx_hash(dev, skb);
-
- return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
/** Routine to push packets arriving on Octeon interface up to the network layer.
* @param oct_id - octeon device id.
* @param skbuff - skbuff struct to be passed to network layer.
@@ -2379,7 +2360,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
if (packet_was_received) {
droq->stats.rx_bytes_received += len;
droq->stats.rx_pkts_received++;
- netdev->last_rx = jiffies;
} else {
droq->stats.rx_dropped++;
netif_info(lio, rx_err, lio->netdev,
@@ -2465,8 +2445,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
__func__, iq_no);
}
- if ((work_done < budget) && (tx_done)) {
- napi_complete(napi);
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ tx_done = 1;
+ napi_complete_done(napi, work_done);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
@@ -2693,13 +2677,7 @@ static int liquidio_stop(struct net_device *netdev)
lio->linfo.link.s.link_up = 0;
lio->link_changes++;
- /* Pause for a moment and wait for Octeon to flush out (to the wire) any
- * egress packets that are in-flight.
- */
- set_current_state(TASK_INTERRUPTIBLE);
- schedule_timeout(msecs_to_jiffies(100));
-
- /* Now it should be safe to tell Octeon that nic interface is down. */
+ /* Tell Octeon that nic interface is down. */
send_rx_ctrl_cmd(lio, 0);
if (OCTEON_CN23XX_PF(oct)) {
@@ -3342,11 +3320,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- if (skb_shinfo(skb)->gso_size)
- stats->tx_done += skb_shinfo(skb)->gso_segs;
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
else
stats->tx_done++;
- stats->tx_tot_bytes += skb->len;
+ stats->tx_tot_bytes += ndata.datasize;
return NETDEV_TX_OK;
@@ -3761,7 +3739,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_vf_vlan = liquidio_set_vf_vlan,
.ndo_get_vf_config = liquidio_get_vf_config,
.ndo_set_vf_link_state = liquidio_set_vf_link_state,
- .ndo_select_queue = select_q
};
/** \brief Entry point for the liquidio module
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index ad2e72d7..631f1c0f 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -1455,26 +1455,6 @@ static void if_cfg_callback(struct octeon_device *oct,
wake_up_interruptible(&ctx->wc);
}
-/**
- * \brief Select queue based on hash
- * @param dev Net device
- * @param skb sk_buff structure
- * @returns selected queue number
- */
-static u16 select_q(struct net_device *dev, struct sk_buff *skb,
- void *accel_priv __attribute__((unused)),
- select_queue_fallback_t fallback __attribute__((unused)))
-{
- struct lio *lio;
- u32 qindex;
-
- lio = GET_LIO(dev);
-
- qindex = skb_tx_hash(dev, skb);
-
- return (u16)(qindex % (lio->linfo.num_txpciq));
-}
-
/** Routine to push packets arriving on Octeon interface up to the network layer.
* @param oct_id - octeon device id.
* @param skbuff - skbuff struct to be passed to network layer.
@@ -1591,7 +1571,6 @@ liquidio_push_packet(u32 octeon_id __attribute__((unused)),
if (packet_was_received) {
droq->stats.rx_bytes_received += len;
droq->stats.rx_pkts_received++;
- netdev->last_rx = jiffies;
} else {
droq->stats.rx_dropped++;
netif_info(lio, rx_err, lio->netdev,
@@ -1651,8 +1630,12 @@ static int liquidio_napi_poll(struct napi_struct *napi, int budget)
__func__, iq_no);
}
- if ((work_done < budget) && (tx_done)) {
- napi_complete(napi);
+ /* force enable interrupt if reg cnts are high to avoid wraparound */
+ if ((work_done < budget && tx_done) ||
+ (iq->pkt_in_done >= MAX_REG_CNT) ||
+ (droq->pkt_count >= MAX_REG_CNT)) {
+ tx_done = 1;
+ napi_complete_done(napi, work_done);
octeon_process_droq_poll_cmd(droq->oct_dev, droq->q_no,
POLL_EVENT_ENABLE_INTR, 0);
return 0;
@@ -2454,11 +2437,11 @@ static int liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
netif_trans_update(netdev);
- if (skb_shinfo(skb)->gso_size)
- stats->tx_done += skb_shinfo(skb)->gso_segs;
+ if (tx_info->s.gso_segs)
+ stats->tx_done += tx_info->s.gso_segs;
else
stats->tx_done++;
- stats->tx_tot_bytes += skb->len;
+ stats->tx_tot_bytes += ndata.datasize;
return NETDEV_TX_OK;
@@ -2717,7 +2700,6 @@ static const struct net_device_ops lionetdevops = {
.ndo_set_features = liquidio_set_features,
.ndo_udp_tunnel_add = liquidio_add_vxlan_port,
.ndo_udp_tunnel_del = liquidio_del_vxlan_port,
- .ndo_select_queue = select_q,
};
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
diff --git a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
index bc0af8a..294c6f3 100644
--- a/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
+++ b/drivers/net/ethernet/cavium/liquidio/liquidio_common.h
@@ -99,6 +99,7 @@ enum octeon_tag_type {
#define CVM_DRV_APP_END (CVM_DRV_INVALID_APP - 1)
#define BYTES_PER_DHLEN_UNIT 8
+#define MAX_REG_CNT 2000000U
static inline u32 incr_index(u32 index, u32 count, u32 max)
{
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_device.c b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
index a8df493..9675ffb 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_device.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_device.c
@@ -1361,6 +1361,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
spin_lock_bh(&droq->lock);
writel(droq->pkt_count, droq->pkts_sent_reg);
droq->pkt_count = 0;
+ /* this write needs to be flushed before we release the lock */
+ mmiowb();
spin_unlock_bh(&droq->lock);
oct = droq->oct_dev;
}
@@ -1368,6 +1370,8 @@ void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq)
spin_lock_bh(&iq->lock);
writel(iq->pkt_in_done, iq->inst_cnt_reg);
iq->pkt_in_done = 0;
+ /* this write needs to be flushed before we release the lock */
+ mmiowb();
spin_unlock_bh(&iq->lock);
oct = iq->oct_dev;
}
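Note: the added mmiowb() follows the standard pattern for posted MMIO writes under a spinlock: without it, a write issued on one CPU can still be buffered when another CPU acquires the lock and writes the same register, reordering the device-visible sequence. Schematically (q->count_reg stands in for the real register):

	spin_lock_bh(&q->lock);
	writel(count, q->count_reg);	/* posted write, may be buffered */
	mmiowb();			/* flush before the lock is dropped */
	spin_unlock_bh(&q->lock);
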
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
index 73696b42..201b987 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.c
@@ -131,6 +131,7 @@ int octeon_mbox_write(struct octeon_device *oct,
{
struct octeon_mbox *mbox = oct->mbox[mbox_cmd->q_no];
u32 count, i, ret = OCTEON_MBOX_STATUS_SUCCESS;
+ long timeout = LIO_MBOX_WRITE_WAIT_TIME;
unsigned long flags;
spin_lock_irqsave(&mbox->lock, flags);
@@ -158,7 +159,7 @@ int octeon_mbox_write(struct octeon_device *oct,
count = 0;
while (readq(mbox->mbox_write_reg) != OCTEON_PFVFSIG) {
- schedule_timeout_uninterruptible(LIO_MBOX_WRITE_WAIT_TIME);
+ schedule_timeout_uninterruptible(timeout);
if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
ret = OCTEON_MBOX_STATUS_FAILED;
break;
@@ -171,7 +172,7 @@ int octeon_mbox_write(struct octeon_device *oct,
count = 0;
while (readq(mbox->mbox_write_reg) !=
OCTEON_PFVFACK) {
- schedule_timeout_uninterruptible(10);
+ schedule_timeout_uninterruptible(timeout);
if (count++ == LIO_MBOX_WRITE_WAIT_CNT) {
ret = OCTEON_MBOX_STATUS_FAILED;
break;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
index fe60a3e..c9376fe 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_mailbox.h
@@ -31,8 +31,8 @@
#define OCTEON_PFVFSIG 0x1122334455667788
#define OCTEON_PFVFERR 0xDEADDEADDEADDEAD
-#define LIO_MBOX_WRITE_WAIT_CNT 1000
-#define LIO_MBOX_WRITE_WAIT_TIME 10
+#define LIO_MBOX_WRITE_WAIT_CNT 1000
+#define LIO_MBOX_WRITE_WAIT_TIME msecs_to_jiffies(1)
enum octeon_mbox_cmd_status {
OCTEON_MBOX_STATUS_SUCCESS = 0,
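Note: with these constants the worst-case wait per handshake step is bounded at roughly LIO_MBOX_WRITE_WAIT_CNT * LIO_MBOX_WRITE_WAIT_TIME = 1000 * 1 ms, i.e. about one second. The old bare values (10) were interpreted as raw jiffies, whose real duration varied with HZ; msecs_to_jiffies(1) makes the delay HZ-independent.
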
diff --git a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
index 21f80f5..a213868 100644
--- a/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
+++ b/drivers/net/ethernet/cavium/octeon/octeon_mgmt.c
@@ -501,7 +501,7 @@ static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
/* We stopped because no more packets were available. */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
octeon_mgmt_enable_rx_irq(p);
}
octeon_mgmt_update_rx_stats(netdev);
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 2e74bba..02a986c 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -471,12 +471,46 @@ static void nicvf_get_ringparam(struct net_device *netdev,
struct nicvf *nic = netdev_priv(netdev);
struct queue_set *qs = nic->qs;
- ring->rx_max_pending = MAX_RCV_BUF_COUNT;
- ring->rx_pending = qs->rbdr_len;
+ ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
+ ring->rx_pending = qs->cq_len;
ring->tx_max_pending = MAX_SND_QUEUE_LEN;
ring->tx_pending = qs->sq_len;
}
+static int nicvf_set_ringparam(struct net_device *netdev,
+ struct ethtool_ringparam *ring)
+{
+ struct nicvf *nic = netdev_priv(netdev);
+ struct queue_set *qs = nic->qs;
+ u32 rx_count, tx_count;
+
+ /* Due to HW errata this is not supported on T88 pass 1.x silicon */
+ if (pass1_silicon(nic->pdev))
+ return -EINVAL;
+
+ if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+ return -EINVAL;
+
+ tx_count = clamp_t(u32, ring->tx_pending,
+ MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
+ rx_count = clamp_t(u32, ring->rx_pending,
+ MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);
+
+ if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
+ return 0;
+
+ /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
+ qs->sq_len = rounddown_pow_of_two(tx_count);
+ qs->cq_len = rounddown_pow_of_two(rx_count);
+
+ if (netif_running(netdev)) {
+ nicvf_stop(netdev);
+ nicvf_open(netdev);
+ }
+
+ return 0;
+}
+
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
struct ethtool_rxnfc *info)
{
@@ -635,7 +669,7 @@ static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
}
static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
- const u8 *hkey, u8 hfunc)
+ const u8 *hkey, const u8 hfunc)
{
struct nicvf *nic = netdev_priv(dev);
struct nicvf_rss_info *rss = &nic->rss_info;
@@ -787,6 +821,7 @@ static const struct ethtool_ops nicvf_ethtool_ops = {
.get_regs = nicvf_get_regs,
.get_coalesce = nicvf_get_coalesce,
.get_ringparam = nicvf_get_ringparam,
+ .set_ringparam = nicvf_set_ringparam,
.get_rxnfc = nicvf_get_rxnfc,
.set_rxnfc = nicvf_set_rxnfc,
.get_rxfh_key_size = nicvf_get_rxfh_key_size,
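Note: nicvf_set_ringparam() first clamps the request into the supported window and then rounds down to a power of two, since the hardware only accepts 1K-64K power-of-two queue lengths (assuming SND_QUEUE_SIZE0/SIZE6 encode 0 and 6). A worked example with an illustrative request:

	/* user asks for 3000 TX descriptors: */
	tx_count = clamp_t(u32, 3000, MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
	/* -> 3000, already inside [1024, 65536] */
	qs->sq_len = rounddown_pow_of_two(3000);	/* -> 2048 */
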
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 273eafd..6feaa24 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -749,7 +749,7 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
/* Slow packet rate, exit polling */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Re-enable interrupts */
cq_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD,
cq->cq_idx);
@@ -1274,7 +1274,8 @@ int nicvf_open(struct net_device *netdev)
/* Configure receive side scaling and MTU */
if (!nic->sqs_mode) {
nicvf_rss_init(nic);
- if (nicvf_update_hw_max_frs(nic, netdev->mtu))
+ err = nicvf_update_hw_max_frs(nic, netdev->mtu);
+ if (err)
goto cleanup;
/* Clear percpu stats */
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d2ac133..ac0390b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -603,7 +603,7 @@ void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
cq_cfg.ena = 1;
cq_cfg.reset = 0;
cq_cfg.caching = 0;
- cq_cfg.qsize = CMP_QSIZE;
+ cq_cfg.qsize = ilog2(qs->cq_len >> 10);
cq_cfg.avg_con = 0;
nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, *(u64 *)&cq_cfg);
@@ -652,9 +652,12 @@ static void nicvf_snd_queue_config(struct nicvf *nic, struct queue_set *qs,
sq_cfg.ena = 1;
sq_cfg.reset = 0;
sq_cfg.ldwb = 0;
- sq_cfg.qsize = SND_QSIZE;
+ sq_cfg.qsize = ilog2(qs->sq_len >> 10);
sq_cfg.tstmp_bgx_intf = 0;
- sq_cfg.cq_limit = 0;
+ /* CQ fill level at which HW will stop processing SQEs, to avoid
+ * transmitting a pkt when there is no space in the CQ to post the CQE_TX.
+ */
+ sq_cfg.cq_limit = (CMP_QUEUE_PIPELINE_RSVD * 256) / qs->cq_len;
nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, *(u64 *)&sq_cfg);
/* Set threshold value for interrupt generation */
@@ -816,11 +819,21 @@ int nicvf_config_data_transfer(struct nicvf *nic, bool enable)
{
bool disable = false;
struct queue_set *qs = nic->qs;
+ struct queue_set *pqs = nic->pnicvf->qs;
int qidx;
if (!qs)
return 0;
+ /* Take the primary VF's queue lengths, so that queue lengths
+ * set via ethtool are taken into consideration.
+ */
+ if (nic->sqs_mode && pqs) {
+ qs->cq_len = pqs->cq_len;
+ qs->sq_len = pqs->sq_len;
+ }
+
if (enable) {
if (nicvf_alloc_resources(nic))
return -ENOMEM;
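Note: the qsize fields encode the queue length as len = 1K << qsize (see CMP_QUEUE_LEN in nicvf_queues.h), so ilog2(qs->cq_len >> 10) simply inverts that encoding, and cq_limit is expressed in 1/256ths of the CQ. Worked through for an illustrative 4K CQ:

	/* cq_len = 4096: */
	cq_cfg.qsize = ilog2(4096 >> 10);	/* = ilog2(4) = 2 */
	/* reserve CMP_QUEUE_PIPELINE_RSVD (544) CQEs for in-flight pkts: */
	sq_cfg.cq_limit = (544 * 256) / 4096;	/* = 34, in 1/256ths of CQ */
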
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 9e21046..5cb84da 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -59,8 +59,9 @@
/* Default queue count per QS, its lengths and threshold values */
#define DEFAULT_RBDR_CNT 1
-#define SND_QSIZE SND_QUEUE_SIZE2
+#define SND_QSIZE SND_QUEUE_SIZE0
#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
+#define MIN_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE0 + 10))
#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
@@ -70,11 +71,18 @@
/* Keep CQ and SQ sizes same, if timestamping
* is enabled this equation will change.
*/
-#define CMP_QSIZE CMP_QUEUE_SIZE2
+#define CMP_QSIZE CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
+#define MIN_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE0 + 10))
+#define MAX_CMP_QUEUE_LEN (1ULL << (CMP_QUEUE_SIZE6 + 10))
#define CMP_QUEUE_CQE_THRESH (NAPI_POLL_WEIGHT / 2)
#define CMP_QUEUE_TIMER_THRESH 80 /* ~2usec */
+/* Number of CQEs that may anyway get used by HW due to pipelining
+ * effects irrespective of PASS/DROP/LEVELS being configured
+ */
+#define CMP_QUEUE_PIPELINE_RSVD 544
+
#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT (1ULL << (RBDR_SIZE + 13))
#define MAX_RCV_BUF_COUNT (1ULL << (RBDR_SIZE6 + 13))
@@ -93,8 +101,8 @@
* RED accepts pkt if unused CQE < 2304 & >= 2560
* DROPs pkts if unused CQE < 2304
*/
-#define RQ_PASS_CQ_LVL 160ULL
-#define RQ_DROP_CQ_LVL 144ULL
+#define RQ_PASS_CQ_LVL 192ULL
+#define RQ_DROP_CQ_LVL 184ULL
/* RED and Backpressure levels of RBDR for pkt reception
* For RBDR, level is a measure of fullness i.e 0x0 means empty
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 9211c75..dfb2bad 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -47,8 +47,9 @@ struct lmac {
struct bgx {
u8 bgx_id;
struct lmac lmac[MAX_LMAC_PER_BGX];
- int lmac_count;
+ u8 lmac_count;
u8 max_lmac;
+ u8 acpi_lmac_idx;
void __iomem *reg_base;
struct pci_dev *pdev;
bool is_dlm;
@@ -893,17 +894,15 @@ static void bgx_print_qlm_mode(struct bgx *bgx, u8 lmacid)
struct device *dev = &bgx->pdev->dev;
struct lmac *lmac;
char str[20];
- u8 dlm;
- if (lmacid > bgx->max_lmac)
+ if (!bgx->is_dlm && lmacid)
return;
lmac = &bgx->lmac[lmacid];
- dlm = (lmacid / 2) + (bgx->bgx_id * 2);
if (!bgx->is_dlm)
sprintf(str, "BGX%d QLM mode", bgx->bgx_id);
else
- sprintf(str, "BGX%d DLM%d mode", bgx->bgx_id, dlm);
+ sprintf(str, "BGX%d LMAC%d mode", bgx->bgx_id, lmacid);
switch (lmac->lmac_type) {
case BGX_MODE_SGMII:
@@ -989,7 +988,6 @@ static void lmac_set_training(struct bgx *bgx, struct lmac *lmac, int lmacid)
static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
{
struct lmac *lmac;
- struct lmac *olmac;
u64 cmr_cfg;
u8 lmac_type;
u8 lane_to_sds;
@@ -1009,62 +1007,26 @@ static void bgx_set_lmac_config(struct bgx *bgx, u8 idx)
return;
}
- /* On 81xx BGX can be split across 2 DLMs
- * firmware programs lmac_type of LMAC0 and LMAC2
+ /* For DLMs or SLMs on 80/81/83xx many lane configurations are
+ * possible and they vary across boards. The kernel has no way to
+ * identify the board type, but the firmware does, so just take
+ * the lmac type and serdes lane config as-is.
*/
- if ((idx == 0) || (idx == 2)) {
- cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
- lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
- lane_to_sds = (u8)(cmr_cfg & 0xFF);
- /* Check if config is not reset value */
- if ((lmac_type == 0) && (lane_to_sds == 0xE4))
- lmac->lmac_type = BGX_MODE_INVALID;
- else
- lmac->lmac_type = lmac_type;
- lmac_set_training(bgx, lmac, lmac->lmacid);
- lmac_set_lane2sds(bgx, lmac);
-
- olmac = &bgx->lmac[idx + 1];
- /* Check if other LMAC on the same DLM is already configured by
- * firmware, if so use the same config or else set as same, as
- * that of LMAC 0/2.
- * This check is needed as on 80xx only one lane of each of the
- * DLM of BGX0 is used, so have to rely on firmware for
- * distingushing 80xx from 81xx.
- */
- cmr_cfg = bgx_reg_read(bgx, idx + 1, BGX_CMRX_CFG);
- lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
- lane_to_sds = (u8)(cmr_cfg & 0xFF);
- if ((lmac_type == 0) && (lane_to_sds == 0xE4)) {
- olmac->lmac_type = lmac->lmac_type;
- lmac_set_lane2sds(bgx, olmac);
- } else {
- olmac->lmac_type = lmac_type;
- olmac->lane_to_sds = lane_to_sds;
- }
- lmac_set_training(bgx, olmac, olmac->lmacid);
- }
-}
-
-static bool is_dlm0_in_bgx_mode(struct bgx *bgx)
-{
- struct lmac *lmac;
-
- if (!bgx->is_dlm)
- return true;
-
- lmac = &bgx->lmac[0];
- if (lmac->lmac_type == BGX_MODE_INVALID)
- return false;
-
- return true;
+ cmr_cfg = bgx_reg_read(bgx, idx, BGX_CMRX_CFG);
+ lmac_type = (u8)((cmr_cfg >> 8) & 0x07);
+ lane_to_sds = (u8)(cmr_cfg & 0xFF);
+ /* Check if config is reset value */
+ if ((lmac_type == 0) && (lane_to_sds == 0xE4))
+ lmac->lmac_type = BGX_MODE_INVALID;
+ else
+ lmac->lmac_type = lmac_type;
+ lmac->lane_to_sds = lane_to_sds;
+ lmac_set_training(bgx, lmac, lmac->lmacid);
}
static void bgx_get_qlm_mode(struct bgx *bgx)
{
struct lmac *lmac;
- struct lmac *lmac01;
- struct lmac *lmac23;
u8 idx;
/* Init all LMAC's type to invalid */
@@ -1080,29 +1042,9 @@ static void bgx_get_qlm_mode(struct bgx *bgx)
if (bgx->lmac_count > bgx->max_lmac)
bgx->lmac_count = bgx->max_lmac;
- for (idx = 0; idx < bgx->max_lmac; idx++)
- bgx_set_lmac_config(bgx, idx);
-
- if (!bgx->is_dlm || bgx->is_rgx) {
- bgx_print_qlm_mode(bgx, 0);
- return;
- }
-
- if (bgx->lmac_count) {
- bgx_print_qlm_mode(bgx, 0);
- bgx_print_qlm_mode(bgx, 2);
- }
-
- /* If DLM0 is not in BGX mode then LMAC0/1 have
- * to be configured with serdes lanes of DLM1
- */
- if (is_dlm0_in_bgx_mode(bgx) || (bgx->lmac_count > 2))
- return;
for (idx = 0; idx < bgx->lmac_count; idx++) {
- lmac01 = &bgx->lmac[idx];
- lmac23 = &bgx->lmac[idx + 2];
- lmac01->lmac_type = lmac23->lmac_type;
- lmac01->lane_to_sds = lmac23->lane_to_sds;
+ bgx_set_lmac_config(bgx, idx);
+ bgx_print_qlm_mode(bgx, idx);
}
}
@@ -1143,13 +1085,13 @@ static acpi_status bgx_acpi_register_phy(acpi_handle handle,
if (acpi_bus_get_device(handle, &adev))
goto out;
- acpi_get_mac_address(dev, adev, bgx->lmac[bgx->lmac_count].mac);
+ acpi_get_mac_address(dev, adev, bgx->lmac[bgx->acpi_lmac_idx].mac);
- SET_NETDEV_DEV(&bgx->lmac[bgx->lmac_count].netdev, dev);
+ SET_NETDEV_DEV(&bgx->lmac[bgx->acpi_lmac_idx].netdev, dev);
- bgx->lmac[bgx->lmac_count].lmacid = bgx->lmac_count;
+ bgx->lmac[bgx->acpi_lmac_idx].lmacid = bgx->acpi_lmac_idx;
+ bgx->acpi_lmac_idx++; /* move to next LMAC */
out:
- bgx->lmac_count++;
return AE_OK;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb/sge.c b/drivers/net/ethernet/chelsio/cxgb/sge.c
index 86f467a..d56142b 100644
--- a/drivers/net/ethernet/chelsio/cxgb/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb/sge.c
@@ -1605,7 +1605,7 @@ int t1_poll(struct napi_struct *napi, int budget)
int work_done = process_responses(adapter, budget);
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
writel(adapter->sge->respQ.cidx,
adapter->regs + A_SG_SLEEPING);
}
diff --git a/drivers/net/ethernet/chelsio/cxgb3/sge.c b/drivers/net/ethernet/chelsio/cxgb3/sge.c
index e4b5b05..1b9d154 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/sge.c
@@ -1843,7 +1843,7 @@ static int ofld_poll(struct napi_struct *napi, int budget)
__skb_queue_head_init(&queue);
skb_queue_splice_init(&q->rx_queue, &queue);
if (skb_queue_empty(&queue)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
spin_unlock_irq(&q->lock);
return work_done;
}
@@ -2414,7 +2414,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
int work_done = process_responses(adap, qs, budget);
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/*
* Because we don't atomically flush the following
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
index ad0096e..ccb455f 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
@@ -1501,6 +1501,7 @@ int t4_prep_fw(struct adapter *adap, struct fw_info *fw_info,
const u8 *fw_data, unsigned int fw_size,
struct fw_hdr *card_fw, enum dev_state state, int *reset);
int t4_prep_adapter(struct adapter *adapter);
+int t4_shutdown_adapter(struct adapter *adapter);
enum t4_bar2_qtype { T4_BAR2_QTYPE_EGRESS, T4_BAR2_QTYPE_INGRESS };
int t4_bar2_sge_qregs(struct adapter *adapter,
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3349e1f..49e000e 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -2583,6 +2583,19 @@ static int cxgb_get_vf_config(struct net_device *dev,
ether_addr_copy(ivi->mac, adap->vfinfo[vf].vf_mac_addr);
return 0;
}
+
+static int cxgb_get_phys_port_id(struct net_device *dev,
+ struct netdev_phys_item_id *ppid)
+{
+ struct port_info *pi = netdev_priv(dev);
+ unsigned int phy_port_id;
+
+ phy_port_id = pi->adapter->adap_idx * 10 + pi->port_id;
+ ppid->id_len = sizeof(phy_port_id);
+ memcpy(ppid->id, &phy_port_id, ppid->id_len);
+ return 0;
+}
+
#endif
static int cxgb_set_mac_addr(struct net_device *dev, void *p)
@@ -2762,6 +2775,7 @@ static const struct net_device_ops cxgb4_mgmt_netdev_ops = {
.ndo_open = dummy_open,
.ndo_set_vf_mac = cxgb_set_vf_mac,
.ndo_get_vf_config = cxgb_get_vf_config,
+ .ndo_get_phys_port_id = cxgb_get_phys_port_id,
};
#endif
@@ -2782,8 +2796,24 @@ static const struct ethtool_ops cxgb4_mgmt_ethtool_ops = {
void t4_fatal_err(struct adapter *adap)
{
- t4_set_reg_field(adap, SGE_CONTROL_A, GLOBALENABLE_F, 0);
- t4_intr_disable(adap);
+ int port;
+
+ /* Disable the SGE since ULDs are going to free resources that
+ * could be exposed to the adapter. RDMA MWs for example...
+ */
+ t4_shutdown_adapter(adap);
+ for_each_port(adap, port) {
+ struct net_device *dev = adap->port[port];
+
+ /* If we get here in very early initialization the network
+ * devices may not have been set up yet.
+ */
+ if (!dev)
+ continue;
+
+ netif_tx_stop_all_queues(dev);
+ netif_carrier_off(dev);
+ }
dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
}
@@ -4516,12 +4546,14 @@ static int config_mgmt_dev(struct pci_dev *pdev)
int err;
snprintf(name, IFNAMSIZ, "mgmtpf%d%d", adap->adap_idx, adap->pf);
- netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, dummy_setup);
+ netdev = alloc_netdev(sizeof(struct port_info), name, NET_NAME_UNKNOWN,
+ dummy_setup);
if (!netdev)
return -ENOMEM;
pi = netdev_priv(netdev);
pi->adapter = adap;
+ pi->port_id = adap->pf % adap->params.nports;
SET_NETDEV_DEV(netdev, &pdev->dev);
adap->port[0] = netdev;
@@ -4611,6 +4643,9 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
u32 whoami, pl_rev;
enum chip_type chip;
static int adap_idx = 1;
+#ifdef CONFIG_PCI_IOV
+ u32 v, port_vec;
+#endif
printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
@@ -4990,6 +5025,19 @@ sriov:
err = -ENOMEM;
goto free_adapter;
}
+ spin_lock_init(&adapter->mbox_lock);
+ INIT_LIST_HEAD(&adapter->mlist.list);
+
+ v = FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
+ FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_PORTVEC);
+ err = t4_query_params(adapter, adapter->mbox, adapter->pf, 0, 1,
+ &v, &port_vec);
+ if (err < 0) {
+ dev_err(adapter->pdev_dev, "Could not fetch port params\n");
+ goto free_adapter;
+ }
+
+ adapter->params.nports = hweight32(port_vec);
pci_set_drvdata(pdev, adapter);
return 0;
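Note: the synthesized physical port id is just adap_idx scaled by 10 plus the local port index, so ports of one adapter share a common prefix. For example (illustrative), adap_idx = 2 and port_id = 1 yield phy_port_id = 21, and ppid->id_len = sizeof(u32) = 4 bytes of that value are copied into ppid->id.
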
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sched.c b/drivers/net/ethernet/chelsio/cxgb4/sched.c
index cbd68a8..c902635 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sched.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sched.c
@@ -397,9 +397,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
struct ch_sched_params info;
struct ch_sched_params tp;
- memset(&info, 0, sizeof(info));
- memset(&tp, 0, sizeof(tp));
-
memcpy(&tp, p, sizeof(tp));
/* Don't try to match class parameter */
tp.u.params.class = SCHED_CLS_NONE;
@@ -409,7 +406,6 @@ static struct sched_class *t4_sched_class_lookup(struct port_info *pi,
if (e->state == SCHED_STATE_UNUSED)
continue;
- memset(&info, 0, sizeof(info));
memcpy(&info, &e->info, sizeof(info));
/* Don't try to match class parameter */
info.u.params.class = SCHED_CLS_NONE;
@@ -458,7 +454,6 @@ static struct sched_class *t4_sched_class_alloc(struct port_info *pi,
if (!e)
goto out;
- memset(&np, 0, sizeof(np));
memcpy(&np, p, sizeof(np));
np.u.params.class = e->idx;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index f113015..87000cd 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -330,11 +330,12 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* mailbox access list but this is a start. We very rarely
* contend on access to the mailbox ...
*/
- if (i > FW_CMD_MAX_TIMEOUT) {
+ pcie_fw = t4_read_reg(adap, PCIE_FW_A);
+ if (i > FW_CMD_MAX_TIMEOUT || (pcie_fw & PCIE_FW_ERR_F)) {
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
- ret = -EBUSY;
+ ret = (pcie_fw & PCIE_FW_ERR_F) ? -ENXIO : -EBUSY;
t4_record_mbox(adap, cmd, size, access, ret);
return ret;
}
@@ -432,6 +433,7 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
spin_lock(&adap->mbox_lock);
list_del(&entry.list);
spin_unlock(&adap->mbox_lock);
+ t4_fatal_err(adap);
return ret;
}
@@ -5501,6 +5503,7 @@ void t4_get_port_stats_offset(struct adapter *adap, int idx,
void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
{
u32 bgmap = t4_get_mps_bg_map(adap, idx);
+ u32 stat_ctl = t4_read_reg(adap, MPS_STAT_CTL_A);
#define GET_STAT(name) \
t4_read_reg64(adap, \
@@ -5532,6 +5535,14 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->tx_ppp6 = GET_STAT(TX_PORT_PPP6);
p->tx_ppp7 = GET_STAT(TX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATTX_F) {
+ p->tx_frames -= p->tx_pause;
+ p->tx_octets -= p->tx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCTX_F)
+ p->tx_mcast_frames -= p->tx_pause;
+ }
p->rx_octets = GET_STAT(RX_PORT_BYTES);
p->rx_frames = GET_STAT(RX_PORT_FRAMES);
p->rx_bcast_frames = GET_STAT(RX_PORT_BCAST);
@@ -5560,6 +5571,15 @@ void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
p->rx_ppp6 = GET_STAT(RX_PORT_PPP6);
p->rx_ppp7 = GET_STAT(RX_PORT_PPP7);
+ if (CHELSIO_CHIP_VERSION(adap->params.chip) >= CHELSIO_T5) {
+ if (stat_ctl & COUNTPAUSESTATRX_F) {
+ p->rx_frames -= p->rx_pause;
+ p->rx_octets -= p->rx_pause * 64;
+ }
+ if (stat_ctl & COUNTPAUSEMCRX_F)
+ p->rx_mcast_frames -= p->rx_pause;
+ }
+
p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
@@ -7540,6 +7560,39 @@ int t4_prep_adapter(struct adapter *adapter)
}
/**
+ * t4_shutdown_adapter - shut down adapter, host & wire
+ * @adapter: the adapter
+ *
+ * Perform an emergency shutdown of the adapter and stop it from
+ * continuing any further communication on the ports or DMA to the
+ * host. This is typically used when the adapter and/or firmware
+ * have crashed and we want to prevent any further accidental
+ * communication with the rest of the world. This will also force
+ * the port Link Status to go down -- if register writes work --
+ * which should help our peers figure out that we're down.
+ */
+int t4_shutdown_adapter(struct adapter *adapter)
+{
+ int port;
+
+ t4_intr_disable(adapter);
+ t4_write_reg(adapter, DBG_GPIO_EN_A, 0);
+ for_each_port(adapter, port) {
+ u32 a_port_cfg = PORT_REG(port,
+ is_t4(adapter->params.chip)
+ ? XGMAC_PORT_CFG_A
+ : MAC_PORT_CFG_A);
+
+ t4_write_reg(adapter, a_port_cfg,
+ t4_read_reg(adapter, a_port_cfg)
+ & ~SIGNAL_DET_V(1));
+ }
+ t4_set_reg_field(adapter, SGE_CONTROL_A, GLOBALENABLE_F, 0);
+
+ return 0;
+}
+
+/**
* t4_bar2_sge_qregs - return BAR2 SGE Queue register information
* @adapter: the adapter
* @qid: the Queue ID
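Note: the pause-stat adjustment in t4_get_port_stats() relies on 802.3x pause frames being minimum-size 64-byte Ethernet frames. When the MPS stat control register indicates pause frames were folded into the port counters, subtracting the pause count (and 64 octets apiece) recovers data-only totals: e.g. 1000 TX pause frames inflate tx_frames by 1000 and tx_octets by 64000, which the two subtractions undo.
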
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index e685163..3348d33 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -855,6 +855,14 @@
#define PERR_INT_CAUSE_V(x) ((x) << PERR_INT_CAUSE_S)
#define PERR_INT_CAUSE_F PERR_INT_CAUSE_V(1U)
+#define DBG_GPIO_EN_A 0x6010
+#define XGMAC_PORT_CFG_A 0x1000
+#define MAC_PORT_CFG_A 0x800
+
+#define SIGNAL_DET_S 14
+#define SIGNAL_DET_V(x) ((x) << SIGNAL_DET_S)
+#define SIGNAL_DET_F SIGNAL_DET_V(1U)
+
#define MC_ECC_STATUS_A 0x751c
#define MC_P_ECC_STATUS_A 0x4131c
@@ -1798,12 +1806,29 @@
#define MPS_CMN_CTL_A 0x9000
+#define COUNTPAUSEMCRX_S 5
+#define COUNTPAUSEMCRX_V(x) ((x) << COUNTPAUSEMCRX_S)
+#define COUNTPAUSEMCRX_F COUNTPAUSEMCRX_V(1U)
+
+#define COUNTPAUSESTATRX_S 4
+#define COUNTPAUSESTATRX_V(x) ((x) << COUNTPAUSESTATRX_S)
+#define COUNTPAUSESTATRX_F COUNTPAUSESTATRX_V(1U)
+
+#define COUNTPAUSEMCTX_S 3
+#define COUNTPAUSEMCTX_V(x) ((x) << COUNTPAUSEMCTX_S)
+#define COUNTPAUSEMCTX_F COUNTPAUSEMCTX_V(1U)
+
+#define COUNTPAUSESTATTX_S 2
+#define COUNTPAUSESTATTX_V(x) ((x) << COUNTPAUSESTATTX_S)
+#define COUNTPAUSESTATTX_F COUNTPAUSESTATTX_V(1U)
+
#define NUMPORTS_S 0
#define NUMPORTS_M 0x3U
#define NUMPORTS_G(x) (((x) >> NUMPORTS_S) & NUMPORTS_M)
#define MPS_INT_CAUSE_A 0x9008
#define MPS_TX_INT_CAUSE_A 0x9408
+#define MPS_STAT_CTL_A 0x9600
#define FRMERR_S 15
#define FRMERR_V(x) ((x) << FRMERR_S)
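Note: these additions follow the t4_regs.h _S/_V/_F convention: _S is the bit shift, _V(x) places a value at that shift, and _F is the single-bit flag. For instance (adjust_tx_stats() is a hypothetical helper):

	/* COUNTPAUSESTATTX_F expands to (1U << 2) == 0x4 */
	if (stat_ctl & COUNTPAUSESTATTX_F)
		/* TX pause frames are included in the port TX stats */
		adjust_tx_stats(p);
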
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
index 2accab3..5fdaa16 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4fw_version.h
@@ -36,8 +36,8 @@
#define __T4FW_VERSION_H__
#define T4FW_VERSION_MAJOR 0x01
-#define T4FW_VERSION_MINOR 0x0F
-#define T4FW_VERSION_MICRO 0x25
+#define T4FW_VERSION_MINOR 0x10
+#define T4FW_VERSION_MICRO 0x1A
#define T4FW_VERSION_BUILD 0x00
#define T4FW_MIN_VERSION_MAJOR 0x01
@@ -45,8 +45,8 @@
#define T4FW_MIN_VERSION_MICRO 0x00
#define T5FW_VERSION_MAJOR 0x01
-#define T5FW_VERSION_MINOR 0x0F
-#define T5FW_VERSION_MICRO 0x25
+#define T5FW_VERSION_MINOR 0x10
+#define T5FW_VERSION_MICRO 0x1A
#define T5FW_VERSION_BUILD 0x00
#define T5FW_MIN_VERSION_MAJOR 0x00
@@ -54,8 +54,8 @@
#define T5FW_MIN_VERSION_MICRO 0x00
#define T6FW_VERSION_MAJOR 0x01
-#define T6FW_VERSION_MINOR 0x0F
-#define T6FW_VERSION_MICRO 0x25
+#define T6FW_VERSION_MINOR 0x10
+#define T6FW_VERSION_MICRO 0x1A
#define T6FW_VERSION_BUILD 0x00
#define T6FW_MIN_VERSION_MAJOR 0x00
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index f3ed9ce..e37dde2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -1889,7 +1889,7 @@ static int napi_rx_handler(struct napi_struct *napi, int budget)
u32 val;
if (likely(work_done < budget)) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
intr_params = rspq->next_intr_params;
rspq->next_intr_params = rspq->intr_params;
} else
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index c5842c5..91e42be 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -1343,7 +1343,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
* exit polling
*/
- napi_complete(napi);
+ napi_complete_done(napi, rq_work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[0]);
vnic_intr_unmask(&enic->intr[intr]);
@@ -1500,7 +1500,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
* exit polling
*/
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
enic_set_int_moderation(enic, &enic->rq[rq]);
vnic_intr_unmask(&enic->intr[intr]);
diff --git a/drivers/net/ethernet/dec/tulip/interrupt.c b/drivers/net/ethernet/dec/tulip/interrupt.c
index 92306b3..ba6ae24 100644
--- a/drivers/net/ethernet/dec/tulip/interrupt.c
+++ b/drivers/net/ethernet/dec/tulip/interrupt.c
@@ -319,8 +319,8 @@ int tulip_poll(struct napi_struct *napi, int budget)
/* Remove us from polling list and enable RX intr. */
- napi_complete(napi);
- iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
+ napi_complete_done(napi, work_done);
+ iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);
/* The last op happens after poll completion. Which means the following:
* 1. it can race with disabling irqs in irq handler
@@ -355,7 +355,7 @@ int tulip_poll(struct napi_struct *napi, int budget)
* before we did napi_complete(). See? We would lose it. */
/* remove ourselves from the polling list */
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
return work_done;
}
diff --git a/drivers/net/ethernet/dnet.c b/drivers/net/ethernet/dnet.c
index 2a17c59..3e77dd8 100644
--- a/drivers/net/ethernet/dnet.c
+++ b/drivers/net/ethernet/dnet.c
@@ -415,7 +415,7 @@ static int dnet_poll(struct napi_struct *napi, int budget)
/* We processed all packets available. Tell NAPI it can
* stop polling then re-enable rx interrupts.
*/
- napi_complete(napi);
+ napi_complete_done(napi, npackets);
int_enable = dnet_readl(bp, INTR_ENB);
int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF;
dnet_writel(bp, int_enable, INTR_ENB);
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index 0e74529..30e8550 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -1118,7 +1118,7 @@ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
err:
mutex_unlock(&adapter->mcc_lock);
- if (status == MCC_STATUS_UNAUTHORIZED_REQUEST)
+ if (base_status(status) == MCC_STATUS_UNAUTHORIZED_REQUEST)
status = -EPERM;
return status;
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index b75744f..b3a0540 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -318,6 +318,13 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
if (ether_addr_equal(addr->sa_data, adapter->dev_mac))
return 0;
+ /* BE3 VFs without FILTMGMT privilege are not allowed to set their
+ * MAC address
+ */
+ if (BEx_chip(adapter) && be_virtfn(adapter) &&
+ !check_privilege(adapter, BE_PRIV_FILTMGMT))
+ return -EPERM;
+
/* if device is not running, copy MAC to netdev->dev_addr */
if (!netif_running(netdev))
goto done;
@@ -3317,7 +3324,7 @@ int be_poll(struct napi_struct *napi, int budget)
be_process_mcc(adapter);
if (max_work < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, max_work);
/* Skyhawk EQ_DB has a provision to set the rearm to interrupt
* delay via a delay multiplier encoding value
@@ -3608,7 +3615,11 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
static void be_disable_if_filters(struct be_adapter *adapter)
{
- be_dev_mac_del(adapter, adapter->pmac_id[0]);
+ /* Don't delete MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT))
+ be_dev_mac_del(adapter, adapter->pmac_id[0]);
+
be_clear_uc_list(adapter);
be_clear_mc_list(adapter);
@@ -3761,8 +3772,9 @@ static int be_enable_if_filters(struct be_adapter *adapter)
if (status)
return status;
- /* For BE3 VFs, the PF programs the initial MAC address */
- if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
+ /* Don't add MAC on BE3 VFs without FILTMGMT privilege */
+ if (!BEx_chip(adapter) || !be_virtfn(adapter) ||
+ check_privilege(adapter, BE_PRIV_FILTMGMT)) {
status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
if (status)
return status;
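Note: the three-term condition is the De Morgan expansion of "skip when (BE3 chip && VF && lacking FILTMGMT)": the MAC is added or deleted unless all three hold, preserving the old PF-programs-the-MAC behaviour for unprivileged BE3 VFs while letting privileged ones manage their own filters. Equivalently, as a sketch:

	/* add/del MAC unless: BEx chip AND virtfn AND no FILTMGMT priv */
	bool skip = BEx_chip(adapter) && be_virtfn(adapter) &&
		    !check_privilege(adapter, BE_PRIV_FILTMGMT);
	if (!skip)
		status = be_dev_mac_add(adapter, adapter->netdev->dev_addr);
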
diff --git a/drivers/net/ethernet/ethoc.c b/drivers/net/ethernet/ethoc.c
index 63e5e14..f18aba0 100644
--- a/drivers/net/ethernet/ethoc.c
+++ b/drivers/net/ethernet/ethoc.c
@@ -614,7 +614,7 @@ static int ethoc_poll(struct napi_struct *napi, int budget)
tx_work_done = ethoc_tx(priv->netdev, budget);
if (rx_work_done < budget && tx_work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
}
@@ -995,7 +995,7 @@ static int ethoc_set_ringparam(struct net_device *dev,
return 0;
}
-const struct ethtool_ops ethoc_ethtool_ops = {
+static const struct ethtool_ops ethoc_ethtool_ops = {
.get_regs_len = ethoc_get_regs_len,
.get_regs = ethoc_get_regs,
.nway_reset = phy_ethtool_nway_reset,
diff --git a/drivers/net/ethernet/ezchip/nps_enet.c b/drivers/net/ethernet/ezchip/nps_enet.c
index 223f35c..992ebe9 100644
--- a/drivers/net/ethernet/ezchip/nps_enet.c
+++ b/drivers/net/ethernet/ezchip/nps_enet.c
@@ -192,7 +192,7 @@ static int nps_enet_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
u32 buf_int_enable_value = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* set tx_done and rx_rdy bits */
buf_int_enable_value |= NPS_ENET_ENABLE << RX_RDY_SHIFT;
diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
index b7cbc26..25a14a3 100644
--- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
+++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c
@@ -2001,7 +2001,7 @@ static int dpaa_eth_poll(struct napi_struct *napi, int budget)
int cleaned = qman_p_poll_dqrr(np->p, budget);
if (cleaned < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, cleaned);
qman_p_irqsource_add(np->p, QM_PIRQ_DQRI);
} else if (np->down) {
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38160c2..2cc552d 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -1615,7 +1615,7 @@ static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
fec_enet_tx(ndev);
if (pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts);
writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
}
return pkts;
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 1f98838..54e3ce9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -301,7 +301,7 @@ static int fs_enet_napi(struct napi_struct *napi, int budget)
if (received < budget && tx_left) {
/* done */
- napi_complete(napi);
+ napi_complete_done(napi, received);
(*fep->ops->napi_enable)(dev);
return received;
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index a6e7afa..d0ebab7 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -2948,7 +2948,7 @@ static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
}
/* try reuse page */
- if (unlikely(page_count(page) != 1))
+ if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
return false;
/* change offset to the other half */
@@ -3183,7 +3183,7 @@ static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
if (work_done < budget) {
u32 imask;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
@@ -3272,7 +3272,7 @@ static int gfar_poll_rx(struct napi_struct *napi, int budget)
if (!num_act_queues) {
u32 imask;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Clear the halt bit in RSTAT */
gfar_write(&regs->rstat, gfargrp->rstat);
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index 9d66088..3f7ae9f 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -3303,7 +3303,7 @@ static int ucc_geth_poll(struct napi_struct *napi, int budget)
howmany += ucc_geth_rx(ugeth, i, budget - howmany);
if (howmany < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, howmany);
setbits32(ugeth->uccf->p_uccm, UCCE_RX_EVENTS | UCCE_TX_EVENTS);
}
diff --git a/drivers/net/ethernet/hisilicon/hip04_eth.c b/drivers/net/ethernet/hisilicon/hip04_eth.c
index 97b1847..6e50ec8 100644
--- a/drivers/net/ethernet/hisilicon/hip04_eth.c
+++ b/drivers/net/ethernet/hisilicon/hip04_eth.c
@@ -555,7 +555,7 @@ refill:
priv->reg_inten |= RCV_INT;
writel_relaxed(priv->reg_inten, priv->base + PPE_INTEN);
}
- napi_complete(napi);
+ napi_complete_done(napi, rx);
done:
/* clean up tx descriptors and start a new timer if necessary */
tx_remaining = hip04_tx_reclaim(ndev, false);
diff --git a/drivers/net/ethernet/hisilicon/hisi_femac.c b/drivers/net/ethernet/hisilicon/hisi_femac.c
index 979852d..2c28088 100644
--- a/drivers/net/ethernet/hisilicon/hisi_femac.c
+++ b/drivers/net/ethernet/hisilicon/hisi_femac.c
@@ -330,7 +330,7 @@ static int hisi_femac_poll(struct napi_struct *napi, int budget)
} while (ints & DEF_INT_MASK);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
hisi_femac_irq_enable(priv, DEF_INT_MASK &
(~IRQ_INT_TX_PER_PACKET));
}
diff --git a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
index 418ca1f3..25a6c87 100644
--- a/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
+++ b/drivers/net/ethernet/hisilicon/hix5hd2_gmac.c
@@ -662,7 +662,7 @@ static int hix5hd2_poll(struct napi_struct *napi, int budget)
} while (ints & DEF_INT_MASK);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
hix5hd2_irq_enable(priv);
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index b7cb613..f7b75e9 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -797,7 +797,6 @@ static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
skb->protocol = eth_type_trans(skb, ndev);
(void)napi_gro_receive(&ring_data->napi, skb);
- ndev->last_rx = jiffies;
}
static int hns_desc_unused(struct hnae_ring *ring)
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index c6ba75c..72ab7b6 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1326,7 +1326,7 @@ restart_poll:
ibmveth_replenish_task(adapter);
if (frames_processed < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, frames_processed);
/* We think we are done - reenable interrupts,
* then check once more to make sure we are done.
@@ -1607,8 +1607,11 @@ static int ibmveth_probe(struct vio_dev *dev, const struct vio_device_id *id)
netdev->netdev_ops = &ibmveth_netdev_ops;
netdev->ethtool_ops = &netdev_ethtool_ops;
SET_NETDEV_DEV(netdev, &dev->dev);
- netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
- NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ netdev->hw_features = NETIF_F_SG;
+ if (vio_get_attribute(dev, "ibm,illan-options", NULL) != NULL) {
+ netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
+ NETIF_F_RXCSUM;
+ }
netdev->features |= netdev->hw_features;
diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c
index 3c2526b..c46935d 100644
--- a/drivers/net/ethernet/ibm/ibmvnic.c
+++ b/drivers/net/ethernet/ibm/ibmvnic.c
@@ -987,7 +987,7 @@ restart_poll:
if (frames_processed < budget) {
enable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
- napi_complete(napi);
+ napi_complete_done(napi, frames_processed);
if (pending_scrq(adapter, adapter->rx_scrq[scrq_num]) &&
napi_reschedule(napi)) {
disable_scrq_irq(adapter, adapter->rx_scrq[scrq_num]);
diff --git a/drivers/net/ethernet/intel/e100.c b/drivers/net/ethernet/intel/e100.c
index 04e9392..2b7323d 100644
--- a/drivers/net/ethernet/intel/e100.c
+++ b/drivers/net/ethernet/intel/e100.c
@@ -2253,7 +2253,7 @@ static int e100_poll(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
e100_enable_irq(nic);
}
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index 79651eb..2175cce 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -240,9 +240,9 @@ static void e1000e_dump(struct e1000_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index 7546109..be456ba 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -383,9 +383,9 @@ static void igb_dump(struct igb_adapter *adapter)
/* Print netdevice Info */
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
- pr_info("Device Name state trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n", netdev->name,
- netdev->state, dev_trans_start(netdev), netdev->last_rx);
+ pr_info("Device Name state trans_start\n");
+ pr_info("%-15s %016lX %016lX\n", netdev->name,
+ netdev->state, dev_trans_start(netdev));
}
/* Print Registers */
@@ -3964,8 +3964,8 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
PAGE_SIZE,
DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_drain(buffer_info->page, 0,
- buffer_info->pagecnt_bias);
+ __page_frag_cache_drain(buffer_info->page,
+ buffer_info->pagecnt_bias);
buffer_info->page = NULL;
}
@@ -6991,7 +6991,7 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma,
PAGE_SIZE, DMA_FROM_DEVICE,
DMA_ATTR_SKIP_CPU_SYNC);
- __page_frag_drain(page, 0, rx_buffer->pagecnt_bias);
+ __page_frag_cache_drain(page, rx_buffer->pagecnt_bias);
}
/* clear contents of rx_buffer */
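These igb hunks follow the rename of __page_frag_drain() to __page_frag_cache_drain(), which also dropped the unused middle argument; the call now takes only the page and the accumulated pagecnt_bias. Usage sketch matching the ring-cleanup context above:

    /* release every reference the driver still holds on this page */
    __page_frag_cache_drain(buffer_info->page, buffer_info->pagecnt_bias);
    buffer_info->page = NULL;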
diff --git a/drivers/net/ethernet/intel/ixgb/ixgb_main.c b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
index 5826b1d..fbd220d 100644
--- a/drivers/net/ethernet/intel/ixgb/ixgb_main.c
+++ b/drivers/net/ethernet/intel/ixgb/ixgb_main.c
@@ -1817,7 +1817,7 @@ ixgb_clean(struct napi_struct *napi, int budget)
/* If budget not fully consumed, exit the polling mode */
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (!test_bit(__IXGB_DOWN, &adapter->flags))
ixgb_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
index 094e1d6..c38d50c 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
@@ -350,7 +350,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw)
}
IXGBE_WRITE_FLUSH(hw);
-#ifndef CONFIG_SPARC
+#ifndef CONFIG_ARCH_WANT_RELAX_ORDER
/* Disable relaxed ordering */
for (i = 0; i < hw->mac.max_tx_queues; i++) {
u32 regval;
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index ffe7d94..3b3b52b 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -611,12 +611,11 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
if (netdev) {
dev_info(&adapter->pdev->dev, "Net device Info\n");
pr_info("Device Name state "
- "trans_start last_rx\n");
- pr_info("%-15s %016lX %016lX %016lX\n",
+ "trans_start\n");
+ pr_info("%-15s %016lX %016lX\n",
netdev->name,
netdev->state,
- dev_trans_start(netdev),
- netdev->last_rx);
+ dev_trans_start(netdev));
}
/* Print Registers */
diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c
index f9fcab5..f580b49 100644
--- a/drivers/net/ethernet/jme.c
+++ b/drivers/net/ethernet/jme.c
@@ -1879,7 +1879,7 @@ jme_open(struct net_device *netdev)
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
else
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
@@ -2374,7 +2374,7 @@ jme_tx_timeout(struct net_device *netdev)
jme->phylink = 0;
jme_reset_phy_processor(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
/*
* Force to Reset the link again
@@ -2648,27 +2648,27 @@ jme_set_wol(struct net_device *netdev,
}
static int
-jme_get_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+jme_get_link_ksettings(struct net_device *netdev,
+ struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_gset(&(jme->mii_if), ecmd);
+ rc = mii_ethtool_get_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
return rc;
}
static int
-jme_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd)
+jme_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd)
{
struct jme_adapter *jme = netdev_priv(netdev);
int rc, fdc = 0;
- if (ethtool_cmd_speed(ecmd) == SPEED_1000
- && ecmd->autoneg != AUTONEG_ENABLE)
+ if (cmd->base.speed == SPEED_1000 &&
+ cmd->base.autoneg != AUTONEG_ENABLE)
return -EINVAL;
/*
@@ -2676,18 +2676,18 @@ jme_set_settings(struct net_device *netdev,
* Hardware would not generate link change interrupt.
*/
if (jme->mii_if.force_media &&
- ecmd->autoneg != AUTONEG_ENABLE &&
- (jme->mii_if.full_duplex != ecmd->duplex))
+ cmd->base.autoneg != AUTONEG_ENABLE &&
+ (jme->mii_if.full_duplex != cmd->base.duplex))
fdc = 1;
spin_lock_bh(&jme->phy_lock);
- rc = mii_ethtool_sset(&(jme->mii_if), ecmd);
+ rc = mii_ethtool_set_link_ksettings(&jme->mii_if, cmd);
spin_unlock_bh(&jme->phy_lock);
if (!rc) {
if (fdc)
jme_reset_link(jme);
- jme->old_ecmd = *ecmd;
+ jme->old_cmd = *cmd;
set_bit(JME_FLAG_SSET, &jme->flags);
}
@@ -2716,7 +2716,7 @@ jme_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd)
if (!rc && (cmd == SIOCSMIIREG)) {
if (duplex_chg)
jme_reset_link(jme);
- jme_get_settings(netdev, &jme->old_ecmd);
+ jme_get_link_ksettings(netdev, &jme->old_cmd);
set_bit(JME_FLAG_SSET, &jme->flags);
}
@@ -2915,8 +2915,6 @@ static const struct ethtool_ops jme_ethtool_ops = {
.set_pauseparam = jme_set_pauseparam,
.get_wol = jme_get_wol,
.set_wol = jme_set_wol,
- .get_settings = jme_get_settings,
- .set_settings = jme_set_settings,
.get_link = jme_get_link,
.get_msglevel = jme_get_msglevel,
.set_msglevel = jme_set_msglevel,
@@ -2924,6 +2922,8 @@ static const struct ethtool_ops jme_ethtool_ops = {
.get_eeprom_len = jme_get_eeprom_len,
.get_eeprom = jme_get_eeprom,
.set_eeprom = jme_set_eeprom,
+ .get_link_ksettings = jme_get_link_ksettings,
+ .set_link_ksettings = jme_set_link_ksettings,
};
static int
@@ -3306,7 +3306,7 @@ jme_resume(struct device *dev)
jme_clear_pm_disable_wol(jme);
jme_phy_on(jme);
if (test_bit(JME_FLAG_SSET, &jme->flags))
- jme_set_settings(netdev, &jme->old_ecmd);
+ jme_set_link_ksettings(netdev, &jme->old_cmd);
else
jme_reset_phy_processor(jme);
jme_phy_calibration(jme);
diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h
index 58cd67c..89535c0 100644
--- a/drivers/net/ethernet/jme.h
+++ b/drivers/net/ethernet/jme.h
@@ -447,7 +447,7 @@ struct jme_adapter {
u8 chip_sub_rev;
u8 pcirev;
u32 msg_enable;
- struct ethtool_cmd old_ecmd;
+ struct ethtool_link_ksettings old_cmd;
unsigned int old_mtu;
struct dynpcc_info dpi;
atomic_t intr_sem;
@@ -1270,8 +1270,8 @@ static inline int new_phy_power_ctrl(u8 chip_main_rev)
/*
* Function prototypes
*/
-static int jme_set_settings(struct net_device *netdev,
- struct ethtool_cmd *ecmd);
+static int jme_set_link_ksettings(struct net_device *netdev,
+ const struct ethtool_link_ksettings *cmd);
static void jme_set_unicastaddr(struct net_device *netdev);
static void jme_set_multi(struct net_device *netdev);
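The jme conversion is representative of the get_settings/set_settings to {get,set}_link_ksettings migration that runs through this series: speed, duplex, autoneg and port move into cmd->base, and legacy u32 supported/advertising masks are translated with the ethtool_convert_legacy_u32_to_link_mode() / ethtool_convert_link_mode_to_legacy_u32() helpers. For mii-backed drivers the whole body reduces to the new mii helper; a sketch with illustrative names:

    static int my_get_link_ksettings(struct net_device *dev,
                                     struct ethtool_link_ksettings *cmd)
    {
        struct my_priv *priv = netdev_priv(dev);
        int rc;

        spin_lock_bh(&priv->phy_lock);
        rc = mii_ethtool_get_link_ksettings(&priv->mii_if, cmd);
        spin_unlock_bh(&priv->phy_lock);
        return rc;
    }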
diff --git a/drivers/net/ethernet/korina.c b/drivers/net/ethernet/korina.c
index 8037426..9fae98c 100644
--- a/drivers/net/ethernet/korina.c
+++ b/drivers/net/ethernet/korina.c
@@ -464,7 +464,7 @@ static int korina_poll(struct napi_struct *napi, int budget)
work_done = korina_rx(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
writel(readl(&lp->rx_dma_regs->dmasm) &
~(DMA_STAT_DONE | DMA_STAT_HALT | DMA_STAT_ERR),
@@ -695,25 +695,27 @@ static void netdev_get_drvinfo(struct net_device *dev,
strlcpy(info->bus_info, lp->dev->name, sizeof(info->bus_info));
}
-static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_gset(&lp->mii_if, cmd);
+ rc = mii_ethtool_get_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
return rc;
}
-static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+static int netdev_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct korina_private *lp = netdev_priv(dev);
int rc;
spin_lock_irq(&lp->lock);
- rc = mii_ethtool_sset(&lp->mii_if, cmd);
+ rc = mii_ethtool_set_link_ksettings(&lp->mii_if, cmd);
spin_unlock_irq(&lp->lock);
korina_set_carrier(&lp->mii_if);
@@ -729,9 +731,9 @@ static u32 netdev_get_link(struct net_device *dev)
static const struct ethtool_ops netdev_ethtool_ops = {
.get_drvinfo = netdev_get_drvinfo,
- .get_settings = netdev_get_settings,
- .set_settings = netdev_set_settings,
.get_link = netdev_get_link,
+ .get_link_ksettings = netdev_get_link_ksettings,
+ .set_link_ksettings = netdev_set_link_ksettings,
};
static int korina_alloc_ring(struct net_device *dev)
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index faea52d..afc8100 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -156,24 +156,21 @@ ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
struct ltq_etop_chan *ch = container_of(napi,
struct ltq_etop_chan, napi);
- int rx = 0;
- int complete = 0;
+ int work_done = 0;
- while ((rx < budget) && !complete) {
+ while (work_done < budget) {
struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
- if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
- ltq_etop_hw_receive(ch);
- rx++;
- } else {
- complete = 1;
- }
+ if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) != LTQ_DMA_C)
+ break;
+ ltq_etop_hw_receive(ch);
+ work_done++;
}
- if (complete || !rx) {
- napi_complete(&ch->napi);
+ if (work_done < budget) {
+ napi_complete_done(&ch->napi, work_done);
ltq_dma_ack_irq(&ch->dma);
}
- return rx;
+ return work_done;
}
static int
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 1fa7c03..20cb7f0 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -2319,7 +2319,7 @@ static int mv643xx_eth_poll(struct napi_struct *napi, int budget)
if (work_done < budget) {
if (mp->oom)
mod_timer(&mp->rx_oom, jiffies + (HZ / 10));
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrlp(mp, INT_MASK, mp->int_mask);
}
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 3607d8f..de6c477 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -224,6 +224,7 @@
#define MVNETA_TXQ_SENT_THRESH_MASK(coal) ((coal) << 16)
#define MVNETA_TXQ_UPDATE_REG(q) (0x3c60 + ((q) << 2))
#define MVNETA_TXQ_DEC_SENT_SHIFT 16
+#define MVNETA_TXQ_DEC_SENT_MASK 0xff
#define MVNETA_TXQ_STATUS_REG(q) (0x3c40 + ((q) << 2))
#define MVNETA_TXQ_SENT_DESC_SHIFT 16
#define MVNETA_TXQ_SENT_DESC_MASK 0x3fff0000
@@ -525,6 +526,7 @@ struct mvneta_tx_queue {
* descriptor ring
*/
int count;
+ int pending;
int tx_stop_threshold;
int tx_wake_threshold;
@@ -818,8 +820,9 @@ static void mvneta_txq_pend_desc_add(struct mvneta_port *pp,
/* Only 255 descriptors can be added at once; assume the caller
* processes TX descriptors in quanta of less than 256
*/
- val = pend_desc;
+ val = pend_desc + txq->pending;
mvreg_write(pp, MVNETA_TXQ_UPDATE_REG(txq->id), val);
+ txq->pending = 0;
}
/* Get pointer to next TX descriptor to be processed (send) by HW */
@@ -1756,8 +1759,10 @@ static struct mvneta_tx_queue *mvneta_tx_done_policy(struct mvneta_port *pp,
/* Free tx queue skbuffs */
static void mvneta_txq_bufs_free(struct mvneta_port *pp,
- struct mvneta_tx_queue *txq, int num)
+ struct mvneta_tx_queue *txq, int num,
+ struct netdev_queue *nq)
{
+ unsigned int bytes_compl = 0, pkts_compl = 0;
int i;
for (i = 0; i < num; i++) {
@@ -1765,6 +1770,11 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
txq->txq_get_index;
struct sk_buff *skb = txq->tx_skb[txq->txq_get_index];
+ if (skb) {
+ bytes_compl += skb->len;
+ pkts_compl++;
+ }
+
mvneta_txq_inc_get(txq);
if (!IS_TSO_HEADER(txq, tx_desc->buf_phys_addr))
@@ -1775,6 +1785,8 @@ static void mvneta_txq_bufs_free(struct mvneta_port *pp,
continue;
dev_kfree_skb_any(skb);
}
+
+ netdev_tx_completed_queue(nq, pkts_compl, bytes_compl);
}
/* Handle end of transmission */
@@ -1788,7 +1800,7 @@ static void mvneta_txq_done(struct mvneta_port *pp,
if (!tx_done)
return;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
txq->count -= tx_done;
@@ -2398,12 +2410,18 @@ out:
struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats);
struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
- txq->count += frags;
- mvneta_txq_pend_desc_add(pp, txq, frags);
+ netdev_tx_sent_queue(nq, len);
+ txq->count += frags;
if (txq->count >= txq->tx_stop_threshold)
netif_tx_stop_queue(nq);
+ if (!skb->xmit_more || netif_xmit_stopped(nq) ||
+ txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
+ mvneta_txq_pend_desc_add(pp, txq, frags);
+ else
+ txq->pending += frags;
+
u64_stats_update_begin(&stats->syncp);
stats->tx_packets++;
stats->tx_bytes += len;
@@ -2422,9 +2440,10 @@ static void mvneta_txq_done_force(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
int tx_done = txq->count;
- mvneta_txq_bufs_free(pp, txq, tx_done);
+ mvneta_txq_bufs_free(pp, txq, tx_done, nq);
/* reset txq */
txq->count = 0;
@@ -2748,11 +2767,9 @@ static int mvneta_poll(struct napi_struct *napi, int budget)
rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]);
}
- budget -= rx_done;
-
- if (budget > 0) {
+ if (rx_done < budget) {
cause_rx_tx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
if (pp->neta_armada3700) {
unsigned long flags;
@@ -2950,6 +2967,8 @@ static int mvneta_txq_init(struct mvneta_port *pp,
static void mvneta_txq_deinit(struct mvneta_port *pp,
struct mvneta_tx_queue *txq)
{
+ struct netdev_queue *nq = netdev_get_tx_queue(pp->dev, txq->id);
+
kfree(txq->tx_skb);
if (txq->tso_hdrs)
@@ -2961,6 +2980,8 @@ static void mvneta_txq_deinit(struct mvneta_port *pp,
txq->size * MVNETA_DESC_ALIGNED_SIZE,
txq->descs, txq->descs_phys);
+ netdev_tx_reset_queue(nq);
+
txq->descs = NULL;
txq->last_desc = 0;
txq->next_desc_to_proc = 0;
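The mvneta diff combines two changes: byte queue limits (netdev_tx_sent_queue() on transmit, netdev_tx_completed_queue() after cleanup, netdev_tx_reset_queue() on teardown) and doorbell batching driven by skb->xmit_more, deferring the MVNETA_TXQ_UPDATE_REG write while the stack promises more packets and the pending count still fits the 8-bit register field. A condensed sketch of the batching decision (my_ring_doorbell is illustrative):

    netdev_tx_sent_queue(nq, skb->len);     /* BQL accounting */
    txq->count += frags;

    /* ring the doorbell only when the stack stops feeding us, the
     * queue stalled, or the pending-descriptor field would overflow */
    if (!skb->xmit_more || netif_xmit_stopped(nq) ||
        txq->pending + frags > MVNETA_TXQ_DEC_SENT_MASK)
        my_ring_doorbell(txq, txq->pending + frags);
    else
        txq->pending += frags;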
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 69db40e..c2fd7c3 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -5405,7 +5405,7 @@ static int mvpp2_poll(struct napi_struct *napi, int budget)
if (budget > 0) {
cause_rx = 0;
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
mvpp2_interrupts_enable(port);
}
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index 3af2814..3376a19 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1261,7 +1261,7 @@ static int pxa168_rx_poll(struct napi_struct *napi, int budget)
}
work_done = rxq_process(dev, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
wrl(pep, INT_MASK, ALL_INTS);
}
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 9146a51..81106b7 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -300,65 +300,76 @@ static u32 skge_supported_modes(const struct skge_hw *hw)
return supported;
}
-static int skge_get_settings(struct net_device *dev,
- struct ethtool_cmd *ecmd)
+static int skge_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
struct skge_hw *hw = skge->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = skge_supported_modes(hw);
+ supported = skge_supported_modes(hw);
if (hw->copper) {
- ecmd->port = PORT_TP;
- ecmd->phy_address = hw->phy_addr;
+ cmd->base.port = PORT_TP;
+ cmd->base.phy_address = hw->phy_addr;
} else
- ecmd->port = PORT_FIBRE;
+ cmd->base.port = PORT_FIBRE;
+
+ advertising = skge->advertising;
+ cmd->base.autoneg = skge->autoneg;
+ cmd->base.speed = skge->speed;
+ cmd->base.duplex = skge->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
- ecmd->advertising = skge->advertising;
- ecmd->autoneg = skge->autoneg;
- ethtool_cmd_speed_set(ecmd, skge->speed);
- ecmd->duplex = skge->duplex;
return 0;
}
-static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int skge_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct skge_port *skge = netdev_priv(dev);
const struct skge_hw *hw = skge->hw;
u32 supported = skge_supported_modes(hw);
int err = 0;
+ u32 advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- ecmd->advertising = supported;
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ advertising = supported;
skge->duplex = -1;
skge->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -371,11 +382,11 @@ static int skge_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
skge->speed = speed;
- skge->duplex = ecmd->duplex;
+ skge->duplex = cmd->base.duplex;
}
- skge->autoneg = ecmd->autoneg;
- skge->advertising = ecmd->advertising;
+ skge->autoneg = cmd->base.autoneg;
+ skge->advertising = advertising;
if (netif_running(dev)) {
skge_down(dev);
@@ -875,8 +886,6 @@ static int skge_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
}
static const struct ethtool_ops skge_ethtool_ops = {
- .get_settings = skge_get_settings,
- .set_settings = skge_set_settings,
.get_drvinfo = skge_get_drvinfo,
.get_regs_len = skge_get_regs_len,
.get_regs = skge_get_regs,
@@ -899,6 +908,8 @@ static const struct ethtool_ops skge_ethtool_ops = {
.set_phys_id = skge_set_phys_id,
.get_sset_count = skge_get_sset_count,
.get_ethtool_stats = skge_get_ethtool_stats,
+ .get_link_ksettings = skge_get_link_ksettings,
+ .set_link_ksettings = skge_set_link_ksettings,
};
/*
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index 18d6336..2b2cc3f 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -2666,7 +2666,7 @@ static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
sky2->rx_stats.bytes += bytes;
u64_stats_update_end(&sky2->rx_stats.syncp);
- dev->last_rx = jiffies;
+ sky2->last_rx = jiffies;
sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
}
@@ -2953,7 +2953,7 @@ static int sky2_rx_hung(struct net_device *dev)
u8 fifo_lev = sky2_read8(hw, Q_ADDR(rxq, Q_RL));
/* If idle and MAC or PCI is stuck */
- if (sky2->check.last == dev->last_rx &&
+ if (sky2->check.last == sky2->last_rx &&
((mac_rp == sky2->check.mac_rp &&
mac_lev != 0 && mac_lev >= sky2->check.mac_lev) ||
/* Check if the PCI RX hang */
@@ -2965,7 +2965,7 @@ static int sky2_rx_hung(struct net_device *dev)
fifo_rp, sky2_read8(hw, Q_ADDR(rxq, Q_WP)));
return 1;
} else {
- sky2->check.last = dev->last_rx;
+ sky2->check.last = sky2->last_rx;
sky2->check.mac_rp = mac_rp;
sky2->check.mac_lev = mac_lev;
sky2->check.fifo_rp = fifo_rp;
@@ -3589,47 +3589,59 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
| SUPPORTED_1000baseT_Full;
}
-static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_get_link_ksettings(struct net_device *dev,
+ struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
struct sky2_hw *hw = sky2->hw;
+ u32 supported, advertising;
- ecmd->transceiver = XCVR_INTERNAL;
- ecmd->supported = sky2_supported_modes(hw);
- ecmd->phy_address = PHY_ADDR_MARV;
+ supported = sky2_supported_modes(hw);
+ cmd->base.phy_address = PHY_ADDR_MARV;
if (sky2_is_copper(hw)) {
- ecmd->port = PORT_TP;
- ethtool_cmd_speed_set(ecmd, sky2->speed);
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
+ cmd->base.port = PORT_TP;
+ cmd->base.speed = sky2->speed;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
} else {
- ethtool_cmd_speed_set(ecmd, SPEED_1000);
- ecmd->port = PORT_FIBRE;
- ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
+ cmd->base.speed = SPEED_1000;
+ cmd->base.port = PORT_FIBRE;
+ supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
}
- ecmd->advertising = sky2->advertising;
- ecmd->autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
+ advertising = sky2->advertising;
+ cmd->base.autoneg = (sky2->flags & SKY2_FLAG_AUTO_SPEED)
? AUTONEG_ENABLE : AUTONEG_DISABLE;
- ecmd->duplex = sky2->duplex;
+ cmd->base.duplex = sky2->duplex;
+
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
+ supported);
+ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
+ advertising);
+
return 0;
}
-static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
+static int sky2_set_link_ksettings(struct net_device *dev,
+ const struct ethtool_link_ksettings *cmd)
{
struct sky2_port *sky2 = netdev_priv(dev);
const struct sky2_hw *hw = sky2->hw;
u32 supported = sky2_supported_modes(hw);
+ u32 new_advertising;
+
+ ethtool_convert_link_mode_to_legacy_u32(&new_advertising,
+ cmd->link_modes.advertising);
- if (ecmd->autoneg == AUTONEG_ENABLE) {
- if (ecmd->advertising & ~supported)
+ if (cmd->base.autoneg == AUTONEG_ENABLE) {
+ if (new_advertising & ~supported)
return -EINVAL;
if (sky2_is_copper(hw))
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_TP |
ADVERTISED_Autoneg;
else
- sky2->advertising = ecmd->advertising |
+ sky2->advertising = new_advertising |
ADVERTISED_FIBRE |
ADVERTISED_Autoneg;
@@ -3638,30 +3650,30 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
sky2->speed = -1;
} else {
u32 setting;
- u32 speed = ethtool_cmd_speed(ecmd);
+ u32 speed = cmd->base.speed;
switch (speed) {
case SPEED_1000:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_1000baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_1000baseT_Half;
else
return -EINVAL;
break;
case SPEED_100:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_100baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_100baseT_Half;
else
return -EINVAL;
break;
case SPEED_10:
- if (ecmd->duplex == DUPLEX_FULL)
+ if (cmd->base.duplex == DUPLEX_FULL)
setting = SUPPORTED_10baseT_Full;
- else if (ecmd->duplex == DUPLEX_HALF)
+ else if (cmd->base.duplex == DUPLEX_HALF)
setting = SUPPORTED_10baseT_Half;
else
return -EINVAL;
@@ -3674,7 +3686,7 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
return -EINVAL;
sky2->speed = speed;
- sky2->duplex = ecmd->duplex;
+ sky2->duplex = cmd->base.duplex;
sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
}
@@ -4405,8 +4417,6 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
}
static const struct ethtool_ops sky2_ethtool_ops = {
- .get_settings = sky2_get_settings,
- .set_settings = sky2_set_settings,
.get_drvinfo = sky2_get_drvinfo,
.get_wol = sky2_get_wol,
.set_wol = sky2_set_wol,
@@ -4429,6 +4439,8 @@ static const struct ethtool_ops sky2_ethtool_ops = {
.set_phys_id = sky2_set_phys_id,
.get_sset_count = sky2_get_sset_count,
.get_ethtool_stats = sky2_get_ethtool_stats,
+ .get_link_ksettings = sky2_get_link_ksettings,
+ .set_link_ksettings = sky2_set_link_ksettings,
};
#ifdef CONFIG_SKY2_DEBUG
diff --git a/drivers/net/ethernet/marvell/sky2.h b/drivers/net/ethernet/marvell/sky2.h
index ec6dcd8..0fe1607 100644
--- a/drivers/net/ethernet/marvell/sky2.h
+++ b/drivers/net/ethernet/marvell/sky2.h
@@ -2247,6 +2247,7 @@ struct sky2_port {
u16 rx_data_size;
u16 rx_nfrags;
+ unsigned long last_rx;
struct {
unsigned long last;
u32 mac_rp;
diff --git a/drivers/net/ethernet/mediatek/mtk_eth_soc.c b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
index 25ae0c5..9e75768 100644
--- a/drivers/net/ethernet/mediatek/mtk_eth_soc.c
+++ b/drivers/net/ethernet/mediatek/mtk_eth_soc.c
@@ -2515,7 +2515,7 @@ static int mtk_remove(struct platform_device *pdev)
}
const struct of_device_id of_mtk_match[] = {
- { .compatible = "mediatek,mt7623-eth" },
+ { .compatible = "mediatek,mt2701-eth" },
{},
};
MODULE_DEVICE_TABLE(of, of_mtk_match);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index a849da9..6b86353 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -101,13 +101,19 @@ void mlx4_cq_completion(struct mlx4_dev *dev, u32 cqn)
{
struct mlx4_cq *cq;
+ rcu_read_lock();
cq = radix_tree_lookup(&mlx4_priv(dev)->cq_table.tree,
cqn & (dev->caps.num_cqs - 1));
+ rcu_read_unlock();
+
if (!cq) {
mlx4_dbg(dev, "Completion event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
++cq->arm_sn;
cq->comp(cq);
@@ -118,23 +124,19 @@ void mlx4_cq_event(struct mlx4_dev *dev, u32 cqn, int event_type)
struct mlx4_cq_table *cq_table = &mlx4_priv(dev)->cq_table;
struct mlx4_cq *cq;
- spin_lock(&cq_table->lock);
-
+ rcu_read_lock();
cq = radix_tree_lookup(&cq_table->tree, cqn & (dev->caps.num_cqs - 1));
- if (cq)
- atomic_inc(&cq->refcount);
-
- spin_unlock(&cq_table->lock);
+ rcu_read_unlock();
if (!cq) {
- mlx4_warn(dev, "Async event for bogus CQ %08x\n", cqn);
+ mlx4_dbg(dev, "Async event for bogus CQ %08x\n", cqn);
return;
}
+ /* Accessing the CQ outside of rcu_read_lock is safe, because
+ * the CQ is freed only after interrupt handling is completed.
+ */
cq->event(cq, event_type);
-
- if (atomic_dec_and_test(&cq->refcount))
- complete(&cq->free);
}
static int mlx4_SW2HW_CQ(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *mailbox,
@@ -301,9 +303,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
if (err)
return err;
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
err = radix_tree_insert(&cq_table->tree, cq->cqn, cq);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
if (err)
goto err_icm;
@@ -349,9 +351,9 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
return 0;
err_radix:
- spin_lock_irq(&cq_table->lock);
+ spin_lock(&cq_table->lock);
radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
+ spin_unlock(&cq_table->lock);
err_icm:
mlx4_cq_free_icm(dev, cq->cqn);
@@ -370,15 +372,15 @@ void mlx4_cq_free(struct mlx4_dev *dev, struct mlx4_cq *cq)
if (err)
mlx4_warn(dev, "HW2SW_CQ failed (%d) for CQN %06x\n", err, cq->cqn);
+ spin_lock(&cq_table->lock);
+ radix_tree_delete(&cq_table->tree, cq->cqn);
+ spin_unlock(&cq_table->lock);
+
synchronize_irq(priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq);
if (priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(cq->vector)].irq !=
priv->eq_table.eq[MLX4_EQ_ASYNC].irq)
synchronize_irq(priv->eq_table.eq[MLX4_EQ_ASYNC].irq);
- spin_lock_irq(&cq_table->lock);
- radix_tree_delete(&cq_table->tree, cq->cqn);
- spin_unlock_irq(&cq_table->lock);
-
if (atomic_dec_and_test(&cq->refcount))
complete(&cq->free);
wait_for_completion(&cq->free);
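The cq.c hunks replace a spinlock-plus-refcount scheme on the CQ radix tree with plain RCU for the completion and event fast paths; correctness relies on mlx4_cq_free() deleting the tree entry first and then calling synchronize_irq(), so no interrupt handler can still hold a pointer when the CQ is freed. Read-side sketch under those assumptions:

    rcu_read_lock();
    cq = radix_tree_lookup(&cq_table->tree, cqn & (num_cqs - 1));
    rcu_read_unlock();
    if (cq)
        cq->comp(cq);   /* safe: free waits for IRQ handlers to finish */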
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index d9c9f86..ca730d4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -902,6 +902,7 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
struct mlx4_en_priv *priv = netdev_priv(dev);
struct mlx4_ptys_reg ptys_reg;
__be32 proto_admin;
+ u8 cur_autoneg;
int ret;
u32 ptys_adv = ethtool2ptys_link_modes(
@@ -931,10 +932,21 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
return 0;
}
- proto_admin = link_ksettings->base.autoneg == AUTONEG_ENABLE ?
- cpu_to_be32(ptys_adv) :
- speed_set_ptys_admin(priv, speed,
- ptys_reg.eth_proto_cap);
+ cur_autoneg = ptys_reg.flags & MLX4_PTYS_AN_DISABLE_ADMIN ?
+ AUTONEG_DISABLE : AUTONEG_ENABLE;
+
+ if (link_ksettings->base.autoneg == AUTONEG_DISABLE) {
+ proto_admin = speed_set_ptys_admin(priv, speed,
+ ptys_reg.eth_proto_cap);
+ if ((be32_to_cpu(proto_admin) &
+ (MLX4_PROT_MASK(MLX4_1000BASE_CX_SGMII) |
+ MLX4_PROT_MASK(MLX4_1000BASE_KX))) &&
+ (ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP))
+ ptys_reg.flags |= MLX4_PTYS_AN_DISABLE_ADMIN;
+ } else {
+ proto_admin = cpu_to_be32(ptys_adv);
+ ptys_reg.flags &= ~MLX4_PTYS_AN_DISABLE_ADMIN;
+ }
proto_admin &= ptys_reg.eth_proto_cap;
if (!proto_admin) {
@@ -942,7 +954,9 @@ mlx4_en_set_link_ksettings(struct net_device *dev,
return -EINVAL; /* nothing to change due to bad input */
}
- if (proto_admin == ptys_reg.eth_proto_admin)
+ if ((proto_admin == ptys_reg.eth_proto_admin) &&
+ ((ptys_reg.flags & MLX4_PTYS_AN_DISABLE_CAP) &&
+ (link_ksettings->base.autoneg == cur_autoneg)))
return 0; /* Nothing to change */
en_dbg(DRV, priv, "mlx4_ACCESS_PTYS_REG SET: ptys_reg.eth_proto_admin = 0x%x\n",
@@ -1732,8 +1746,6 @@ static void mlx4_en_get_channels(struct net_device *dev,
{
struct mlx4_en_priv *priv = netdev_priv(dev);
- memset(channel, 0, sizeof(*channel));
-
channel->max_rx = MAX_RX_RINGS;
channel->max_tx = MLX4_EN_MAX_TX_RING_P_UP;
@@ -1752,10 +1764,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
int xdp_count;
int err = 0;
- if (channel->other_count || channel->combined_count ||
- channel->tx_count > MLX4_EN_MAX_TX_RING_P_UP ||
- channel->rx_count > MAX_RX_RINGS ||
- !channel->tx_count || !channel->rx_count)
+ if (!channel->tx_count || !channel->rx_count)
return -EINVAL;
tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
@@ -1793,7 +1802,7 @@ static int mlx4_en_set_channels(struct net_device *dev,
netif_set_real_num_tx_queues(dev, priv->tx_ring_num[TX]);
netif_set_real_num_rx_queues(dev, priv->rx_ring_num);
- if (dev->num_tc)
+ if (netdev_get_num_tc(dev))
mlx4_en_setup_tc(dev, MLX4_EN_NUM_UP);
en_warn(priv, "Using %d TX rings\n", priv->tx_ring_num[TX]);
@@ -1985,7 +1994,7 @@ static int mlx4_en_get_module_info(struct net_device *dev,
modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
break;
default:
- return -ENOSYS;
+ return -EINVAL;
}
return 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 06ef23f..60a021c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1695,6 +1695,14 @@ int mlx4_en_start_port(struct net_device *dev)
priv->port, err);
goto tx_err;
}
+
+ err = mlx4_SET_PORT_user_mtu(mdev->dev, priv->port, dev->mtu);
+ if (err) {
+ en_err(priv, "Failed to pass user MTU(%d) to Firmware for port %d, with error %d\n",
+ dev->mtu, priv->port, err);
+ goto tx_err;
+ }
+
/* Set default qp number */
err = mlx4_SET_PORT_qpn_calc(mdev->dev, priv->port, priv->base_qpn, 0);
if (err) {
@@ -1746,8 +1754,11 @@ int mlx4_en_start_port(struct net_device *dev)
/* Process all completions if exist to prevent
* the queues freezing if they are full
*/
- for (i = 0; i < priv->rx_ring_num; i++)
+ for (i = 0; i < priv->rx_ring_num; i++) {
+ local_bh_disable();
napi_schedule(&priv->rx_cq[i]->napi);
+ local_bh_enable();
+ }
netif_tx_start_all_queues(dev);
netif_device_attach(dev);
@@ -2275,7 +2286,7 @@ static int mlx4_en_change_mtu(struct net_device *dev, int new_mtu)
if (priv->tx_ring_num[TX_XDP] &&
!mlx4_en_check_xdp_mtu(dev, new_mtu))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
dev->mtu = new_mtu;
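The mlx4_en_start_port() hunk wraps napi_schedule() in local_bh_disable()/local_bh_enable() because it runs in process context here: napi_schedule() raises NET_RX_SOFTIRQ, and closing the BH-disabled section with local_bh_enable() guarantees the pending softirq is actually executed rather than left waiting for the next interrupt. The idiom:

    local_bh_disable();
    napi_schedule(&priv->rx_cq[i]->napi);  /* kick NAPI from process context */
    local_bh_enable();                     /* runs the raised softirq */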
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.h b/drivers/net/ethernet/mellanox/mlx4/en_port.h
index 040da4b..930f961 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.h
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.h
@@ -35,7 +35,6 @@
#define _MLX4_EN_PORT_H_
-#define SET_PORT_GEN_ALL_VALID 0x7
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index eac527e..f15ddba 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -33,6 +33,7 @@
#include <net/busy_poll.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/mlx4/cq.h>
#include <linux/slab.h>
#include <linux/mlx4/qp.h>
@@ -706,7 +707,8 @@ static bool mlx4_en_refill_rx_buffers(struct mlx4_en_priv *priv,
do {
if (mlx4_en_prepare_rx_desc(priv, ring,
ring->prod & ring->size_mask,
- GFP_ATOMIC | __GFP_COLD))
+ GFP_ATOMIC | __GFP_COLD |
+ __GFP_MEMALLOC))
break;
ring->prod++;
} while (--missing);
@@ -925,10 +927,12 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
length, cq->ring,
&doorbell_pending)))
goto consumed;
+ trace_xdp_exception(dev, xdp_prog, act);
goto xdp_drop_no_cnt; /* Drop on xmit failure */
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(dev, xdp_prog, act);
case XDP_DROP:
ring->xdp_drop++;
xdp_drop_no_cnt:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 5886ad7..3ed4219 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -710,7 +710,7 @@ u16 mlx4_en_select_queue(struct net_device *dev, struct sk_buff *skb,
u16 rings_p_up = priv->num_tx_rings_p_up;
u8 up = 0;
- if (dev->num_tc)
+ if (netdev_get_num_tc(dev))
return skb_tx_hash(dev, skb);
if (skb_vlan_tag_present(skb))
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index cd3638e..0509996 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -554,8 +554,9 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
break;
case MLX4_EVENT_TYPE_SRQ_LIMIT:
- mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT\n",
- __func__);
+ mlx4_dbg(dev, "%s: MLX4_EVENT_TYPE_SRQ_LIMIT. srq_no=0x%x, eq 0x%x\n",
+ __func__, be32_to_cpu(eqe->event.srq.srqn),
+ eq->eqn);
+ /* fall through */
case MLX4_EVENT_TYPE_SRQ_CATAS_ERROR:
if (mlx4_is_master(dev)) {
/* forward only to slave owning the SRQ */
@@ -570,15 +571,19 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
eq->eqn, eq->cons_index, ret);
break;
}
- mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
- __func__, slave,
- be32_to_cpu(eqe->event.srq.srqn),
- eqe->type, eqe->subtype);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: slave:%d, srq_no:0x%x, event: %02x(%02x)\n",
+ __func__, slave,
+ be32_to_cpu(eqe->event.srq.srqn),
+ eqe->type, eqe->subtype);
if (!ret && slave != dev->caps.function) {
- mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
- __func__, eqe->type,
- eqe->subtype, slave);
+ if (eqe->type ==
+ MLX4_EVENT_TYPE_SRQ_CATAS_ERROR)
+ mlx4_warn(dev, "%s: sending event %02x(%02x) to slave:%d\n",
+ __func__, eqe->type,
+ eqe->subtype, slave);
mlx4_slave_event(dev, slave, eqe);
break;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 84bab9f0..3fe885c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -672,7 +672,7 @@ int mlx4_QUERY_FUNC_CAP(struct mlx4_dev *dev, u8 gen_or_port,
MLX4_GET(field, outbox, QUERY_FUNC_CAP_PHYS_PORT_OFFSET);
func_cap->physical_port = field;
if (func_cap->physical_port != gen_or_port) {
- err = -ENOSYS;
+ err = -EINVAL;
goto out;
}
@@ -1875,7 +1875,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
*((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
*((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
- (ilog2(cache_line_size()) - 4) << 5;
+ ((ilog2(cache_line_size()) - 4) << 5) | (1 << 4);
#if defined(__LITTLE_ENDIAN)
*(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
@@ -2983,7 +2983,7 @@ static int mlx4_SET_PORT_phv_bit(struct mlx4_dev *dev, u8 port, u8 phv_bit)
return PTR_ERR(mailbox);
context = mailbox->buf;
- context->v_ignore_fcs |= SET_PORT_GEN_PHV_VALID;
+ context->flags2 |= SET_PORT_GEN_PHV_VALID;
if (phv_bit)
context->phv_en |= SET_PORT_GEN_PHV_EN;
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index bffa6f3..15ef787 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -838,7 +838,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
*/
if (hca_param.global_caps) {
mlx4_err(dev, "Unknown hca global capabilities\n");
- return -ENOSYS;
+ return -EINVAL;
}
mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
@@ -896,7 +896,7 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
PF_CONTEXT_BEHAVIOUR_MASK) {
mlx4_err(dev, "Unknown pf context behaviour %x known flags %x\n",
func_cap.pf_context_behaviour, PF_CONTEXT_BEHAVIOUR_MASK);
- return -ENOSYS;
+ return -EINVAL;
}
dev->caps.num_ports = func_cap.num_ports;
@@ -3492,7 +3492,7 @@ slave_start:
mlx4_enable_msi_x(dev);
if ((mlx4_is_mfunc(dev)) &&
!(dev->flags & MLX4_FLAG_MSI_X)) {
- err = -ENOSYS;
+ err = -EOPNOTSUPP;
mlx4_err(dev, "INTx is not supported in multi-function mode, aborting\n");
goto err_free_eq;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 88ee7d8..7a49509 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -487,6 +487,7 @@ struct mlx4_slave_state {
bool vst_qinq_supported;
u8 function;
dma_addr_t vhcr_dma;
+ u16 user_mtu[MLX4_MAX_PORTS + 1];
u16 mtu[MLX4_MAX_PORTS + 1];
__be32 ib_cap_mask[MLX4_MAX_PORTS + 1];
struct mlx4_slave_eqe eq[MLX4_MFUNC_MAX_EQES];
@@ -590,6 +591,7 @@ struct mlx4_mfunc_master_ctx {
struct mlx4_master_qp0_state qp0_state[MLX4_MAX_PORTS + 1];
int init_port_ref[MLX4_MAX_PORTS + 1];
u16 max_mtu[MLX4_MAX_PORTS + 1];
+ u16 max_user_mtu[MLX4_MAX_PORTS + 1];
u8 pptx;
u8 pprx;
int disable_mcast_ref[MLX4_MAX_PORTS + 1];
@@ -774,7 +776,9 @@ struct mlx4_vlan_table {
int max;
};
-#define SET_PORT_GEN_ALL_VALID 0x7
+#define SET_PORT_GEN_ALL_VALID (MLX4_FLAG_V_MTU_MASK | \
+ MLX4_FLAG_V_PPRX_MASK | \
+ MLX4_FLAG_V_PPTX_MASK)
#define SET_PORT_PROMISC_SHIFT 31
#define SET_PORT_MC_PROMISC_SHIFT 30
@@ -787,7 +791,7 @@ enum {
struct mlx4_set_port_general_context {
u16 reserved1;
- u8 v_ignore_fcs;
+ u8 flags2;
u8 flags;
union {
u8 ignore_fcs;
@@ -803,7 +807,8 @@ struct mlx4_set_port_general_context {
u16 reserved4;
u32 reserved5;
u8 phv_en;
- u8 reserved6[3];
+ u8 reserved6[5];
+ __be16 user_mtu;
};
struct mlx4_set_port_rqp_calc_context {
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index b656dd5..5053c94 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -50,7 +50,11 @@
#define MLX4_STATS_ERROR_COUNTERS_MASK 0x1ffc30ULL
#define MLX4_STATS_PORT_COUNTERS_MASK 0x1fe00000ULL
-#define MLX4_FLAG_V_IGNORE_FCS_MASK 0x2
+#define MLX4_FLAG2_V_IGNORE_FCS_MASK BIT(1)
+#define MLX4_FLAG2_V_USER_MTU_MASK BIT(5)
+#define MLX4_FLAG_V_MTU_MASK BIT(0)
+#define MLX4_FLAG_V_PPRX_MASK BIT(1)
+#define MLX4_FLAG_V_PPTX_MASK BIT(2)
#define MLX4_IGNORE_FCS_MASK 0x1
#define MLX4_TC_MAX_NUMBER 8
@@ -1239,13 +1243,96 @@ void mlx4_reset_roce_gids(struct mlx4_dev *dev, int slave)
return;
}
+static void
+mlx4_en_set_port_mtu(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ u16 mtu, prev_mtu;
+
+ /* MTU is configured as the max MTU among all
+ * the functions on the port.
+ */
+ mtu = be16_to_cpu(gen_context->mtu);
+ mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
+ prev_mtu = slave_st->mtu[port];
+ slave_st->mtu[port] = mtu;
+ if (mtu > master->max_mtu[port])
+ master->max_mtu[port] = mtu;
+ if (mtu < prev_mtu && prev_mtu == master->max_mtu[port]) {
+ int i;
+
+ slave_st->mtu[port] = mtu;
+ master->max_mtu[port] = mtu;
+ for (i = 0; i < dev->num_slaves; i++)
+ master->max_mtu[port] =
+ max_t(u16, master->max_mtu[port],
+ master->slave_state[i].mtu[port]);
+ }
+ gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_user_mtu(struct mlx4_dev *dev, int slave, int port,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+ struct mlx4_slave_state *slave_st = &master->slave_state[slave];
+ u16 user_mtu, prev_user_mtu;
+
+ /* User Mtu is configured as the max USER_MTU among all
+ * the functions on the port.
+ */
+ user_mtu = be16_to_cpu(gen_context->user_mtu);
+ user_mtu = min_t(int, user_mtu, dev->caps.eth_mtu_cap[port]);
+ prev_user_mtu = slave_st->user_mtu[port];
+ slave_st->user_mtu[port] = user_mtu;
+ if (user_mtu > master->max_user_mtu[port])
+ master->max_user_mtu[port] = user_mtu;
+ if (user_mtu < prev_user_mtu &&
+ prev_user_mtu == master->max_user_mtu[port]) {
+ int i;
+
+ slave_st->user_mtu[port] = user_mtu;
+ master->max_user_mtu[port] = user_mtu;
+ for (i = 0; i < dev->num_slaves; i++)
+ master->max_user_mtu[port] =
+ max_t(u16, master->max_user_mtu[port],
+ master->slave_state[i].user_mtu[port]);
+ }
+ gen_context->user_mtu = cpu_to_be16(master->max_user_mtu[port]);
+}
+
+static void
+mlx4_en_set_port_global_pause(struct mlx4_dev *dev, int slave,
+ struct mlx4_set_port_general_context *gen_context)
+{
+ struct mlx4_priv *priv = mlx4_priv(dev);
+ struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
+
+ /* Slave cannot change Global Pause configuration */
+ if (slave != mlx4_master_func_num(dev) &&
+ (gen_context->pptx != master->pptx ||
+ gen_context->pprx != master->pprx)) {
+ gen_context->pptx = master->pptx;
+ gen_context->pprx = master->pprx;
+ mlx4_warn(dev, "denying Global Pause change for slave:%d\n",
+ slave);
+ } else {
+ master->pptx = gen_context->pptx;
+ master->pprx = gen_context->pprx;
+ }
+}
+
static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
u8 op_mod, struct mlx4_cmd_mailbox *inbox)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_port_info *port_info;
- struct mlx4_mfunc_master_ctx *master = &priv->mfunc.master;
- struct mlx4_slave_state *slave_st = &master->slave_state[slave];
struct mlx4_set_port_rqp_calc_context *qpn_context;
struct mlx4_set_port_general_context *gen_context;
struct mlx4_roce_gid_entry *gid_entry_tbl, *gid_entry_mbox, *gid_entry_mb1;
@@ -1256,7 +1343,6 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
int base;
u32 in_modifier;
u32 promisc;
- u16 mtu, prev_mtu;
int err;
int i, j;
int offset;
@@ -1269,7 +1355,9 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
is_eth = op_mod;
port_info = &priv->port[port];
- /* Slaves cannot perform SET_PORT operations except changing MTU */
+ /* Slaves cannot perform SET_PORT operations,
+ * except for changing MTU and USER_MTU.
+ */
if (is_eth) {
if (slave != dev->caps.function &&
in_modifier != MLX4_SET_PORT_GENERAL &&
@@ -1297,40 +1385,20 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
break;
case MLX4_SET_PORT_GENERAL:
gen_context = inbox->buf;
- /* Mtu is configured as the max MTU among all the
- * the functions on the port. */
- mtu = be16_to_cpu(gen_context->mtu);
- mtu = min_t(int, mtu, dev->caps.eth_mtu_cap[port] +
- ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN);
- prev_mtu = slave_st->mtu[port];
- slave_st->mtu[port] = mtu;
- if (mtu > master->max_mtu[port])
- master->max_mtu[port] = mtu;
- if (mtu < prev_mtu && prev_mtu ==
- master->max_mtu[port]) {
- slave_st->mtu[port] = mtu;
- master->max_mtu[port] = mtu;
- for (i = 0; i < dev->num_slaves; i++) {
- master->max_mtu[port] =
- max(master->max_mtu[port],
- master->slave_state[i].mtu[port]);
- }
- }
- gen_context->mtu = cpu_to_be16(master->max_mtu[port]);
- /* Slave cannot change Global Pause configuration */
- if (slave != mlx4_master_func_num(dev) &&
- ((gen_context->pptx != master->pptx) ||
- (gen_context->pprx != master->pprx))) {
- gen_context->pptx = master->pptx;
- gen_context->pprx = master->pprx;
- mlx4_warn(dev,
- "denying Global Pause change for slave:%d\n",
- slave);
- } else {
- master->pptx = gen_context->pptx;
- master->pprx = gen_context->pprx;
- }
+ if (gen_context->flags & MLX4_FLAG_V_MTU_MASK)
+ mlx4_en_set_port_mtu(dev, slave, port,
+ gen_context);
+
+ if (gen_context->flags2 & MLX4_FLAG2_V_USER_MTU_MASK)
+ mlx4_en_set_port_user_mtu(dev, slave, port,
+ gen_context);
+
+ if (gen_context->flags &
+ (MLX4_FLAG_V_PPRX_MASK | MLX4_FLAG_V_PPTX_MASK))
+ mlx4_en_set_port_global_pause(dev, slave,
+ gen_context);
+
break;
case MLX4_SET_PORT_GID_TABLE:
/* change to MULTIPLE entries: number of guest's gids
@@ -1608,6 +1676,30 @@ int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
}
EXPORT_SYMBOL(mlx4_SET_PORT_qpn_calc);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu)
+{
+ struct mlx4_cmd_mailbox *mailbox;
+ struct mlx4_set_port_general_context *context;
+ u32 in_mod;
+ int err;
+
+ mailbox = mlx4_alloc_cmd_mailbox(dev);
+ if (IS_ERR(mailbox))
+ return PTR_ERR(mailbox);
+ context = mailbox->buf;
+ context->flags2 |= MLX4_FLAG2_V_USER_MTU_MASK;
+ context->user_mtu = cpu_to_be16(user_mtu);
+
+ in_mod = MLX4_SET_PORT_GENERAL << 8 | port;
+ err = mlx4_cmd(dev, mailbox->dma, in_mod, MLX4_SET_PORT_ETH_OPCODE,
+ MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B,
+ MLX4_CMD_WRAPPED);
+
+ mlx4_free_cmd_mailbox(dev, mailbox);
+ return err;
+}
+EXPORT_SYMBOL(mlx4_SET_PORT_user_mtu);
+
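mlx4_SET_PORT_user_mtu() follows the standard mlx4 command pattern: allocate a mailbox, fill a struct mlx4_set_port_general_context with the valid bit (MLX4_FLAG2_V_USER_MTU_MASK) and the value, then issue MLX4_CMD_SET_PORT with the MLX4_SET_PORT_GENERAL modifier. On the master side the per-function values feed the max computation in mlx4_en_set_port_user_mtu() above; a sketch of that recompute when one function lowers its value (helper name is illustrative):

    /* port maximum = max over every function's USER_MTU on that port */
    static u16 my_port_max_user_mtu(struct mlx4_mfunc_master_ctx *master,
                                    int num_slaves, int port)
    {
        u16 max = 0;
        int i;

        for (i = 0; i < num_slaves; i++)
            max = max_t(u16, max,
                        master->slave_state[i].user_mtu[port]);
        return max;
    }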
int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
{
struct mlx4_cmd_mailbox *mailbox;
@@ -1619,7 +1711,7 @@ int mlx4_SET_PORT_fcs_check(struct mlx4_dev *dev, u8 port, u8 ignore_fcs_value)
if (IS_ERR(mailbox))
return PTR_ERR(mailbox);
context = mailbox->buf;
- context->v_ignore_fcs |= MLX4_FLAG_V_IGNORE_FCS_MASK;
+ context->flags2 |= MLX4_FLAG2_V_IGNORE_FCS_MASK;
if (ignore_fcs_value)
context->ignore_fcs |= MLX4_IGNORE_FCS_MASK;
else
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 56185a0..6fe9f76 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -77,6 +77,7 @@ struct res_common {
int from_state;
int to_state;
int removing;
+ const char *func_name;
};
enum {
@@ -236,8 +237,8 @@ static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
struct rb_node *node = root->rb_node;
while (node) {
- struct res_common *res = container_of(node, struct res_common,
- node);
+ struct res_common *res = rb_entry(node, struct res_common,
+ node);
if (res_id < res->res_id)
node = node->rb_left;
@@ -255,8 +256,8 @@ static int res_tracker_insert(struct rb_root *root, struct res_common *res)
/* Figure out where to put new node */
while (*new) {
- struct res_common *this = container_of(*new, struct res_common,
- node);
+ struct res_common *this = rb_entry(*new, struct res_common,
+ node);
parent = *new;
if (res->res_id < this->res_id)
@@ -837,6 +838,36 @@ static int mpt_mask(struct mlx4_dev *dev)
return dev->caps.num_mpts - 1;
}
+static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
+{
+ switch (t) {
+ case RES_QP:
+ return "QP";
+ case RES_CQ:
+ return "CQ";
+ case RES_SRQ:
+ return "SRQ";
+ case RES_XRCD:
+ return "XRCD";
+ case RES_MPT:
+ return "MPT";
+ case RES_MTT:
+ return "MTT";
+ case RES_MAC:
+ return "MAC";
+ case RES_VLAN:
+ return "VLAN";
+ case RES_COUNTER:
+ return "COUNTER";
+ case RES_FS_RULE:
+ return "FS_RULE";
+ case RES_EQ:
+ return "EQ";
+ default:
+ return "INVALID RESOURCE";
+ }
+}
+
static void *find_res(struct mlx4_dev *dev, u64 res_id,
enum mlx4_resource type)
{
@@ -846,9 +877,9 @@ static void *find_res(struct mlx4_dev *dev, u64 res_id,
res_id);
}
-static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
- enum mlx4_resource type,
- void *res)
+static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
+ enum mlx4_resource type,
+ void *res, const char *func_name)
{
struct res_common *r;
int err = 0;
@@ -861,6 +892,10 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
}
if (r->state == RES_ANY_BUSY) {
+ mlx4_warn(dev,
+ "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
+ func_name, slave, res_id, mlx4_resource_type_to_str(type),
+ r->func_name);
err = -EBUSY;
goto exit;
}
@@ -872,6 +907,7 @@ static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
r->from_state = r->state;
r->state = RES_ANY_BUSY;
+ r->func_name = func_name;
if (res)
*((struct res_common **)res) = r;
@@ -881,6 +917,9 @@ exit:
return err;
}
+#define get_res(dev, slave, res_id, type, res) \
+ _get_res((dev), (slave), (res_id), (type), (res), __func__)
+
int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
enum mlx4_resource type,
u64 res_id, int *slave)
@@ -911,8 +950,10 @@ static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, res_id, type);
- if (r)
+ if (r) {
r->state = r->from_state;
+ r->func_name = "";
+ }
spin_unlock_irq(mlx4_tlock(dev));
}
@@ -1396,7 +1437,7 @@ static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
case RES_MTT:
return remove_mtt_ok((struct res_mtt *)res, extra);
case RES_MAC:
- return -ENOSYS;
+ return -EOPNOTSUPP;
case RES_EQ:
return remove_eq_ok((struct res_eq *)res);
case RES_COUNTER:
@@ -2980,6 +3021,9 @@ int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
put_res(dev, slave, srqn, RES_SRQ);
qp->srq = srq;
}
+
+ /* Save param3 for dynamic changes from VST back to VGT */
+ qp->param3 = qpc->param3;
put_res(dev, slave, rcqn, RES_CQ);
put_res(dev, slave, mtt_base, RES_MTT);
res_end_move(dev, slave, RES_QP, qpn);
@@ -3772,7 +3816,6 @@ int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
int qpn = vhcr->in_modifier & 0x7fffff;
struct res_qp *qp;
u8 orig_sched_queue;
- __be32 orig_param3 = qpc->param3;
u8 orig_vlan_control = qpc->pri_path.vlan_control;
u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
u8 orig_pri_path_fl = qpc->pri_path.fl;
@@ -3814,7 +3857,6 @@ out:
*/
if (!err) {
qp->sched_queue = orig_sched_queue;
- qp->param3 = orig_param3;
qp->vlan_control = orig_vlan_control;
qp->fvl_rx = orig_fvl_rx;
qp->pri_path_fl = orig_pri_path_fl;
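The resource-tracker change threads the caller's name through every acquisition so a busy resource can report who currently holds it: get_res() becomes a macro expanding to _get_res(..., __func__), the name is stored in res_common on acquire and cleared in put_res(). A stripped-down sketch of the trick:

    struct my_res {
        bool busy;
        const char *func_name;      /* who currently holds the resource */
    };

    static int _my_get_res(struct my_res *r, const char *func_name)
    {
        if (r->busy) {
            pr_warn("%s: resource busy, held by %s\n",
                    func_name, r->func_name);
            return -EBUSY;
        }
        r->busy = true;
        r->func_name = func_name;
        return 0;
    }

    #define my_get_res(r) _my_get_res((r), __func__)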
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index a473cea..46f728d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -51,6 +51,9 @@
#define MLX5_SET_CFG(p, f, v) MLX5_SET(create_flow_group_in, p, f, v)
+#define MLX5E_HW2SW_MTU(hwmtu) ((hwmtu) - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+#define MLX5E_SW2HW_MTU(swmtu) ((swmtu) + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
+
#define MLX5E_MAX_NUM_TC 8
#define MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE 0x6
@@ -98,6 +101,7 @@
#define MLX5E_LOG_INDIR_RQT_SIZE 0x7
#define MLX5E_INDIR_RQT_SIZE BIT(MLX5E_LOG_INDIR_RQT_SIZE)
+#define MLX5E_MIN_NUM_CHANNELS 0x1
#define MLX5E_MAX_NUM_CHANNELS (MLX5E_INDIR_RQT_SIZE >> 1)
#define MLX5E_MAX_NUM_SQS (MLX5E_MAX_NUM_CHANNELS * MLX5E_MAX_NUM_TC)
#define MLX5E_TX_CQ_POLL_BUDGET 128
@@ -259,6 +263,7 @@ struct mlx5e_tstamp {
struct mlx5_core_dev *mdev;
struct ptp_clock *ptp;
struct ptp_clock_info ptp_info;
+ u8 *pps_pin_caps;
};
enum {
@@ -369,6 +374,7 @@ struct mlx5e_rq {
unsigned long state;
int ix;
+ u16 rx_headroom;
struct mlx5e_rx_am am; /* Adaptive Moderation */
struct bpf_prog *xdp_prog;
@@ -567,8 +573,9 @@ struct mlx5e_vlan_table {
unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
struct mlx5_flow_handle *active_vlans_rule[VLAN_N_VID];
struct mlx5_flow_handle *untagged_rule;
- struct mlx5_flow_handle *any_vlan_rule;
- bool filter_disabled;
+ struct mlx5_flow_handle *any_cvlan_rule;
+ struct mlx5_flow_handle *any_svlan_rule;
+ bool filter_disabled;
};
struct mlx5e_l2_table {
@@ -776,9 +783,11 @@ void mlx5e_fill_hwstamp(struct mlx5e_tstamp *clock, u64 timestamp,
struct skb_shared_hwtstamps *hwts);
void mlx5e_timestamp_init(struct mlx5e_priv *priv);
void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv);
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ struct ptp_clock_event *event);
int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr);
int mlx5e_hwstamp_get(struct net_device *dev, struct ifreq *ifr);
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val);
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val);
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
u16 vid);
@@ -839,12 +848,6 @@ static inline u32 mlx5e_get_wqe_mtt_offset(struct mlx5e_rq *rq, u16 wqe_ix)
return wqe_ix * ALIGN(MLX5_MPWRQ_PAGES_PER_WQE, 8);
}
-static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
-{
- return min_t(int, mdev->priv.eq_table.num_comp_vectors,
- MLX5E_MAX_NUM_CHANNELS);
-}
-
extern const struct ethtool_ops mlx5e_ethtool_ops;
#ifdef CONFIG_MLX5_CORE_EN_DCB
extern const struct dcbnl_rtnl_ops mlx5e_dcbnl_ops;
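The new MLX5E_HW2SW_MTU/MLX5E_SW2HW_MTU macros convert between the software MTU (the L3 payload size the stack sees) and the hardware frame size by adding or stripping the Ethernet header, one VLAN tag and the FCS:

    /* MLX5E_SW2HW_MTU(1500) == 1500 + ETH_HLEN(14) + VLAN_HLEN(4)
     *                               + ETH_FCS_LEN(4) == 1522 */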
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
index 746a92c..37e66ee 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_clock.c
@@ -37,6 +37,22 @@ enum {
MLX5E_CYCLES_SHIFT = 23
};
+enum {
+ MLX5E_PIN_MODE_IN = 0x0,
+ MLX5E_PIN_MODE_OUT = 0x1,
+};
+
+enum {
+ MLX5E_OUT_PATTERN_PULSE = 0x0,
+ MLX5E_OUT_PATTERN_PERIODIC = 0x1,
+};
+
+enum {
+ MLX5E_EVENT_MODE_DISABLE = 0x0,
+ MLX5E_EVENT_MODE_REPETETIVE = 0x1,
+ MLX5E_EVENT_MODE_ONCE_TILL_ARM = 0x2,
+};
+
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
struct skb_shared_hwtstamps *hwts)
{
@@ -90,11 +106,12 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
return -ERANGE;
}
+ mutex_lock(&priv->state_lock);
/* RX HW timestamp */
switch (config.rx_filter) {
case HWTSTAMP_FILTER_NONE:
/* Reset CQE compression to Admin default */
- mlx5e_modify_rx_cqe_compression(priv, priv->params.rx_cqe_compress_def);
+ mlx5e_modify_rx_cqe_compression_locked(priv, priv->params.rx_cqe_compress_def);
break;
case HWTSTAMP_FILTER_ALL:
case HWTSTAMP_FILTER_SOME:
@@ -112,14 +129,16 @@ int mlx5e_hwstamp_set(struct net_device *dev, struct ifreq *ifr)
case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
/* Disable CQE compression */
netdev_warn(dev, "Disabling cqe compression");
- mlx5e_modify_rx_cqe_compression(priv, false);
+ mlx5e_modify_rx_cqe_compression_locked(priv, false);
config.rx_filter = HWTSTAMP_FILTER_ALL;
break;
default:
+ mutex_unlock(&priv->state_lock);
return -ERANGE;
}
memcpy(&priv->tstamp.hwtstamp_config, &config, sizeof(config));
+ mutex_unlock(&priv->state_lock);
return copy_to_user(ifr->ifr_data, &config,
sizeof(config)) ? -EFAULT : 0;
@@ -189,6 +208,18 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
int neg_adj = 0;
struct mlx5e_tstamp *tstamp = container_of(ptp, struct mlx5e_tstamp,
ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+
+ if (MLX5_CAP_GEN(priv->mdev, pps_modify)) {
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ /* For future use: add a loop to find all 1PPS out pins */
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+ MLX5_SET(mtpps_reg, in, out_periodic_adjustment, delta & 0xFFFF);
+
+ mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ }
if (delta < 0) {
neg_adj = 1;
@@ -208,6 +239,124 @@ static int mlx5e_ptp_adjfreq(struct ptp_clock_info *ptp, s32 delta)
return 0;
}
+static int mlx5e_extts_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u8 pattern = 0;
+ int pin = -1;
+ int err = 0;
+
+ if (!MLX5_CAP_GEN(priv->mdev, pps) ||
+ !MLX5_CAP_GEN(priv->mdev, pps_modify))
+ return -EOPNOTSUPP;
+
+ if (rq->extts.index >= tstamp->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(tstamp->ptp, PTP_PF_EXTTS, rq->extts.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+
+ if (rq->extts.flags & PTP_FALLING_EDGE)
+ pattern = 1;
+
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_IN);
+ MLX5_SET(mtpps_reg, in, pattern, pattern);
+ MLX5_SET(mtpps_reg, in, enable, on);
+
+ err = mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+ if (err)
+ return err;
+
+ return mlx5_set_mtppse(priv->mdev, pin, 0,
+ MLX5E_EVENT_MODE_REPETETIVE & on);
+}
+
+static int mlx5e_perout_configure(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ struct mlx5e_tstamp *tstamp =
+ container_of(ptp, struct mlx5e_tstamp, ptp_info);
+ struct mlx5e_priv *priv =
+ container_of(tstamp, struct mlx5e_priv, tstamp);
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+ u64 nsec_now, nsec_delta, time_stamp;
+ u64 cycles_now, cycles_delta;
+ struct timespec64 ts;
+ unsigned long flags;
+ int pin = -1;
+ s64 ns;
+
+ if (!MLX5_CAP_GEN(priv->mdev, pps_modify))
+ return -EOPNOTSUPP;
+
+ if (rq->perout.index >= tstamp->ptp_info.n_pins)
+ return -EINVAL;
+
+ if (on) {
+ pin = ptp_find_pin(tstamp->ptp, PTP_PF_PEROUT,
+ rq->perout.index);
+ if (pin < 0)
+ return -EBUSY;
+ }
+
+ ts.tv_sec = rq->perout.period.sec;
+ ts.tv_nsec = rq->perout.period.nsec;
+ ns = timespec64_to_ns(&ts);
+ if (on)
+ if ((ns >> 1) != 500000000LL)
+ return -EINVAL;
+ ts.tv_sec = rq->perout.start.sec;
+ ts.tv_nsec = rq->perout.start.nsec;
+ ns = timespec64_to_ns(&ts);
+ cycles_now = mlx5_read_internal_timer(tstamp->mdev);
+ write_lock_irqsave(&tstamp->lock, flags);
+ nsec_now = timecounter_cyc2time(&tstamp->clock, cycles_now);
+ nsec_delta = ns - nsec_now;
+ cycles_delta = div64_u64(nsec_delta << tstamp->cycles.shift,
+ tstamp->cycles.mult);
+ write_unlock_irqrestore(&tstamp->lock, flags);
+ time_stamp = cycles_now + cycles_delta;
+ MLX5_SET(mtpps_reg, in, pin, pin);
+ MLX5_SET(mtpps_reg, in, pin_mode, MLX5E_PIN_MODE_OUT);
+ MLX5_SET(mtpps_reg, in, pattern, MLX5E_OUT_PATTERN_PERIODIC);
+ MLX5_SET(mtpps_reg, in, enable, on);
+ MLX5_SET64(mtpps_reg, in, time_stamp, time_stamp);
+
+ return mlx5_set_mtpps(priv->mdev, in, sizeof(in));
+}
+
+static int mlx5e_ptp_enable(struct ptp_clock_info *ptp,
+ struct ptp_clock_request *rq,
+ int on)
+{
+ switch (rq->type) {
+ case PTP_CLK_REQ_EXTTS:
+ return mlx5e_extts_configure(ptp, rq, on);
+ case PTP_CLK_REQ_PEROUT:
+ return mlx5e_perout_configure(ptp, rq, on);
+ default:
+ return -EOPNOTSUPP;
+ }
+ return 0;
+}
+
+static int mlx5e_ptp_verify(struct ptp_clock_info *ptp, unsigned int pin,
+ enum ptp_pin_function func, unsigned int chan)
+{
+ return (func == PTP_PF_PHYSYNC) ? -EOPNOTSUPP : 0;
+}
+
static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.owner = THIS_MODULE,
.max_adj = 100000000,
@@ -221,6 +370,7 @@ static const struct ptp_clock_info mlx5e_ptp_clock_info = {
.gettime64 = mlx5e_ptp_gettime,
.settime64 = mlx5e_ptp_settime,
.enable = NULL,
+ .verify = NULL,
};
static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
@@ -229,6 +379,62 @@ static void mlx5e_timestamp_init_config(struct mlx5e_tstamp *tstamp)
tstamp->hwtstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
+static int mlx5e_init_pin_config(struct mlx5e_tstamp *tstamp)
+{
+ int i;
+
+ tstamp->ptp_info.pin_config =
+ kzalloc(sizeof(*tstamp->ptp_info.pin_config) *
+ tstamp->ptp_info.n_pins, GFP_KERNEL);
+ if (!tstamp->ptp_info.pin_config)
+ return -ENOMEM;
+ tstamp->ptp_info.enable = mlx5e_ptp_enable;
+ tstamp->ptp_info.verify = mlx5e_ptp_verify;
+
+ for (i = 0; i < tstamp->ptp_info.n_pins; i++) {
+ snprintf(tstamp->ptp_info.pin_config[i].name,
+ sizeof(tstamp->ptp_info.pin_config[i].name),
+ "mlx5_pps%d", i);
+ tstamp->ptp_info.pin_config[i].index = i;
+ tstamp->ptp_info.pin_config[i].func = PTP_PF_NONE;
+ tstamp->ptp_info.pin_config[i].chan = i;
+ }
+
+ return 0;
+}
+
+static void mlx5e_get_pps_caps(struct mlx5e_priv *priv,
+ struct mlx5e_tstamp *tstamp)
+{
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ mlx5_query_mtpps(priv->mdev, out, sizeof(out));
+
+ tstamp->ptp_info.n_pins = MLX5_GET(mtpps_reg, out,
+ cap_number_of_pps_pins);
+ tstamp->ptp_info.n_ext_ts = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_in_pins);
+ tstamp->ptp_info.n_per_out = MLX5_GET(mtpps_reg, out,
+ cap_max_num_of_pps_out_pins);
+
+ tstamp->pps_pin_caps[0] = MLX5_GET(mtpps_reg, out, cap_pin_0_mode);
+ tstamp->pps_pin_caps[1] = MLX5_GET(mtpps_reg, out, cap_pin_1_mode);
+ tstamp->pps_pin_caps[2] = MLX5_GET(mtpps_reg, out, cap_pin_2_mode);
+ tstamp->pps_pin_caps[3] = MLX5_GET(mtpps_reg, out, cap_pin_3_mode);
+ tstamp->pps_pin_caps[4] = MLX5_GET(mtpps_reg, out, cap_pin_4_mode);
+ tstamp->pps_pin_caps[5] = MLX5_GET(mtpps_reg, out, cap_pin_5_mode);
+ tstamp->pps_pin_caps[6] = MLX5_GET(mtpps_reg, out, cap_pin_6_mode);
+ tstamp->pps_pin_caps[7] = MLX5_GET(mtpps_reg, out, cap_pin_7_mode);
+}
+
+void mlx5e_pps_event_handler(struct mlx5e_priv *priv,
+ struct ptp_clock_event *event)
+{
+ struct mlx5e_tstamp *tstamp = &priv->tstamp;
+
+ ptp_clock_event(tstamp->ptp, event);
+}
+
void mlx5e_timestamp_init(struct mlx5e_priv *priv)
{
struct mlx5e_tstamp *tstamp = &priv->tstamp;
@@ -272,6 +478,18 @@ void mlx5e_timestamp_init(struct mlx5e_priv *priv)
tstamp->ptp_info = mlx5e_ptp_clock_info;
snprintf(tstamp->ptp_info.name, 16, "mlx5 ptp");
+ /* Initialize 1PPS data structures */
+#define MAX_PIN_NUM 8
+ tstamp->pps_pin_caps = kzalloc(sizeof(u8) * MAX_PIN_NUM, GFP_KERNEL);
+ if (tstamp->pps_pin_caps) {
+ if (MLX5_CAP_GEN(priv->mdev, pps))
+ mlx5e_get_pps_caps(priv, tstamp);
+ if (tstamp->ptp_info.n_pins)
+ mlx5e_init_pin_config(tstamp);
+ } else {
+ mlx5_core_warn(priv->mdev, "1PPS initialization failed\n");
+ }
+
tstamp->ptp = ptp_clock_register(&tstamp->ptp_info,
&priv->mdev->pdev->dev);
if (IS_ERR(tstamp->ptp)) {
@@ -293,5 +511,8 @@ void mlx5e_timestamp_cleanup(struct mlx5e_priv *priv)
priv->tstamp.ptp = NULL;
}
+ kfree(tstamp->pps_pin_caps);
+ kfree(tstamp->ptp_info.pin_config);
+
cancel_delayed_work_sync(&tstamp->overflow_work);
}
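A sketch of the start-time arithmetic in mlx5e_perout_configure() above: a requested start time in nanoseconds is translated into a raw free-running counter value via the cyclecounter's mult/shift pair. The clock parameters and sample values below are invented for illustration; the driver reads the real ones from tstamp->cycles and uses div64_u64 for the division.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t shift = 23;           /* hypothetical cyclecounter shift */
	uint32_t mult = 1u << 21;      /* hypothetical cyclecounter mult */
	uint64_t cycles_now = 1000000; /* current free-running HW counter */
	uint64_t nsec_now = 4000000;   /* timecounter_cyc2time(cycles_now) */
	uint64_t ns = 5000000;         /* requested start time, in ns */

	uint64_t nsec_delta = ns - nsec_now;
	uint64_t cycles_delta = (nsec_delta << shift) / mult;
	uint64_t time_stamp = cycles_now + cycles_delta; /* programmed via MTPPS */

	printf("arm the HW timer at counter value %llu\n",
	       (unsigned long long)time_stamp);
	return 0;
}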
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
index 33a399a..6236ce9 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_ethtool.c
@@ -170,7 +170,8 @@ static int mlx5e_get_sset_count(struct net_device *dev, int sset)
case ETH_SS_STATS:
return NUM_SW_COUNTERS +
MLX5E_NUM_Q_CNTRS(priv) +
- NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS +
+ NUM_VPORT_COUNTERS + NUM_PPORT_COUNTERS(priv) +
+ NUM_PCIE_COUNTERS(priv) +
MLX5E_NUM_RQ_STATS(priv) +
MLX5E_NUM_SQ_STATS(priv) +
MLX5E_NUM_PFC_COUNTERS(priv) +
@@ -218,6 +219,14 @@ static void mlx5e_fill_stats_strings(struct mlx5e_priv *priv, uint8_t *data)
strcpy(data + (idx++) * ETH_GSTRING_LEN,
pport_2819_stats_desc[i].format);
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pport_phy_statistical_stats_desc[i].format);
+
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+ strcpy(data + (idx++) * ETH_GSTRING_LEN,
+ pcie_perf_stats_desc[i].format);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
sprintf(data + (idx++) * ETH_GSTRING_LEN,
@@ -330,6 +339,14 @@ static void mlx5e_get_ethtool_stats(struct net_device *dev,
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.RFC_2819_counters,
pport_2819_stats_desc, i);
+ for (i = 0; i < NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.phy_statistical_counters,
+ pport_phy_statistical_stats_desc, i);
+
+ for (i = 0; i < NUM_PCIE_PERF_COUNTERS(priv); i++)
+ data[idx++] = MLX5E_READ_CTR32_BE(&priv->stats.pcie.pcie_perf_counters,
+ pcie_perf_stats_desc, i);
+
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
for (i = 0; i < NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS; i++)
data[idx++] = MLX5E_READ_CTR64_BE(&priv->stats.pport.per_prio_counters[prio],
@@ -535,7 +552,7 @@ static void mlx5e_get_channels(struct net_device *dev,
{
struct mlx5e_priv *priv = netdev_priv(dev);
- ch->max_combined = mlx5e_get_max_num_channels(priv->mdev);
+ ch->max_combined = priv->profile->max_nch(priv->mdev);
ch->combined_count = priv->params.num_channels;
}
@@ -543,7 +560,6 @@ static int mlx5e_set_channels(struct net_device *dev,
struct ethtool_channels *ch)
{
struct mlx5e_priv *priv = netdev_priv(dev);
- int ncv = mlx5e_get_max_num_channels(priv->mdev);
unsigned int count = ch->combined_count;
bool arfs_enabled;
bool was_opened;
@@ -554,16 +570,6 @@ static int mlx5e_set_channels(struct net_device *dev,
__func__);
return -EINVAL;
}
- if (ch->rx_count || ch->tx_count) {
- netdev_info(dev, "%s: separate rx/tx count not supported\n",
- __func__);
- return -EINVAL;
- }
- if (count > ncv) {
- netdev_info(dev, "%s: count (%d) > max (%d)\n",
- __func__, count, ncv);
- return -EINVAL;
- }
if (priv->params.num_channels == count)
return 0;
@@ -1459,8 +1465,6 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
{
struct mlx5e_priv *priv = netdev_priv(netdev);
struct mlx5_core_dev *mdev = priv->mdev;
- int err = 0;
- bool reset;
if (!MLX5_CAP_GEN(mdev, cqe_compression))
return -ENOTSUPP;
@@ -1470,17 +1474,10 @@ static int set_pflag_rx_cqe_compress(struct net_device *netdev,
return -EINVAL;
}
- reset = test_bit(MLX5E_STATE_OPENED, &priv->state);
-
- if (reset)
- mlx5e_close_locked(netdev);
-
- MLX5E_SET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS, enable);
+ mlx5e_modify_rx_cqe_compression_locked(priv, enable);
priv->params.rx_cqe_compress_def = enable;
- if (reset)
- err = mlx5e_open_locked(netdev);
- return err;
+ return 0;
}
static int mlx5e_handle_pflag(struct net_device *netdev,
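A sketch of the capability-gating idiom the new stats paths rely on: multiplying an array size by a 0/1 capability bit makes the string-filling, value-filling and sset-count loops agree without extra branches. Names below are stand-ins, not the driver's:

#include <stdio.h>

struct desc { const char *name; };

static const struct desc pcie_perf_desc[] = {
	{ "rx_pci_signal_integrity" },
	{ "tx_pci_signal_integrity" },
};

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
#define NUM_PCIE_PERF(cap) (ARRAY_SIZE(pcie_perf_desc) * (cap))

int main(void)
{
	int cap = 1; /* stands in for MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group) */

	/* with cap == 0 the loop never runs and the count contributes 0 */
	for (unsigned int i = 0; i < NUM_PCIE_PERF(cap); i++)
		printf("%s\n", pcie_perf_desc[i].name);
	return 0;
}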
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
index 1fe80de..92d8364 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs.c
@@ -150,7 +150,8 @@ static int mlx5e_vport_context_update_vlans(struct mlx5e_priv *priv)
enum mlx5e_vlan_rule_type {
MLX5E_VLAN_RULE_TYPE_UNTAGGED,
- MLX5E_VLAN_RULE_TYPE_ANY_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID,
+ MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID,
MLX5E_VLAN_RULE_TYPE_MATCH_VID,
};
@@ -172,19 +173,31 @@ static int __mlx5e_add_vlan_rule(struct mlx5e_priv *priv,
dest.ft = priv->fs.l2.ft.t;
spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS;
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+
switch (rule_type) {
case MLX5E_VLAN_RULE_TYPE_UNTAGGED:
rule_p = &priv->fs.vlan.untagged_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- rule_p = &priv->fs.vlan.any_vlan_rule;
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ rule_p = &priv->fs.vlan.any_cvlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ rule_p = &priv->fs.vlan.any_svlan_rule;
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.svlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.svlan_tag, 1);
break;
default: /* MLX5E_VLAN_RULE_TYPE_MATCH_VID */
rule_p = &priv->fs.vlan.active_vlans_rule[vid];
- MLX5_SET(fte_match_param, spec->match_value, outer_headers.vlan_tag, 1);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
+ outer_headers.cvlan_tag);
+ MLX5_SET(fte_match_param, spec->match_value, outer_headers.cvlan_tag, 1);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria,
outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid,
@@ -235,10 +248,16 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
priv->fs.vlan.untagged_rule = NULL;
}
break;
- case MLX5E_VLAN_RULE_TYPE_ANY_VID:
- if (priv->fs.vlan.any_vlan_rule) {
- mlx5_del_flow_rules(priv->fs.vlan.any_vlan_rule);
- priv->fs.vlan.any_vlan_rule = NULL;
+ case MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID:
+ if (priv->fs.vlan.any_cvlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_cvlan_rule);
+ priv->fs.vlan.any_cvlan_rule = NULL;
+ }
+ break;
+ case MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID:
+ if (priv->fs.vlan.any_svlan_rule) {
+ mlx5_del_flow_rules(priv->fs.vlan.any_svlan_rule);
+ priv->fs.vlan.any_svlan_rule = NULL;
}
break;
case MLX5E_VLAN_RULE_TYPE_MATCH_VID:
@@ -252,6 +271,23 @@ static void mlx5e_del_vlan_rule(struct mlx5e_priv *priv,
}
}
+static void mlx5e_del_any_vid_rules(struct mlx5e_priv *priv)
+{
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
+static int mlx5e_add_any_vid_rules(struct mlx5e_priv *priv)
+{
+ int err;
+
+ err = mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_CTAG_VID, 0);
+ if (err)
+ return err;
+
+ return mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_STAG_VID, 0);
+}
+
void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
{
if (!priv->fs.vlan.filter_disabled)
@@ -260,7 +296,7 @@ void mlx5e_enable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = false;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
@@ -271,7 +307,7 @@ void mlx5e_disable_vlan_filter(struct mlx5e_priv *priv)
priv->fs.vlan.filter_disabled = true;
if (priv->netdev->flags & IFF_PROMISC)
return;
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
int mlx5e_vlan_rx_add_vid(struct net_device *dev, __always_unused __be16 proto,
@@ -308,7 +344,7 @@ static void mlx5e_add_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_add_any_vid_rules(priv);
}
static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
@@ -323,7 +359,7 @@ static void mlx5e_del_vlan_rules(struct mlx5e_priv *priv)
if (priv->fs.vlan.filter_disabled &&
!(priv->netdev->flags & IFF_PROMISC))
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID, 0);
+ mlx5e_del_any_vid_rules(priv);
}
#define mlx5e_for_each_hash_node(hn, tmp, hash, i) \
@@ -503,8 +539,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
if (enable_promisc) {
mlx5e_add_l2_flow_rule(priv, &ea->promisc, MLX5E_PROMISC);
if (!priv->fs.vlan.filter_disabled)
- mlx5e_add_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_add_any_vid_rules(priv);
}
if (enable_allmulti)
mlx5e_add_l2_flow_rule(priv, &ea->allmulti, MLX5E_ALLMULTI);
@@ -519,8 +554,7 @@ void mlx5e_set_rx_mode_work(struct work_struct *work)
mlx5e_del_l2_flow_rule(priv, &ea->allmulti);
if (disable_promisc) {
if (!priv->fs.vlan.filter_disabled)
- mlx5e_del_vlan_rule(priv, MLX5E_VLAN_RULE_TYPE_ANY_VID,
- 0);
+ mlx5e_del_any_vid_rules(priv);
mlx5e_del_l2_flow_rule(priv, &ea->promisc);
}
@@ -976,11 +1010,13 @@ err_destroy_flow_table:
return err;
}
-#define MLX5E_NUM_VLAN_GROUPS 2
+#define MLX5E_NUM_VLAN_GROUPS 3
#define MLX5E_VLAN_GROUP0_SIZE BIT(12)
#define MLX5E_VLAN_GROUP1_SIZE BIT(1)
+#define MLX5E_VLAN_GROUP2_SIZE BIT(0)
#define MLX5E_VLAN_TABLE_SIZE (MLX5E_VLAN_GROUP0_SIZE +\
- MLX5E_VLAN_GROUP1_SIZE)
+ MLX5E_VLAN_GROUP1_SIZE +\
+ MLX5E_VLAN_GROUP2_SIZE)
static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in,
int inlen)
@@ -991,7 +1027,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.first_vid);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP0_SIZE;
@@ -1003,7 +1039,7 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
memset(in, 0, inlen);
MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.cvlan_tag);
MLX5_SET_CFG(in, start_flow_index, ix);
ix += MLX5E_VLAN_GROUP1_SIZE;
MLX5_SET_CFG(in, end_flow_index, ix - 1);
@@ -1012,6 +1048,17 @@ static int __mlx5e_create_vlan_table_groups(struct mlx5e_flow_table *ft, u32 *in
goto err_destroy_groups;
ft->num_groups++;
+ memset(in, 0, inlen);
+ MLX5_SET_CFG(in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
+ MLX5_SET_TO_ONES(fte_match_param, mc, outer_headers.svlan_tag);
+ MLX5_SET_CFG(in, start_flow_index, ix);
+ ix += MLX5E_VLAN_GROUP2_SIZE;
+ MLX5_SET_CFG(in, end_flow_index, ix - 1);
+ ft->g[ft->num_groups] = mlx5_create_flow_group(ft->t, in);
+ if (IS_ERR(ft->g[ft->num_groups]))
+ goto err_destroy_groups;
+ ft->num_groups++;
+
return 0;
err_destroy_groups:
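The resulting VLAN steering table layout, sketched with the group sizes from the macros above (slot indices are illustrative flow-table positions):

#include <stdio.h>

#define GROUP0_SIZE (1 << 12) /* cvlan_tag + first_vid: one rule per VID */
#define GROUP1_SIZE (1 << 1)  /* cvlan_tag only: untagged + any-ctag rules */
#define GROUP2_SIZE (1 << 0)  /* svlan_tag only: the new any-stag rule */

int main(void)
{
	int ix = 0;

	printf("group0: slots [%d..%d]\n", ix, ix + GROUP0_SIZE - 1); ix += GROUP0_SIZE;
	printf("group1: slots [%d..%d]\n", ix, ix + GROUP1_SIZE - 1); ix += GROUP1_SIZE;
	printf("group2: slots [%d..%d]\n", ix, ix + GROUP2_SIZE - 1); ix += GROUP2_SIZE;
	printf("table size: %d\n", ix); /* 4096 + 2 + 1 = 4099 */
	return 0;
}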
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index d088eff..4b4323f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -237,9 +237,9 @@ static int set_flow_attrs(u32 *match_c, u32 *match_v,
if ((fs->flow_type & FLOW_EXT) &&
(fs->m_ext.vlan_tci & cpu_to_be16(VLAN_VID_MASK))) {
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
- vlan_tag, 1);
+ cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_c,
first_vid, 0xfff);
MLX5_SET(fte_match_set_lyr_2_4, outer_headers_v,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 2cc7742..e829143 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -31,6 +31,7 @@
*/
#include <net/tc_act/tc_gact.h>
+#include <linux/crash_dump.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
@@ -83,7 +84,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.rq_wq_type = rq_type;
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
+ priv->params.log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE_MPW :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
priv->params.mpwqe_log_stride_sz =
MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) ?
MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
@@ -92,7 +95,9 @@ static void mlx5e_set_rq_type_params(struct mlx5e_priv *priv, u8 rq_type)
priv->params.mpwqe_log_stride_sz;
break;
default: /* MLX5_WQ_TYPE_LINKED_LIST */
- priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
+ priv->params.log_rq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_RQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
}
priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
BIT(priv->params.log_rq_size));
@@ -268,6 +273,12 @@ static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ if (MLX5_CAP_PCAM_FEATURE(mdev, ppcnt_statistical_group)) {
+ out = pstats->phy_statistical_counters;
+ MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);
+ }
+
MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
out = pstats->per_prio_counters[prio];
@@ -291,11 +302,34 @@ static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
&qcnt->rx_out_of_buffer);
}
+static void mlx5e_update_pcie_counters(struct mlx5e_priv *priv)
+{
+ struct mlx5e_pcie_stats *pcie_stats = &priv->stats.pcie;
+ struct mlx5_core_dev *mdev = priv->mdev;
+ int sz = MLX5_ST_SZ_BYTES(mpcnt_reg);
+ void *out;
+ u32 *in;
+
+ if (!MLX5_CAP_MCAM_FEATURE(mdev, pcie_performance_group))
+ return;
+
+ in = mlx5_vzalloc(sz);
+ if (!in)
+ return;
+
+ out = pcie_stats->pcie_perf_counters;
+ MLX5_SET(mpcnt_reg, in, grp, MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP);
+ mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_MPCNT, 0, 0);
+
+ kvfree(in);
+}
+
void mlx5e_update_stats(struct mlx5e_priv *priv)
{
- mlx5e_update_q_counter(priv);
- mlx5e_update_vport_counters(priv);
+ mlx5e_update_pcie_counters(priv);
mlx5e_update_pport_counters(priv);
+ mlx5e_update_vport_counters(priv);
+ mlx5e_update_q_counter(priv);
mlx5e_update_sw_counters(priv);
}
@@ -317,6 +351,8 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
enum mlx5_dev_event event, unsigned long param)
{
struct mlx5e_priv *priv = vpriv;
+ struct ptp_clock_event ptp_event;
+ struct mlx5_eqe *eqe = NULL;
if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
return;
@@ -326,7 +362,15 @@ static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
case MLX5_DEV_EVENT_PORT_DOWN:
queue_work(priv->wq, &priv->update_carrier_work);
break;
-
+ case MLX5_DEV_EVENT_PPS:
+ eqe = (struct mlx5_eqe *)param;
+ ptp_event.type = PTP_CLOCK_EXTTS;
+ ptp_event.index = eqe->data.pps.pin;
+ ptp_event.timestamp =
+ timecounter_cyc2time(&priv->tstamp.clock,
+ be64_to_cpu(eqe->data.pps.time_stamp));
+ mlx5e_pps_event_handler(vpriv, &ptp_event);
+ break;
default:
break;
}
@@ -343,9 +387,6 @@ static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}
-#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
-
static inline int mlx5e_get_wqe_mtt_sz(void)
{
/* UMR copies MTTs in units of MLX5_UMR_MTT_ALIGNMENT bytes.
@@ -534,9 +575,13 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
goto err_rq_wq_destroy;
}
- rq->buff.map_dir = DMA_FROM_DEVICE;
- if (rq->xdp_prog)
+ if (rq->xdp_prog) {
rq->buff.map_dir = DMA_BIDIRECTIONAL;
+ rq->rx_headroom = XDP_PACKET_HEADROOM;
+ } else {
+ rq->buff.map_dir = DMA_FROM_DEVICE;
+ rq->rx_headroom = MLX5_RX_HEADROOM;
+ }
switch (priv->params.rq_wq_type) {
case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
@@ -586,7 +631,7 @@ static int mlx5e_create_rq(struct mlx5e_channel *c,
byte_count = rq->buff.wqe_sz;
/* calc the required page order */
- frag_sz = MLX5_RX_HEADROOM +
+ frag_sz = rq->rx_headroom +
byte_count /* packet data */ +
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
frag_sz = SKB_DATA_ALIGN(frag_sz);
@@ -1468,6 +1513,14 @@ static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
return err;
}
+static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev)
+{
+ return is_kdump_kernel() ?
+ MLX5E_MIN_NUM_CHANNELS :
+ min_t(int, mdev->priv.eq_table.num_comp_vectors,
+ MLX5E_MAX_NUM_CHANNELS);
+}
+
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
struct mlx5e_channel_param *cparam,
struct mlx5e_channel **cp)
@@ -2981,11 +3034,8 @@ static int mlx5e_set_vf_rate(struct net_device *dev, int vf, int min_tx_rate,
struct mlx5e_priv *priv = netdev_priv(dev);
struct mlx5_core_dev *mdev = priv->mdev;
- if (min_tx_rate)
- return -EOPNOTSUPP;
-
return mlx5_eswitch_set_vport_rate(mdev->priv.eswitch, vf + 1,
- max_tx_rate);
+ max_tx_rate, min_tx_rate);
}
static int mlx5_vport_link2ifla(u8 esw_link)
@@ -3153,11 +3203,6 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog)
bool reset, was_opened;
int i;
- if (prog && prog->xdp_adjust_head) {
- netdev_err(netdev, "Does not support bpf_xdp_adjust_head()\n");
- return -EOPNOTSUPP;
- }
-
mutex_lock(&priv->state_lock);
if ((netdev->features & NETIF_F_LRO) && prog) {
@@ -3426,22 +3471,6 @@ void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}
-static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
- u8 *min_inline_mode)
-{
- switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
- case MLX5_CAP_INLINE_MODE_L2:
- *min_inline_mode = MLX5_INLINE_MODE_L2;
- break;
- case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
- mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
- break;
- case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
- *min_inline_mode = MLX5_INLINE_MODE_NONE;
- break;
- }
-}
-
u32 mlx5e_choose_lro_timeout(struct mlx5_core_dev *mdev, u32 wanted_timeout)
{
int i;
@@ -3475,7 +3504,9 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.lro_timeout =
mlx5e_choose_lro_timeout(mdev, MLX5E_DEFAULT_LRO_TIMEOUT);
- priv->params.log_sq_size = MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
+ priv->params.log_sq_size = is_kdump_kernel() ?
+ MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE :
+ MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
/* set CQE compression */
priv->params.rx_cqe_compress_def = false;
@@ -3501,7 +3532,7 @@ static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
priv->params.tx_cq_moderation.pkts =
MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
- mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
+ mlx5_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
priv->params.num_tc = 1;
priv->params.rss_hfunc = ETH_RSS_HASH_XOR;
@@ -3669,14 +3700,8 @@ static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
- struct mlx5_core_dev *mdev = priv->mdev;
- struct mlx5_eswitch *esw = mdev->priv.eswitch;
-
mlx5e_vxlan_cleanup(priv);
- if (MLX5_CAP_GEN(mdev, vport_group_manager))
- mlx5_eswitch_unregister_vport_rep(esw, 0);
-
if (priv->xdp_prog)
bpf_prog_put(priv->xdp_prog);
}
@@ -3801,9 +3826,14 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
+ struct mlx5_core_dev *mdev = priv->mdev;
+ struct mlx5_eswitch *esw = mdev->priv.eswitch;
+
queue_work(priv->wq, &priv->set_rx_mode_work);
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ mlx5_eswitch_unregister_vport_rep(esw, 0);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(priv->mdev);
+ mlx5_lag_remove(mdev);
}
static const struct mlx5e_profile mlx5e_nic_profile = {
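A minimal sketch of the kdump-aware sizing pattern used throughout this file: under a crash-dump kernel memory is scarce, so minimum ring and channel sizes are chosen. is_kdump_kernel() is the real helper from <linux/crash_dump.h>; the stub and the default value below are assumptions for illustration only.

#include <stdbool.h>
#include <stdio.h>

#define LOG_SQ_SIZE_MIN 0x6 /* MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE */
#define LOG_SQ_SIZE_DEF 0xa /* assumed default, for illustration */

static bool is_kdump_kernel(void) { return false; } /* stub for the sketch */

int main(void)
{
	int log_sq_size = is_kdump_kernel() ? LOG_SQ_SIZE_MIN : LOG_SQ_SIZE_DEF;

	printf("SQ entries: %d\n", 1 << log_sq_size); /* 64 under kdump, 1024 otherwise */
	return 0;
}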
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
index 0e2fb3e..fd8dff6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c
@@ -33,6 +33,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/tcp.h>
+#include <linux/bpf_trace.h>
#include <net/busy_poll.h>
#include "en.h"
#include "en_tc.h"
@@ -155,17 +156,15 @@ static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
return mlx5e_decompress_cqes_cont(rq, cq, 1, budget_rem) - 1;
}
-void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
+void mlx5e_modify_rx_cqe_compression_locked(struct mlx5e_priv *priv, bool val)
{
bool was_opened;
if (!MLX5_CAP_GEN(priv->mdev, cqe_compression))
return;
- mutex_lock(&priv->state_lock);
-
if (MLX5E_GET_PFLAG(priv, MLX5E_PFLAG_RX_CQE_COMPRESS) == val)
- goto unlock;
+ return;
was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
if (was_opened)
@@ -176,8 +175,6 @@ void mlx5e_modify_rx_cqe_compression(struct mlx5e_priv *priv, bool val)
if (was_opened)
mlx5e_open_locked(priv->netdev);
-unlock:
- mutex_unlock(&priv->state_lock);
}
#define RQ_PAGE_SIZE(rq) ((1 << rq->buff.page_order) << PAGE_SHIFT)
@@ -193,6 +190,9 @@ static inline bool mlx5e_rx_cache_put(struct mlx5e_rq *rq,
return false;
}
+ if (unlikely(page_is_pfmemalloc(dma_info->page)))
+ return false;
+
cache->page_cache[cache->tail] = *dma_info;
cache->tail = tail_next;
return true;
@@ -264,7 +264,7 @@ int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe *wqe, u16 ix)
if (unlikely(mlx5e_page_alloc_mapped(rq, di)))
return -ENOMEM;
- wqe->data.addr = cpu_to_be64(di->addr + MLX5_RX_HEADROOM);
+ wqe->data.addr = cpu_to_be64(di->addr + rq->rx_headroom);
return 0;
}
@@ -644,10 +644,9 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_sq *sq)
mlx5e_tx_notify_hw(sq, &wqe->ctrl, 0);
}
-static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
+static inline bool mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5e_dma_info *di,
- unsigned int data_offset,
- int len)
+ const struct xdp_buff *xdp)
{
struct mlx5e_sq *sq = &rq->channel->xdp_sq;
struct mlx5_wq_cyc *wq = &sq->wq;
@@ -659,9 +658,16 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
struct mlx5_wqe_eth_seg *eseg = &wqe->eth;
struct mlx5_wqe_data_seg *dseg;
+ ptrdiff_t data_offset = xdp->data - xdp->data_hard_start;
dma_addr_t dma_addr = di->addr + data_offset + MLX5E_XDP_MIN_INLINE;
- unsigned int dma_len = len - MLX5E_XDP_MIN_INLINE;
- void *data = page_address(di->page) + data_offset;
+ unsigned int dma_len = xdp->data_end - xdp->data;
+
+ if (unlikely(dma_len < MLX5E_XDP_MIN_INLINE ||
+ MLX5E_SW2HW_MTU(rq->netdev->mtu) < dma_len)) {
+ rq->stats.xdp_drop++;
+ mlx5e_page_release(rq, di, true);
+ return false;
+ }
if (unlikely(!mlx5e_sq_has_room_for(sq, MLX5E_XDP_TX_WQEBBS))) {
if (sq->db.xdp.doorbell) {
@@ -671,16 +677,17 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
}
rq->stats.xdp_tx_full++;
mlx5e_page_release(rq, di, true);
- return;
+ return false;
}
+ dma_len -= MLX5E_XDP_MIN_INLINE;
dma_sync_single_for_device(sq->pdev, dma_addr, dma_len,
PCI_DMA_TODEVICE);
memset(wqe, 0, sizeof(*wqe));
/* copy the inline part */
- memcpy(eseg->inline_hdr_start, data, MLX5E_XDP_MIN_INLINE);
+ memcpy(eseg->inline_hdr_start, xdp->data, MLX5E_XDP_MIN_INLINE);
eseg->inline_hdr_sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE);
dseg = (struct mlx5_wqe_data_seg *)cseg + (MLX5E_XDP_TX_DS_COUNT - 1);
@@ -700,32 +707,39 @@ static inline void mlx5e_xmit_xdp_frame(struct mlx5e_rq *rq,
sq->db.xdp.doorbell = true;
rq->stats.xdp_tx++;
+ return true;
}
/* returns true if packet was consumed by xdp */
-static inline bool mlx5e_xdp_handle(struct mlx5e_rq *rq,
- const struct bpf_prog *prog,
- struct mlx5e_dma_info *di,
- void *data, u16 len)
+static inline int mlx5e_xdp_handle(struct mlx5e_rq *rq,
+ struct mlx5e_dma_info *di,
+ void *va, u16 *rx_headroom, u32 *len)
{
+ const struct bpf_prog *prog = READ_ONCE(rq->xdp_prog);
struct xdp_buff xdp;
u32 act;
if (!prog)
return false;
- xdp.data = data;
- xdp.data_end = xdp.data + len;
+ xdp.data = va + *rx_headroom;
+ xdp.data_end = xdp.data + *len;
+ xdp.data_hard_start = va;
+
act = bpf_prog_run_xdp(prog, &xdp);
switch (act) {
case XDP_PASS:
+ *rx_headroom = xdp.data - xdp.data_hard_start;
+ *len = xdp.data_end - xdp.data;
return false;
case XDP_TX:
- mlx5e_xmit_xdp_frame(rq, di, MLX5_RX_HEADROOM, len);
+ if (unlikely(!mlx5e_xmit_xdp_frame(rq, di, &xdp)))
+ trace_xdp_exception(rq->netdev, prog, act);
return true;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(rq->netdev, prog, act);
case XDP_DROP:
rq->stats.xdp_drop++;
mlx5e_page_release(rq, di, true);
@@ -740,15 +754,16 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
struct mlx5e_dma_info *di;
struct sk_buff *skb;
void *va, *data;
+ u16 rx_headroom = rq->rx_headroom;
bool consumed;
di = &rq->dma_info[wqe_counter];
va = page_address(di->page);
- data = va + MLX5_RX_HEADROOM;
+ data = va + rx_headroom;
dma_sync_single_range_for_cpu(rq->pdev,
di->addr,
- MLX5_RX_HEADROOM,
+ rx_headroom,
rq->buff.wqe_sz,
DMA_FROM_DEVICE);
prefetch(data);
@@ -760,8 +775,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
}
rcu_read_lock();
- consumed = mlx5e_xdp_handle(rq, READ_ONCE(rq->xdp_prog), di, data,
- cqe_bcnt);
+ consumed = mlx5e_xdp_handle(rq, di, va, &rx_headroom, &cqe_bcnt);
rcu_read_unlock();
if (consumed)
return NULL; /* page/packet was consumed by XDP */
@@ -777,7 +791,7 @@ struct sk_buff *skb_from_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
page_ref_inc(di->page);
mlx5e_page_release(rq, di, true);
- skb_reserve(skb, MLX5_RX_HEADROOM);
+ skb_reserve(skb, rx_headroom);
skb_put(skb, cqe_bcnt);
return skb;
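A sketch of the pointer bookkeeping behind the new bpf_xdp_adjust_head() support: the program may move xdp.data within the reserved headroom, so on XDP_PASS the driver re-derives both the headroom and the packet length. The struct and values below are simplified stand-ins for the kernel's struct xdp_buff.

#include <stdint.h>
#include <stdio.h>

struct xdp_buff_sketch { /* simplified stand-in for struct xdp_buff */
	void *data;
	void *data_end;
	void *data_hard_start;
};

int main(void)
{
	static char page[4096];
	uint16_t rx_headroom = 256; /* e.g. XDP_PACKET_HEADROOM */
	uint32_t len = 1000;

	struct xdp_buff_sketch xdp = {
		.data_hard_start = page,
		.data = page + rx_headroom,
		.data_end = page + rx_headroom + len,
	};

	/* pretend the program pushed a 14-byte header via bpf_xdp_adjust_head() */
	xdp.data = (char *)xdp.data - 14;

	/* the driver recomputes both values from the xdp_buff afterwards */
	rx_headroom = (uint16_t)((char *)xdp.data - (char *)xdp.data_hard_start);
	len = (uint32_t)((char *)xdp.data_end - (char *)xdp.data);
	printf("headroom=%u len=%u\n", rx_headroom, len); /* 242, 1014 */
	return 0;
}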
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
index 1fffe48..cbfac06 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx_am.c
@@ -109,7 +109,6 @@ static bool mlx5e_am_on_top(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_on_top: PARKING\n");
return true;
case MLX5E_AM_GOING_RIGHT:
return (am->steps_left > 1) && (am->steps_right == 1);
@@ -123,7 +122,6 @@ static void mlx5e_am_turn(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_turn: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
am->tune_state = MLX5E_AM_GOING_LEFT;
@@ -144,7 +142,6 @@ static int mlx5e_am_step(struct mlx5e_rx_am *am)
switch (am->tune_state) {
case MLX5E_AM_PARKING_ON_TOP:
case MLX5E_AM_PARKING_TIRED:
- WARN_ONCE(true, "mlx5e_am_step: PARKING\n");
break;
case MLX5E_AM_GOING_RIGHT:
if (am->profile_ix == (MLX5E_PARAMS_AM_NUM_PROFILES - 1))
@@ -282,10 +279,8 @@ static void mlx5e_am_calc_stats(struct mlx5e_rx_am_sample *start,
u32 delta_us = ktime_us_delta(end->time, start->time);
unsigned int npkts = end->pkt_ctr - start->pkt_ctr;
- if (!delta_us) {
- WARN_ONCE(true, "mlx5e_am_calc_stats: delta_us=0\n");
+ if (!delta_us)
return;
- }
curr_stats->ppms = (npkts * USEC_PER_MSEC) / delta_us;
curr_stats->epms = (MLX5E_AM_NEVENTS * USEC_PER_MSEC) / delta_us;
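A worked sketch of the rate computation in mlx5e_am_calc_stats(), whose zero-delta guard the patch keeps (now silently, without the WARN_ONCE). The event-window constant is an assumption for illustration:

#include <stdio.h>

#define USEC_PER_MSEC 1000L
#define AM_NEVENTS    64L /* assumed sample-window event count */

int main(void)
{
	long delta_us = 2000; /* window length in microseconds */
	long npkts = 3000;    /* packets seen in the window */

	if (!delta_us)        /* the guard the patch keeps, now warning-free */
		return 0;

	printf("ppms=%ld epms=%ld\n",
	       npkts * USEC_PER_MSEC / delta_us,       /* 1500 packets per ms */
	       AM_NEVENTS * USEC_PER_MSEC / delta_us); /* 32 events per ms */
	return 0;
}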
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
index ba5db1d..53e4992 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h
@@ -39,7 +39,7 @@
#define MLX5E_READ_CTR32_CPU(ptr, dsc, i) \
(*(u32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_READ_CTR32_BE(ptr, dsc, i) \
- be64_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
+ be32_to_cpu(*(__be32 *)((char *)ptr + dsc[i].offset))
#define MLX5E_DECLARE_STAT(type, fld) #fld, offsetof(type, fld)
#define MLX5E_DECLARE_RX_STAT(type, fld) "rx%d_"#fld, offsetof(type, fld)
@@ -201,6 +201,12 @@ static const struct counter_desc vport_stats_desc[] = {
#define PPORT_2819_GET(pstats, c) \
MLX5_GET64(ppcnt_reg, pstats->RFC_2819_counters, \
counter_set.eth_2819_cntrs_grp_data_layout.c##_high)
+#define PPORT_PHY_STATISTICAL_OFF(c) \
+ MLX5_BYTE_OFF(ppcnt_reg, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
+#define PPORT_PHY_STATISTICAL_GET(pstats, c) \
+ MLX5_GET64(ppcnt_reg, (pstats)->phy_statistical_counters, \
+ counter_set.phys_layer_statistical_cntrs.c##_high)
#define PPORT_PER_PRIO_OFF(c) \
MLX5_BYTE_OFF(ppcnt_reg, \
counter_set.eth_per_prio_grp_data_layout.c##_high)
@@ -215,6 +221,7 @@ struct mlx5e_pport_stats {
__be64 RFC_2819_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 per_prio_counters[NUM_PPORT_PRIO][MLX5_ST_SZ_QW(ppcnt_reg)];
__be64 phy_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
+ __be64 phy_statistical_counters[MLX5_ST_SZ_QW(ppcnt_reg)];
};
static const struct counter_desc pport_802_3_stats_desc[] = {
@@ -260,6 +267,11 @@ static const struct counter_desc pport_2819_stats_desc[] = {
{ "rx_8192_to_10239_bytes_phy", PPORT_2819_OFF(ether_stats_pkts8192to10239octets) },
};
+static const struct counter_desc pport_phy_statistical_stats_desc[] = {
+ { "rx_symbol_errors_phy", PPORT_PHY_STATISTICAL_OFF(phy_symbol_errors) },
+ { "rx_corrected_bits_phy", PPORT_PHY_STATISTICAL_OFF(phy_corrected_bits) },
+};
+
static const struct counter_desc pport_per_prio_traffic_stats_desc[] = {
{ "rx_prio%d_bytes", PPORT_PER_PRIO_OFF(rx_octets) },
{ "rx_prio%d_packets", PPORT_PER_PRIO_OFF(rx_frames) },
@@ -276,6 +288,21 @@ static const struct counter_desc pport_per_prio_pfc_stats_desc[] = {
{ "rx_%s_pause_transition", PPORT_PER_PRIO_OFF(rx_pause_transition) },
};
+#define PCIE_PERF_OFF(c) \
+ MLX5_BYTE_OFF(mpcnt_reg, counter_set.pcie_perf_cntrs_grp_data_layout.c)
+#define PCIE_PERF_GET(pcie_stats, c) \
+ MLX5_GET(mpcnt_reg, (pcie_stats)->pcie_perf_counters, \
+ counter_set.pcie_perf_cntrs_grp_data_layout.c)
+
+struct mlx5e_pcie_stats {
+ __be64 pcie_perf_counters[MLX5_ST_SZ_QW(mpcnt_reg)];
+};
+
+static const struct counter_desc pcie_perf_stats_desc[] = {
+ { "rx_pci_signal_integrity", PCIE_PERF_OFF(rx_errors) },
+ { "tx_pci_signal_integrity", PCIE_PERF_OFF(tx_errors) },
+};
+
struct mlx5e_rq_stats {
u64 packets;
u64 bytes;
@@ -360,15 +387,23 @@ static const struct counter_desc sq_stats_desc[] = {
#define NUM_PPORT_802_3_COUNTERS ARRAY_SIZE(pport_802_3_stats_desc)
#define NUM_PPORT_2863_COUNTERS ARRAY_SIZE(pport_2863_stats_desc)
#define NUM_PPORT_2819_COUNTERS ARRAY_SIZE(pport_2819_stats_desc)
+#define NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) \
+ (ARRAY_SIZE(pport_phy_statistical_stats_desc) * \
+ MLX5_CAP_PCAM_FEATURE((priv)->mdev, ppcnt_statistical_group))
+#define NUM_PCIE_PERF_COUNTERS(priv) \
+ (ARRAY_SIZE(pcie_perf_stats_desc) * \
+ MLX5_CAP_MCAM_FEATURE((priv)->mdev, pcie_performance_group))
#define NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS \
ARRAY_SIZE(pport_per_prio_traffic_stats_desc)
#define NUM_PPORT_PER_PRIO_PFC_COUNTERS \
ARRAY_SIZE(pport_per_prio_pfc_stats_desc)
-#define NUM_PPORT_COUNTERS (NUM_PPORT_802_3_COUNTERS + \
+#define NUM_PPORT_COUNTERS(priv) (NUM_PPORT_802_3_COUNTERS + \
NUM_PPORT_2863_COUNTERS + \
NUM_PPORT_2819_COUNTERS + \
+ NUM_PPORT_PHY_STATISTICAL_COUNTERS(priv) + \
NUM_PPORT_PER_PRIO_TRAFFIC_COUNTERS * \
NUM_PPORT_PRIO)
+#define NUM_PCIE_COUNTERS(priv) NUM_PCIE_PERF_COUNTERS(priv)
#define NUM_RQ_STATS ARRAY_SIZE(rq_stats_desc)
#define NUM_SQ_STATS ARRAY_SIZE(sq_stats_desc)
@@ -378,6 +413,7 @@ struct mlx5e_stats {
struct mlx5e_vport_stats vport;
struct mlx5e_pport_stats pport;
struct rtnl_link_stats64 vf_vport;
+ struct mlx5e_pcie_stats pcie;
};
static const struct counter_desc mlx5e_pme_status_desc[] = {
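A sketch of why the MLX5E_READ_CTR32_BE change above is a real fix: byte-swapping a __be32 with be64_to_cpu() reads eight bytes and swaps the wrong width. ntohl() stands in for be32_to_cpu() here:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* a 32-bit big-endian counter holding 5, followed by unrelated bytes */
	uint8_t buf[8] = { 0x00, 0x00, 0x00, 0x05, 0xde, 0xad, 0xbe, 0xef };
	uint32_t raw;

	memcpy(&raw, buf, sizeof(raw));
	/* be32_to_cpu (ntohl here) swaps exactly 4 bytes: value is 5.
	 * be64_to_cpu would have consumed all 8 bytes and produced garbage. */
	printf("counter = %u\n", ntohl(raw));
	return 0;
}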
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
index f8829b5..640f10f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c
@@ -161,15 +161,21 @@ static void mlx5e_detach_encap(struct mlx5e_priv *priv,
}
}
+/* We also get here when setting the rule to the FW failed, etc. In that case
+ * the flow rule itself might not exist, but offloading state related to the
+ * actions should still be cleaned up.
+ */
static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
struct mlx5e_tc_flow *flow)
{
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
struct mlx5_fc *counter = NULL;
- counter = mlx5_flow_rule_counter(flow->rule);
-
- mlx5_del_flow_rules(flow->rule);
+ if (!IS_ERR(flow->rule)) {
+ counter = mlx5_flow_rule_counter(flow->rule);
+ mlx5_del_flow_rules(flow->rule);
+ mlx5_fc_destroy(priv->mdev, counter);
+ }
if (esw && esw->mode == SRIOV_OFFLOADS) {
mlx5_eswitch_del_vlan_action(esw, flow->attr);
@@ -177,8 +183,6 @@ static void mlx5e_tc_del_flow(struct mlx5e_priv *priv,
mlx5e_detach_encap(priv, flow);
}
- mlx5_fc_destroy(priv->mdev, counter);
-
if (!mlx5e_tc_num_filters(priv) && (priv->fs.tc.t)) {
mlx5_destroy_flow_table(priv->fs.tc.t);
priv->fs.tc.t = NULL;
@@ -225,6 +229,11 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
void *headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value,
outer_headers);
+ struct flow_dissector_key_control *enc_control =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_CONTROL,
+ f->key);
+
if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
struct flow_dissector_key_ports *key =
skb_flow_dissector_target(f->dissector,
@@ -237,28 +246,34 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
/* Full udp dst port must be given */
if (memchr_inv(&mask->dst, 0xff, sizeof(mask->dst)))
- return -EOPNOTSUPP;
-
- /* udp src port isn't supported */
- if (memchr_inv(&mask->src, 0, sizeof(mask->src)))
- return -EOPNOTSUPP;
+ goto vxlan_match_offload_err;
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap))
parse_vxlan_attr(spec, f);
- else
+ else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->dst));
return -EOPNOTSUPP;
+ }
MLX5_SET(fte_match_set_lyr_2_4, headers_c,
udp_dport, ntohs(mask->dst));
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
udp_dport, ntohs(key->dst));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c,
+ udp_sport, ntohs(mask->src));
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v,
+ udp_sport, ntohs(key->src));
} else { /* udp dst port must be given */
- return -EOPNOTSUPP;
+vxlan_match_offload_err:
+ netdev_warn(priv->netdev,
+ "IP tunnel decap offload supported only for vxlan, must set UDP dport\n");
+ return -EOPNOTSUPP;
}
- if (dissector_uses_key(f->dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
+ if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
struct flow_dissector_key_ipv4_addrs *key =
skb_flow_dissector_target(f->dissector,
FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS,
@@ -280,10 +295,36 @@ static int parse_tunnel_attr(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_v,
dst_ipv4_dst_ipv6.ipv4_layout.ipv4,
ntohl(key->dst));
- }
- MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IP);
+ } else if (enc_control->addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
+ struct flow_dissector_key_ipv6_addrs *key =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ f->key);
+ struct flow_dissector_key_ipv6_addrs *mask =
+ skb_flow_dissector_target(f->dissector,
+ FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS,
+ f->mask);
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &mask->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ src_ipv4_src_ipv6.ipv6_layout.ipv6),
+ &key->src, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_c,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &mask->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+ memcpy(MLX5_ADDR_OF(fte_match_set_lyr_2_4, headers_v,
+ dst_ipv4_dst_ipv6.ipv6_layout.ipv6),
+ &key->dst, MLX5_FLD_SZ_BYTES(ipv6_layout, ipv6));
+
+ MLX5_SET_TO_ONES(fte_match_set_lyr_2_4, headers_c, ethertype);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, ethertype, ETH_P_IPV6);
+ }
/* Enforce DMAC when offloading incoming tunneled flows.
* Flow counters require a match on the DMAC.
@@ -343,6 +384,7 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
f->key);
switch (key->addr_type) {
case FLOW_DISSECTOR_KEY_IPV4_ADDRS:
+ case FLOW_DISSECTOR_KEY_IPV6_ADDRS:
if (parse_tunnel_attr(priv, spec, f))
return -EOPNOTSUPP;
break;
@@ -375,6 +417,10 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
MLX5_SET(fte_match_set_lyr_2_4, headers_c, frag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, frag,
key->flags & FLOW_DIS_IS_FRAGMENT);
+
+ /* the HW doesn't need L3 inline to match on frag=no */
+ if (key->flags & FLOW_DIS_IS_FRAGMENT)
+ *min_inline = MLX5_INLINE_MODE_IP;
}
}
@@ -438,8 +484,8 @@ static int __parse_cls_flower(struct mlx5e_priv *priv,
FLOW_DISSECTOR_KEY_VLAN,
f->mask);
if (mask->vlan_id || mask->vlan_priority) {
- MLX5_SET(fte_match_set_lyr_2_4, headers_c, vlan_tag, 1);
- MLX5_SET(fte_match_set_lyr_2_4, headers_v, vlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_c, cvlan_tag, 1);
+ MLX5_SET(fte_match_set_lyr_2_4, headers_v, cvlan_tag, 1);
MLX5_SET(fte_match_set_lyr_2_4, headers_c, first_vid, mask->vlan_id);
MLX5_SET(fte_match_set_lyr_2_4, headers_v, first_vid, key->vlan_id);
@@ -622,15 +668,15 @@ static int parse_tc_nic_actions(struct mlx5e_priv *priv, struct tcf_exts *exts,
return 0;
}
-static inline int cmp_encap_info(struct mlx5_encap_info *a,
- struct mlx5_encap_info *b)
+static inline int cmp_encap_info(struct ip_tunnel_key *a,
+ struct ip_tunnel_key *b)
{
return memcmp(a, b, sizeof(*a));
}
-static inline int hash_encap_info(struct mlx5_encap_info *info)
+static inline int hash_encap_info(struct ip_tunnel_key *key)
{
- return jhash(info, sizeof(*info), 0);
+ return jhash(key, sizeof(*key), 0);
}
static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
@@ -638,44 +684,81 @@ static int mlx5e_route_lookup_ipv4(struct mlx5e_priv *priv,
struct net_device **out_dev,
struct flowi4 *fl4,
struct neighbour **out_n,
- __be32 *saddr,
int *out_ttl)
{
struct rtable *rt;
struct neighbour *n = NULL;
- int ttl;
#if IS_ENABLED(CONFIG_INET)
+ int ret;
+
rt = ip_route_output_key(dev_net(mirred_dev), fl4);
- if (IS_ERR(rt)) {
- pr_warn("%s: no route to %pI4\n", __func__, &fl4->daddr);
- return -EOPNOTSUPP;
- }
+ ret = PTR_ERR_OR_ZERO(rt);
+ if (ret)
+ return ret;
#else
return -EOPNOTSUPP;
#endif
if (!switchdev_port_same_parent_id(priv->netdev, rt->dst.dev)) {
- pr_warn("%s: Can't offload the flow, netdevices aren't on the same HW e-switch\n",
- __func__);
+ pr_warn("%s: can't offload, devices not on same HW e-switch\n", __func__);
ip_rt_put(rt);
return -EOPNOTSUPP;
}
- ttl = ip4_dst_hoplimit(&rt->dst);
+ *out_ttl = ip4_dst_hoplimit(&rt->dst);
n = dst_neigh_lookup(&rt->dst, &fl4->daddr);
ip_rt_put(rt);
if (!n)
return -ENOMEM;
*out_n = n;
- *saddr = fl4->saddr;
- *out_ttl = ttl;
*out_dev = rt->dst.dev;
return 0;
}
+static int mlx5e_route_lookup_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct net_device **out_dev,
+ struct flowi6 *fl6,
+ struct neighbour **out_n,
+ int *out_ttl)
+{
+ struct neighbour *n = NULL;
+ struct dst_entry *dst;
+
+#if IS_ENABLED(CONFIG_INET) && IS_ENABLED(CONFIG_IPV6)
+ struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
+ int ret;
+
+ dst = ip6_route_output(dev_net(mirred_dev), NULL, fl6);
+ if (dst->error) {
+ ret = dst->error;
+ dst_release(dst);
+ return ret;
+ }
+
+ *out_ttl = ip6_dst_hoplimit(dst);
+
+ /* if the egress device isn't on the same HW e-switch, we use the uplink */
+ if (!switchdev_port_same_parent_id(priv->netdev, dst->dev))
+ *out_dev = mlx5_eswitch_get_uplink_netdev(esw);
+ else
+ *out_dev = dst->dev;
+#else
+ return -EOPNOTSUPP;
+#endif
+
+ n = dst_neigh_lookup(dst, &fl6->daddr);
+ dst_release(dst);
+ if (!n)
+ return -ENOMEM;
+
+ *out_n = n;
+ return 0;
+}
+
static int gen_vxlan_header_ipv4(struct net_device *out_dev,
char buf[],
unsigned char h_dest[ETH_ALEN],
@@ -712,19 +795,52 @@ static int gen_vxlan_header_ipv4(struct net_device *out_dev,
return encap_size;
}
+static int gen_vxlan_header_ipv6(struct net_device *out_dev,
+ char buf[],
+ unsigned char h_dest[ETH_ALEN],
+ int ttl,
+ struct in6_addr *daddr,
+ struct in6_addr *saddr,
+ __be16 udp_dst_port,
+ __be32 vx_vni)
+{
+ int encap_size = VXLAN_HLEN + sizeof(struct ipv6hdr) + ETH_HLEN;
+ struct ethhdr *eth = (struct ethhdr *)buf;
+ struct ipv6hdr *ip6h = (struct ipv6hdr *)((char *)eth + sizeof(struct ethhdr));
+ struct udphdr *udp = (struct udphdr *)((char *)ip6h + sizeof(struct ipv6hdr));
+ struct vxlanhdr *vxh = (struct vxlanhdr *)((char *)udp + sizeof(struct udphdr));
+
+ memset(buf, 0, encap_size);
+
+ ether_addr_copy(eth->h_dest, h_dest);
+ ether_addr_copy(eth->h_source, out_dev->dev_addr);
+ eth->h_proto = htons(ETH_P_IPV6);
+
+ ip6_flow_hdr(ip6h, 0, 0);
+ /* the HW fills in the IPv6 payload length */
+ ip6h->nexthdr = IPPROTO_UDP;
+ ip6h->hop_limit = ttl;
+ ip6h->daddr = *daddr;
+ ip6h->saddr = *saddr;
+
+ udp->dest = udp_dst_port;
+ vxh->vx_flags = VXLAN_HF_VNI;
+ vxh->vx_vni = vxlan_vni_field(vx_vni);
+
+ return encap_size;
+}
+
static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
struct net_device *mirred_dev,
struct mlx5_encap_entry *e,
struct net_device **out_dev)
{
int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ int encap_size, ttl, err;
+ struct neighbour *n = NULL;
struct flowi4 fl4 = {};
- struct neighbour *n;
char *encap_header;
- int encap_size;
- __be32 saddr;
- int ttl;
- int err;
encap_header = kzalloc(max_encap_size, GFP_KERNEL);
if (!encap_header)
@@ -733,36 +849,108 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
fl4.flowi4_proto = IPPROTO_UDP;
- fl4.fl4_dport = e->tun_info.tp_dst;
+ fl4.fl4_dport = tun_key->tp_dst;
break;
default:
err = -EOPNOTSUPP;
goto out;
}
- fl4.daddr = e->tun_info.daddr;
+ fl4.flowi4_tos = tun_key->tos;
+ fl4.daddr = tun_key->u.ipv4.dst;
+ fl4.saddr = tun_key->u.ipv4.src;
err = mlx5e_route_lookup_ipv4(priv, mirred_dev, out_dev,
- &fl4, &n, &saddr, &ttl);
+ &fl4, &n, &ttl);
if (err)
goto out;
+ if (!(n->nud_state & NUD_VALID)) {
+ pr_warn("%s: can't offload, neighbour to %pI4 invalid\n", __func__, &fl4.daddr);
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
e->n = n;
e->out_dev = *out_dev;
+ neigh_ha_snapshot(e->h_dest, n, *out_dev);
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+ e->h_dest, ttl,
+ fl4.daddr,
+ fl4.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
+ encap_size, encap_header, &e->encap_id);
+out:
+ if (err && n)
+ neigh_release(n);
+ kfree(encap_header);
+ return err;
+}
+
+static int mlx5e_create_encap_header_ipv6(struct mlx5e_priv *priv,
+ struct net_device *mirred_dev,
+ struct mlx5_encap_entry *e,
+ struct net_device **out_dev)
+
+{
+ int max_encap_size = MLX5_CAP_ESW(priv->mdev, max_encap_header_size);
+ struct ip_tunnel_key *tun_key = &e->tun_info.key;
+ int encap_size, err, ttl = 0;
+ struct neighbour *n = NULL;
+ struct flowi6 fl6 = {};
+ char *encap_header;
+
+ encap_header = kzalloc(max_encap_size, GFP_KERNEL);
+ if (!encap_header)
+ return -ENOMEM;
+
+ switch (e->tunnel_type) {
+ case MLX5_HEADER_TYPE_VXLAN:
+ fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = tun_key->tp_dst;
+ break;
+ default:
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
+ fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tun_key->tos), tun_key->label);
+ fl6.daddr = tun_key->u.ipv6.dst;
+ fl6.saddr = tun_key->u.ipv6.src;
+
+ err = mlx5e_route_lookup_ipv6(priv, mirred_dev, out_dev,
+ &fl6, &n, &ttl);
+ if (err)
+ goto out;
+
if (!(n->nud_state & NUD_VALID)) {
- err = -ENOTSUPP;
+ pr_warn("%s: can't offload, neighbour to %pI6 invalid\n", __func__, &fl6.daddr);
+ err = -EOPNOTSUPP;
goto out;
}
+ e->n = n;
+ e->out_dev = *out_dev;
+
neigh_ha_snapshot(e->h_dest, n, *out_dev);
switch (e->tunnel_type) {
case MLX5_HEADER_TYPE_VXLAN:
- encap_size = gen_vxlan_header_ipv4(*out_dev, encap_header,
+ encap_size = gen_vxlan_header_ipv6(*out_dev, encap_header,
e->h_dest, ttl,
- e->tun_info.daddr,
- saddr, e->tun_info.tp_dst,
- e->tun_info.tun_id);
+ &fl6.daddr,
+ &fl6.saddr, tun_key->tp_dst,
+ tunnel_id_to_key32(tun_key->tun_id));
break;
default:
err = -EOPNOTSUPP;
@@ -772,6 +960,8 @@ static int mlx5e_create_encap_header_ipv4(struct mlx5e_priv *priv,
err = mlx5_encap_alloc(priv->mdev, e->tunnel_type,
encap_size, encap_header, &e->encap_id);
out:
+ if (err && n)
+ neigh_release(n);
kfree(encap_header);
return err;
}
@@ -784,40 +974,38 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
unsigned short family = ip_tunnel_info_af(tun_info);
struct ip_tunnel_key *key = &tun_info->key;
- struct mlx5_encap_info info;
struct mlx5_encap_entry *e;
struct net_device *out_dev;
+ int tunnel_type, err = -EOPNOTSUPP;
uintptr_t hash_key;
bool found = false;
- int tunnel_type;
- int err;
- /* udp dst port must be given */
+ /* udp dst port must be set */
if (!memchr_inv(&key->tp_dst, 0, sizeof(key->tp_dst)))
+ goto vxlan_encap_offload_err;
+
+ /* setting udp src port isn't supported */
+ if (memchr_inv(&key->tp_src, 0, sizeof(key->tp_src))) {
+vxlan_encap_offload_err:
+ netdev_warn(priv->netdev,
+ "must set udp dst port and not set udp src port\n");
return -EOPNOTSUPP;
+ }
if (mlx5e_vxlan_lookup_port(priv, be16_to_cpu(key->tp_dst)) &&
MLX5_CAP_ESW(priv->mdev, vxlan_encap_decap)) {
- info.tp_dst = key->tp_dst;
- info.tun_id = tunnel_id_to_key32(key->tun_id);
tunnel_type = MLX5_HEADER_TYPE_VXLAN;
} else {
+ netdev_warn(priv->netdev,
+ "%d isn't an offloaded vxlan udp dport\n", be16_to_cpu(key->tp_dst));
return -EOPNOTSUPP;
}
- switch (family) {
- case AF_INET:
- info.daddr = key->u.ipv4.dst;
- break;
- default:
- return -EOPNOTSUPP;
- }
-
- hash_key = hash_encap_info(&info);
+ hash_key = hash_encap_info(key);
hash_for_each_possible_rcu(esw->offloads.encap_tbl, e,
encap_hlist, hash_key) {
- if (!cmp_encap_info(&e->tun_info, &info)) {
+ if (!cmp_encap_info(&e->tun_info.key, key)) {
found = true;
break;
}
@@ -832,11 +1020,15 @@ static int mlx5e_attach_encap(struct mlx5e_priv *priv,
if (!e)
return -ENOMEM;
- e->tun_info = info;
+ e->tun_info = *tun_info;
e->tunnel_type = tunnel_type;
INIT_LIST_HEAD(&e->flows);
- err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ if (family == AF_INET)
+ err = mlx5e_create_encap_header_ipv4(priv, mirred_dev, e, &out_dev);
+ else if (family == AF_INET6)
+ err = mlx5e_create_encap_header_ipv6(priv, mirred_dev, e, &out_dev);
+
if (err)
goto out_err;
@@ -986,7 +1178,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
if (IS_ERR(flow->rule)) {
err = PTR_ERR(flow->rule);
- goto err_free;
+ goto err_del_rule;
}
err = rhashtable_insert_fast(&tc->ht, &flow->node,
@@ -997,7 +1189,7 @@ int mlx5e_configure_flower(struct mlx5e_priv *priv, __be16 protocol,
goto out;
err_del_rule:
- mlx5_del_flow_rules(flow->rule);
+ mlx5e_tc_del_flow(priv, flow);
err_free:
kfree(flow);
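A sketch of the lookup idiom behind hash_encap_info()/cmp_encap_info() after the switch to struct ip_tunnel_key: hash over the whole key to pick a bucket, then confirm the candidate with memcmp. The key layout and the FNV hash below are stand-ins (the kernel uses jhash):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tun_key_sketch { uint64_t tun_id; uint32_t dst; uint16_t tp_dst; };

static uint32_t hash_bytes(const void *p, size_t n)
{
	const uint8_t *b = p;
	uint32_t h = 2166136261u; /* FNV-1a, standing in for jhash */

	while (n--)
		h = (h ^ *b++) * 16777619u;
	return h;
}

int main(void)
{
	struct tun_key_sketch a, b;

	memset(&a, 0, sizeof(a)); /* zero padding so whole-struct memcmp is valid */
	a.tun_id = 42;
	a.dst = 0x0a000001; /* 10.0.0.1 */
	a.tp_dst = 4789;    /* vxlan */
	memcpy(&b, &a, sizeof(a));

	/* the hash narrows the bucket; memcmp gives the exact match */
	printf("bucket match: %d, exact match: %d\n",
	       hash_bytes(&a, sizeof(a)) == hash_bytes(&b, sizeof(b)),
	       memcmp(&a, &b, sizeof(a)) == 0);
	return 0;
}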
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index 5130d65..ea5d8d3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -154,6 +154,8 @@ static const char *eqe_type_str(u8 type)
return "MLX5_EVENT_TYPE_PAGE_REQUEST";
case MLX5_EVENT_TYPE_PAGE_FAULT:
return "MLX5_EVENT_TYPE_PAGE_FAULT";
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ return "MLX5_EVENT_TYPE_PPS_EVENT";
default:
return "Unrecognized event";
}
@@ -470,6 +472,10 @@ static irqreturn_t mlx5_eq_int(int irq, void *eq_ptr)
mlx5_port_module_event(dev, eqe);
break;
+ case MLX5_EVENT_TYPE_PPS_EVENT:
+ if (dev->event)
+ dev->event(dev, MLX5_DEV_EVENT_PPS, (unsigned long)eqe);
+ break;
default:
mlx5_core_warn(dev, "Unhandled event 0x%x on EQ 0x%x\n",
eqe->type, eq->eqn);
@@ -684,6 +690,9 @@ int mlx5_start_eqs(struct mlx5_core_dev *dev)
else
mlx5_core_dbg(dev, "port_module_event is not set\n");
+ if (MLX5_CAP_GEN(dev, pps))
+ async_event_mask |= (1ull << MLX5_EVENT_TYPE_PPS_EVENT);
+
err = mlx5_create_map_eq(dev, &table->cmd_eq, MLX5_EQ_VEC_CMD,
MLX5_NUM_CMD_EQE, 1ull << MLX5_EVENT_TYPE_CMD,
"mlx5_cmd_eq", MLX5_EQ_TYPE_ASYNC);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index f14d9c9..efa1a7a 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -979,7 +979,7 @@ static int esw_vport_enable_egress_acl(struct mlx5_eswitch *esw,
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.first_vid);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 0);
@@ -1098,7 +1098,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
match_criteria = MLX5_ADDR_OF(create_flow_group_in, flow_group_in, match_criteria);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_47_16);
MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.smac_15_0);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 0);
@@ -1115,7 +1115,7 @@ static int esw_vport_enable_ingress_acl(struct mlx5_eswitch *esw,
memset(flow_group_in, 0, inlen);
MLX5_SET(create_flow_group_in, flow_group_in, match_criteria_enable, MLX5_MATCH_OUTER_HEADERS);
- MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, match_criteria, outer_headers.cvlan_tag);
MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, 1);
MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, 1);
@@ -1254,7 +1254,7 @@ static int esw_vport_ingress_config(struct mlx5_eswitch *esw,
}
if (vport->info.vlan || vport->info.qos)
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
if (vport->info.spoofchk) {
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.smac_47_16);
@@ -1335,8 +1335,8 @@ static int esw_vport_egress_config(struct mlx5_eswitch *esw,
}
/* Allowed vlan rule */
- MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.vlan_tag);
- MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.vlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.cvlan_tag);
+ MLX5_SET_TO_ONES(fte_match_param, spec->match_value, outer_headers.cvlan_tag);
MLX5_SET_TO_ONES(fte_match_param, spec->match_criteria, outer_headers.first_vid);
MLX5_SET(fte_match_param, spec->match_value, outer_headers.first_vid, vport->info.vlan);
@@ -1415,7 +1415,7 @@ static void esw_destroy_tsar(struct mlx5_eswitch *esw)
}
static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
- u32 initial_max_rate)
+ u32 initial_max_rate, u32 initial_bw_share)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1439,6 +1439,7 @@ static int esw_vport_enable_qos(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
initial_max_rate);
+ MLX5_SET(scheduling_context, &sched_ctx, bw_share, initial_bw_share);
err = mlx5_create_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1473,7 +1474,7 @@ static void esw_vport_disable_qos(struct mlx5_eswitch *esw, int vport_num)
}
static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
- u32 max_rate)
+ u32 max_rate, u32 bw_share)
{
u32 sched_ctx[MLX5_ST_SZ_DW(scheduling_context)] = {0};
struct mlx5_vport *vport = &esw->vports[vport_num];
@@ -1497,7 +1498,9 @@ static int esw_vport_qos_config(struct mlx5_eswitch *esw, int vport_num,
esw->qos.root_tsar_id);
MLX5_SET(scheduling_context, &sched_ctx, max_average_bw,
max_rate);
+ MLX5_SET(scheduling_context, &sched_ctx, bw_share, bw_share);
bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_MAX_AVERAGE_BW;
+ bitmask |= MODIFY_SCHEDULING_ELEMENT_IN_MODIFY_BITMASK_BW_SHARE;
err = mlx5_modify_scheduling_element_cmd(dev,
SCHEDULING_HIERARCHY_E_SWITCH,
@@ -1563,7 +1566,8 @@ static void esw_enable_vport(struct mlx5_eswitch *esw, int vport_num,
esw_apply_vport_conf(esw, vport);
/* Attach vport to the eswitch rate limiter */
- if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate))
+ if (esw_vport_enable_qos(esw, vport_num, vport->info.max_rate,
+ vport->qos.bw_share))
esw_warn(esw->dev, "Failed to attach vport %d to eswitch rate limiter", vport_num);
/* Sync with current vport context */
@@ -1952,6 +1956,7 @@ int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
ivi->qos = evport->info.qos;
ivi->spoofchk = evport->info.spoofchk;
ivi->trusted = evport->info.trusted;
+ ivi->min_tx_rate = evport->info.min_rate;
ivi->max_tx_rate = evport->info.max_rate;
mutex_unlock(&esw->state_lock);
@@ -2046,23 +2051,103 @@ int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
return 0;
}
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
- int vport, u32 max_rate)
+static u32 calculate_vports_min_rate_divider(struct mlx5_eswitch *esw)
{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
struct mlx5_vport *evport;
+ u32 max_guarantee = 0;
+ int i;
+
+ for (i = 0; i < esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled || evport->info.min_rate < max_guarantee)
+ continue;
+ max_guarantee = evport->info.min_rate;
+ }
+
+ return max_t(u32, max_guarantee / fw_max_bw_share, 1);
+}
+
+static int normalize_vports_min_rate(struct mlx5_eswitch *esw, u32 divider)
+{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ struct mlx5_vport *evport;
+ u32 vport_max_rate;
+ u32 vport_min_rate;
+ u32 bw_share;
+ int err;
+ int i;
+
+ for (i = 0; i < esw->total_vports; i++) {
+ evport = &esw->vports[i];
+ if (!evport->enabled)
+ continue;
+ vport_min_rate = evport->info.min_rate;
+ vport_max_rate = evport->info.max_rate;
+ bw_share = MLX5_MIN_BW_SHARE;
+
+ if (vport_min_rate)
+ bw_share = MLX5_RATE_TO_BW_SHARE(vport_min_rate,
+ divider,
+ fw_max_bw_share);
+
+ if (bw_share == evport->qos.bw_share)
+ continue;
+
+ err = esw_vport_qos_config(esw, i, vport_max_rate,
+ bw_share);
+ if (!err)
+ evport->qos.bw_share = bw_share;
+ else
+ return err;
+ }
+
+ return 0;
+}
+
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+ u32 max_rate, u32 min_rate)
+{
+ u32 fw_max_bw_share = MLX5_CAP_QOS(esw->dev, max_tsar_bw_share);
+ bool min_rate_supported = MLX5_CAP_QOS(esw->dev, esw_bw_share) &&
+ fw_max_bw_share >= MLX5_MIN_BW_SHARE;
+ bool max_rate_supported = MLX5_CAP_QOS(esw->dev, esw_rate_limit);
+ struct mlx5_vport *evport;
+ u32 previous_min_rate;
+ u32 divider;
int err = 0;
if (!ESW_ALLOWED(esw))
return -EPERM;
if (!LEGAL_VPORT(esw, vport))
return -EINVAL;
+ if ((min_rate && !min_rate_supported) || (max_rate && !max_rate_supported))
+ return -EOPNOTSUPP;
mutex_lock(&esw->state_lock);
evport = &esw->vports[vport];
- err = esw_vport_qos_config(esw, vport, max_rate);
+
+ if (min_rate == evport->info.min_rate)
+ goto set_max_rate;
+
+ previous_min_rate = evport->info.min_rate;
+ evport->info.min_rate = min_rate;
+ divider = calculate_vports_min_rate_divider(esw);
+ err = normalize_vports_min_rate(esw, divider);
+ if (err) {
+ evport->info.min_rate = previous_min_rate;
+ goto unlock;
+ }
+
+set_max_rate:
+ if (max_rate == evport->info.max_rate)
+ goto unlock;
+
+ err = esw_vport_qos_config(esw, vport, max_rate, evport->qos.bw_share);
if (!err)
evport->info.max_rate = max_rate;
+unlock:
mutex_unlock(&esw->state_lock);
return err;
}
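To make the divider arithmetic concrete, a worked example with illustrative numbers (not taken from the patch):

	/* fw_max_bw_share = 100; enabled vport guarantees (min_rate):
	 *   vport 0: 0, vport 1: 2000, vport 2: 5000
	 *
	 * divider = max(5000 / 100, 1) = 50
	 * vport 0: bw_share = MLX5_MIN_BW_SHARE           = 1
	 * vport 1: bw_share = min(max(2000 / 50, 1), 100) = 40
	 * vport 2: bw_share = min(max(5000 / 50, 1), 100) = 100
	 *
	 * i.e. the largest guarantee maps to the firmware's full scale,
	 * and the remaining guarantees are scaled proportionally.
	 */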
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
index 8661dd3..5b78883 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.h
@@ -36,6 +36,7 @@
#include <linux/if_ether.h>
#include <linux/if_link.h>
#include <net/devlink.h>
+#include <net/ip_tunnels.h>
#include <linux/mlx5/device.h>
#define MLX5_MAX_UC_PER_VPORT(dev) \
@@ -49,6 +50,11 @@
#define FDB_UPLINK_VPORT 0xffff
+#define MLX5_MIN_BW_SHARE 1
+
+#define MLX5_RATE_TO_BW_SHARE(rate, divider, limit) \
+ min_t(u32, max_t(u32, (rate) / (divider), MLX5_MIN_BW_SHARE), limit)
+
/* L2 -mac address based- hash helpers */
struct l2addr_node {
struct hlist_node hlist;
@@ -115,6 +121,7 @@ struct mlx5_vport_info {
u8 qos;
u64 node_guid;
int link_state;
+ u32 min_rate;
u32 max_rate;
bool spoofchk;
bool trusted;
@@ -137,6 +144,7 @@ struct mlx5_vport {
struct {
bool enabled;
u32 esw_tsar_ix;
+ u32 bw_share;
} qos;
bool enabled;
@@ -248,8 +256,8 @@ int mlx5_eswitch_set_vport_spoofchk(struct mlx5_eswitch *esw,
int vport, bool spoofchk);
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
int vport_num, bool setting);
-int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw,
- int vport, u32 max_rate);
+int mlx5_eswitch_set_vport_rate(struct mlx5_eswitch *esw, int vport,
+ u32 max_rate, u32 min_rate);
int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
int vport, struct ifla_vf_info *ivi);
int mlx5_eswitch_get_vport_stats(struct mlx5_eswitch *esw,
@@ -274,18 +282,12 @@ enum {
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_POP 0x40
#define MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH 0x80
-struct mlx5_encap_info {
- __be32 daddr;
- __be32 tun_id;
- __be16 tp_dst;
-};
-
struct mlx5_encap_entry {
struct hlist_node encap_hlist;
struct list_head flows;
u32 encap_id;
struct neighbour *n;
- struct mlx5_encap_info tun_info;
+ struct ip_tunnel_info tun_info;
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
struct net_device *out_dev;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 03293ed..3481825 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -402,19 +402,18 @@ out:
}
#define MAX_PF_SQ 256
-#define ESW_OFFLOADS_NUM_ENTRIES (1 << 13) /* 8K */
#define ESW_OFFLOADS_NUM_GROUPS 4
static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
{
int inlen = MLX5_ST_SZ_BYTES(create_flow_group_in);
+ int table_size, ix, esw_size, err = 0;
struct mlx5_core_dev *dev = esw->dev;
struct mlx5_flow_namespace *root_ns;
struct mlx5_flow_table *fdb = NULL;
struct mlx5_flow_group *g;
u32 *flow_group_in;
void *match_criteria;
- int table_size, ix, err = 0;
u32 flags = 0;
flow_group_in = mlx5_vzalloc(inlen);
@@ -427,15 +426,19 @@ static int esw_create_offloads_fdb_table(struct mlx5_eswitch *esw, int nvports)
goto ns_err;
}
- esw_debug(dev, "Create offloads FDB table, log_max_size(%d)\n",
- MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
+ esw_debug(dev, "Create offloads FDB table, min (max esw size(2^%d), max counters(%d)*groups(%d))\n",
+ MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size),
+ MLX5_CAP_GEN(dev, max_flow_counter), ESW_OFFLOADS_NUM_GROUPS);
+
+ esw_size = min_t(int, MLX5_CAP_GEN(dev, max_flow_counter) * ESW_OFFLOADS_NUM_GROUPS,
+ 1 << MLX5_CAP_ESW_FLOWTABLE_FDB(dev, log_max_ft_size));
if (MLX5_CAP_ESW_FLOWTABLE_FDB(dev, encap) &&
MLX5_CAP_ESW_FLOWTABLE_FDB(dev, decap))
flags |= MLX5_FLOW_TABLE_TUNNEL_EN;
fdb = mlx5_create_auto_grouped_flow_table(root_ns, FDB_FAST_PATH,
- ESW_OFFLOADS_NUM_ENTRIES,
+ esw_size,
ESW_OFFLOADS_NUM_GROUPS, 0,
flags);
if (IS_ERR(fdb)) {
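The new sizing clamps the fast-path FDB to what the device can actually count. With illustrative capability values:

	/* max_flow_counter = 4096, log_max_ft_size = 15 (2^15 = 32768)
	 *
	 * esw_size = min(4096 * ESW_OFFLOADS_NUM_GROUPS, 32768)
	 *          = min(4096 * 4, 32768) = 16384
	 *
	 * so the table never grows past the available flow counters.
	 */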
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
index c4478ec..b5253b5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_cmd.c
@@ -473,10 +473,13 @@ int mlx5_encap_alloc(struct mlx5_core_dev *dev,
int err;
u32 *in;
- if (size > MLX5_CAP_ESW(dev, max_encap_header_size))
+ if (size > max_encap_size) {
+ mlx5_core_warn(dev, "encap size %zd too big, max supported is %d\n",
+ size, max_encap_size);
return -EINVAL;
+ }
- in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + max_encap_size,
+ in = kzalloc(MLX5_ST_SZ_BYTES(alloc_encap_header_in) + size,
GFP_KERNEL);
if (!in)
return -ENOMEM;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index 0ac7a2f..85ff4b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1665,7 +1665,7 @@ static int create_leaf_prios(struct mlx5_flow_namespace *ns, int prio,
#define FLOW_TABLE_BIT_SZ 1
#define GET_FLOW_TABLE_CAP(dev, offset) \
- ((be32_to_cpu(*((__be32 *)(dev->hca_caps_cur[MLX5_CAP_FLOW_TABLE]) + \
+ ((be32_to_cpu(*((__be32 *)(dev->caps.hca_cur[MLX5_CAP_FLOW_TABLE]) + \
offset / 32)) >> \
(32 - FLOW_TABLE_BIT_SZ - (offset & 0x1f))) & FLOW_TABLE_BIT_SZ)
static bool has_required_caps(struct mlx5_core_dev *dev, struct node_caps *caps)
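For reference, the renamed macro extracts one capability bit from the big-endian capability array, counting bits from the most significant end of each dword. Worked through with an illustrative offset:

	/* offset = 35, FLOW_TABLE_BIT_SZ = 1:
	 * dword = be32_to_cpu(hca_cur[MLX5_CAP_FLOW_TABLE][35 / 32]);  // [1]
	 * shift = 32 - 1 - (35 & 0x1f) = 28;
	 * bit   = (dword >> 28) & 0x1;
	 */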
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw.c b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
index 5718aad..d0bbefa 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fw.c
@@ -91,6 +91,20 @@ out:
}
EXPORT_SYMBOL(mlx5_core_query_vendor_id);
+static int mlx5_get_pcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_pcam_reg(dev, dev->caps.pcam,
+ MLX5_PCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_PCAM_REGS_5000_TO_507F);
+}
+
+static int mlx5_get_mcam_reg(struct mlx5_core_dev *dev)
+{
+ return mlx5_query_mcam_reg(dev, dev->caps.mcam,
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES,
+ MLX5_MCAM_REGS_FIRST_128);
+}
+
int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
{
int err;
@@ -154,6 +168,12 @@ int mlx5_query_hca_caps(struct mlx5_core_dev *dev)
return err;
}
+ if (MLX5_CAP_GEN(dev, pcam_reg))
+ mlx5_get_pcam_reg(dev);
+
+ if (MLX5_CAP_GEN(dev, mcam_reg))
+ mlx5_get_mcam_reg(dev);
+
return 0;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index 5bcf934..d051539 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -231,21 +231,6 @@ static const char *hsynd_str(u8 synd)
}
}
-static u16 get_maj(u32 fw)
-{
- return fw >> 28;
-}
-
-static u16 get_min(u32 fw)
-{
- return fw >> 16 & 0xfff;
-}
-
-static u16 get_sub(u32 fw)
-{
- return fw & 0xffff;
-}
-
static void print_health_info(struct mlx5_core_dev *dev)
{
struct mlx5_core_health *health = &dev->priv.health;
@@ -263,13 +248,14 @@ static void print_health_info(struct mlx5_core_dev *dev)
dev_err(&dev->pdev->dev, "assert_exit_ptr 0x%08x\n", ioread32be(&h->assert_exit_ptr));
dev_err(&dev->pdev->dev, "assert_callra 0x%08x\n", ioread32be(&h->assert_callra));
- fw = ioread32be(&h->fw_ver);
- sprintf(fw_str, "%d.%d.%d", get_maj(fw), get_min(fw), get_sub(fw));
+ sprintf(fw_str, "%d.%d.%d", fw_rev_maj(dev), fw_rev_min(dev), fw_rev_sub(dev));
dev_err(&dev->pdev->dev, "fw_ver %s\n", fw_str);
dev_err(&dev->pdev->dev, "hw_id 0x%08x\n", ioread32be(&h->hw_id));
dev_err(&dev->pdev->dev, "irisc_index %d\n", ioread8(&h->irisc_index));
dev_err(&dev->pdev->dev, "synd 0x%x: %s\n", ioread8(&h->synd), hsynd_str(ioread8(&h->synd)));
dev_err(&dev->pdev->dev, "ext_synd 0x%04x\n", ioread16be(&h->ext_synd));
+ fw = ioread32be(&h->fw_ver);
+ dev_err(&dev->pdev->dev, "raw fw_ver 0x%08x\n", fw);
}
static unsigned long get_next_poll_jiffies(void)
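The removed helpers document the layout of the raw fw_ver dword, which the function still dumps for completeness; with fw_rev_maj()/fw_rev_min()/fw_rev_sub() now supplying the cached values, the encoding is:

	major = fw >> 28;		/* top 4 bits */
	minor = (fw >> 16) & 0xfff;	/* next 12 bits */
	sub   = fw & 0xffff;		/* low 16 bits */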
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cb7708b..84f7970 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -418,11 +418,11 @@ static int mlx5_core_get_caps_mode(struct mlx5_core_dev *dev,
switch (cap_mode) {
case HCA_CAP_OPMOD_GET_MAX:
- memcpy(dev->hca_caps_max[cap_type], hca_caps,
+ memcpy(dev->caps.hca_max[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
case HCA_CAP_OPMOD_GET_CUR:
- memcpy(dev->hca_caps_cur[cap_type], hca_caps,
+ memcpy(dev->caps.hca_cur[cap_type], hca_caps,
MLX5_UN_SZ_BYTES(hca_cap_union));
break;
default:
@@ -513,7 +513,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
set_hca_cap = MLX5_ADDR_OF(set_hca_cap_in, set_ctx,
capability);
- memcpy(set_hca_cap, dev->hca_caps_cur[MLX5_CAP_GENERAL],
+ memcpy(set_hca_cap, dev->caps.hca_cur[MLX5_CAP_GENERAL],
MLX5_ST_SZ_BYTES(cmd_hca_cap));
mlx5_core_dbg(dev, "Current Pkey table size %d Setting new size %d\n",
@@ -1217,7 +1217,8 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
{
int err = 0;
- mlx5_drain_health_wq(dev);
+ if (cleanup)
+ mlx5_drain_health_wq(dev);
mutex_lock(&dev->intf_state_mutex);
if (test_bit(MLX5_INTERFACE_STATE_DOWN, &dev->intf_state)) {
@@ -1339,9 +1340,7 @@ static int init_one(struct pci_dev *pdev,
goto clean_health;
}
- err = request_module_nowait(MLX5_IB_MOD);
- if (err)
- pr_info("failed request module on %s\n", MLX5_IB_MOD);
+ request_module_nowait(MLX5_IB_MOD);
err = devlink_register(devlink, &pdev->dev);
if (err)
@@ -1402,9 +1401,10 @@ static pci_ers_result_t mlx5_pci_err_detected(struct pci_dev *pdev,
mlx5_enter_error_state(dev);
mlx5_unload_one(dev, priv, false);
- /* In case of kernel call save the pci state */
+ /* In case of kernel call save the pci state and drain the health wq */
if (state) {
pci_save_state(pdev);
+ mlx5_drain_health_wq(dev);
mlx5_pci_disable_device(dev);
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index 74241e8..b3dabe6 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -113,6 +113,11 @@ u32 mlx5_get_msix_vec(struct mlx5_core_dev *dev, int vecidx);
struct mlx5_eq *mlx5_eqn2eq(struct mlx5_core_dev *dev, int eqn);
void mlx5_cq_tasklet_cb(unsigned long data);
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+ u8 access_reg_group);
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcap, u8 feature_group,
+ u8 access_reg_group);
+
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
void mlx5_lag_remove(struct mlx5_core_dev *dev);
@@ -138,6 +143,11 @@ void mlx5_encap_dealloc(struct mlx5_core_dev *dev, u32 encap_id);
bool mlx5_lag_intf_add(struct mlx5_interface *intf, struct mlx5_priv *priv);
+int mlx5_query_mtpps(struct mlx5_core_dev *dev, u32 *mtpps, u32 mtpps_size);
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size);
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode);
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode);
+
void mlx5e_init(void);
void mlx5e_cleanup(void);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c
index d2ec9d2..969e352 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/port.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c
@@ -74,6 +74,30 @@ out:
}
EXPORT_SYMBOL_GPL(mlx5_core_access_reg);
+int mlx5_query_pcam_reg(struct mlx5_core_dev *dev, u32 *pcam, u8 feature_group,
+ u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(pcam_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(pcam_reg);
+
+ MLX5_SET(pcam_reg, in, feature_group, feature_group);
+ MLX5_SET(pcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(dev, in, sz, pcam, sz, MLX5_REG_PCAM, 0, 0);
+}
+
+int mlx5_query_mcam_reg(struct mlx5_core_dev *dev, u32 *mcam, u8 feature_group,
+ u8 access_reg_group)
+{
+ u32 in[MLX5_ST_SZ_DW(mcam_reg)] = {0};
+ int sz = MLX5_ST_SZ_BYTES(mcam_reg);
+
+ MLX5_SET(mcam_reg, in, feature_group, feature_group);
+ MLX5_SET(mcam_reg, in, access_reg_group, access_reg_group);
+
+ return mlx5_core_access_reg(dev, in, sz, mcam, sz, MLX5_REG_MCAM, 0, 0);
+}
+
struct mlx5_reg_pcap {
u8 rsvd0;
u8 port_num;
@@ -866,3 +890,51 @@ void mlx5_port_module_event(struct mlx5_core_dev *dev, struct mlx5_eqe *eqe)
module_num, mlx5_pme_status[module_status - 1],
mlx5_pme_error[error_type]);
}
+
+int mlx5_query_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+ u32 in[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), mtpps,
+ mtpps_size, MLX5_REG_MTPPS, 0, 0);
+}
+
+int mlx5_set_mtpps(struct mlx5_core_dev *mdev, u32 *mtpps, u32 mtpps_size)
+{
+ u32 out[MLX5_ST_SZ_DW(mtpps_reg)] = {0};
+
+ return mlx5_core_access_reg(mdev, mtpps, mtpps_size, out,
+ sizeof(out), MLX5_REG_MTPPS, 0, 1);
+}
+
+int mlx5_query_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 *arm, u8 *mode)
+{
+ u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ int err = 0;
+
+ MLX5_SET(mtppse_reg, in, pin, pin);
+
+ err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MTPPSE, 0, 0);
+ if (err)
+ return err;
+
+ *arm = MLX5_GET(mtppse_reg, out, event_arm);
+ *mode = MLX5_GET(mtppse_reg, out, event_generation_mode);
+
+ return err;
+}
+
+int mlx5_set_mtppse(struct mlx5_core_dev *mdev, u8 pin, u8 arm, u8 mode)
+{
+ u32 out[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+ u32 in[MLX5_ST_SZ_DW(mtppse_reg)] = {0};
+
+ MLX5_SET(mtppse_reg, in, pin, pin);
+ MLX5_SET(mtppse_reg, in, event_arm, arm);
+ MLX5_SET(mtppse_reg, in, event_generation_mode, mode);
+
+ return mlx5_core_access_reg(mdev, in, sizeof(in), out,
+ sizeof(out), MLX5_REG_MTPPSE, 0, 1);
+}
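A minimal usage sketch for the new MTPPSE helpers, assuming a caller holding a valid mdev (the pin index and mode value are placeholders):

	u8 arm, mode;
	int err;

	err = mlx5_set_mtppse(mdev, 0, 1, 0);	/* arm pin 0, mode 0 */
	if (!err)
		err = mlx5_query_mtppse(mdev, 0, &arm, &mode);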
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/vport.c b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
index 269e440..b49cfc8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/vport.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/vport.c
@@ -127,6 +127,23 @@ int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
}
EXPORT_SYMBOL_GPL(mlx5_query_nic_vport_min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev,
+ u8 *min_inline_mode)
+{
+ switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
+ case MLX5_CAP_INLINE_MODE_L2:
+ *min_inline_mode = MLX5_INLINE_MODE_L2;
+ break;
+ case MLX5_CAP_INLINE_MODE_VPORT_CONTEXT:
+ mlx5_query_nic_vport_min_inline(mdev, 0, min_inline_mode);
+ break;
+ case MLX5_CAP_INLINE_MODE_NOT_REQUIRED:
+ *min_inline_mode = MLX5_INLINE_MODE_NONE;
+ break;
+ }
+}
+EXPORT_SYMBOL_GPL(mlx5_query_min_inline);
+
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline)
{
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index d147ddd..0af3338 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -209,21 +209,21 @@ MLXSW_ITEM32(pci, eqe, owner, 0x0C, 0, 1);
/* pci_eqe_cmd_token
* Command completion event - token
*/
-MLXSW_ITEM32(pci, eqe, cmd_token, 0x08, 16, 16);
+MLXSW_ITEM32(pci, eqe, cmd_token, 0x00, 16, 16);
/* pci_eqe_cmd_status
* Command completion event - status
*/
-MLXSW_ITEM32(pci, eqe, cmd_status, 0x08, 0, 8);
+MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8);
/* pci_eqe_cmd_out_param_h
* Command completion event - output parameter - higher part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x0C, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_h, 0x04, 0, 32);
/* pci_eqe_cmd_out_param_l
* Command completion event - output parameter - lower part
*/
-MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x10, 0, 32);
+MLXSW_ITEM32(pci, eqe, cmd_out_param_l, 0x08, 0, 32);
#endif
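Each MLXSW_ITEM32(pci, eqe, cmd_status, 0x00, 0, 8) line expands into accessors such as mlxsw_pci_eqe_cmd_status_get(), reading an 8-bit field at bit offset 0 of the 32-bit word at byte offset 0x00. The hunk above therefore moves the command-completion fields from the tail of the EQE to its head, where the hardware actually places them:

	/* after the fix, status comes from the first dword of the EQE */
	u8 status = mlxsw_pci_eqe_cmd_status_get(eqe);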
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index 1357fe0..9fb0316 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -4965,6 +4965,46 @@ static inline void mlxsw_reg_mlcr_pack(char *payload, u8 local_port,
MLXSW_REG_MLCR_DURATION_MAX : 0);
}
+/* MPSC - Monitoring Packet Sampling Configuration Register
+ * --------------------------------------------------------
+ * MPSC Register is used to configure the Packet Sampling mechanism.
+ */
+#define MLXSW_REG_MPSC_ID 0x9080
+#define MLXSW_REG_MPSC_LEN 0x1C
+
+MLXSW_REG_DEFINE(mpsc, MLXSW_REG_MPSC_ID, MLXSW_REG_MPSC_LEN);
+
+/* reg_mpsc_local_port
+ * Local port number
+ * Not supported for CPU port
+ * Access: Index
+ */
+MLXSW_ITEM32(reg, mpsc, local_port, 0x00, 16, 8);
+
+/* reg_mpsc_e
+ * Enable sampling on port local_port
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, e, 0x04, 30, 1);
+
+#define MLXSW_REG_MPSC_RATE_MAX 3500000000UL
+
+/* reg_mpsc_rate
+ * Sampling rate = 1 out of rate packets (with randomization around
+ * the point). Valid values are: 1 to MLXSW_REG_MPSC_RATE_MAX
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, mpsc, rate, 0x08, 0, 32);
+
+static inline void mlxsw_reg_mpsc_pack(char *payload, u8 local_port, bool e,
+ u32 rate)
+{
+ MLXSW_REG_ZERO(mpsc, payload);
+ mlxsw_reg_mpsc_local_port_set(payload, local_port);
+ mlxsw_reg_mpsc_e_set(payload, e);
+ mlxsw_reg_mpsc_rate_set(payload, rate);
+}
+
/* SBPR - Shared Buffer Pools Register
* -----------------------------------
* The SBPR configures and retrieves the shared buffer pools and configuration.
@@ -5429,6 +5469,7 @@ static const struct mlxsw_reg_info *mlxsw_reg_infos[] = {
MLXSW_REG(mpat),
MLXSW_REG(mpar),
MLXSW_REG(mlcr),
+ MLXSW_REG(mpsc),
MLXSW_REG(sbpr),
MLXSW_REG(sbcm),
MLXSW_REG(sbpm),
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index d0e803f..467aa52 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -57,6 +57,7 @@
#include <net/pkt_cls.h>
#include <net/tc_act/tc_mirred.h>
#include <net/netevent.h>
+#include <net/tc_act/tc_sample.h>
#include "spectrum.h"
#include "pci.h"
@@ -469,6 +470,16 @@ static void mlxsw_sp_span_mirror_remove(struct mlxsw_sp_port *from,
mlxsw_sp_span_inspected_port_unbind(from, span_entry, type);
}
+static int mlxsw_sp_port_sample_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool enable, u32 rate)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char mpsc_pl[MLXSW_REG_MPSC_LEN];
+
+ mlxsw_reg_mpsc_pack(mpsc_pl, mlxsw_sp_port->local_port, enable, rate);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(mpsc), mpsc_pl);
+}
+
static int mlxsw_sp_port_admin_status_set(struct mlxsw_sp_port *mlxsw_sp_port,
bool is_up)
{
@@ -684,6 +695,7 @@ static netdev_tx_t mlxsw_sp_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
if (eth_skb_pad(skb)) {
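The dev_consume_skb_any() addition matters for drop accounting: the original skb was successfully replaced and transmitted, so freeing it must not register as a packet drop in tools that trace kfree_skb. The pattern, reduced to a sketch (names around the reallocation are illustrative):

	new_skb = skb_realloc_headroom(skb, hdr_len);
	if (!new_skb) {
		dev_kfree_skb_any(skb);		/* genuine drop */
		return NETDEV_TX_OK;
	}
	dev_consume_skb_any(skb);		/* consumed, not dropped */
	skb = new_skb;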
@@ -1217,6 +1229,51 @@ mlxsw_sp_port_del_cls_matchall_mirror(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_span_mirror_remove(mlxsw_sp_port, to_port, span_type);
}
+static int
+mlxsw_sp_port_add_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port,
+ struct tc_cls_matchall_offload *cls,
+ const struct tc_action *a,
+ bool ingress)
+{
+ int err;
+
+ if (!mlxsw_sp_port->sample)
+ return -EOPNOTSUPP;
+ if (rtnl_dereference(mlxsw_sp_port->sample->psample_group)) {
+ netdev_err(mlxsw_sp_port->dev, "sample already active\n");
+ return -EEXIST;
+ }
+ if (tcf_sample_rate(a) > MLXSW_REG_MPSC_RATE_MAX) {
+ netdev_err(mlxsw_sp_port->dev, "sample rate not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ rcu_assign_pointer(mlxsw_sp_port->sample->psample_group,
+ tcf_sample_psample_group(a));
+ mlxsw_sp_port->sample->truncate = tcf_sample_truncate(a);
+ mlxsw_sp_port->sample->trunc_size = tcf_sample_trunc_size(a);
+ mlxsw_sp_port->sample->rate = tcf_sample_rate(a);
+
+ err = mlxsw_sp_port_sample_set(mlxsw_sp_port, true, tcf_sample_rate(a));
+ if (err)
+ goto err_port_sample_set;
+ return 0;
+
+err_port_sample_set:
+ RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
+ return err;
+}
+
+static void
+mlxsw_sp_port_del_cls_matchall_sample(struct mlxsw_sp_port *mlxsw_sp_port)
+{
+ if (!mlxsw_sp_port->sample)
+ return;
+
+ mlxsw_sp_port_sample_set(mlxsw_sp_port, false, 1);
+ RCU_INIT_POINTER(mlxsw_sp_port->sample->psample_group, NULL);
+}
+
static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
__be16 protocol,
struct tc_cls_matchall_offload *cls,
@@ -1247,6 +1304,10 @@ static int mlxsw_sp_port_add_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
mirror = &mall_tc_entry->mirror;
err = mlxsw_sp_port_add_cls_matchall_mirror(mlxsw_sp_port,
mirror, a, ingress);
+ } else if (is_tcf_sample(a) && protocol == htons(ETH_P_ALL)) {
+ mall_tc_entry->type = MLXSW_SP_PORT_MALL_SAMPLE;
+ err = mlxsw_sp_port_add_cls_matchall_sample(mlxsw_sp_port, cls,
+ a, ingress);
} else {
err = -EOPNOTSUPP;
}
@@ -1280,6 +1341,9 @@ static void mlxsw_sp_port_del_cls_matchall(struct mlxsw_sp_port *mlxsw_sp_port,
mlxsw_sp_port_del_cls_matchall_mirror(mlxsw_sp_port,
&mall_tc_entry->mirror);
break;
+ case MLXSW_SP_PORT_MALL_SAMPLE:
+ mlxsw_sp_port_del_cls_matchall_sample(mlxsw_sp_port);
+ break;
default:
WARN_ON(1);
}
@@ -2258,6 +2322,13 @@ static int __mlxsw_sp_port_create(struct mlxsw_sp *mlxsw_sp, u8 local_port,
goto err_alloc_stats;
}
+ mlxsw_sp_port->sample = kzalloc(sizeof(*mlxsw_sp_port->sample),
+ GFP_KERNEL);
+ if (!mlxsw_sp_port->sample) {
+ err = -ENOMEM;
+ goto err_alloc_sample;
+ }
+
mlxsw_sp_port->hw_stats.cache =
kzalloc(sizeof(*mlxsw_sp_port->hw_stats.cache), GFP_KERNEL);
@@ -2386,6 +2457,8 @@ err_dev_addr_init:
err_port_swid_set:
kfree(mlxsw_sp_port->hw_stats.cache);
err_alloc_hw_stats:
+ kfree(mlxsw_sp_port->sample);
+err_alloc_sample:
free_percpu(mlxsw_sp_port->pcpu_stats);
err_alloc_stats:
kfree(mlxsw_sp_port->untagged_vlans);
@@ -2432,6 +2505,7 @@ static void __mlxsw_sp_port_remove(struct mlxsw_sp *mlxsw_sp, u8 local_port)
mlxsw_sp_port_swid_set(mlxsw_sp_port, MLXSW_PORT_SWID_DISABLED_PORT);
mlxsw_sp_port_module_unmap(mlxsw_sp, mlxsw_sp_port->local_port);
kfree(mlxsw_sp_port->hw_stats.cache);
+ kfree(mlxsw_sp_port->sample);
free_percpu(mlxsw_sp_port->pcpu_stats);
kfree(mlxsw_sp_port->untagged_vlans);
kfree(mlxsw_sp_port->active_vlans);
@@ -2733,6 +2807,41 @@ static void mlxsw_sp_rx_listener_mark_func(struct sk_buff *skb, u8 local_port,
return mlxsw_sp_rx_listener_no_mark_func(skb, local_port, priv);
}
+static void mlxsw_sp_rx_listener_sample_func(struct sk_buff *skb, u8 local_port,
+ void *priv)
+{
+ struct mlxsw_sp *mlxsw_sp = priv;
+ struct mlxsw_sp_port *mlxsw_sp_port = mlxsw_sp->ports[local_port];
+ struct psample_group *psample_group;
+ u32 size;
+
+ if (unlikely(!mlxsw_sp_port)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received for non-existent port\n",
+ local_port);
+ goto out;
+ }
+ if (unlikely(!mlxsw_sp_port->sample)) {
+ dev_warn_ratelimited(mlxsw_sp->bus_info->dev, "Port %d: sample skb received on unsupported port\n",
+ local_port);
+ goto out;
+ }
+
+ size = mlxsw_sp_port->sample->truncate ?
+ mlxsw_sp_port->sample->trunc_size : skb->len;
+
+ rcu_read_lock();
+ psample_group = rcu_dereference(mlxsw_sp_port->sample->psample_group);
+ if (!psample_group)
+ goto out_unlock;
+ psample_sample_packet(psample_group, skb, size,
+ mlxsw_sp_port->dev->ifindex, 0,
+ mlxsw_sp_port->sample->rate);
+out_unlock:
+ rcu_read_unlock();
+out:
+ consume_skb(skb);
+}
+
#define MLXSW_SP_RXL_NO_MARK(_trap_id, _action, _trap_group, _is_ctrl) \
MLXSW_RXL(mlxsw_sp_rx_listener_no_mark_func, _trap_id, _action, \
_is_ctrl, SP_##_trap_group, DISCARD)
@@ -2768,6 +2877,9 @@ static const struct mlxsw_listener mlxsw_sp_listener[] = {
MLXSW_SP_RXL_NO_MARK(RTR_INGRESS0, TRAP_TO_CPU, REMOTE_ROUTE, false),
MLXSW_SP_RXL_NO_MARK(HOST_MISS_IPV4, TRAP_TO_CPU, ARP_MISS, false),
MLXSW_SP_RXL_NO_MARK(BGP_IPV4, TRAP_TO_CPU, BGP_IPV4, false),
+ /* PKT Sample trap */
+ MLXSW_RXL(mlxsw_sp_rx_listener_sample_func, PKT_SAMPLE, MIRROR_TO_CPU,
+ false, SP_IP2ME, DISCARD)
};
static int mlxsw_sp_cpu_policers_set(struct mlxsw_core *mlxsw_core)
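The sample listener pairs with the matchall configuration earlier in this file through an RCU-published pointer, so enable, disable and the datapath never race. The pairing, reduced to its essentials:

	/* config path (RTNL held) */
	rcu_assign_pointer(port->sample->psample_group,
			   tcf_sample_psample_group(a));

	/* datapath */
	rcu_read_lock();
	group = rcu_dereference(port->sample->psample_group);
	if (group)
		psample_sample_packet(group, skb, size, ifindex, 0, rate);
	rcu_read_unlock();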
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index cc1af19..bc3efe1 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -46,6 +46,7 @@
#include <linux/dcbnl.h>
#include <linux/in6.h>
#include <linux/notifier.h>
+#include <net/psample.h>
#include "port.h"
#include "core.h"
@@ -229,6 +230,7 @@ struct mlxsw_sp_span_entry {
enum mlxsw_sp_port_mall_action_type {
MLXSW_SP_PORT_MALL_MIRROR,
+ MLXSW_SP_PORT_MALL_SAMPLE,
};
struct mlxsw_sp_port_mall_mirror_tc_entry {
@@ -315,6 +317,13 @@ struct mlxsw_sp_port_pcpu_stats {
u32 tx_dropped;
};
+struct mlxsw_sp_port_sample {
+ struct psample_group __rcu *psample_group;
+ u32 trunc_size;
+ u32 rate;
+ bool truncate;
+};
+
struct mlxsw_sp_port {
struct net_device *dev;
struct mlxsw_sp_port_pcpu_stats __percpu *pcpu_stats;
@@ -361,6 +370,7 @@ struct mlxsw_sp_port {
struct rtnl_link_stats64 *cache;
struct delayed_work update_dw;
} hw_stats;
+ struct mlxsw_sp_port_sample *sample;
};
struct mlxsw_sp_port *mlxsw_sp_port_lower_dev_hold(struct net_device *dev);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
index 01d0efa..9e494a4 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_router.c
@@ -1172,7 +1172,8 @@ static int mlxsw_sp_nexthop_mac_update(struct mlxsw_sp *mlxsw_sp, u32 adj_index,
static int
mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
- struct mlxsw_sp_nexthop_group *nh_grp)
+ struct mlxsw_sp_nexthop_group *nh_grp,
+ bool reallocate)
{
u32 adj_index = nh_grp->adj_index; /* base */
struct mlxsw_sp_nexthop *nh;
@@ -1187,7 +1188,7 @@ mlxsw_sp_nexthop_group_mac_update(struct mlxsw_sp *mlxsw_sp,
continue;
}
- if (nh->update) {
+ if (nh->update || reallocate) {
err = mlxsw_sp_nexthop_mac_update(mlxsw_sp,
adj_index, nh);
if (err)
@@ -1248,7 +1249,8 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
/* Nothing was added or removed, so no need to reallocate. Just
* update MAC on existing adjacency indexes.
*/
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp,
+ false);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
@@ -1276,7 +1278,7 @@ mlxsw_sp_nexthop_group_refresh(struct mlxsw_sp *mlxsw_sp,
nh_grp->adj_index_valid = 1;
nh_grp->adj_index = adj_index;
nh_grp->ecmp_size = ecmp_size;
- err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp);
+ err = mlxsw_sp_nexthop_group_mac_update(mlxsw_sp, nh_grp, true);
if (err) {
dev_warn(mlxsw_sp->bus_info->dev, "Failed to update neigh MAC in adjacency table.\n");
goto set_trap;
diff --git a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
index 696d406..169193e 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/switchx2.c
@@ -345,6 +345,7 @@ static netdev_tx_t mlxsw_sx_port_xmit(struct sk_buff *skb,
dev_kfree_skb_any(skb_orig);
return NETDEV_TX_OK;
}
+ dev_consume_skb_any(skb_orig);
}
mlxsw_sx_txhdr_construct(skb, &tx_info);
/* TX header is consumed by HW on the way so we shouldn't count its
diff --git a/drivers/net/ethernet/mellanox/mlxsw/trap.h b/drivers/net/ethernet/mellanox/mlxsw/trap.h
index 7ab275d..02ea48b 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/trap.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/trap.h
@@ -54,6 +54,7 @@ enum {
MLXSW_TRAP_ID_IGMP_V2_REPORT = 0x32,
MLXSW_TRAP_ID_IGMP_V2_LEAVE = 0x33,
MLXSW_TRAP_ID_IGMP_V3_REPORT = 0x34,
+ MLXSW_TRAP_ID_PKT_SAMPLE = 0x38,
MLXSW_TRAP_ID_ARPBC = 0x50,
MLXSW_TRAP_ID_ARPUC = 0x51,
MLXSW_TRAP_ID_MTUERROR = 0x52,
diff --git a/drivers/net/ethernet/micrel/ks8851.c b/drivers/net/ethernet/micrel/ks8851.c
index e7e1aff..955d69a 100644
--- a/drivers/net/ethernet/micrel/ks8851.c
+++ b/drivers/net/ethernet/micrel/ks8851.c
@@ -84,7 +84,6 @@ union ks8851_tx_hdr {
* @rc_ier: Cached copy of KS_IER.
* @rc_ccr: Cached copy of KS_CCR.
* @rc_rxqcr: Cached copy of KS_RXQCR.
- * @eeprom_size: Companion eeprom size in Bytes, 0 if no eeprom
* @eeprom: 93CX6 EEPROM state for accessing on-board EEPROM.
* @vdd_reg: Optional regulator supplying the chip
* @vdd_io: Optional digital power supply for IO
@@ -120,7 +119,6 @@ struct ks8851_net {
u16 rc_ier;
u16 rc_rxqcr;
u16 rc_ccr;
- u16 eeprom_size;
struct mii_if_info mii;
struct ks8851_rxctrl rxctrl;
@@ -1533,11 +1531,6 @@ static int ks8851_probe(struct spi_device *spi)
/* cache the contents of the CCR register for EEPROM, etc. */
ks->rc_ccr = ks8851_rdreg16(ks, KS_CCR);
- if (ks->rc_ccr & CCR_EEPROM)
- ks->eeprom_size = 128;
- else
- ks->eeprom_size = 0;
-
ks8851_read_selftest(ks);
ks8851_init_mac(ks);
diff --git a/drivers/net/ethernet/moxa/moxart_ether.c b/drivers/net/ethernet/moxa/moxart_ether.c
index 9774b50..06c9f41 100644
--- a/drivers/net/ethernet/moxa/moxart_ether.c
+++ b/drivers/net/ethernet/moxa/moxart_ether.c
@@ -269,7 +269,7 @@ rx_next:
}
if (rx < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx);
}
priv->reg_imr |= RPKT_FINISH_M;
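This and the conversions that follow replace napi_complete() with napi_complete_done(), which reports how much of the budget the poll cycle actually used; the core feeds that figure into its busy-poll and interrupt-deferral heuristics. The resulting poll-handler shape (driver helper names are illustrative):

	static int demo_poll(struct napi_struct *napi, int budget)
	{
		int work_done = demo_clean_rx(napi, budget);

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			demo_enable_rx_irq(napi);
		}
		return work_done;
	}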
@@ -436,7 +436,7 @@ static void moxart_mac_set_rx_mode(struct net_device *ndev)
spin_unlock_irq(&priv->txlock);
}
-static struct net_device_ops moxart_netdev_ops = {
+static const struct net_device_ops moxart_netdev_ops = {
.ndo_open = moxart_mac_open,
.ndo_stop = moxart_mac_stop,
.ndo_start_xmit = moxart_mac_start_xmit,
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index db297cf..3d88117 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -1678,7 +1678,7 @@ static int myri10ge_poll(struct napi_struct *napi, int budget)
myri10ge_ss_unlock_napi(ss);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
put_be32(htonl(3), ss->irq_claim);
}
return work_done;
diff --git a/drivers/net/ethernet/natsemi/natsemi.c b/drivers/net/ethernet/natsemi/natsemi.c
index 90eac63..8e72679 100644
--- a/drivers/net/ethernet/natsemi/natsemi.c
+++ b/drivers/net/ethernet/natsemi/natsemi.c
@@ -2265,7 +2265,7 @@ static int natsemi_poll(struct napi_struct *napi, int budget)
np->intr_status = readl(ioaddr + IntrStatus);
} while (np->intr_status);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
/* Reenable interrupts providing nothing is trying to shut
* the chip down. */
diff --git a/drivers/net/ethernet/neterion/s2io.c b/drivers/net/ethernet/neterion/s2io.c
index 564f682..203abcb 100644
--- a/drivers/net/ethernet/neterion/s2io.c
+++ b/drivers/net/ethernet/neterion/s2io.c
@@ -2783,7 +2783,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
s2io_chk_rx_buffers(nic, ring);
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/*Re Enable MSI-Rx Vector*/
addr = (u8 __iomem *)&bar0->xmsi_mask_reg;
addr += 7 - ring->ring_no;
@@ -2817,7 +2817,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
break;
}
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the ring */
writeq(0, &bar0->rx_traffic_mask);
readl(&bar0->rx_traffic_mask);
diff --git a/drivers/net/ethernet/neterion/vxge/vxge-main.c b/drivers/net/ethernet/neterion/vxge/vxge-main.c
index f364502..6a4310a 100644
--- a/drivers/net/ethernet/neterion/vxge/vxge-main.c
+++ b/drivers/net/ethernet/neterion/vxge/vxge-main.c
@@ -1823,8 +1823,8 @@ static int vxge_poll_msix(struct napi_struct *napi, int budget)
vxge_hw_vpath_poll_rx(ring->handle);
pkts_processed = ring->pkts_processed;
- if (ring->pkts_processed < budget_org) {
- napi_complete(napi);
+ if (pkts_processed < budget_org) {
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the vpath */
vxge_hw_channel_msix_unmask(
@@ -1863,7 +1863,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
VXGE_COMPLETE_ALL_TX(vdev);
if (pkts_processed < budget_org) {
- napi_complete(napi);
+ napi_complete_done(napi, pkts_processed);
/* Re enable the Rx interrupts for the ring */
vxge_hw_device_unmask_all(hldev);
vxge_hw_device_flush_io(hldev);
diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
index 67afd95..6ac43ab 100644
--- a/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
+++ b/drivers/net/ethernet/netronome/nfp/nfp_net_common.c
@@ -42,6 +42,7 @@
*/
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -1459,7 +1460,7 @@ nfp_net_rx_drop(struct nfp_net_r_vector *r_vec, struct nfp_net_rx_ring *rx_ring,
dev_kfree_skb_any(skb);
}
-static void
+static bool
nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
struct nfp_net_tx_ring *tx_ring,
struct nfp_net_rx_buf *rxbuf, unsigned int pkt_off,
@@ -1473,13 +1474,13 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
if (unlikely(nfp_net_tx_full(tx_ring, 1))) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return;
+ return false;
}
new_frag = nfp_net_napi_alloc_one(nn, DMA_BIDIRECTIONAL, &new_dma_addr);
if (unlikely(!new_frag)) {
nfp_net_rx_drop(rx_ring->r_vec, rx_ring, rxbuf, NULL);
- return;
+ return false;
}
nfp_net_rx_give_one(rx_ring, new_frag, new_dma_addr);
@@ -1509,6 +1510,7 @@ nfp_net_tx_xdp_buf(struct nfp_net *nn, struct nfp_net_rx_ring *rx_ring,
tx_ring->wr_p++;
tx_ring->wr_ptr_add++;
+ return true;
}
static int nfp_net_run_xdp(struct bpf_prog *prog, void *data, unsigned int len)
@@ -1613,12 +1615,15 @@ static int nfp_net_rx(struct nfp_net_rx_ring *rx_ring, int budget)
case XDP_PASS:
break;
case XDP_TX:
- nfp_net_tx_xdp_buf(nn, rx_ring, tx_ring, rxbuf,
- pkt_off, pkt_len);
+ if (unlikely(!nfp_net_tx_xdp_buf(nn, rx_ring,
+ tx_ring, rxbuf,
+ pkt_off, pkt_len)))
+ trace_xdp_exception(nn->netdev, xdp_prog, act);
continue;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(nn->netdev, xdp_prog, act);
case XDP_DROP:
nfp_net_rx_give_one(rx_ring, rxbuf->frag,
rxbuf->dma_addr);
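Both here and in the qede hunk further down, trace_xdp_exception() is attached to every path where the driver cannot honor the XDP program's verdict, so a failed XDP_TX transmit or an XDP_ABORTED outcome is observable through the tracepoint rather than disappearing silently. The canonical dispatch shape (helper names are illustrative):

	switch (act) {
	case XDP_PASS:
		break;
	case XDP_TX:
		if (unlikely(!demo_tx_xdp_buf(nn, rxbuf)))
			trace_xdp_exception(netdev, xdp_prog, act);
		break;
	default:
		bpf_warn_invalid_xdp_action(act);
	case XDP_ABORTED:	/* fall through */
		trace_xdp_exception(netdev, xdp_prog, act);
	case XDP_DROP:		/* fall through */
		demo_recycle_rx_buf(nn, rxbuf);
	}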
diff --git a/drivers/net/ethernet/nvidia/forcedeth.c b/drivers/net/ethernet/nvidia/forcedeth.c
index dfc2c81..58ba5d3 100644
--- a/drivers/net/ethernet/nvidia/forcedeth.c
+++ b/drivers/net/ethernet/nvidia/forcedeth.c
@@ -3749,7 +3749,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
if (rx_work < budget) {
/* re-enable interrupts
(msix not enabled in napi) */
- napi_complete(napi);
+ napi_complete_done(napi, rx_work);
writel(np->irqmask, base + NvRegIrqMask);
}
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index dd6b0d0..9c7ffd6 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -999,7 +999,7 @@ static int lpc_eth_poll(struct napi_struct *napi, int budget)
rx_done = __lpc_handle_recv(ndev, budget);
if (rx_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
lpc_eth_enable_int(pldat->net_base);
}
diff --git a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
index d461f41..f9e4e8e 100644
--- a/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/ethernet/oki-semi/pch_gbe/pch_gbe_main.c
@@ -2385,7 +2385,7 @@ static int pch_gbe_napi_poll(struct napi_struct *napi, int budget)
poll_end_flag = true;
if (poll_end_flag) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
pch_gbe_irq_enable(adapter);
}
diff --git a/drivers/net/ethernet/pasemi/pasemi_mac.c b/drivers/net/ethernet/pasemi/pasemi_mac.c
index badfa1d..49591d9 100644
--- a/drivers/net/ethernet/pasemi/pasemi_mac.c
+++ b/drivers/net/ethernet/pasemi/pasemi_mac.c
@@ -1575,7 +1575,7 @@ static int pasemi_mac_poll(struct napi_struct *napi, int budget)
pkts = pasemi_mac_clean_rx(rx_ring(mac), budget);
if (pkts < budget) {
/* all done, no more packets present */
- napi_complete(napi);
+ napi_complete_done(napi, pkts);
pasemi_mac_restart_rx_intr(mac);
pasemi_mac_restart_tx_intr(mac);
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 86fb9d3..0cf8a37 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2396,7 +2396,7 @@ static int netxen_nic_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__NX_DEV_UP, &adapter->state))
netxen_nic_enable_int(sds_ring);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index c92a850..7520eb3 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -1846,7 +1846,7 @@ static int qed_fill_eth_dev_info(struct qed_dev *cdev,
qed_fill_dev_info(cdev, &info->common);
if (IS_VF(cdev))
- memset(info->common.hw_mac, 0, ETH_ALEN);
+ eth_zero_addr(info->common.hw_mac);
return 0;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 05e32f4..02c5d47 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -320,7 +320,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_del(&p_pkt->list_entry);
b_last_packet = list_empty(&p_tx->active_descq);
list_add_tail(&p_pkt->list_entry, &p_tx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -332,7 +332,7 @@ static void qed_ll2_txq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
b_last_frag =
p_tx->cur_completing_bd_idx == p_pkt->bd_used;
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_release_tx_gsi_packet(p_hwfn,
p_ll2_conn->
my_id,
@@ -401,7 +401,7 @@ static int qed_ll2_txq_completion(struct qed_hwfn *p_hwfn, void *p_cookie)
spin_unlock_irqrestore(&p_tx->lock, flags);
tx_frag = p_pkt->bds_set[0].tx_frag;
- if (p_ll2_conn->gsi_enable)
+ if (p_ll2_conn->conn.gsi_enable)
qed_ll2b_complete_tx_gsi_packet(p_hwfn,
p_ll2_conn->my_id,
p_pkt->cookie,
@@ -573,7 +573,7 @@ static void qed_ll2_rxq_flush(struct qed_hwfn *p_hwfn, u8 connection_handle)
list_move_tail(&p_pkt->list_entry, &p_rx->free_descq);
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO) {
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO) {
struct qed_ooo_buffer *p_buffer;
p_buffer = (struct qed_ooo_buffer *)p_pkt->cookie;
@@ -761,7 +761,7 @@ qed_ooo_submit_tx_buffers(struct qed_hwfn *p_hwfn,
rc = qed_ll2_prepare_tx_packet(p_hwfn, p_ll2_conn->my_id, 1,
p_buffer->vlan, bd_flags,
l4_hdr_offset_w,
- p_ll2_conn->tx_dest, 0,
+ p_ll2_conn->conn.tx_dest, 0,
first_frag,
p_buffer->packet_length,
p_buffer, true);
@@ -881,7 +881,7 @@ qed_ll2_acquire_connection_ooo(struct qed_hwfn *p_hwfn,
u16 buf_idx;
int rc = 0;
- if (p_ll2_info->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_info->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return rc;
if (!rx_num_ooo_buffers)
@@ -924,7 +924,7 @@ static void
qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -936,7 +936,7 @@ static void qed_ll2_release_connection_ooo(struct qed_hwfn *p_hwfn,
{
struct qed_ooo_buffer *p_buffer;
- if (p_ll2_conn->conn_type != QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type != QED_LL2_TYPE_ISCSI_OOO)
return;
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
@@ -968,23 +968,19 @@ static int qed_ll2_start_ooo(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
u8 *handle = &hwfn->pf_params.iscsi_pf_params.ll2_ooo_queue_id;
- struct qed_ll2_info *ll2_info;
+ struct qed_ll2_conn ll2_info;
int rc;
- ll2_info = kzalloc(sizeof(*ll2_info), GFP_KERNEL);
- if (!ll2_info)
- return -ENOMEM;
- ll2_info->conn_type = QED_LL2_TYPE_ISCSI_OOO;
- ll2_info->mtu = params->mtu;
- ll2_info->rx_drop_ttl0_flg = params->drop_ttl0_packets;
- ll2_info->rx_vlan_removal_en = params->rx_vlan_stripping;
- ll2_info->tx_tc = OOO_LB_TC;
- ll2_info->tx_dest = CORE_TX_DEST_LB;
-
- rc = qed_ll2_acquire_connection(hwfn, ll2_info,
+ ll2_info.conn_type = QED_LL2_TYPE_ISCSI_OOO;
+ ll2_info.mtu = params->mtu;
+ ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
+ ll2_info.rx_vlan_removal_en = params->rx_vlan_stripping;
+ ll2_info.tx_tc = OOO_LB_TC;
+ ll2_info.tx_dest = CORE_TX_DEST_LB;
+
+ rc = qed_ll2_acquire_connection(hwfn, &ll2_info,
QED_LL2_RX_SIZE, QED_LL2_TX_SIZE,
handle);
- kfree(ll2_info);
if (rc) {
DP_INFO(cdev, "Failed to acquire LL2 OOO connection\n");
goto out;
@@ -1029,7 +1025,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn,
u8 action_on_error)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_rx_queue *p_rx = &p_ll2_conn->rx_queue;
struct core_rx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1055,7 +1051,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
DMA_REGPAIR_LE(p_ramrod->bd_base,
p_rx->rxq_chain.p_phys_addr);
cqe_pbl_size = (u16)qed_chain_get_page_cnt(&p_rx->rcq_chain);
@@ -1063,8 +1059,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr,
qed_chain_get_pbl_phys(&p_rx->rcq_chain));
- p_ramrod->drop_ttl0_flg = p_ll2_conn->rx_drop_ttl0_flg;
- p_ramrod->inner_vlan_removal_en = p_ll2_conn->rx_vlan_removal_en;
+ p_ramrod->drop_ttl0_flg = p_ll2_conn->conn.rx_drop_ttl0_flg;
+ p_ramrod->inner_vlan_removal_en = p_ll2_conn->conn.rx_vlan_removal_en;
p_ramrod->queue_id = p_ll2_conn->queue_id;
p_ramrod->main_func_queue = (conn_type == QED_LL2_TYPE_ISCSI_OOO) ? 0
: 1;
@@ -1079,14 +1075,14 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
}
p_ramrod->action_on_error.error_type = action_on_error;
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
struct qed_ll2_info *p_ll2_conn)
{
- enum qed_ll2_conn_type conn_type = p_ll2_conn->conn_type;
+ enum qed_ll2_conn_type conn_type = p_ll2_conn->conn.conn_type;
struct qed_ll2_tx_queue *p_tx = &p_ll2_conn->tx_queue;
struct core_tx_start_ramrod_data *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
@@ -1098,7 +1094,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
if (!QED_LL2_TX_REGISTERED(p_ll2_conn))
return 0;
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
p_ll2_conn->tx_stats_en = 0;
else
p_ll2_conn->tx_stats_en = 1;
@@ -1119,7 +1115,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_tx->tx_sb_index;
- p_ramrod->mtu = cpu_to_le16(p_ll2_conn->mtu);
+ p_ramrod->mtu = cpu_to_le16(p_ll2_conn->conn.mtu);
p_ramrod->stats_en = p_ll2_conn->tx_stats_en;
p_ramrod->stats_id = p_ll2_conn->tx_stats_id;
@@ -1129,7 +1125,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->pbl_size = cpu_to_le16(pbl_size);
memset(&pq_params, 0, sizeof(pq_params));
- pq_params.core.tc = p_ll2_conn->tx_tc;
+ pq_params.core.tc = p_ll2_conn->conn.tx_tc;
pq_id = qed_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params);
p_ramrod->qm_pq_id = cpu_to_le16(pq_id);
@@ -1146,7 +1142,7 @@ static int qed_sp_ll2_tx_queue_start(struct qed_hwfn *p_hwfn,
DP_NOTICE(p_hwfn, "Unknown connection type: %d\n", conn_type);
}
- p_ramrod->gsi_offload_flag = p_ll2_conn->gsi_enable;
+ p_ramrod->gsi_offload_flag = p_ll2_conn->conn.gsi_enable;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
@@ -1247,7 +1243,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Rxq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, rx_num_desc);
+ p_ll2_info->conn.conn_type, rx_num_desc);
out:
return rc;
@@ -1285,7 +1281,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Allocated LL2 Txq [Type %08x] with 0x%08x buffers\n",
- p_ll2_info->conn_type, tx_num_desc);
+ p_ll2_info->conn.conn_type, tx_num_desc);
out:
if (rc)
@@ -1296,7 +1292,7 @@ out:
}
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle)
@@ -1325,15 +1321,7 @@ int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
if (!p_ll2_info)
return -EBUSY;
- p_ll2_info->conn_type = p_params->conn_type;
- p_ll2_info->mtu = p_params->mtu;
- p_ll2_info->rx_drop_ttl0_flg = p_params->rx_drop_ttl0_flg;
- p_ll2_info->rx_vlan_removal_en = p_params->rx_vlan_removal_en;
- p_ll2_info->tx_tc = p_params->tx_tc;
- p_ll2_info->tx_dest = p_params->tx_dest;
- p_ll2_info->ai_err_packet_too_big = p_params->ai_err_packet_too_big;
- p_ll2_info->ai_err_no_buf = p_params->ai_err_no_buf;
- p_ll2_info->gsi_enable = p_params->gsi_enable;
+ p_ll2_info->conn = *p_params;
rc = qed_ll2_acquire_connection_rx(p_hwfn, p_ll2_info, rx_num_desc);
if (rc)
@@ -1394,9 +1382,9 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
SET_FIELD(action_on_error,
CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG,
- p_ll2_conn->ai_err_packet_too_big);
+ p_ll2_conn->conn.ai_err_packet_too_big);
SET_FIELD(action_on_error,
- CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->ai_err_no_buf);
+ CORE_RX_ACTION_ON_ERROR_NO_BUFF, p_ll2_conn->conn.ai_err_no_buf);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
}
@@ -1623,7 +1611,7 @@ static void qed_ll2_prepare_tx_packet_set_bd(struct qed_hwfn *p_hwfn,
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Tx Producer at [0x%04x] - set with a %04x bytes %02x BDs buffer at %08x:%08x\n",
p_ll2->queue_id,
p_ll2->cid,
- p_ll2->conn_type,
+ p_ll2->conn.conn_type,
prod_idx,
first_frag_len,
num_of_bds,
@@ -1699,7 +1687,7 @@ static void qed_ll2_tx_packet_notify(struct qed_hwfn *p_hwfn,
(NETIF_MSG_TX_QUEUED | QED_MSG_LL2),
"LL2 [q 0x%02x cid 0x%08x type 0x%08x] Doorbelled [producer 0x%04x]\n",
p_ll2_conn->queue_id,
- p_ll2_conn->cid, p_ll2_conn->conn_type, db_msg.spq_prod);
+ p_ll2_conn->cid, p_ll2_conn->conn.conn_type, db_msg.spq_prod);
}
int qed_ll2_prepare_tx_packet(struct qed_hwfn *p_hwfn,
@@ -1840,7 +1828,7 @@ int qed_ll2_terminate_connection(struct qed_hwfn *p_hwfn, u8 connection_handle)
qed_ll2_rxq_flush(p_hwfn, connection_handle);
}
- if (p_ll2_conn->conn_type == QED_LL2_TYPE_ISCSI_OOO)
+ if (p_ll2_conn->conn.conn_type == QED_LL2_TYPE_ISCSI_OOO)
qed_ooo_release_all_isles(p_hwfn, p_hwfn->p_ooo_info);
return rc;
@@ -2016,7 +2004,7 @@ static void qed_ll2_register_cb_ops(struct qed_dev *cdev,
static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
{
- struct qed_ll2_info ll2_info;
+ struct qed_ll2_conn ll2_info;
struct qed_ll2_buffer *buffer, *tmp_buffer;
enum qed_ll2_conn_type conn_type;
struct qed_ptt *p_ptt;
@@ -2064,6 +2052,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
/* Prepare the temporary ll2 information */
memset(&ll2_info, 0, sizeof(ll2_info));
+
ll2_info.conn_type = conn_type;
ll2_info.mtu = params->mtu;
ll2_info.rx_drop_ttl0_flg = params->drop_ttl0_packets;
@@ -2143,7 +2132,6 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
}
ether_addr_copy(cdev->ll2_mac_address, params->ll2_mac_address);
-
return 0;
release_terminate_all:
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.h b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
index c7f2975..db3e4fc 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.h
@@ -135,15 +135,8 @@ struct qed_ll2_tx_queue {
bool b_completing_packet;
};
-struct qed_ll2_info {
- /* Lock protecting the state of LL2 */
- struct mutex mutex;
+struct qed_ll2_conn {
enum qed_ll2_conn_type conn_type;
- u32 cid;
- u8 my_id;
- u8 queue_id;
- u8 tx_stats_id;
- bool b_active;
u16 mtu;
u8 rx_drop_ttl0_flg;
u8 rx_vlan_removal_en;
@@ -151,10 +144,21 @@ struct qed_ll2_info {
enum core_tx_dest tx_dest;
enum core_error_handle ai_err_packet_too_big;
enum core_error_handle ai_err_no_buf;
+ u8 gsi_enable;
+};
+
+struct qed_ll2_info {
+ /* Lock protecting the state of LL2 */
+ struct mutex mutex;
+ struct qed_ll2_conn conn;
+ u32 cid;
+ u8 my_id;
+ u8 queue_id;
+ u8 tx_stats_id;
+ bool b_active;
u8 tx_stats_en;
struct qed_ll2_rx_queue rx_queue;
struct qed_ll2_tx_queue tx_queue;
- u8 gsi_enable;
};
/**
@@ -172,7 +176,7 @@ struct qed_ll2_info {
* @return 0 on success, failure otherwise
*/
int qed_ll2_acquire_connection(struct qed_hwfn *p_hwfn,
- struct qed_ll2_info *p_params,
+ struct qed_ll2_conn *p_params,
u16 rx_num_desc,
u16 tx_num_desc,
u8 *p_connection_handle);
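The payoff of splitting qed_ll2_conn out of qed_ll2_info shows up back in qed_ll2_acquire_connection(): nine field-by-field copies collapse into one struct assignment. A minimal standalone sketch of the pattern — the types and fields below are abbreviated stand-ins, not the driver's real definitions:

struct conn_params {			/* caller-supplied configuration */
	int conn_type;
	unsigned short mtu;
	unsigned char gsi_enable;
};

struct conn_info {			/* driver-private state */
	struct conn_params conn;	/* embedded copy of the caller's params */
	unsigned int cid;		/* plus fields only the driver owns */
};

static void acquire(struct conn_info *info, const struct conn_params *params)
{
	info->conn = *params;		/* one assignment replaces nine copies */
}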
diff --git a/drivers/net/ethernet/qlogic/qed/qed_roce.c b/drivers/net/ethernet/qlogic/qed/qed_roce.c
index bd4cad2..c3c8c50 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_roce.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_roce.c
@@ -2632,7 +2632,7 @@ static int qed_roce_ll2_start(struct qed_dev *cdev,
{
struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
struct qed_roce_ll2_info *roce_ll2;
- struct qed_ll2_info ll2_params;
+ struct qed_ll2_conn ll2_params;
int rc;
if (!params) {
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index b121364..3f4bf31 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1224,7 +1224,7 @@ static void qed_iov_clean_vf(struct qed_hwfn *p_hwfn, u8 vfid)
return;
/* Clear the VF mac */
- memset(vf_info->mac, 0, ETH_ALEN);
+ eth_zero_addr(vf_info->mac);
vf_info->rx_accept_mode = 0;
vf_info->tx_accept_mode = 0;
@@ -2626,8 +2626,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++) {
if (ether_addr_equal(p_vf->shadow_config.macs[i],
p_params->mac)) {
- memset(p_vf->shadow_config.macs[i], 0,
- ETH_ALEN);
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
break;
}
}
@@ -2640,7 +2639,7 @@ static int qed_iov_vf_update_mac_shadow(struct qed_hwfn *p_hwfn,
} else if (p_params->opcode == QED_FILTER_REPLACE ||
p_params->opcode == QED_FILTER_FLUSH) {
for (i = 0; i < QED_ETH_VF_NUM_MAC_FILTERS; i++)
- memset(p_vf->shadow_config.macs[i], 0, ETH_ALEN);
+ eth_zero_addr(p_vf->shadow_config.macs[i]);
}
/* List the new MAC address */
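The three memset() conversions above rely on eth_zero_addr() from <linux/etherdevice.h> being a plain readability wrapper around the same operation; a sketch of the equivalence:

#include <string.h>

#define ETH_ALEN 6			/* as defined in <linux/if_ether.h> */

/* behaves like the kernel helper of the same name */
static inline void eth_zero_addr(unsigned char *addr)
{
	memset(addr, 0, ETH_ALEN);
}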
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a6ca48..26848ee 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -32,6 +32,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
+#include <linux/bpf_trace.h>
#include <net/udp_tunnel.h>
#include <linux/ip.h>
#include <net/ipv6.h>
@@ -1016,6 +1017,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
/* We need the replacement buffer before transmit. */
if (qede_alloc_rx_buffer(rxq, true)) {
qede_recycle_rx_bd_ring(rxq, 1);
+ trace_xdp_exception(edev->ndev, prog, act);
return false;
}
@@ -1026,6 +1028,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
dma_unmap_page(rxq->dev, bd->mapping,
PAGE_SIZE, DMA_BIDIRECTIONAL);
__free_page(bd->data);
+ trace_xdp_exception(edev->ndev, prog, act);
}
/* Regardless, we've consumed an Rx BD */
@@ -1035,6 +1038,7 @@ static bool qede_rx_xdp(struct qede_dev *edev,
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(edev->ndev, prog, act);
case XDP_DROP:
qede_recycle_rx_bd_ring(rxq, cqe->bd_num);
}
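All three additions follow one rule: every abnormal XDP outcome — a TX buffer-allocation failure, XDP_ABORTED, or an invalid verdict — now fires the tracepoint before the frame is dropped, so the drops become observable to perf/BPF tooling. A standalone sketch of that verdict handling; trace_exception(), alloc_rx_buffer() and recycle_rx_bd() are stubs standing in for the driver and tracing hooks:

#include <stdbool.h>

enum xdp_action { XDP_ABORTED, XDP_DROP, XDP_PASS, XDP_TX };

static void trace_exception(int act) { /* stands in for trace_xdp_exception() */ }
static int  alloc_rx_buffer(void)    { return 0; }
static void recycle_rx_bd(void)      { }

/* returns true when the frame was consumed successfully */
static bool handle_verdict(enum xdp_action act)
{
	switch (act) {
	case XDP_TX:
		if (alloc_rx_buffer()) {	/* no replacement buffer: drop */
			trace_exception(act);	/* make the drop observable */
			recycle_rx_bd();
			return false;
		}
		return true;			/* frame queued for TX */
	case XDP_PASS:
		return true;			/* hand the frame to the stack */
	default:				/* invalid verdict */
	case XDP_ABORTED:
		trace_exception(act);
		/* fall through */
	case XDP_DROP:
		recycle_rx_bd();
		return false;
	}
}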
@@ -1368,7 +1372,7 @@ int qede_poll(struct napi_struct *napi, int budget)
qede_rx_int(fp, budget) : 0;
if (rx_work_done < budget) {
if (!qede_poll_is_more_work(fp)) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_work_done);
/* Update and reenable interrupts */
qed_sb_ack(fp->sb_info, IGU_INT_ENABLE, 1);
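This is the first of many identical conversions in this section (qlcnic, qlge, r8169, rocker, sxgbe and the Qualcomm emac follow). napi_complete_done() differs from napi_complete() only in reporting how much work the poll actually did, which the core can use for interrupt-moderation decisions instead of throwing the count away. The recurring shape, as a sketch in which do_rx_work() and reenable_rx_interrupts() are hypothetical driver stubs:

/* generic poll-routine shape after the conversion */
static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = do_rx_work(budget);	/* driver-specific RX processing */

	if (work_done < budget) {
		/* was: napi_complete(napi); the count is now reported */
		napi_complete_done(napi, work_done);
		reenable_rx_interrupts();	/* driver-specific */
	}
	return work_done;
}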
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index fedd736..84dd830 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -975,7 +975,7 @@ static int qlcnic_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
qlcnic_enable_sds_intr(adapter, sds_ring);
qlcnic_enable_tx_intr(adapter, tx_ring);
@@ -1019,7 +1019,7 @@ static int qlcnic_rx_poll(struct napi_struct *napi, int budget)
work_done = qlcnic_process_rcv_ring(sds_ring, budget);
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1966,7 +1966,7 @@ static int qlcnic_83xx_msix_sriov_vf_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -1994,7 +1994,7 @@ static int qlcnic_83xx_poll(struct napi_struct *napi, int budget)
work_done = budget;
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
qlcnic_enable_sds_intr(adapter, sds_ring);
}
@@ -2032,7 +2032,7 @@ static int qlcnic_83xx_rx_poll(struct napi_struct *napi, int budget)
adapter = sds_ring->adapter;
work_done = qlcnic_83xx_process_rcv_ring(sds_ring, budget);
if (work_done < budget) {
- napi_complete(&sds_ring->napi);
+ napi_complete_done(&sds_ring->napi, work_done);
if (test_bit(__QLCNIC_DEV_UP, &adapter->state))
qlcnic_enable_sds_intr(adapter, sds_ring);
}
diff --git a/drivers/net/ethernet/qlogic/qlge/qlge_main.c b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
index 1409412..e9e6470 100644
--- a/drivers/net/ethernet/qlogic/qlge/qlge_main.c
+++ b/drivers/net/ethernet/qlogic/qlge/qlge_main.c
@@ -2334,7 +2334,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
ql_enable_completion_interrupt(qdev, rx_ring->irq);
}
return work_done;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.c b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
index 0b4deb3..b991219 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.c
@@ -103,14 +103,6 @@
#define RXEN 0x00000002
#define TXEN 0x00000001
-
-/* EMAC_WOL_CTRL0 */
-#define LK_CHG_PME 0x20
-#define LK_CHG_EN 0x10
-#define MG_FRAME_PME 0x8
-#define MG_FRAME_EN 0x4
-#define WK_FRAME_EN 0x1
-
/* EMAC_DESC_CTRL_3 */
#define RFD_RING_SIZE_BMSK 0xfff
@@ -314,8 +306,6 @@ struct emac_skb_cb {
RX_PKT_INT2 |\
RX_PKT_INT3)
-#define EMAC_MAC_IRQ_RES "core0"
-
void emac_mac_multicast_addr_set(struct emac_adapter *adpt, u8 *addr)
{
u32 crc32, bit, reg, mta;
@@ -558,7 +548,7 @@ void emac_mac_reset(struct emac_adapter *adpt)
emac_reg_update32(adpt->base + EMAC_DMA_MAS_CTRL, 0, INT_RD_CLR_EN);
}
-void emac_mac_start(struct emac_adapter *adpt)
+static void emac_mac_start(struct emac_adapter *adpt)
{
struct phy_device *phydev = adpt->phydev;
u32 mac, csr1;
@@ -621,8 +611,6 @@ void emac_mac_start(struct emac_adapter *adpt)
emac_reg_update32(adpt->base + EMAC_ATHR_HEADER_CTRL,
(HEADER_ENABLE | HEADER_CNT_EN), 0);
-
- emac_reg_update32(adpt->csr + EMAC_EMAC_WRAPPER_CSR2, 0, WOL_EN);
}
void emac_mac_stop(struct emac_adapter *adpt)
@@ -963,12 +951,16 @@ static void emac_mac_rx_descs_refill(struct emac_adapter *adpt,
static void emac_adjust_link(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_sgmii *sgmii = &adpt->phy;
struct phy_device *phydev = netdev->phydev;
- if (phydev->link)
+ if (phydev->link) {
emac_mac_start(adpt);
- else
+ sgmii->link_up(adpt);
+ } else {
+ sgmii->link_down(adpt);
emac_mac_stop(adpt);
+ }
phy_print_status(phydev);
}
@@ -977,40 +969,26 @@ static void emac_adjust_link(struct net_device *netdev)
int emac_mac_up(struct emac_adapter *adpt)
{
struct net_device *netdev = adpt->netdev;
- struct emac_irq *irq = &adpt->irq;
int ret;
emac_mac_rx_tx_ring_reset_all(adpt);
emac_mac_config(adpt);
-
- ret = request_irq(irq->irq, emac_isr, 0, EMAC_MAC_IRQ_RES, irq);
- if (ret) {
- netdev_err(adpt->netdev, "could not request %s irq\n",
- EMAC_MAC_IRQ_RES);
- return ret;
- }
-
emac_mac_rx_descs_refill(adpt, &adpt->rx_q);
+ adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
ret = phy_connect_direct(netdev, adpt->phydev, emac_adjust_link,
PHY_INTERFACE_MODE_SGMII);
if (ret) {
netdev_err(adpt->netdev, "could not connect phy\n");
- free_irq(irq->irq, irq);
return ret;
}
+ phy_attached_print(adpt->phydev, NULL);
+
/* enable mac irq */
writel((u32)~DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(adpt->irq.mask, adpt->base + EMAC_INT_MASK);
- /* Enable pause frames. Without this feature, the EMAC has been shown
- * to receive (and drop) frames with FCS errors at gigabit connections.
- */
- adpt->phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
- adpt->phydev->advertising |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
-
- adpt->phydev->irq = PHY_IGNORE_INTERRUPT;
phy_start(adpt->phydev);
napi_enable(&adpt->rx_q.napi);
@@ -1036,7 +1014,6 @@ void emac_mac_down(struct emac_adapter *adpt)
writel(DIS_INT, adpt->base + EMAC_INT_STATUS);
writel(0, adpt->base + EMAC_INT_MASK);
synchronize_irq(adpt->irq.irq);
- free_irq(adpt->irq.irq, &adpt->irq);
phy_disconnect(adpt->phydev);
@@ -1213,7 +1190,6 @@ void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
emac_receive_skb(rx_q, skb, (u16)RRD_CVALN_TAG(&rrd),
(bool)RRD_CVTAG(&rrd));
- netdev->last_rx = jiffies;
(*num_pkts)++;
} while (*num_pkts < max_pkts);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-mac.h b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
index f3aa24d..5028fb4 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-mac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-mac.h
@@ -230,7 +230,6 @@ struct emac_adapter;
int emac_mac_up(struct emac_adapter *adpt);
void emac_mac_down(struct emac_adapter *adpt);
void emac_mac_reset(struct emac_adapter *adpt);
-void emac_mac_start(struct emac_adapter *adpt);
void emac_mac_stop(struct emac_adapter *adpt);
void emac_mac_mode_config(struct emac_adapter *adpt);
void emac_mac_rx_process(struct emac_adapter *adpt, struct emac_rx_queue *rx_q,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.c b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
index 99a14df..441c1936 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.c
@@ -22,8 +22,6 @@
#include <linux/acpi.h>
#include "emac.h"
#include "emac-mac.h"
-#include "emac-phy.h"
-#include "emac-sgmii.h"
/* EMAC base register offsets */
#define EMAC_MDIO_CTRL 0x001414
@@ -201,6 +199,13 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
else
adpt->phydev = mdiobus_get_phy(mii_bus, phy_addr);
+ /* of_phy_find_device() claims a reference to the phydev,
+ * so we do that here manually as well. When the driver
+ * later unloads, it can unilaterally drop the reference
+ * without worrying about ACPI vs DT.
+ */
+ if (adpt->phydev)
+ get_device(&adpt->phydev->mdio.dev);
} else {
struct device_node *phy_np;
@@ -221,8 +226,5 @@ int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt)
return -ENODEV;
}
- if (adpt->phydev->drv)
- phy_attached_print(adpt->phydev, NULL);
-
return 0;
}
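The point of taking the reference manually on the ACPI path is symmetry: of_phy_find_device() on the DT path already takes one, so after this change both paths leave exactly one reference held and teardown can call put_device() unconditionally (see the emac.c hunks further down). A toy sketch of the invariant, with a plain counter standing in for the device refcount and the two lookup helpers reduced to stubs:

#include <assert.h>
#include <stdbool.h>

struct ref { int count; };		/* stand-in for the mdio device */

static void get(struct ref *r) { r->count++; }
static void put(struct ref *r) { assert(r->count-- > 0); }

static struct ref *find_phy_dt(struct ref *r)   { get(r); return r; }	/* of_phy_find_device() */
static struct ref *find_phy_acpi(struct ref *r) { return r; }		/* mdiobus_get_phy() */

static void probe(struct ref *phydev, bool acpi_path)
{
	if (acpi_path) {
		find_phy_acpi(phydev);
		get(phydev);	/* balance it manually, as in the hunk above */
	} else {
		find_phy_dt(phydev);
	}
}

static void teardown(struct ref *phydev)
{
	put(phydev);		/* single unconditional drop on both paths */
}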
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-phy.h b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
index 49f3701..c0c301c 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-phy.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-phy.h
@@ -13,19 +13,6 @@
#ifndef _EMAC_PHY_H_
#define _EMAC_PHY_H_
-typedef int (*emac_sgmii_initialize)(struct emac_adapter *adpt);
-
-/** emac_phy - internal emac phy
- * @base base address
- * @digital per-lane digital block
- * @initialize initialization function
- */
-struct emac_phy {
- void __iomem *base;
- void __iomem *digital;
- emac_sgmii_initialize initialize;
-};
-
struct emac_adapter;
int emac_phy_config(struct platform_device *pdev, struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
index af690e1..10de8d0 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-fsm9900.c
@@ -214,7 +214,7 @@ static const struct emac_reg_write tx_rx_setting[] = {
int emac_sgmii_init_fsm9900(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
unsigned int i;
emac_reg_write_all(phy->base, physical_coding_sublayer_programming,
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
index 5b84194..f62c215 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2400.c
@@ -174,7 +174,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
int emac_sgmii_init_qdf2400(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
void __iomem *phy_regs = phy->base;
void __iomem *laned = phy->digital;
unsigned int i;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
index 6170200..b9c0df7 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii-qdf2432.c
@@ -167,7 +167,7 @@ static const struct emac_reg_write physical_coding_sublayer_programming[] = {
int emac_sgmii_init_qdf2432(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
void __iomem *phy_regs = phy->base;
void __iomem *laned = phy->digital;
unsigned int i;
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
index bf722a9..040b289 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.c
@@ -25,7 +25,9 @@
#define EMAC_SGMII_PHY_SPEED_CFG1 0x0074
#define EMAC_SGMII_PHY_IRQ_CMD 0x00ac
#define EMAC_SGMII_PHY_INTERRUPT_CLEAR 0x00b0
+#define EMAC_SGMII_PHY_INTERRUPT_MASK 0x00b4
#define EMAC_SGMII_PHY_INTERRUPT_STATUS 0x00b8
+#define EMAC_SGMII_PHY_RX_CHK_STATUS 0x00d4
#define FORCE_AN_TX_CFG BIT(5)
#define FORCE_AN_RX_CFG BIT(4)
@@ -36,6 +38,8 @@
#define SPDMODE_100 BIT(0)
#define SPDMODE_10 0
+#define CDR_ALIGN_DET BIT(6)
+
#define IRQ_GLOBAL_CLEAR BIT(0)
#define DECODE_CODE_ERR BIT(7)
@@ -44,52 +48,28 @@
#define SGMII_PHY_IRQ_CLR_WAIT_TIME 10
#define SGMII_PHY_INTERRUPT_ERR (DECODE_CODE_ERR | DECODE_DISP_ERR)
+#define SGMII_ISR_MASK (SGMII_PHY_INTERRUPT_ERR)
#define SERDES_START_WAIT_TIMES 100
-static int emac_sgmii_link_init(struct emac_adapter *adpt)
+/* Initialize the SGMII link between the internal and external PHYs. */
+static void emac_sgmii_link_init(struct emac_adapter *adpt)
{
- struct phy_device *phydev = adpt->phydev;
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 val;
+ /* Always use autonegotiation. It works no matter how the external
+ * PHY is configured.
+ */
val = readl(phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
-
- if (phydev->autoneg == AUTONEG_ENABLE) {
- val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
- val |= AN_ENABLE;
- writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
- } else {
- u32 speed_cfg;
-
- switch (phydev->speed) {
- case SPEED_10:
- speed_cfg = SPDMODE_10;
- break;
- case SPEED_100:
- speed_cfg = SPDMODE_100;
- break;
- case SPEED_1000:
- speed_cfg = SPDMODE_1000;
- break;
- default:
- return -EINVAL;
- }
-
- if (phydev->duplex == DUPLEX_FULL)
- speed_cfg |= DUPLEX_MODE;
-
- val &= ~AN_ENABLE;
- writel(speed_cfg, phy->base + EMAC_SGMII_PHY_SPEED_CFG1);
- writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
- }
-
- return 0;
+ val &= ~(FORCE_AN_RX_CFG | FORCE_AN_TX_CFG);
+ val |= AN_ENABLE;
+ writel(val, phy->base + EMAC_SGMII_PHY_AUTONEG_CFG2);
}
static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 status;
writel_relaxed(irq_bits, phy->base + EMAC_SGMII_PHY_INTERRUPT_CLEAR);
@@ -121,9 +101,54 @@ static int emac_sgmii_irq_clear(struct emac_adapter *adpt, u32 irq_bits)
return 0;
}
+/* The number of decode errors that triggers a reset */
+#define DECODE_ERROR_LIMIT 2
+
+static irqreturn_t emac_sgmii_interrupt(int irq, void *data)
+{
+ struct emac_adapter *adpt = data;
+ struct emac_sgmii *phy = &adpt->phy;
+ u32 status;
+
+ status = readl(phy->base + EMAC_SGMII_PHY_INTERRUPT_STATUS);
+ status &= SGMII_ISR_MASK;
+ if (!status)
+ return IRQ_HANDLED;
+
+ /* If we get a decoding error and CDR is not locked, then try
+ * resetting the internal PHY. The internal PHY uses an embedded
+ * clock with Clock and Data Recovery (CDR) to recover the
+ * clock and data.
+ */
+ if (status & SGMII_PHY_INTERRUPT_ERR) {
+ int count;
+
+ /* The SGMII is capable of recovering from some decode
+ * errors automatically. However, if we get multiple
+ * decode errors in a row, then assume that something
+ * is wrong and reset the interface.
+ */
+ count = atomic_inc_return(&phy->decode_error_count);
+ if (count == DECODE_ERROR_LIMIT) {
+ schedule_work(&adpt->work_thread);
+ atomic_set(&phy->decode_error_count, 0);
+ }
+ } else {
+ /* We only care about consecutive decode errors. */
+ atomic_set(&phy->decode_error_count, 0);
+ }
+
+ if (emac_sgmii_irq_clear(adpt, status)) {
+ netdev_warn(adpt->netdev, "failed to clear SGMII interrupt\n");
+ schedule_work(&adpt->work_thread);
+ }
+
+ return IRQ_HANDLED;
+}
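The counter works because only consecutive errors matter: any clean interrupt resets the streak, and the atomic keeps the hot path lock-free. The same scheme in a self-contained C11 sketch — the threshold and names are illustrative, not the driver's:

#include <stdatomic.h>
#include <stdbool.h>

#define ERROR_LIMIT 2		/* consecutive errors before scheduling a reset */

static atomic_int error_streak;

/* returns true when the caller should schedule recovery work */
static bool note_interrupt(bool decode_error)
{
	if (!decode_error) {
		atomic_store(&error_streak, 0);	/* clean event breaks the streak */
		return false;
	}
	/* atomic_fetch_add returns the old value; +1 mirrors atomic_inc_return() */
	if (atomic_fetch_add(&error_streak, 1) + 1 == ERROR_LIMIT) {
		atomic_store(&error_streak, 0);
		return true;
	}
	return false;
}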
+
static void emac_sgmii_reset_prepare(struct emac_adapter *adpt)
{
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
u32 val;
/* Reset PHY */
@@ -145,12 +170,7 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
int ret;
emac_sgmii_reset_prepare(adpt);
-
- ret = emac_sgmii_link_init(adpt);
- if (ret) {
- netdev_err(adpt->netdev, "unsupported link speed\n");
- return;
- }
+ emac_sgmii_link_init(adpt);
ret = adpt->phy.initialize(adpt);
if (ret)
@@ -159,6 +179,68 @@ void emac_sgmii_reset(struct emac_adapter *adpt)
ret);
}
+static int emac_sgmii_open(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+ int ret;
+
+ if (sgmii->irq) {
+ /* Make sure interrupts are cleared and disabled first */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ ret = request_irq(sgmii->irq, emac_sgmii_interrupt, 0,
+ "emac-sgmii", adpt);
+ if (ret) {
+ netdev_err(adpt->netdev,
+ "could not register handler for internal PHY\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int emac_sgmii_close(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Make sure interrupts are disabled */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ free_irq(sgmii->irq, adpt);
+
+ return 0;
+}
+
+/* The error interrupts are only valid after the link is up */
+static int emac_sgmii_link_up(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+ int ret;
+
+ /* Clear and enable interrupts */
+ ret = emac_sgmii_irq_clear(adpt, 0xff);
+ if (ret)
+ return ret;
+
+ writel(SGMII_ISR_MASK, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+
+ return 0;
+}
+
+static int emac_sgmii_link_down(struct emac_adapter *adpt)
+{
+ struct emac_sgmii *sgmii = &adpt->phy;
+
+ /* Disable interrupts */
+ writel(0, sgmii->base + EMAC_SGMII_PHY_INTERRUPT_MASK);
+ synchronize_irq(sgmii->irq);
+
+ return 0;
+}
+
static int emac_sgmii_acpi_match(struct device *dev, void *data)
{
#ifdef CONFIG_ACPI
@@ -169,7 +251,7 @@ static int emac_sgmii_acpi_match(struct device *dev, void *data)
{}
};
const struct acpi_device_id *id = acpi_match_device(match_table, dev);
- emac_sgmii_initialize *initialize = data;
+ emac_sgmii_function *initialize = data;
if (id) {
acpi_handle handle = ACPI_HANDLE(dev);
@@ -217,7 +299,7 @@ static const struct of_device_id emac_sgmii_dt_match[] = {
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
{
struct platform_device *sgmii_pdev = NULL;
- struct emac_phy *phy = &adpt->phy;
+ struct emac_sgmii *phy = &adpt->phy;
struct resource *res;
int ret;
@@ -256,9 +338,14 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
goto error_put_device;
}
- phy->initialize = (emac_sgmii_initialize)match->data;
+ phy->initialize = (emac_sgmii_function)match->data;
}
+ phy->open = emac_sgmii_open;
+ phy->close = emac_sgmii_close;
+ phy->link_up = emac_sgmii_link_up;
+ phy->link_down = emac_sgmii_link_down;
+
/* Base address is the first address */
res = platform_get_resource(sgmii_pdev, IORESOURCE_MEM, 0);
if (!res) {
@@ -286,7 +373,11 @@ int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt)
if (ret)
goto error;
- emac_sgmii_irq_clear(adpt, SGMII_PHY_INTERRUPT_ERR);
+ emac_sgmii_link_init(adpt);
+
+ ret = platform_get_irq(sgmii_pdev, 0);
+ if (ret > 0)
+ phy->irq = ret;
/* We've remapped the addresses, so we don't need the device any
* more. of_find_device_by_node() says we should release it.
diff --git a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
index 80ed3dc..e7c0c3b 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac-sgmii.h
@@ -16,6 +16,31 @@
struct emac_adapter;
struct platform_device;
+typedef int (*emac_sgmii_function)(struct emac_adapter *adpt);
+
+/** struct emac_sgmii - internal emac phy
+ * @base: base address
+ * @digital: per-lane digital block
+ * @irq: the interrupt number
+ * @decode_error_count: running count of consecutive decode errors
+ * @initialize: initialization function
+ * @open: called when the driver is opened
+ * @close: called when the driver is closed
+ * @link_up: called when the link comes up
+ * @link_down: called when the link comes down
+ */
+struct emac_sgmii {
+ void __iomem *base;
+ void __iomem *digital;
+ unsigned int irq;
+ atomic_t decode_error_count;
+ emac_sgmii_function initialize;
+ emac_sgmii_function open;
+ emac_sgmii_function close;
+ emac_sgmii_function link_up;
+ emac_sgmii_function link_down;
+};
+
int emac_sgmii_config(struct platform_device *pdev, struct emac_adapter *adpt);
void emac_sgmii_reset(struct emac_adapter *adpt);
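Because all five hooks share the emac_sgmii_function signature, the struct doubles as a small ops table: the per-SoC match tables select initialize, while open/close/link_up/link_down are installed once in emac_sgmii_config(). A sketch of the uniform-dispatch idea with stand-in names:

struct ctx;				/* opaque adapter context */

typedef int (*hook_fn)(struct ctx *);

struct sgmii_ops {
	hook_fn initialize;		/* chosen per SoC from a match table */
	hook_fn open, close;		/* installed unconditionally */
	hook_fn link_up, link_down;
};

static int call_hook(hook_fn fn, struct ctx *c)
{
	return fn ? fn(c) : 0;		/* tolerate hooks that were never set */
}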
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.c b/drivers/net/ethernet/qualcomm/emac/emac.c
index 6ffe192..3387c0a 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.c
+++ b/drivers/net/ethernet/qualcomm/emac/emac.c
@@ -129,7 +129,7 @@ static int emac_napi_rtx(struct napi_struct *napi, int budget)
emac_mac_rx_process(adpt, rx_q, &work_done, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
irq->mask |= rx_q->intr;
writel(irq->mask, adpt->base + EMAC_INT_MASK);
@@ -256,22 +256,37 @@ static int emac_change_mtu(struct net_device *netdev, int new_mtu)
static int emac_open(struct net_device *netdev)
{
struct emac_adapter *adpt = netdev_priv(netdev);
+ struct emac_irq *irq = &adpt->irq;
int ret;
+ ret = request_irq(irq->irq, emac_isr, 0, "emac-core0", irq);
+ if (ret) {
+ netdev_err(adpt->netdev, "could not request emac-core0 irq\n");
+ return ret;
+ }
+
/* allocate rx/tx dma buffer & descriptors */
ret = emac_mac_rx_tx_rings_alloc_all(adpt);
if (ret) {
netdev_err(adpt->netdev, "error allocating rx/tx rings\n");
+ free_irq(irq->irq, irq);
return ret;
}
ret = emac_mac_up(adpt);
if (ret) {
emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(irq->irq, irq);
return ret;
}
- emac_mac_start(adpt);
+ ret = adpt->phy.open(adpt);
+ if (ret) {
+ emac_mac_down(adpt);
+ emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(irq->irq, irq);
+ return ret;
+ }
return 0;
}
@@ -283,9 +298,12 @@ static int emac_close(struct net_device *netdev)
mutex_lock(&adpt->reset_lock);
+ adpt->phy.close(adpt);
emac_mac_down(adpt);
emac_mac_rx_tx_rings_free_all(adpt);
+ free_irq(adpt->irq.irq, &adpt->irq);
+
mutex_unlock(&adpt->reset_lock);
return 0;
@@ -602,7 +620,7 @@ static int emac_probe(struct platform_device *pdev)
{
struct net_device *netdev;
struct emac_adapter *adpt;
- struct emac_phy *phy;
+ struct emac_sgmii *phy;
u16 devid, revid;
u32 reg;
int ret;
@@ -636,6 +654,7 @@ static int emac_probe(struct platform_device *pdev)
adpt->msg_enable = EMAC_MSG_DEFAULT;
phy = &adpt->phy;
+ atomic_set(&phy->decode_error_count, 0);
mutex_init(&adpt->reset_lock);
spin_lock_init(&adpt->stats.lock);
@@ -729,8 +748,7 @@ static int emac_probe(struct platform_device *pdev)
err_undo_napi:
netif_napi_del(&adpt->rx_q.napi);
err_undo_mdiobus:
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
err_undo_clocks:
emac_clks_teardown(adpt);
@@ -750,8 +768,7 @@ static int emac_remove(struct platform_device *pdev)
emac_clks_teardown(adpt);
- if (!has_acpi_companion(&pdev->dev))
- put_device(&adpt->phydev->mdio.dev);
+ put_device(&adpt->phydev->mdio.dev);
mdiobus_unregister(adpt->mii_bus);
free_netdev(netdev);
diff --git a/drivers/net/ethernet/qualcomm/emac/emac.h b/drivers/net/ethernet/qualcomm/emac/emac.h
index 4b8483c..ef91dcc 100644
--- a/drivers/net/ethernet/qualcomm/emac/emac.h
+++ b/drivers/net/ethernet/qualcomm/emac/emac.h
@@ -19,6 +19,7 @@
#include <linux/platform_device.h>
#include "emac-mac.h"
#include "emac-phy.h"
+#include "emac-sgmii.h"
/* EMAC base register offsets */
#define EMAC_DMA_MAS_CTRL 0x001400
@@ -166,10 +167,6 @@ enum emac_clk_id {
#define EMAC_MAX_SETUP_LNK_CYCLE 100
-/* Wake On Lan */
-#define EMAC_WOL_PHY 0x00000001 /* PHY Status Change */
-#define EMAC_WOL_MAGIC 0x00000002 /* Magic Packet */
-
struct emac_stats {
/* rx */
u64 rx_ok; /* good packets */
@@ -291,7 +288,7 @@ struct emac_adapter {
void __iomem *base;
void __iomem *csr;
- struct emac_phy phy;
+ struct emac_sgmii phy;
struct emac_stats stats;
struct emac_irq irq;
@@ -330,7 +327,6 @@ struct emac_adapter {
int emac_reinit_locked(struct emac_adapter *adpt);
void emac_reg_update32(void __iomem *addr, u32 mask, u32 val);
-irqreturn_t emac_isr(int irq, void *data);
void emac_set_ethtool_ops(struct net_device *netdev);
void emac_update_hw_stats(struct emac_adapter *adpt);
diff --git a/drivers/net/ethernet/realtek/atp.c b/drivers/net/ethernet/realtek/atp.c
index 570ed3b..9bcd4ae 100644
--- a/drivers/net/ethernet/realtek/atp.c
+++ b/drivers/net/ethernet/realtek/atp.c
@@ -170,7 +170,7 @@ struct net_local {
spinlock_t lock;
struct net_device *next_module;
struct timer_list timer; /* Media selection timer. */
- long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
+ unsigned long last_rx_time; /* Last Rx, in jiffies, to handle Rx hang. */
int saved_tx_size;
unsigned int tx_unit_busy:1;
unsigned char re_tx, /* Number of packet retransmissions. */
@@ -668,11 +668,11 @@ static irqreturn_t atp_interrupt(int irq, void *dev_instance)
}
num_tx_since_rx++;
} else if (num_tx_since_rx > 8 &&
- time_after(jiffies, dev->last_rx + HZ)) {
+ time_after(jiffies, lp->last_rx_time + HZ)) {
if (net_debug > 2)
printk(KERN_DEBUG "%s: Missed packet? No Rx after %d Tx and "
"%ld jiffies status %02x CMR1 %02x.\n", dev->name,
- num_tx_since_rx, jiffies - dev->last_rx, status,
+ num_tx_since_rx, jiffies - lp->last_rx_time, status,
(read_nibble(ioaddr, CMR1) >> 3) & 15);
dev->stats.rx_missed_errors++;
hardware_init(dev);
@@ -789,7 +789,6 @@ static void net_rx(struct net_device *dev)
read_block(ioaddr, pkt_len, skb_put(skb,pkt_len), dev->if_port);
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += pkt_len;
}
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 6073f46..81f18a8 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7583,7 +7583,7 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
rtl_irq_enable(tp, enable_mask);
mmiowb();
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index f110966..0525bd6 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -76,6 +76,7 @@ enum ravb_reg {
CDAR20 = 0x0060,
CDAR21 = 0x0064,
ESR = 0x0088,
+ APSR = 0x008C, /* R-Car Gen3 only */
RCR = 0x0090,
RQC0 = 0x0094,
RQC1 = 0x0098,
@@ -248,6 +249,15 @@ enum ESR_BIT {
ESR_EIL = 0x00001000,
};
+/* APSR */
+enum APSR_BIT {
+ APSR_MEMS = 0x00000002,
+ APSR_CMSW = 0x00000010,
+ APSR_DM = 0x00006000, /* Undocumented? */
+ APSR_DM_RDM = 0x00002000,
+ APSR_DM_TDM = 0x00004000,
+};
+
/* RCR */
enum RCR_BIT {
RCR_EFFS = 0x00000001,
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 92d7692..8cfc4a5 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -31,6 +31,7 @@
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
+#include <linux/sys_soc.h>
#include <asm/div64.h>
@@ -179,6 +180,49 @@ static struct mdiobb_ops bb_ops = {
.get_mdio_data = ravb_get_mdio_data,
};
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ struct net_device_stats *stats = &priv->stats[q];
+ struct ravb_tx_desc *desc;
+ int free_num = 0;
+ int entry;
+ u32 size;
+
+ for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+ bool txed;
+
+ entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+ NUM_TX_DESC);
+ desc = &priv->tx_ring[q][entry];
+ txed = desc->die_dt == DT_FEMPTY;
+ if (free_txed_only && !txed)
+ break;
+ /* Descriptor type must be checked before all other reads */
+ dma_rmb();
+ size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+ /* Free the original skb. */
+ if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+ dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+ size, DMA_TO_DEVICE);
+ /* Last packet descriptor? */
+ if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+ entry /= NUM_TX_DESC;
+ dev_kfree_skb_any(priv->tx_skb[q][entry]);
+ priv->tx_skb[q][entry] = NULL;
+ if (txed)
+ stats->tx_packets++;
+ }
+ free_num++;
+ }
+ if (txed)
+ stats->tx_bytes += size;
+ desc->die_dt = DT_EEMPTY;
+ }
+ return free_num;
+}
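The new free_txed_only flag lets the same walk serve two callers with different needs, and only descriptors that actually completed (DT_FEMPTY) count toward the statistics either way. A reduced sketch of the loop's contract, with illustrative types:

#include <stdbool.h>

enum { DT_FEMPTY, DT_FSTART };		/* illustrative descriptor states */
struct desc { int die_dt; };

/* poll path calls with free_txed_only=true (stop at the first in-flight
 * descriptor); ring teardown calls with false (reclaim everything)
 */
static int tx_free(struct desc *ring, int n, bool free_txed_only,
		   long *tx_packets)
{
	int freed = 0;

	for (int i = 0; i < n; i++) {
		bool txed = ring[i].die_dt == DT_FEMPTY;

		if (free_txed_only && !txed)
			break;
		if (txed)
			(*tx_packets)++;	/* count only real completions */
		freed++;
	}
	return freed;
}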
+
/* Free skb's and DMA buffers for Ethernet AVB */
static void ravb_ring_free(struct net_device *ndev, int q)
{
@@ -194,19 +238,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
kfree(priv->rx_skb[q]);
priv->rx_skb[q] = NULL;
- /* Free TX skb ringbuffer */
- if (priv->tx_skb[q]) {
- for (i = 0; i < priv->num_tx_ring[q]; i++)
- dev_kfree_skb(priv->tx_skb[q][i]);
- }
- kfree(priv->tx_skb[q]);
- priv->tx_skb[q] = NULL;
-
/* Free aligned TX buffers */
kfree(priv->tx_align[q]);
priv->tx_align[q] = NULL;
if (priv->rx_ring[q]) {
+ for (i = 0; i < priv->num_rx_ring[q]; i++) {
+ struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+ if (!dma_mapping_error(ndev->dev.parent,
+ le32_to_cpu(desc->dptr)))
+ dma_unmap_single(ndev->dev.parent,
+ le32_to_cpu(desc->dptr),
+ PKT_BUF_SZ,
+ DMA_FROM_DEVICE);
+ }
ring_size = sizeof(struct ravb_ex_rx_desc) *
(priv->num_rx_ring[q] + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +261,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
}
if (priv->tx_ring[q]) {
+ ravb_tx_free(ndev, q, false);
+
ring_size = sizeof(struct ravb_tx_desc) *
(priv->num_tx_ring[q] * NUM_TX_DESC + 1);
dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
priv->tx_desc_dma[q]);
priv->tx_ring[q] = NULL;
}
+
+ /* Free TX skb ringbuffer.
+ * SKBs are freed by the ravb_tx_free() call above.
+ */
+ kfree(priv->tx_skb[q]);
+ priv->tx_skb[q] = NULL;
}
/* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +485,6 @@ static int ravb_dmac_init(struct net_device *ndev)
return 0;
}
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
- struct ravb_private *priv = netdev_priv(ndev);
- struct net_device_stats *stats = &priv->stats[q];
- struct ravb_tx_desc *desc;
- int free_num = 0;
- int entry;
- u32 size;
-
- for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
- entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
- NUM_TX_DESC);
- desc = &priv->tx_ring[q][entry];
- if (desc->die_dt != DT_FEMPTY)
- break;
- /* Descriptor type must be checked before all other reads */
- dma_rmb();
- size = le16_to_cpu(desc->ds_tagl) & TX_DS;
- /* Free the original skb. */
- if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
- dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
- size, DMA_TO_DEVICE);
- /* Last packet descriptor? */
- if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
- entry /= NUM_TX_DESC;
- dev_kfree_skb_any(priv->tx_skb[q][entry]);
- priv->tx_skb[q][entry] = NULL;
- stats->tx_packets++;
- }
- free_num++;
- }
- stats->tx_bytes += size;
- desc->die_dt = DT_EEMPTY;
- }
- return free_num;
-}
-
static void ravb_get_tx_tstamp(struct net_device *ndev)
{
struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +918,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
ravb_write(ndev, ~mask, TIS);
- ravb_tx_free(ndev, q);
+ ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
spin_unlock_irqrestore(&priv->lock, flags);
@@ -926,14 +942,10 @@ static int ravb_poll(struct napi_struct *napi, int budget)
/* Receive error message handling */
priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors;
priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors;
- if (priv->rx_over_errors != ndev->stats.rx_over_errors) {
+ if (priv->rx_over_errors != ndev->stats.rx_over_errors)
ndev->stats.rx_over_errors = priv->rx_over_errors;
- netif_err(priv, rx_err, ndev, "Receive Descriptor Empty\n");
- }
- if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) {
+ if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors)
ndev->stats.rx_fifo_errors = priv->rx_fifo_errors;
- netif_err(priv, rx_err, ndev, "Receive FIFO Overflow\n");
- }
out:
return budget - quota;
}
@@ -977,6 +989,11 @@ static void ravb_adjust_link(struct net_device *ndev)
phy_print_status(phydev);
}
+static const struct soc_device_attribute r8a7795es10[] = {
+ { .soc_id = "r8a7795", .revision = "ES1.0", },
+ { /* sentinel */ }
+};
+
/* PHY init function */
static int ravb_phy_init(struct net_device *ndev)
{
@@ -1012,10 +1029,10 @@ static int ravb_phy_init(struct net_device *ndev)
goto err_deregister_fixed_link;
}
- /* This driver only support 10/100Mbit speeds on Gen3
+ /* This driver only supports 10/100Mbit speeds on R-Car H3 ES1.0
* at this time.
*/
- if (priv->chip_id == RCAR_GEN3) {
+ if (soc_device_match(r8a7795es10)) {
err = phy_set_max_speed(phydev, SPEED_100);
if (err) {
netdev_err(ndev, "failed to limit PHY to 100Mbit/s\n");
@@ -1508,6 +1525,19 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) +
entry / NUM_TX_DESC * DPTR_ALIGN;
len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data;
+ /* Zero length DMA descriptors are problematic as they seem to
+ * terminate DMA transfers. Avoid them by simply using a length of
+ * DPTR_ALIGN (4) when skb data is aligned to DPTR_ALIGN.
+ *
+ * As skb is guaranteed to have at least ETH_ZLEN (60) bytes of
+ * data by the call to skb_put_padto() above this is safe with
+ * respect to both the length of the first DMA descriptor (len)
+ * overflowing the available data and the length of the second DMA
+ * descriptor (skb->len - len) being negative.
+ */
+ if (len == 0)
+ len = DPTR_ALIGN;
+
memcpy(buffer, skb->data, len);
dma_addr = dma_map_single(ndev->dev.parent, buffer, len, DMA_TO_DEVICE);
if (dma_mapping_error(ndev->dev.parent, dma_addr))
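The first descriptor's length is the distance from skb->data up to the next DPTR_ALIGN boundary, so it is exactly zero whenever the data happens to be aligned already — the case the new check papers over. A small compilable sketch of the computation:

#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN 4

/* bytes consumed by the first (alignment) descriptor */
static size_t head_len(const void *data)
{
	uintptr_t p = (uintptr_t)data;
	size_t len = ((p + DPTR_ALIGN - 1) & ~(uintptr_t)(DPTR_ALIGN - 1)) - p;

	/* a zero-length descriptor would terminate the DMA transfer,
	 * so pad it to one alignment unit instead
	 */
	return len ? len : DPTR_ALIGN;
}

int main(void)
{
	printf("%zu\n", head_len((void *)0x1000));	/* aligned: 4, not 0 */
	printf("%zu\n", head_len((void *)0x1001));	/* 3 */
	return 0;
}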
@@ -1558,7 +1588,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
priv->cur_tx[q] += NUM_TX_DESC;
if (priv->cur_tx[q] - priv->dirty_tx[q] >
- (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+ (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+ !ravb_tx_free(ndev, q, true))
netif_stop_subqueue(ndev, q);
exit:
@@ -1895,6 +1926,23 @@ static void ravb_set_config_mode(struct net_device *ndev)
}
}
+/* Set tx and rx clock internal delay modes */
+static void ravb_set_delay_mode(struct net_device *ndev)
+{
+ struct ravb_private *priv = netdev_priv(ndev);
+ int set = 0;
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID)
+ set |= APSR_DM_RDM;
+
+ if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+ set |= APSR_DM_TDM;
+
+ ravb_modify(ndev, APSR, APSR_DM, set);
+}
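The two APSR bits map one-to-one onto the RGMII internal-delay phy-modes: *-rxid delays the RX clock, *-txid the TX clock, and *-id both; plain rgmii sets neither because the delays are then the external PHY's job. The same truth table as a compilable sketch:

#define APSR_DM_RDM 0x00002000
#define APSR_DM_TDM 0x00004000

enum phy_mode { RGMII, RGMII_ID, RGMII_RXID, RGMII_TXID };

/* mirrors the ravb_set_delay_mode() logic above */
static unsigned int delay_bits(enum phy_mode mode)
{
	unsigned int set = 0;

	if (mode == RGMII_ID || mode == RGMII_RXID)
		set |= APSR_DM_RDM;	/* RX clock internally delayed */
	if (mode == RGMII_ID || mode == RGMII_TXID)
		set |= APSR_DM_TDM;	/* TX clock internally delayed */
	return set;
}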
+
static int ravb_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -2007,6 +2055,9 @@ static int ravb_probe(struct platform_device *pdev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ if (priv->chip_id != RCAR_GEN2)
+ ravb_set_delay_mode(ndev);
+
/* Allocate descriptor base address table */
priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM;
priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size,
@@ -2143,6 +2194,9 @@ static int __maybe_unused ravb_resume(struct device *dev)
/* Request GTI loading */
ravb_modify(ndev, GCCR, GCCR_LTI, GCCR_LTI);
+ if (priv->chip_id != RCAR_GEN2)
+ ravb_set_delay_mode(ndev);
+
/* Restore descriptor base address table */
ravb_write(ndev, priv->desc_bat_dma, DBAT);
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 45a7a6b..2f08d27 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -518,7 +518,14 @@ static struct sh_eth_cpu_data r7s72100_data = {
.ecsr_value = ECSR_ICD,
.ecsipr_value = ECSIPR_ICDIP,
- .eesipr_value = 0xe77f009f,
+ .eesipr_value = EESIPR_TWB1IP | EESIPR_TWBIP | EESIPR_TC1IP |
+ EESIPR_TABTIP | EESIPR_RABTIP | EESIPR_RFCOFIP |
+ EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -556,7 +563,14 @@ static struct sh_eth_cpu_data r8a7740_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -603,7 +617,12 @@ static struct sh_eth_cpu_data r8a777x_data = {
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
- .eesipr_value = 0x01ff009f,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -626,7 +645,12 @@ static struct sh_eth_cpu_data r8a779x_data = {
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP |
ECSIPR_MPDIP,
- .eesipr_value = 0x01ff009f,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -667,7 +691,12 @@ static struct sh_eth_cpu_data sh7724_data = {
.ecsr_value = ECSR_PSRTO | ECSR_LCHNG | ECSR_ICD,
.ecsipr_value = ECSIPR_PSRTOIP | ECSIPR_LCHNGIP | ECSIPR_ICDIP,
- .eesipr_value = 0x01ff009f,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_RMAFIP | EESIPR_RRFIP |
+ EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -702,7 +731,14 @@ static struct sh_eth_cpu_data sh7757_data = {
.register_type = SH_ETH_REG_FAST_SH4,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_FTC | EESR_CND | EESR_DLC | EESR_CD | EESR_RTO,
.eesr_err_check = EESR_TWB | EESR_TABT | EESR_RABT | EESR_RFE |
@@ -769,7 +805,14 @@ static struct sh_eth_cpu_data sh7757_data_giga = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -800,7 +843,13 @@ static struct sh_eth_cpu_data sh7734_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+ EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -830,7 +879,13 @@ static struct sh_eth_cpu_data sh7763_data = {
.ecsr_value = ECSR_ICD | ECSR_MPD,
.ecsipr_value = ECSIPR_LCHNGIP | ECSIPR_ICDIP | ECSIPR_MPDIP,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003f07ff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ EESIPR_DLCIP | EESIPR_CDIP | EESIPR_TROIP |
+ EESIPR_RMAFIP | EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tx_check = EESR_TC1 | EESR_FTC,
.eesr_err_check = EESR_TWB1 | EESR_TWB | EESR_TABT | EESR_RABT |
@@ -851,7 +906,14 @@ static struct sh_eth_cpu_data sh7763_data = {
static struct sh_eth_cpu_data sh7619_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.apr = 1,
.mpr = 1,
@@ -862,7 +924,14 @@ static struct sh_eth_cpu_data sh7619_data = {
static struct sh_eth_cpu_data sh771x_data = {
.register_type = SH_ETH_REG_FAST_SH3_SH2,
- .eesipr_value = DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff,
+ .eesipr_value = EESIPR_RFCOFIP | EESIPR_ECIIP |
+ EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
+ EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
+ 0x0000f000 | EESIPR_CNDIP | EESIPR_DLCIP |
+ EESIPR_CDIP | EESIPR_TROIP | EESIPR_RMAFIP |
+ EESIPR_CEEFIP | EESIPR_CELFIP |
+ EESIPR_RRFIP | EESIPR_RTLFIP | EESIPR_RTSFIP |
+ EESIPR_PREIP | EESIPR_CERFIP,
.tsu = 1,
};
@@ -1547,10 +1616,10 @@ static void sh_eth_emac_interrupt(struct net_device *ndev)
sh_eth_rcv_snd_disable(ndev);
} else {
/* Link Up */
- sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, 0);
+ sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, 0);
/* clear int */
sh_eth_modify(ndev, ECSR, 0, 0);
- sh_eth_modify(ndev, EESIPR, DMAC_M_ECI, DMAC_M_ECI);
+ sh_eth_modify(ndev, EESIPR, EESIPR_ECIIP, EESIPR_ECIIP);
/* enable tx and rx */
sh_eth_rcv_snd_enable(ndev);
}
@@ -1652,7 +1721,7 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
* bit...
*/
intr_enable = sh_eth_read(ndev, EESIPR);
- intr_status &= intr_enable | DMAC_M_ECI;
+ intr_status &= intr_enable | EESIPR_ECIIP;
if (intr_status & (EESR_RX_CHECK | cd->tx_check | EESR_ECI |
cd->eesr_err_check))
ret = IRQ_HANDLED;
@@ -3199,7 +3268,7 @@ static int sh_eth_wol_setup(struct net_device *ndev)
/* Only allow ECI interrupts */
synchronize_irq(ndev->irq);
napi_disable(&mdp->napi);
- sh_eth_write(ndev, DMAC_M_ECI, EESIPR);
+ sh_eth_write(ndev, EESIPR_ECIIP, EESIPR);
/* Enable MagicPacket */
sh_eth_modify(ndev, ECMR, 0, ECMR_MPDE);
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index a1bb8cc..a6753cc 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -268,19 +268,35 @@ enum EESR_BIT {
EESR_TFE | EESR_TDE)
/* EESIPR */
-enum DMAC_IM_BIT {
- DMAC_M_TWB = 0x40000000, DMAC_M_TABT = 0x04000000,
- DMAC_M_RABT = 0x02000000,
- DMAC_M_RFRMER = 0x01000000, DMAC_M_ADF = 0x00800000,
- DMAC_M_ECI = 0x00400000, DMAC_M_FTC = 0x00200000,
- DMAC_M_TDE = 0x00100000, DMAC_M_TFE = 0x00080000,
- DMAC_M_FRC = 0x00040000, DMAC_M_RDE = 0x00020000,
- DMAC_M_RFE = 0x00010000, DMAC_M_TINT4 = 0x00000800,
- DMAC_M_TINT3 = 0x00000400, DMAC_M_TINT2 = 0x00000200,
- DMAC_M_TINT1 = 0x00000100, DMAC_M_RINT8 = 0x00000080,
- DMAC_M_RINT5 = 0x00000010, DMAC_M_RINT4 = 0x00000008,
- DMAC_M_RINT3 = 0x00000004, DMAC_M_RINT2 = 0x00000002,
- DMAC_M_RINT1 = 0x00000001,
+enum EESIPR_BIT {
+ EESIPR_TWB1IP = 0x80000000,
+ EESIPR_TWBIP = 0x40000000, /* same as TWB0IP */
+ EESIPR_TC1IP = 0x20000000,
+ EESIPR_TUCIP = 0x10000000,
+ EESIPR_ROCIP = 0x08000000,
+ EESIPR_TABTIP = 0x04000000,
+ EESIPR_RABTIP = 0x02000000,
+ EESIPR_RFCOFIP = 0x01000000,
+ EESIPR_ADEIP = 0x00800000,
+ EESIPR_ECIIP = 0x00400000,
+ EESIPR_FTCIP = 0x00200000, /* same as TC0IP */
+ EESIPR_TDEIP = 0x00100000,
+ EESIPR_TFUFIP = 0x00080000,
+ EESIPR_FRIP = 0x00040000,
+ EESIPR_RDEIP = 0x00020000,
+ EESIPR_RFOFIP = 0x00010000,
+ EESIPR_CNDIP = 0x00000800,
+ EESIPR_DLCIP = 0x00000400,
+ EESIPR_CDIP = 0x00000200,
+ EESIPR_TROIP = 0x00000100,
+ EESIPR_RMAFIP = 0x00000080,
+ EESIPR_CEEFIP = 0x00000040,
+ EESIPR_CELFIP = 0x00000020,
+ EESIPR_RRFIP = 0x00000010,
+ EESIPR_RTLFIP = 0x00000008,
+ EESIPR_RTSFIP = 0x00000004,
+ EESIPR_PREIP = 0x00000002,
+ EESIPR_CERFIP = 0x00000001,
};
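Each conversion above is meant to be bit-for-bit identical to the magic constant it replaces, and with the named bits in scope that is mechanically checkable. For example, the r8a777x/r8a779x/sh7724 value 0x01ff009f decomposes as this C11 assertion (a sketch that assumes the enum above is visible):

/* compile-time proof that the named bits reproduce 0x01ff009f */
_Static_assert((EESIPR_RFCOFIP | EESIPR_ADEIP | EESIPR_ECIIP |
		EESIPR_FTCIP | EESIPR_TDEIP | EESIPR_TFUFIP |
		EESIPR_FRIP | EESIPR_RDEIP | EESIPR_RFOFIP |
		EESIPR_RMAFIP | EESIPR_RRFIP |
		EESIPR_RTLFIP | EESIPR_RTSFIP |
		EESIPR_PREIP | EESIPR_CERFIP) == 0x01ff009f,
	       "named EESIPR bits must match the old magic value");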
/* Receive descriptor 0 bits */
diff --git a/drivers/net/ethernet/rocker/rocker_main.c b/drivers/net/ethernet/rocker/rocker_main.c
index 7c450b5..0f63a44 100644
--- a/drivers/net/ethernet/rocker/rocker_main.c
+++ b/drivers/net/ethernet/rocker/rocker_main.c
@@ -2517,7 +2517,7 @@ static int rocker_port_poll_rx(struct napi_struct *napi, int budget)
}
if (credits < budget)
- napi_complete(napi);
+ napi_complete_done(napi, credits);
rocker_dma_ring_credits_set(rocker, &rocker_port->rx_ring, credits);
diff --git a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
index 07074d9..d54490d 100644
--- a/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
+++ b/drivers/net/ethernet/samsung/sxgbe/sxgbe_main.c
@@ -1563,7 +1563,7 @@ static int sxgbe_poll(struct napi_struct *napi, int budget)
work_done = sxgbe_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum);
}
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 208e004..0475f18 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -60,15 +60,33 @@ struct efx_ef10_vlan {
u16 vid;
};
+enum efx_ef10_default_filters {
+ EFX_EF10_BCAST,
+ EFX_EF10_UCDEF,
+ EFX_EF10_MCDEF,
+ EFX_EF10_VXLAN4_UCDEF,
+ EFX_EF10_VXLAN4_MCDEF,
+ EFX_EF10_VXLAN6_UCDEF,
+ EFX_EF10_VXLAN6_MCDEF,
+ EFX_EF10_NVGRE4_UCDEF,
+ EFX_EF10_NVGRE4_MCDEF,
+ EFX_EF10_NVGRE6_UCDEF,
+ EFX_EF10_NVGRE6_MCDEF,
+ EFX_EF10_GENEVE4_UCDEF,
+ EFX_EF10_GENEVE4_MCDEF,
+ EFX_EF10_GENEVE6_UCDEF,
+ EFX_EF10_GENEVE6_MCDEF,
+
+ EFX_EF10_NUM_DEFAULT_FILTERS
+};
+
/* Per-VLAN filters information */
struct efx_ef10_filter_vlan {
struct list_head list;
u16 vid;
u16 uc[EFX_EF10_FILTER_DEV_UC_MAX];
u16 mc[EFX_EF10_FILTER_DEV_MC_MAX];
- u16 ucdef;
- u16 bcast;
- u16 mcdef;
+ u16 default_filters[EFX_EF10_NUM_DEFAULT_FILTERS];
};
struct efx_ef10_dev_addr {
@@ -78,7 +96,7 @@ struct efx_ef10_dev_addr {
struct efx_ef10_filter_table {
/* The MCDI match masks supported by this fw & hw, in order of priority */
u32 rx_match_mcdi_flags[
- MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM];
+ MC_CMD_GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES_MAXNUM * 2];
unsigned int rx_match_count;
struct {
@@ -197,11 +215,15 @@ static int efx_ef10_init_datapath_caps(struct efx_nic *efx)
nic_data->datapath_caps =
MCDI_DWORD(outbuf, GET_CAPABILITIES_OUT_FLAGS1);
- if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN)
+ if (outlen >= MC_CMD_GET_CAPABILITIES_V2_OUT_LEN) {
nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
GET_CAPABILITIES_V2_OUT_FLAGS2);
- else
+ nic_data->piobuf_size = MCDI_WORD(outbuf,
+ GET_CAPABILITIES_V2_OUT_SIZE_PIO_BUFF);
+ } else {
nic_data->datapath_caps2 = 0;
+ nic_data->piobuf_size = ER_DZ_TX_PIOBUF_SIZE;
+ }
/* record the DPCPU firmware IDs to determine VEB vswitching support.
*/
@@ -823,8 +845,8 @@ static int efx_ef10_link_piobufs(struct efx_nic *efx)
offset = ((efx->tx_channel_offset + efx->n_tx_channels -
tx_queue->channel->channel - 1) *
efx_piobuf_size);
- index = offset / ER_DZ_TX_PIOBUF_SIZE;
- offset = offset % ER_DZ_TX_PIOBUF_SIZE;
+ index = offset / nic_data->piobuf_size;
+ offset = offset % nic_data->piobuf_size;
/* When the host page size is 4K, the first
* host page in the WC mapping may be within
@@ -1159,14 +1181,20 @@ static int efx_ef10_dimension_resources(struct efx_nic *efx)
* functions of the controller.
*/
if (efx_piobuf_size != 0 &&
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
+ nic_data->piobuf_size / efx_piobuf_size * EF10_TX_PIOBUF_COUNT >=
efx->n_tx_channels) {
unsigned int n_piobufs =
DIV_ROUND_UP(efx->n_tx_channels,
- ER_DZ_TX_PIOBUF_SIZE / efx_piobuf_size);
+ nic_data->piobuf_size / efx_piobuf_size);
rc = efx_ef10_alloc_piobufs(efx, n_piobufs);
- if (rc)
+ if (rc == -ENOSPC)
+ netif_dbg(efx, probe, efx->net_dev,
+ "out of PIO buffers; cannot allocate more\n");
+ else if (rc == -EPERM)
+ netif_dbg(efx, probe, efx->net_dev,
+ "not permitted to allocate PIO buffers\n");
+ else if (rc)
netif_err(efx, probe, efx->net_dev,
"failed to allocate PIO buffers (%d)\n", rc);
else
@@ -1313,15 +1341,21 @@ static int efx_ef10_init_nic(struct efx_nic *efx)
efx_ef10_free_piobufs(efx);
}
- /* Log an error on failure, but this is non-fatal */
- if (rc)
+ /* Log an error on failure, but this is non-fatal.
+ * Permission errors are less important - we've presumably
+ * had the PIO buffer licence removed.
+ */
+ if (rc == -EPERM)
+ netif_dbg(efx, drv, efx->net_dev,
+ "not permitted to restore PIO buffers\n");
+ else if (rc)
netif_err(efx, drv, efx->net_dev,
"failed to restore PIO buffers (%d)\n", rc);
nic_data->must_restore_piobufs = false;
}
/* don't fail init if RSS setup doesn't work */
- rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table);
+ rc = efx->type->rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
efx->rss_active = (rc == 0);
return 0;
@@ -2358,7 +2392,11 @@ static void efx_ef10_tx_write(struct efx_tx_queue *tx_queue)
/* Create TX descriptor ring entry */
if (buffer->flags & EFX_TX_BUF_OPTION) {
*txd = buffer->option;
+ if (EFX_QWORD_FIELD(*txd, ESF_DZ_TX_OPTION_TYPE) == 1)
+ /* PIO descriptor */
+ tx_queue->packet_write_count = tx_queue->write_count;
} else {
+ tx_queue->packet_write_count = tx_queue->write_count;
BUILD_BUG_ON(EFX_TX_BUF_CONT != 1);
EFX_POPULATE_QWORD_3(
*txd,
@@ -2527,7 +2565,7 @@ static void efx_ef10_free_rss_context(struct efx_nic *efx, u32 context)
}
static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table, const u8 *key)
{
MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_SET_TABLE_IN_LEN);
MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_SET_KEY_IN_LEN);
@@ -2538,6 +2576,11 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
MC_CMD_RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE_LEN);
+ /* This iterates over the length of efx->rx_indir_table, but copies
+ * bytes from rx_indir_table. That's because the latter is a pointer
+ * rather than an array, but should have the same length.
+ * The efx->rx_hash_key loop below is similar.
+ */
for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); ++i)
MCDI_PTR(tablebuf,
RSS_CONTEXT_SET_TABLE_IN_INDIRECTION_TABLE)[i] =
@@ -2553,8 +2596,7 @@ static int efx_ef10_populate_rss_table(struct efx_nic *efx, u32 context,
BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
- MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] =
- efx->rx_hash_key[i];
+ MCDI_PTR(keybuf, RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY)[i] = key[i];
return efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_SET_KEY, keybuf,
sizeof(keybuf), NULL, 0, NULL);
@@ -2587,7 +2629,8 @@ static int efx_ef10_rx_push_shared_rss_config(struct efx_nic *efx,
}
static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table,
+ const u8 *key)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
int rc;
@@ -2606,7 +2649,7 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
}
rc = efx_ef10_populate_rss_table(efx, new_rx_rss_context,
- rx_indir_table);
+ rx_indir_table, key);
if (rc != 0)
goto fail2;
@@ -2617,6 +2660,9 @@ static int efx_ef10_rx_push_exclusive_rss_config(struct efx_nic *efx,
if (rx_indir_table != efx->rx_indir_table)
memcpy(efx->rx_indir_table, rx_indir_table,
sizeof(efx->rx_indir_table));
+ if (key != efx->rx_hash_key)
+ memcpy(efx->rx_hash_key, key, efx->type->rx_hash_key_size);
+
return 0;
fail2:
@@ -2627,15 +2673,69 @@ fail1:
return rc;
}
+static int efx_ef10_rx_pull_rss_config(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN);
+ MCDI_DECLARE_BUF(tablebuf, MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN);
+ MCDI_DECLARE_BUF(keybuf, MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN);
+ size_t outlen;
+ int rc, i;
+
+ BUILD_BUG_ON(MC_CMD_RSS_CONTEXT_GET_TABLE_IN_LEN !=
+ MC_CMD_RSS_CONTEXT_GET_KEY_IN_LEN);
+
+ if (nic_data->rx_rss_context == EFX_EF10_RSS_CONTEXT_INVALID)
+ return -ENOENT;
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_TABLE_IN_RSS_CONTEXT_ID,
+ nic_data->rx_rss_context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_TABLE, inbuf, sizeof(inbuf),
+ tablebuf, sizeof(tablebuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_TABLE_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_indir_table); i++)
+ efx->rx_indir_table[i] = MCDI_PTR(tablebuf,
+ RSS_CONTEXT_GET_TABLE_OUT_INDIRECTION_TABLE)[i];
+
+ MCDI_SET_DWORD(inbuf, RSS_CONTEXT_GET_KEY_IN_RSS_CONTEXT_ID,
+ nic_data->rx_rss_context);
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_hash_key) !=
+ MC_CMD_RSS_CONTEXT_SET_KEY_IN_TOEPLITZ_KEY_LEN);
+ rc = efx_mcdi_rpc(efx, MC_CMD_RSS_CONTEXT_GET_KEY, inbuf, sizeof(inbuf),
+ keybuf, sizeof(keybuf), &outlen);
+ if (rc != 0)
+ return rc;
+
+ if (WARN_ON(outlen != MC_CMD_RSS_CONTEXT_GET_KEY_OUT_LEN))
+ return -EIO;
+
+ for (i = 0; i < ARRAY_SIZE(efx->rx_hash_key); ++i)
+ efx->rx_hash_key[i] = MCDI_PTR(
+ keybuf, RSS_CONTEXT_GET_KEY_OUT_TOEPLITZ_KEY)[i];
+
+ return 0;
+}
+
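The two GET requests above reuse a single input buffer, which is safe only because the BUILD_BUG_ON proves at compile time that the commands share an input layout. A minimal standalone sketch of that buffer-sharing pattern, with all names invented:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define GET_TABLE_IN_LEN 4
#define GET_KEY_IN_LEN   4

/* stand-in for BUILD_BUG_ON(): refuse to build if the layouts differ */
_Static_assert(GET_TABLE_IN_LEN == GET_KEY_IN_LEN,
	       "commands sharing a request buffer must share a layout");

static int mock_rpc(const char *cmd, const uint8_t *in)
{
	uint32_t ctx;

	memcpy(&ctx, in, sizeof(ctx));
	printf("%s: context %u\n", cmd, ctx);
	return 0;
}

int main(void)
{
	uint8_t inbuf[GET_TABLE_IN_LEN];
	uint32_t context = 42;	/* hypothetical RSS context handle */

	memcpy(inbuf, &context, sizeof(context));
	/* the same inbuf serves both requests, as in the pull path above */
	return mock_rpc("GET_TABLE", inbuf) || mock_rpc("GET_KEY", inbuf);
}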
static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table,
+ const u8 *key)
{
int rc;
if (efx->rss_spread == 1)
return 0;
- rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table);
+ if (!key)
+ key = efx->rx_hash_key;
+
+ rc = efx_ef10_rx_push_exclusive_rss_config(efx, rx_indir_table, key);
if (rc == -ENOBUFS && !user) {
unsigned context_size;
@@ -2673,6 +2773,8 @@ static int efx_ef10_pf_rx_push_rss_config(struct efx_nic *efx, bool user,
static int efx_ef10_vf_rx_push_rss_config(struct efx_nic *efx, bool user,
const u32 *rx_indir_table
+ __attribute__ ((unused)),
+ const u8 *key
__attribute__ ((unused)))
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -3508,6 +3610,104 @@ efx_ef10_filter_set_entry(struct efx_ef10_filter_table *table,
table->entry[filter_idx].spec = (unsigned long)spec | flags;
}
+static void
+efx_ef10_filter_push_prep_set_match_fields(struct efx_nic *efx,
+ const struct efx_filter_spec *spec,
+ efx_dword_t *inbuf)
+{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
+ u32 match_fields = 0, uc_match, mc_match;
+
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
+ efx_ef10_filter_is_exclusive(spec) ?
+ MC_CMD_FILTER_OP_IN_OP_INSERT :
+ MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
+
+ /* Convert match flags and values. Unlike almost
+ * everything else in MCDI, these fields are in
+ * network byte order.
+ */
+#define COPY_VALUE(value, mcdi_field) \
+ do { \
+ match_fields |= \
+ 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
+ mcdi_field ## _LBN; \
+ BUILD_BUG_ON( \
+ MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
+ sizeof(value)); \
+ memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
+ &value, sizeof(value)); \
+ } while (0)
+#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
+ if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
+ COPY_VALUE(spec->gen_field, mcdi_field); \
+ }
+ /* Handle encap filters first. They will always be mismatch
+ * (unknown UC or MC) filters
+ */
+ if (encap_type) {
+ /* ether_type and outer_ip_proto need to be variables
+ * because COPY_VALUE wants to memcpy them
+ */
+ __be16 ether_type =
+ htons(encap_type & EFX_ENCAP_FLAG_IPV6 ?
+ ETH_P_IPV6 : ETH_P_IP);
+ u8 vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_GENEVE;
+ u8 outer_ip_proto;
+
+ switch (encap_type & EFX_ENCAP_TYPES_MASK) {
+ case EFX_ENCAP_TYPE_VXLAN:
+ vni_type = MC_CMD_FILTER_OP_EXT_IN_VNI_TYPE_VXLAN;
+ /* fallthrough */
+ case EFX_ENCAP_TYPE_GENEVE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_UDP;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ /* We always need to set the type field, even
+ * though we're not matching on the TNI.
+ */
+ MCDI_POPULATE_DWORD_1(inbuf,
+ FILTER_OP_EXT_IN_VNI_OR_VSID,
+ FILTER_OP_EXT_IN_VNI_TYPE,
+ vni_type);
+ break;
+ case EFX_ENCAP_TYPE_NVGRE:
+ COPY_VALUE(ether_type, ETHER_TYPE);
+ outer_ip_proto = IPPROTO_GRE;
+ COPY_VALUE(outer_ip_proto, IP_PROTO);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
+ match_fields |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ COPY_FIELD(REM_HOST, rem_host, SRC_IP);
+ COPY_FIELD(LOC_HOST, loc_host, DST_IP);
+ COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
+ COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
+ COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
+ COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
+ COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
+ COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
+ COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
+ COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
+#undef COPY_FIELD
+#undef COPY_VALUE
+ MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
+ match_fields);
+}
+
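The COPY_VALUE/COPY_FIELD macros above derive the flag bit, the compile-time length check, and the destination offset from a single field name, so adding a match type stays a one-line change. A toy standalone version of the same token-pasting pattern, with invented names and offsets:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define MATCH_SRC_PORT_LBN 0
#define MATCH_DST_PORT_LBN 1
#define BUF_SRC_PORT_OFST  0
#define BUF_DST_PORT_OFST  2

#define FLAG_SRC_PORT 1u
#define FLAG_DST_PORT 2u

struct spec { uint16_t src_port, dst_port; unsigned match_flags; };

/* one field name yields the flag bit, the offset and the copy size */
#define COPY_FIELD(gen_flag, gen_field, mcdi_field)			\
	do {								\
		if (s->match_flags & FLAG_ ## gen_flag) {		\
			match_fields |= 1u << MATCH_ ## mcdi_field ## _LBN; \
			memcpy(buf + BUF_ ## mcdi_field ## _OFST,	\
			       &s->gen_field, sizeof(s->gen_field));	\
		}							\
	} while (0)

static uint32_t fill(const struct spec *s, uint8_t *buf)
{
	uint32_t match_fields = 0;

	COPY_FIELD(SRC_PORT, src_port, SRC_PORT);
	COPY_FIELD(DST_PORT, dst_port, DST_PORT);
	return match_fields;
}

int main(void)
{
	struct spec s = { .src_port = 80, .match_flags = FLAG_SRC_PORT };
	uint8_t buf[4] = { 0 };

	printf("match_fields = %#x\n", fill(&s, buf));	/* prints 0x1 */
	return 0;
}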
static void efx_ef10_filter_push_prep(struct efx_nic *efx,
const struct efx_filter_spec *spec,
efx_dword_t *inbuf, u64 handle,
@@ -3516,7 +3716,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
struct efx_ef10_nic_data *nic_data = efx->nic_data;
u32 flags = spec->flags;
- memset(inbuf, 0, MC_CMD_FILTER_OP_IN_LEN);
+ memset(inbuf, 0, MC_CMD_FILTER_OP_EXT_IN_LEN);
/* Remove RSS flag if we don't have an RSS context. */
if (flags & EFX_FILTER_FLAG_RX_RSS &&
@@ -3529,46 +3729,7 @@ static void efx_ef10_filter_push_prep(struct efx_nic *efx,
MC_CMD_FILTER_OP_IN_OP_REPLACE);
MCDI_SET_QWORD(inbuf, FILTER_OP_IN_HANDLE, handle);
} else {
- u32 match_fields = 0;
-
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_OP,
- efx_ef10_filter_is_exclusive(spec) ?
- MC_CMD_FILTER_OP_IN_OP_INSERT :
- MC_CMD_FILTER_OP_IN_OP_SUBSCRIBE);
-
- /* Convert match flags and values. Unlike almost
- * everything else in MCDI, these fields are in
- * network byte order.
- */
- if (spec->match_flags & EFX_FILTER_MATCH_LOC_MAC_IG)
- match_fields |=
- is_multicast_ether_addr(spec->loc_mac) ?
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN :
- 1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
-#define COPY_FIELD(gen_flag, gen_field, mcdi_field) \
- if (spec->match_flags & EFX_FILTER_MATCH_ ## gen_flag) { \
- match_fields |= \
- 1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN; \
- BUILD_BUG_ON( \
- MC_CMD_FILTER_OP_IN_ ## mcdi_field ## _LEN < \
- sizeof(spec->gen_field)); \
- memcpy(MCDI_PTR(inbuf, FILTER_OP_IN_ ## mcdi_field), \
- &spec->gen_field, sizeof(spec->gen_field)); \
- }
- COPY_FIELD(REM_HOST, rem_host, SRC_IP);
- COPY_FIELD(LOC_HOST, loc_host, DST_IP);
- COPY_FIELD(REM_MAC, rem_mac, SRC_MAC);
- COPY_FIELD(REM_PORT, rem_port, SRC_PORT);
- COPY_FIELD(LOC_MAC, loc_mac, DST_MAC);
- COPY_FIELD(LOC_PORT, loc_port, DST_PORT);
- COPY_FIELD(ETHER_TYPE, ether_type, ETHER_TYPE);
- COPY_FIELD(INNER_VID, inner_vid, INNER_VLAN);
- COPY_FIELD(OUTER_VID, outer_vid, OUTER_VLAN);
- COPY_FIELD(IP_PROTO, ip_proto, IP_PROTO);
-#undef COPY_FIELD
- MCDI_SET_DWORD(inbuf, FILTER_OP_IN_MATCH_FIELDS,
- match_fields);
+ efx_ef10_filter_push_prep_set_match_fields(efx, spec, inbuf);
}
MCDI_SET_DWORD(inbuf, FILTER_OP_IN_PORT_ID, nic_data->vport_id);
@@ -3597,8 +3758,8 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
const struct efx_filter_spec *spec,
u64 *handle, bool replacing)
{
- MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_IN_LEN);
- MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_OUT_LEN);
+ MCDI_DECLARE_BUF(inbuf, MC_CMD_FILTER_OP_EXT_IN_LEN);
+ MCDI_DECLARE_BUF(outbuf, MC_CMD_FILTER_OP_EXT_OUT_LEN);
int rc;
efx_ef10_filter_push_prep(efx, spec, inbuf, *handle, replacing);
@@ -3613,37 +3774,58 @@ static int efx_ef10_filter_push(struct efx_nic *efx,
static u32 efx_ef10_filter_mcdi_flags_from_spec(const struct efx_filter_spec *spec)
{
+ enum efx_encap_type encap_type = efx_filter_get_encap_type(spec);
unsigned int match_flags = spec->match_flags;
+ unsigned int uc_match, mc_match;
u32 mcdi_flags = 0;
- if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
- match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
- mcdi_flags |=
- is_multicast_ether_addr(spec->loc_mac) ?
- (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_MCAST_DST_LBN) :
- (1 << MC_CMD_FILTER_OP_IN_MATCH_UNKNOWN_UCAST_DST_LBN);
- }
-
-#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field) { \
- unsigned int old_match_flags = match_flags; \
+#define MAP_FILTER_TO_MCDI_FLAG(gen_flag, mcdi_field, encap) { \
+ unsigned int old_match_flags = match_flags; \
match_flags &= ~EFX_FILTER_MATCH_ ## gen_flag; \
if (match_flags != old_match_flags) \
mcdi_flags |= \
- (1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
+ (1 << ((encap) ? \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_ ## \
+ mcdi_field ## _LBN : \
+ MC_CMD_FILTER_OP_EXT_IN_MATCH_ ##\
+ mcdi_field ## _LBN)); \
}
- MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP);
- MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP);
- MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC);
- MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT);
- MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC);
- MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT);
- MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN);
- MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO);
+ /* inner or outer based on encap type */
+ MAP_FILTER_TO_MCDI_FLAG(REM_HOST, SRC_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_HOST, DST_IP, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_MAC, SRC_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(REM_PORT, SRC_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_MAC, DST_MAC, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(LOC_PORT, DST_PORT, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(ETHER_TYPE, ETHER_TYPE, encap_type);
+ MAP_FILTER_TO_MCDI_FLAG(IP_PROTO, IP_PROTO, encap_type);
+ /* always outer */
+ MAP_FILTER_TO_MCDI_FLAG(INNER_VID, INNER_VLAN, false);
+ MAP_FILTER_TO_MCDI_FLAG(OUTER_VID, OUTER_VLAN, false);
#undef MAP_FILTER_TO_MCDI_FLAG
+ /* special handling for encap type, and mismatch */
+ if (encap_type) {
+ match_flags &= ~EFX_FILTER_MATCH_ENCAP_TYPE;
+ mcdi_flags |=
+ (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ mcdi_flags |= (1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_IFRM_UNKNOWN_MCAST_DST_LBN;
+ } else {
+ uc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_UCAST_DST_LBN;
+ mc_match = MC_CMD_FILTER_OP_EXT_IN_MATCH_UNKNOWN_MCAST_DST_LBN;
+ }
+
+ if (match_flags & EFX_FILTER_MATCH_LOC_MAC_IG) {
+ match_flags &= ~EFX_FILTER_MATCH_LOC_MAC_IG;
+ mcdi_flags |=
+ is_multicast_ether_addr(spec->loc_mac) ?
+ 1 << mc_match :
+ 1 << uc_match;
+ }
+
/* Did we map them all? */
WARN_ON_ONCE(match_flags);
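The translation above relies on the clear-as-you-map idiom: every recognised flag is stripped from a working copy, so any nonzero remainder at the WARN_ON_ONCE marks an unmapped match type. A standalone sketch of the idiom:

#include <stdio.h>

#define F_A 1u
#define F_B 2u
#define F_C 4u	/* deliberately not mapped below */

static int map_flags(unsigned flags, unsigned *out)
{
	unsigned rest = flags;

	*out = 0;
	if (rest & F_A) { rest &= ~F_A; *out |= 0x10; }
	if (rest & F_B) { rest &= ~F_B; *out |= 0x20; }
	return rest ? -1 : 0;	/* leftover bits: caller passed F_C etc. */
}

int main(void)
{
	unsigned out;

	printf("%d\n", map_flags(F_A | F_B, &out));	/* 0 */
	printf("%d\n", map_flags(F_A | F_C, &out));	/* -1 */
	return 0;
}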
@@ -4303,29 +4485,54 @@ efx_ef10_filter_rfs_expire_complete(struct efx_nic *efx,
#endif /* CONFIG_RFS_ACCEL */
-static int efx_ef10_filter_match_flags_from_mcdi(u32 mcdi_flags)
+static int efx_ef10_filter_match_flags_from_mcdi(bool encap, u32 mcdi_flags)
{
int match_flags = 0;
-#define MAP_FLAG(gen_flag, mcdi_field) { \
+#define MAP_FLAG(gen_flag, mcdi_field) do { \
u32 old_mcdi_flags = mcdi_flags; \
- mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_IN_MATCH_ ## \
- mcdi_field ## _LBN); \
+ mcdi_flags &= ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ ## \
+ mcdi_field ## _LBN); \
if (mcdi_flags != old_mcdi_flags) \
match_flags |= EFX_FILTER_MATCH_ ## gen_flag; \
+ } while (0)
+
+ if (encap) {
+ /* encap filters must specify encap type */
+ match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ /* and imply ethertype and ip proto */
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_IP_PROTO_LBN);
+ mcdi_flags &=
+ ~(1 << MC_CMD_FILTER_OP_EXT_IN_MATCH_ETHER_TYPE_LBN);
+ /* VLAN tags refer to the outer packet */
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ /* everything else refers to the inner packet */
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, IFRM_UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, IFRM_SRC_IP);
+ MAP_FLAG(LOC_HOST, IFRM_DST_IP);
+ MAP_FLAG(REM_MAC, IFRM_SRC_MAC);
+ MAP_FLAG(REM_PORT, IFRM_SRC_PORT);
+ MAP_FLAG(LOC_MAC, IFRM_DST_MAC);
+ MAP_FLAG(LOC_PORT, IFRM_DST_PORT);
+ MAP_FLAG(ETHER_TYPE, IFRM_ETHER_TYPE);
+ MAP_FLAG(IP_PROTO, IFRM_IP_PROTO);
+ } else {
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
+ MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
+ MAP_FLAG(REM_HOST, SRC_IP);
+ MAP_FLAG(LOC_HOST, DST_IP);
+ MAP_FLAG(REM_MAC, SRC_MAC);
+ MAP_FLAG(REM_PORT, SRC_PORT);
+ MAP_FLAG(LOC_MAC, DST_MAC);
+ MAP_FLAG(LOC_PORT, DST_PORT);
+ MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
+ MAP_FLAG(INNER_VID, INNER_VLAN);
+ MAP_FLAG(OUTER_VID, OUTER_VLAN);
+ MAP_FLAG(IP_PROTO, IP_PROTO);
}
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_UCAST_DST);
- MAP_FLAG(LOC_MAC_IG, UNKNOWN_MCAST_DST);
- MAP_FLAG(REM_HOST, SRC_IP);
- MAP_FLAG(LOC_HOST, DST_IP);
- MAP_FLAG(REM_MAC, SRC_MAC);
- MAP_FLAG(REM_PORT, SRC_PORT);
- MAP_FLAG(LOC_MAC, DST_MAC);
- MAP_FLAG(LOC_PORT, DST_PORT);
- MAP_FLAG(ETHER_TYPE, ETHER_TYPE);
- MAP_FLAG(INNER_VID, INNER_VLAN);
- MAP_FLAG(OUTER_VID, OUTER_VLAN);
- MAP_FLAG(IP_PROTO, IP_PROTO);
#undef MAP_FLAG
/* Did we map them all? */
@@ -4352,6 +4559,7 @@ static void efx_ef10_filter_cleanup_vlans(struct efx_nic *efx)
}
static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
+ bool encap,
enum efx_filter_match_flags match_flags)
{
unsigned int match_pri;
@@ -4360,7 +4568,7 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
for (match_pri = 0;
match_pri < table->rx_match_count;
match_pri++) {
- mf = efx_ef10_filter_match_flags_from_mcdi(
+ mf = efx_ef10_filter_match_flags_from_mcdi(encap,
table->rx_match_mcdi_flags[match_pri]);
if (mf == match_flags)
return true;
@@ -4369,39 +4577,30 @@ static bool efx_ef10_filter_match_supported(struct efx_ef10_filter_table *table,
return false;
}
-static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+static int
+efx_ef10_filter_table_probe_matches(struct efx_nic *efx,
+ struct efx_ef10_filter_table *table,
+ bool encap)
{
MCDI_DECLARE_BUF(inbuf, MC_CMD_GET_PARSER_DISP_INFO_IN_LEN);
MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_PARSER_DISP_INFO_OUT_LENMAX);
- struct efx_ef10_nic_data *nic_data = efx->nic_data;
- struct net_device *net_dev = efx->net_dev;
unsigned int pd_match_pri, pd_match_count;
- struct efx_ef10_filter_table *table;
- struct efx_ef10_vlan *vlan;
size_t outlen;
int rc;
- if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
- return -EINVAL;
-
- if (efx->filter_state) /* already probed */
- return 0;
-
- table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
-
/* Find out which RX filter types are supported, and their priorities */
MCDI_SET_DWORD(inbuf, GET_PARSER_DISP_INFO_IN_OP,
+ encap ?
+ MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_ENCAP_RX_MATCHES :
MC_CMD_GET_PARSER_DISP_INFO_IN_OP_GET_SUPPORTED_RX_MATCHES);
rc = efx_mcdi_rpc(efx, MC_CMD_GET_PARSER_DISP_INFO,
inbuf, sizeof(inbuf), outbuf, sizeof(outbuf),
&outlen);
if (rc)
- goto fail;
+ return rc;
+
pd_match_count = MCDI_VAR_ARRAY_LEN(
outlen, GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES);
- table->rx_match_count = 0;
for (pd_match_pri = 0; pd_match_pri < pd_match_count; pd_match_pri++) {
u32 mcdi_flags =
@@ -4409,7 +4608,7 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
outbuf,
GET_PARSER_DISP_INFO_OUT_SUPPORTED_MATCHES,
pd_match_pri);
- rc = efx_ef10_filter_match_flags_from_mcdi(mcdi_flags);
+ rc = efx_ef10_filter_match_flags_from_mcdi(encap, mcdi_flags);
if (rc < 0) {
netif_dbg(efx, probe, efx->net_dev,
"%s: fw flags %#x pri %u not supported in driver\n",
@@ -4424,10 +4623,40 @@ static int efx_ef10_filter_table_probe(struct efx_nic *efx)
}
}
+ return 0;
+}
+
+static int efx_ef10_filter_table_probe(struct efx_nic *efx)
+{
+ struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ struct net_device *net_dev = efx->net_dev;
+ struct efx_ef10_filter_table *table;
+ struct efx_ef10_vlan *vlan;
+ int rc;
+
+ if (!efx_rwsem_assert_write_locked(&efx->filter_sem))
+ return -EINVAL;
+
+ if (efx->filter_state) /* already probed */
+ return 0;
+
+ table = kzalloc(sizeof(*table), GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ table->rx_match_count = 0;
+ rc = efx_ef10_filter_table_probe_matches(efx, table, false);
+ if (rc)
+ goto fail;
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ rc = efx_ef10_filter_table_probe_matches(efx, table, true);
+ if (rc)
+ goto fail;
if ((efx_supported_features(efx) & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- !(efx_ef10_filter_match_supported(table,
+ !(efx_ef10_filter_match_supported(table, false,
(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC)) &&
- efx_ef10_filter_match_supported(table,
+ efx_ef10_filter_match_supported(table, false,
(EFX_FILTER_MATCH_OUTER_VID | EFX_FILTER_MATCH_LOC_MAC_IG)))) {
netif_info(efx, probe, net_dev,
"VLAN filters are not supported in this firmware variant\n");
@@ -4473,10 +4702,13 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
{
struct efx_ef10_filter_table *table = efx->filter_state;
struct efx_ef10_nic_data *nic_data = efx->nic_data;
+ unsigned int invalid_filters = 0, failed = 0;
+ struct efx_ef10_filter_vlan *vlan;
struct efx_filter_spec *spec;
unsigned int filter_idx;
- bool failed = false;
- int rc;
+ u32 mcdi_flags;
+ int match_pri;
+ int rc, i;
WARN_ON(!rwsem_is_locked(&efx->filter_sem));
@@ -4493,6 +4725,20 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
if (!spec)
continue;
+ mcdi_flags = efx_ef10_filter_mcdi_flags_from_spec(spec);
+ match_pri = 0;
+ while (match_pri < table->rx_match_count &&
+ table->rx_match_mcdi_flags[match_pri] != mcdi_flags)
+ ++match_pri;
+ if (match_pri >= table->rx_match_count) {
+ invalid_filters++;
+ goto not_restored;
+ }
+ if (spec->rss_context != EFX_FILTER_RSS_CONTEXT_DEFAULT &&
+ spec->rss_context != nic_data->rx_rss_context)
+ netif_warn(efx, drv, efx->net_dev,
+ "Warning: unable to restore a filter with specific RSS context.\n");
+
table->entry[filter_idx].spec |= EFX_EF10_FILTER_FLAG_BUSY;
spin_unlock_bh(&efx->filter_lock);
@@ -4500,10 +4746,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
&table->entry[filter_idx].handle,
false);
if (rc)
- failed = true;
-
+ failed++;
spin_lock_bh(&efx->filter_lock);
+
if (rc) {
+not_restored:
+ list_for_each_entry(vlan, &table->vlan_list, list)
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; ++i)
+ if (vlan->default_filters[i] == filter_idx)
+ vlan->default_filters[i] =
+ EFX_EF10_FILTER_ID_INVALID;
+
kfree(spec);
efx_ef10_filter_set_entry(table, filter_idx, NULL, 0);
} else {
@@ -4514,9 +4767,17 @@ static void efx_ef10_filter_table_restore(struct efx_nic *efx)
spin_unlock_bh(&efx->filter_lock);
+ /* This can happen validly if the MC's capabilities have changed, so
+ * is not an error.
+ */
+ if (invalid_filters)
+ netif_dbg(efx, drv, efx->net_dev,
+ "Did not restore %u filters that are now unsupported.\n",
+ invalid_filters);
+
if (failed)
netif_err(efx, hw, efx->net_dev,
- "unable to restore all filters\n");
+ "unable to restore %u filters\n", failed);
else
nic_data->must_restore_filters = false;
}
@@ -4594,9 +4855,8 @@ static void _efx_ef10_filter_vlan_mark_old(struct efx_nic *efx,
efx_ef10_filter_mark_one_old(efx, &vlan->uc[i]);
for (i = 0; i < table->dev_mc_count; i++)
efx_ef10_filter_mark_one_old(efx, &vlan->mc[i]);
- efx_ef10_filter_mark_one_old(efx, &vlan->ucdef);
- efx_ef10_filter_mark_one_old(efx, &vlan->bcast);
- efx_ef10_filter_mark_one_old(efx, &vlan->mcdef);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ efx_ef10_filter_mark_one_old(efx, &vlan->default_filters[i]);
}
/* Mark old filters that may need to be removed.
@@ -4714,6 +4974,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
if (multicast && rollback) {
/* Also need an Ethernet broadcast filter */
+ EFX_WARN_ON_PARANOID(vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO, filter_flags, 0);
eth_broadcast_addr(baddr);
efx_filter_set_eth_local(&spec, vlan->vid, baddr);
@@ -4730,9 +4992,8 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
}
return rc;
} else {
- EFX_WARN_ON_PARANOID(vlan->bcast !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
@@ -4741,6 +5002,7 @@ static int efx_ef10_filter_insert_addr_list(struct efx_nic *efx,
static int efx_ef10_filter_insert_def(struct efx_nic *efx,
struct efx_ef10_filter_vlan *vlan,
+ enum efx_encap_type encap_type,
bool multicast, bool rollback)
{
struct efx_ef10_nic_data *nic_data = efx->nic_data;
@@ -4748,6 +5010,7 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
struct efx_filter_spec spec;
u8 baddr[ETH_ALEN];
int rc;
+ u16 *id;
filter_flags = efx_rss_enabled(efx) ? EFX_FILTER_FLAG_RX_RSS : 0;
@@ -4758,19 +5021,75 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
else
efx_filter_set_uc_def(&spec);
+ if (encap_type) {
+ if (nic_data->datapath_caps &
+ (1 << MC_CMD_GET_CAPABILITIES_OUT_VXLAN_NVGRE_LBN))
+ efx_filter_set_encap_type(&spec, encap_type);
+ else
+ /* don't insert encap filters on non-supporting
+ * platforms. ID will be left as INVALID.
+ */
+ return 0;
+ }
+
if (vlan->vid != EFX_FILTER_VID_UNSPEC)
efx_filter_set_eth_local(&spec, vlan->vid, NULL);
rc = efx_ef10_filter_insert(efx, &spec, true);
if (rc < 0) {
- netif_printk(efx, drv, rc == -EPERM ? KERN_DEBUG : KERN_WARNING,
- efx->net_dev,
- "%scast mismatch filter insert failed rc=%d\n",
- multicast ? "Multi" : "Uni", rc);
+ const char *um = multicast ? "Multicast" : "Unicast";
+ const char *encap_name = "";
+ const char *encap_ipv = "";
+
+ if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_VXLAN)
+ encap_name = "VXLAN ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_NVGRE)
+ encap_name = "NVGRE ";
+ else if ((encap_type & EFX_ENCAP_TYPES_MASK) ==
+ EFX_ENCAP_TYPE_GENEVE)
+ encap_name = "GENEVE ";
+ if (encap_type & EFX_ENCAP_FLAG_IPV6)
+ encap_ipv = "IPv6 ";
+ else if (encap_type)
+ encap_ipv = "IPv4 ";
+
+ /* unprivileged functions can't insert mismatch filters
+ * for encapsulated or unicast traffic, so downgrade
+ * those warnings to debug.
+ */
+ netif_cond_dbg(efx, drv, efx->net_dev,
+ rc == -EPERM && (encap_type || !multicast), warn,
+ "%s%s%s mismatch filter insert failed rc=%d\n",
+ encap_name, encap_ipv, um, rc);
} else if (multicast) {
- EFX_WARN_ON_PARANOID(vlan->mcdef != EFX_EF10_FILTER_ID_INVALID);
- vlan->mcdef = efx_ef10_filter_get_unsafe_id(efx, rc);
- if (!nic_data->workaround_26807) {
+ /* mapping from encap types to default filter IDs (multicast) */
+ static enum efx_ef10_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_MCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_MCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_MCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_MCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = efx_ef10_filter_get_unsafe_id(efx, rc);
+ if (!nic_data->workaround_26807 && !encap_type) {
/* Also need an Ethernet broadcast filter */
efx_filter_init_rx(&spec, EFX_FILTER_PRI_AUTO,
filter_flags, 0);
@@ -4785,20 +5104,44 @@ static int efx_ef10_filter_insert_def(struct efx_nic *efx,
/* Roll back the mc_def filter */
efx_ef10_filter_remove_unsafe(
efx, EFX_FILTER_PRI_AUTO,
- vlan->mcdef);
- vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+ *id);
+ *id = EFX_EF10_FILTER_ID_INVALID;
return rc;
}
} else {
- EFX_WARN_ON_PARANOID(vlan->bcast !=
- EFX_EF10_FILTER_ID_INVALID);
- vlan->bcast = efx_ef10_filter_get_unsafe_id(efx, rc);
+ EFX_WARN_ON_PARANOID(
+ vlan->default_filters[EFX_EF10_BCAST] !=
+ EFX_EF10_FILTER_ID_INVALID);
+ vlan->default_filters[EFX_EF10_BCAST] =
+ efx_ef10_filter_get_unsafe_id(efx, rc);
}
}
rc = 0;
} else {
- EFX_WARN_ON_PARANOID(vlan->ucdef != EFX_EF10_FILTER_ID_INVALID);
- vlan->ucdef = rc;
+ /* mapping from encap types to default filter IDs (unicast) */
+ static enum efx_ef10_default_filters map[] = {
+ [EFX_ENCAP_TYPE_NONE] = EFX_EF10_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN] = EFX_EF10_VXLAN4_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE] = EFX_EF10_NVGRE4_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE] = EFX_EF10_GENEVE4_UCDEF,
+ [EFX_ENCAP_TYPE_VXLAN | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_VXLAN6_UCDEF,
+ [EFX_ENCAP_TYPE_NVGRE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_NVGRE6_UCDEF,
+ [EFX_ENCAP_TYPE_GENEVE | EFX_ENCAP_FLAG_IPV6] =
+ EFX_EF10_GENEVE6_UCDEF,
+ };
+
+ /* quick bounds check (BCAST result impossible) */
+ BUILD_BUG_ON(EFX_EF10_BCAST != 0);
+ if (encap_type >= ARRAY_SIZE(map) || map[encap_type] == 0) {
+ WARN_ON(1);
+ return -EINVAL;
+ }
+ /* then follow map */
+ id = &vlan->default_filters[map[encap_type]];
+ EFX_WARN_ON_PARANOID(*id != EFX_EF10_FILTER_ID_INVALID);
+ *id = rc;
rc = 0;
}
return rc;
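Both map[] tables rely on the same trick: designated initializers leave unsupported (type | flag) combinations at zero, and since index 0 of default_filters is the broadcast slot, unreachable here as the BUILD_BUG_ON documents, a single bounds-plus-zero check rejects them. A standalone sketch with invented names:

#include <stdio.h>

enum { TYPE_NONE, TYPE_VXLAN, TYPE_NVGRE, FLAG_V6 = 4 };
enum { ID_RESERVED, ID_UCDEF, ID_VXLAN4, ID_VXLAN6 };	/* 0 = reserved */

static const unsigned char map[] = {
	[TYPE_NONE]		= ID_UCDEF,
	[TYPE_VXLAN]		= ID_VXLAN4,
	[TYPE_VXLAN | FLAG_V6]	= ID_VXLAN6,
	/* TYPE_NVGRE rows intentionally absent: they stay 0, so rejected */
};

static int lookup(unsigned key)
{
	if (key >= sizeof(map) || map[key] == 0)
		return -1;	/* out of range or unmapped combination */
	return map[key];
}

int main(void)
{
	printf("%d\n", lookup(TYPE_VXLAN | FLAG_V6));	/* 3 */
	printf("%d\n", lookup(TYPE_NVGRE));		/* -1 */
	return 0;
}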
@@ -4921,7 +5264,8 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* Insert/renew unicast filters */
if (table->uc_promisc) {
- efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NONE,
+ false, false);
efx_ef10_filter_insert_addr_list(efx, vlan, false, false);
} else {
/* If any of the filters failed to insert, fall back to
@@ -4929,8 +5273,25 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
* our individual unicast filters.
*/
if (efx_ef10_filter_insert_addr_list(efx, vlan, false, false))
- efx_ef10_filter_insert_def(efx, vlan, false, false);
+ efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ false, false);
}
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ false, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ false, false);
/* Insert/renew multicast filters */
/* If changing promiscuous state with cascaded multicast filters, remove
@@ -4944,7 +5305,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* If we failed to insert promiscuous filters, rollback
* and fall back to individual multicast filters
*/
- if (efx_ef10_filter_insert_def(efx, vlan, true, true)) {
+ if (efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true)) {
/* Changing promisc state, so remove old filters */
efx_ef10_filter_remove_old(efx);
efx_ef10_filter_insert_addr_list(efx, vlan,
@@ -4954,7 +5317,9 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* If we failed to insert promiscuous filters, don't
* rollback. Regardless, also insert the mc_list
*/
- efx_ef10_filter_insert_def(efx, vlan, true, false);
+ efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, false);
efx_ef10_filter_insert_addr_list(efx, vlan, true, false);
}
} else {
@@ -4967,11 +5332,28 @@ static void efx_ef10_filter_vlan_sync_rx_mode(struct efx_nic *efx,
/* Changing promisc state, so remove old filters */
if (nic_data->workaround_26807)
efx_ef10_filter_remove_old(efx);
- if (efx_ef10_filter_insert_def(efx, vlan, true, true))
+ if (efx_ef10_filter_insert_def(efx, vlan,
+ EFX_ENCAP_TYPE_NONE,
+ true, true))
efx_ef10_filter_insert_addr_list(efx, vlan,
true, false);
}
}
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_VXLAN |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_NVGRE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE,
+ true, false);
+ efx_ef10_filter_insert_def(efx, vlan, EFX_ENCAP_TYPE_GENEVE |
+ EFX_ENCAP_FLAG_IPV6,
+ true, false);
}
/* Caller must hold efx->filter_sem for read if race against
@@ -5058,9 +5440,8 @@ static int efx_ef10_filter_add_vlan(struct efx_nic *efx, u16 vid)
vlan->uc[i] = EFX_EF10_FILTER_ID_INVALID;
for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
vlan->mc[i] = EFX_EF10_FILTER_ID_INVALID;
- vlan->ucdef = EFX_EF10_FILTER_ID_INVALID;
- vlan->bcast = EFX_EF10_FILTER_ID_INVALID;
- vlan->mcdef = EFX_EF10_FILTER_ID_INVALID;
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ vlan->default_filters[i] = EFX_EF10_FILTER_ID_INVALID;
list_add_tail(&vlan->list, &table->vlan_list);
@@ -5087,9 +5468,10 @@ static void efx_ef10_filter_del_vlan_internal(struct efx_nic *efx,
for (i = 0; i < ARRAY_SIZE(vlan->mc); i++)
efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
vlan->mc[i]);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->ucdef);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->bcast);
- efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO, vlan->mcdef);
+ for (i = 0; i < EFX_EF10_NUM_DEFAULT_FILTERS; i++)
+ if (vlan->default_filters[i] != EFX_EF10_FILTER_ID_INVALID)
+ efx_ef10_filter_remove_unsafe(efx, EFX_FILTER_PRI_AUTO,
+ vlan->default_filters[i]);
kfree(vlan);
}
@@ -5621,6 +6003,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_vf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -5678,6 +6061,7 @@ const struct efx_nic_type efx_hunt_a0_vf_nic_type = {
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
+ .rx_hash_key_size = 40,
};
const struct efx_nic_type efx_hunt_a0_nic_type = {
@@ -5728,6 +6112,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.tx_write = efx_ef10_tx_write,
.tx_limit_len = efx_ef10_tx_limit_len,
.rx_push_rss_config = efx_ef10_pf_rx_push_rss_config,
+ .rx_pull_rss_config = efx_ef10_rx_pull_rss_config,
.rx_probe = efx_ef10_rx_probe,
.rx_init = efx_ef10_rx_init,
.rx_remove = efx_ef10_rx_remove,
@@ -5796,6 +6181,7 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.rx_ts_offset = ES_DZ_RX_PREFIX_TSTAMP_OFST,
.can_rx_scatter = true,
.always_rx_scatter = true,
+ .option_descriptors = true,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << ERF_DD_EVQ_IND_TIMER_VAL_WIDTH,
.offload_features = EF10_OFFLOAD_FEATURES,
@@ -5803,4 +6189,5 @@ const struct efx_nic_type efx_hunt_a0_nic_type = {
.max_rx_ip_filters = HUNT_FILTER_TBL_ROWS,
.hwtstamp_filters = 1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_ALL,
+ .rx_hash_key_size = 40,
};
diff --git a/drivers/net/ethernet/sfc/ef10_sriov.c b/drivers/net/ethernet/sfc/ef10_sriov.c
index a55c53d..ed4b142 100644
--- a/drivers/net/ethernet/sfc/ef10_sriov.c
+++ b/drivers/net/ethernet/sfc/ef10_sriov.c
@@ -6,6 +6,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation, incorporated herein by reference.
*/
+#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/module.h>
#include "net_driver.h"
@@ -554,7 +555,7 @@ int efx_ef10_sriov_set_vf_mac(struct efx_nic *efx, int vf_i, u8 *mac)
return 0;
fail:
- memset(vf->mac, 0, ETH_ALEN);
+ eth_zero_addr(vf->mac);
return rc;
}
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index 543fa48..466c028 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -331,7 +331,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
* since efx_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
- napi_complete(napi);
+ napi_complete_done(napi, spent);
efx_nic_eventq_read_ack(channel);
}
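This is one of many napi_complete() to napi_complete_done() conversions in this series; passing the work count lets the NAPI core account completed work when deciding about busy polling and interrupt re-arming. A sketch of the poll shape being standardised on, where the two helper names are invented stand-ins for a driver's RX-clean and IRQ-enable paths:

#include <linux/netdevice.h>

static int example_rx_clean(struct napi_struct *napi, int budget)
{
	return 0;	/* pretend nothing was pending */
}

static void example_enable_irqs(struct napi_struct *napi) { }

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = example_rx_clean(napi, budget);

	if (work_done < budget) {
		/* report how much work completed, not merely that we did */
		napi_complete_done(napi, work_done);
		example_enable_irqs(napi);
	}
	return work_done;
}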
@@ -2334,8 +2334,8 @@ static int efx_set_features(struct net_device *net_dev, netdev_features_t data)
return 0;
}
-int efx_get_phys_port_id(struct net_device *net_dev,
- struct netdev_phys_item_id *ppid)
+static int efx_get_phys_port_id(struct net_device *net_dev,
+ struct netdev_phys_item_id *ppid)
{
struct efx_nic *efx = netdev_priv(net_dev);
diff --git a/drivers/net/ethernet/sfc/ethtool.c b/drivers/net/ethernet/sfc/ethtool.c
index 18ebaea..adddf70 100644
--- a/drivers/net/ethernet/sfc/ethtool.c
+++ b/drivers/net/ethernet/sfc/ethtool.c
@@ -1278,15 +1278,29 @@ static u32 efx_ethtool_get_rxfh_indir_size(struct net_device *net_dev)
return (efx->n_rx_channels == 1) ? 0 : ARRAY_SIZE(efx->rx_indir_table);
}
+static u32 efx_ethtool_get_rxfh_key_size(struct net_device *net_dev)
+{
+ struct efx_nic *efx = netdev_priv(net_dev);
+
+ return efx->type->rx_hash_key_size;
+}
+
static int efx_ethtool_get_rxfh(struct net_device *net_dev, u32 *indir, u8 *key,
u8 *hfunc)
{
struct efx_nic *efx = netdev_priv(net_dev);
+ int rc;
+
+ rc = efx->type->rx_pull_rss_config(efx);
+ if (rc)
+ return rc;
if (hfunc)
*hfunc = ETH_RSS_HASH_TOP;
if (indir)
memcpy(indir, efx->rx_indir_table, sizeof(efx->rx_indir_table));
+ if (key)
+ memcpy(key, efx->rx_hash_key, efx->type->rx_hash_key_size);
return 0;
}
@@ -1295,14 +1309,18 @@ static int efx_ethtool_set_rxfh(struct net_device *net_dev, const u32 *indir,
{
struct efx_nic *efx = netdev_priv(net_dev);
- /* We do not allow change in unsupported parameters */
- if (key ||
- (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP))
+ /* Hash function is Toeplitz, cannot be changed */
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
return -EOPNOTSUPP;
- if (!indir)
+ if (!indir && !key)
return 0;
- return efx->type->rx_push_rss_config(efx, true, indir);
+ if (!key)
+ key = efx->rx_hash_key;
+ if (!indir)
+ indir = efx->rx_indir_table;
+
+ return efx->type->rx_push_rss_config(efx, true, indir, key);
}
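The rewritten set_rxfh treats a NULL indir or key as keep-the-current-value, substituting the cached copies so the push path always receives a complete configuration. A standalone sketch of that defaulting idiom:

#include <stdio.h>

static int cached_indir = 7, cached_key = 9;

static int push(int indir, int key)
{
	printf("push indir=%d key=%d\n", indir, key);
	return 0;
}

static int set_rxfh(const int *indir, const int *key)
{
	if (!indir && !key)
		return 0;	/* nothing to change */
	return push(indir ? *indir : cached_indir,
		    key ? *key : cached_key);
}

int main(void)
{
	int key = 3;

	return set_rxfh(NULL, &key);	/* pushes indir=7 key=3 */
}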
static int efx_ethtool_get_ts_info(struct net_device *net_dev,
@@ -1377,6 +1395,7 @@ const struct ethtool_ops efx_ethtool_ops = {
.get_rxnfc = efx_ethtool_get_rxnfc,
.set_rxnfc = efx_ethtool_set_rxnfc,
.get_rxfh_indir_size = efx_ethtool_get_rxfh_indir_size,
+ .get_rxfh_key_size = efx_ethtool_get_rxfh_key_size,
.get_rxfh = efx_ethtool_get_rxfh,
.set_rxfh = efx_ethtool_set_rxfh,
.get_ts_info = efx_ethtool_get_ts_info,
diff --git a/drivers/net/ethernet/sfc/falcon/efx.c b/drivers/net/ethernet/sfc/falcon/efx.c
index 8cfbe01..c4ff3bb 100644
--- a/drivers/net/ethernet/sfc/falcon/efx.c
+++ b/drivers/net/ethernet/sfc/falcon/efx.c
@@ -327,7 +327,7 @@ static int ef4_poll(struct napi_struct *napi, int budget)
* since ef4_nic_eventq_read_ack() will have no effect if
* interrupts have already been disabled.
*/
- napi_complete(napi);
+ napi_complete_done(napi, spent);
ef4_nic_eventq_read_ack(channel);
}
diff --git a/drivers/net/ethernet/sfc/farch.c b/drivers/net/ethernet/sfc/farch.c
index e4ca216..ba45150 100644
--- a/drivers/net/ethernet/sfc/farch.c
+++ b/drivers/net/ethernet/sfc/farch.c
@@ -1649,6 +1649,22 @@ void efx_farch_rx_push_indir_table(struct efx_nic *efx)
}
}
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx)
+{
+ size_t i = 0;
+ efx_dword_t dword;
+
+ BUILD_BUG_ON(ARRAY_SIZE(efx->rx_indir_table) !=
+ FR_BZ_RX_INDIRECTION_TBL_ROWS);
+
+ for (i = 0; i < FR_BZ_RX_INDIRECTION_TBL_ROWS; i++) {
+ efx_readd(efx, &dword,
+ FR_BZ_RX_INDIRECTION_TBL +
+ FR_BZ_RX_INDIRECTION_TBL_STEP * i);
+ efx->rx_indir_table[i] = EFX_DWORD_FIELD(dword, FRF_BZ_IT_QUEUE);
+ }
+}
+
/* Looks at available SRAM resources and works out how many queues we
* can support, and where things like descriptor caches should live.
*
diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
index d0ed7f7..8189a1c 100644
--- a/drivers/net/ethernet/sfc/filter.h
+++ b/drivers/net/ethernet/sfc/filter.h
@@ -27,6 +27,7 @@
* @EFX_FILTER_MATCH_OUTER_VID: Match by outer VLAN ID
* @EFX_FILTER_MATCH_IP_PROTO: Match by IP transport protocol
* @EFX_FILTER_MATCH_LOC_MAC_IG: Match by local MAC address I/G bit.
* Used for RX default unicast and multicast/broadcast filters.
+ * @EFX_FILTER_MATCH_ENCAP_TYPE: Match by encapsulation type.
*
* Only some combinations are supported, depending on NIC type:
@@ -54,6 +55,7 @@ enum efx_filter_match_flags {
EFX_FILTER_MATCH_OUTER_VID = 0x0100,
EFX_FILTER_MATCH_IP_PROTO = 0x0200,
EFX_FILTER_MATCH_LOC_MAC_IG = 0x0400,
+ EFX_FILTER_MATCH_ENCAP_TYPE = 0x0800,
};
/**
@@ -98,6 +100,26 @@ enum efx_filter_flags {
EFX_FILTER_FLAG_TX = 0x10,
};
+/**
+ * enum efx_encap_type - types of encapsulation
+ * @EFX_ENCAP_TYPE_NONE: no encapsulation
+ * @EFX_ENCAP_TYPE_VXLAN: VXLAN encapsulation
+ * @EFX_ENCAP_TYPE_NVGRE: NVGRE encapsulation
+ * @EFX_ENCAP_TYPE_GENEVE: GENEVE encapsulation
+ * @EFX_ENCAP_FLAG_IPV6: indicates IPv6 outer frame
+ *
+ * Contains both enumerated types and flags.
+ * To get just the type, AND with @EFX_ENCAP_TYPES_MASK.
+ */
+enum efx_encap_type {
+ EFX_ENCAP_TYPE_NONE = 0,
+ EFX_ENCAP_TYPE_VXLAN = 1,
+ EFX_ENCAP_TYPE_NVGRE = 2,
+ EFX_ENCAP_TYPE_GENEVE = 3,
+
+ EFX_ENCAP_TYPES_MASK = 7,
+ EFX_ENCAP_FLAG_IPV6 = 8,
+};
+
/**
* struct efx_filter_spec - specification for a hardware filter
* @match_flags: Match type flags, from &enum efx_filter_match_flags
@@ -118,6 +140,8 @@ enum efx_filter_flags {
* @rem_host: Remote IP host to match, if %EFX_FILTER_MATCH_REM_HOST is set
* @loc_port: Local TCP/UDP port to match, if %EFX_FILTER_MATCH_LOC_PORT is set
* @rem_port: Remote TCP/UDP port to match, if %EFX_FILTER_MATCH_REM_PORT is set
+ * @encap_type: Encapsulation type to match (from &enum efx_encap_type), if
+ * %EFX_FILTER_MATCH_ENCAP_TYPE is set
*
* The efx_filter_init_rx() or efx_filter_init_tx() function *must* be
* used to initialise the structure. The efx_filter_set_*() functions
@@ -144,7 +168,8 @@ struct efx_filter_spec {
__be32 rem_host[4];
__be16 loc_port;
__be16 rem_port;
- /* total 64 bytes */
+ u32 encap_type:4;
+ /* total 65 bytes */
};
enum {
@@ -269,4 +294,18 @@ static inline int efx_filter_set_mc_def(struct efx_filter_spec *spec)
return 0;
}
+static inline void efx_filter_set_encap_type(struct efx_filter_spec *spec,
+ enum efx_encap_type encap_type)
+{
+ spec->match_flags |= EFX_FILTER_MATCH_ENCAP_TYPE;
+ spec->encap_type = encap_type;
+}
+
+static inline enum efx_encap_type efx_filter_get_encap_type(
+ const struct efx_filter_spec *spec)
+{
+ if (spec->match_flags & EFX_FILTER_MATCH_ENCAP_TYPE)
+ return spec->encap_type;
+ return EFX_ENCAP_TYPE_NONE;
+}
#endif /* EFX_FILTER_H */
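efx_encap_type packs an enumerated type in the low bits and an orthogonal IPv6 flag above it, so one value describes both; masking recovers the type. A standalone sketch of the packing:

#include <stdio.h>

enum encap {
	ENCAP_NONE   = 0,
	ENCAP_VXLAN  = 1,
	ENCAP_NVGRE  = 2,
	ENCAP_GENEVE = 3,

	ENCAP_TYPES_MASK = 7,	/* low bits: the type */
	ENCAP_FLAG_IPV6  = 8,	/* next bit: outer-frame family */
};

int main(void)
{
	enum encap e = ENCAP_GENEVE | ENCAP_FLAG_IPV6;

	printf("type=%d ipv6=%d\n",
	       e & ENCAP_TYPES_MASK, !!(e & ENCAP_FLAG_IPV6));	/* 3 1 */
	return 0;
}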
diff --git a/drivers/net/ethernet/sfc/mcdi.c b/drivers/net/ethernet/sfc/mcdi.c
index 9956513..24b271b 100644
--- a/drivers/net/ethernet/sfc/mcdi.c
+++ b/drivers/net/ethernet/sfc/mcdi.c
@@ -837,11 +837,9 @@ static int _efx_mcdi_rpc(struct efx_nic *efx, unsigned int cmd,
outbuf, outlen, outlen_actual,
quiet, NULL, raw_rc);
} else {
- netif_printk(efx, hw,
- rc == -EPERM ? KERN_DEBUG : KERN_ERR,
- efx->net_dev,
- "MC command 0x%x failed after proxy auth rc=%d\n",
- cmd, rc);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x failed after proxy auth rc=%d\n",
+ cmd, rc);
if (rc == -EINTR || rc == -EIO)
efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
@@ -1084,10 +1082,9 @@ void efx_mcdi_display_error(struct efx_nic *efx, unsigned cmd,
code = MCDI_DWORD(outbuf, ERR_CODE);
if (outlen >= MC_CMD_ERR_ARG_OFST + 4)
err_arg = MCDI_DWORD(outbuf, ERR_ARG);
- netif_printk(efx, hw, rc == -EPERM ? KERN_DEBUG : KERN_ERR,
- efx->net_dev,
- "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
- cmd, inlen, rc, code, err_arg);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -EPERM, err,
+ "MC command 0x%x inlen %zu failed rc=%d (raw=%d) arg=%d\n",
+ cmd, inlen, rc, code, err_arg);
}
/* Switch to polled MCDI completions. This can be called in various
@@ -2057,8 +2054,8 @@ fail:
/* Older firmware lacks GET_WORKAROUNDS and this isn't especially
* terrifying. The call site will have to deal with it though.
*/
- netif_printk(efx, hw, rc == -ENOSYS ? KERN_DEBUG : KERN_ERR,
- efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
+ netif_cond_dbg(efx, hw, efx->net_dev, rc == -ENOSYS, err,
+ "%s: failed rc=%d\n", __func__, rc);
return rc;
}
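These hunks replace open-coded netif_printk() level selection with netif_cond_dbg(), which demotes a message to debug when the condition holds and otherwise emits it at the named level. A sketch of the macro's assumed shape, not copied from the kernel header:

#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...)	\
	do {								\
		if (cond)						\
			netif_dbg(priv, type, netdev, fmt, ##args);	\
		else							\
			netif_ ## level(priv, type, netdev, fmt, ##args); \
	} while (0)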
diff --git a/drivers/net/ethernet/sfc/net_driver.h b/drivers/net/ethernet/sfc/net_driver.h
index 49db9e8..5927c20 100644
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -208,6 +208,12 @@ struct efx_tx_buffer {
* @write_count: Current write pointer
* This is the number of buffers that have been added to the
* hardware ring.
+ * @packet_write_count: Completable write pointer
+ * This is the write pointer of the last packet written.
+ * Normally this will equal @write_count, but as option descriptors
+ * don't produce completion events, they won't update this.
+ * Filled in iff @efx->type->option_descriptors; only used for PIO.
+ * Thus, this is written and used on EF10, and neither written nor used on farch.
* @old_read_count: The value of read_count when last checked.
* This is here for performance reasons. The xmit path will
* only get the up-to-date value of read_count if this
@@ -255,6 +261,7 @@ struct efx_tx_queue {
/* Members used only on the xmit path */
unsigned int insert_count ____cacheline_aligned_in_smp;
unsigned int write_count;
+ unsigned int packet_write_count;
unsigned int old_read_count;
unsigned int tso_bursts;
unsigned int tso_long_headers;
@@ -1174,6 +1181,7 @@ struct efx_mtd_partition {
* @tx_remove: Free resources for TX queue
* @tx_write: Write TX descriptors and doorbell
* @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
+ * @rx_pull_rss_config: Read RSS hash key and indirection table back from the NIC
* @rx_probe: Allocate resources for RX queue
* @rx_init: Initialise RX queue on the NIC
* @rx_remove: Free resources for RX queue
@@ -1237,6 +1245,7 @@ struct efx_mtd_partition {
* @rx_buffer_padding: Size of padding at end of RX packet
* @can_rx_scatter: NIC is able to scatter packets to multiple buffers
* @always_rx_scatter: NIC will always scatter packets to multiple buffers
+ * @option_descriptors: NIC supports TX option descriptors
* @max_interrupt_mode: Highest capability interrupt mode supported
* from &enum efx_init_mode.
* @timer_period_max: Maximum period of interrupt timer (in ticks)
@@ -1303,7 +1312,8 @@ struct efx_nic_type {
unsigned int (*tx_limit_len)(struct efx_tx_queue *tx_queue,
dma_addr_t dma_addr, unsigned int len);
int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table);
+ const u32 *rx_indir_table, const u8 *key);
+ int (*rx_pull_rss_config)(struct efx_nic *efx);
int (*rx_probe)(struct efx_rx_queue *rx_queue);
void (*rx_init)(struct efx_rx_queue *rx_queue);
void (*rx_remove)(struct efx_rx_queue *rx_queue);
@@ -1395,12 +1405,14 @@ struct efx_nic_type {
unsigned int rx_buffer_padding;
bool can_rx_scatter;
bool always_rx_scatter;
+ bool option_descriptors;
unsigned int max_interrupt_mode;
unsigned int timer_period_max;
netdev_features_t offload_features;
int mcdi_max_ver;
unsigned int max_rx_ip_filters;
u32 hwtstamp_filters;
+ unsigned int rx_hash_key_size;
};
/**************************************************************************
diff --git a/drivers/net/ethernet/sfc/nic.h b/drivers/net/ethernet/sfc/nic.h
index 2237746..85cf131 100644
--- a/drivers/net/ethernet/sfc/nic.h
+++ b/drivers/net/ethernet/sfc/nic.h
@@ -85,6 +85,17 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}
+/* Report whether the NIC considers this TX queue empty, using
+ * packet_write_count (the write count recorded for the last completable
+ * doorbell push). May return a false negative. EF10 only, which is OK
+ * because only EF10 supports PIO.
+ */
+static inline bool efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue)
+{
+ EFX_WARN_ON_ONCE_PARANOID(!tx_queue->efx->type->option_descriptors);
+ return __efx_nic_tx_is_empty(tx_queue, tx_queue->packet_write_count);
+}
+
/* Decide whether we can use TX PIO, ie. write packet data directly into
* a buffer on the device. This can reduce latency at the expense of
* throughput, so we only do this if both hardware and software TX rings
@@ -94,9 +105,9 @@ static inline bool __efx_nic_tx_is_empty(struct efx_tx_queue *tx_queue,
static inline bool efx_nic_may_tx_pio(struct efx_tx_queue *tx_queue)
{
struct efx_tx_queue *partner = efx_tx_queue_partner(tx_queue);
- return tx_queue->piobuf &&
- __efx_nic_tx_is_empty(tx_queue, tx_queue->insert_count) &&
- __efx_nic_tx_is_empty(partner, partner->insert_count);
+
+ return tx_queue->piobuf && efx_nic_tx_is_empty(tx_queue) &&
+ efx_nic_tx_is_empty(partner);
}
/* Decide whether to push a TX descriptor to the NIC vs merely writing
@@ -332,6 +343,7 @@ enum {
* @pio_write_base: Base address for writing PIO buffers
* @pio_write_vi_base: Relative VI number for @pio_write_base
* @piobuf_handle: Handle of each PIO buffer allocated
+ * @piobuf_size: size of a single PIO buffer
* @must_restore_piobufs: Flag: PIO buffers have yet to be restored after MC
* reboot
* @rx_rss_context: Firmware handle for our RSS context
@@ -369,6 +381,7 @@ struct efx_ef10_nic_data {
void __iomem *wc_membase, *pio_write_base;
unsigned int pio_write_vi_base;
unsigned int piobuf_handle[EF10_TX_PIOBUF_COUNT];
+ u16 piobuf_size;
bool must_restore_piobufs;
u32 rx_rss_context;
bool rx_rss_context_exclusive;
@@ -613,6 +626,7 @@ void efx_farch_dimension_resources(struct efx_nic *efx, unsigned sram_lim_qw);
void efx_farch_init_common(struct efx_nic *efx);
void efx_ef10_handle_drain_event(struct efx_nic *efx);
void efx_farch_rx_push_indir_table(struct efx_nic *efx);
+void efx_farch_rx_pull_indir_table(struct efx_nic *efx);
int efx_nic_alloc_buffer(struct efx_nic *efx, struct efx_buffer *buffer,
unsigned int len, gfp_t gfp_flags);
diff --git a/drivers/net/ethernet/sfc/siena.c b/drivers/net/ethernet/sfc/siena.c
index 4e54e5d..af7cd85 100644
--- a/drivers/net/ethernet/sfc/siena.c
+++ b/drivers/net/ethernet/sfc/siena.c
@@ -332,12 +332,33 @@ fail1:
return rc;
}
+static int siena_rx_pull_rss_config(struct efx_nic *efx)
+{
+ efx_oword_t temp;
+
+ /* Read from IPv6 RSS key as that's longer (the IPv4 key is just the
+ * first 128 bits of the same key, assuming it's been set by
+ * siena_rx_push_rss_config, below)
+ */
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG1);
+ memcpy(efx->rx_hash_key, &temp, sizeof(temp));
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG2);
+ memcpy(efx->rx_hash_key + sizeof(temp), &temp, sizeof(temp));
+ efx_reado(efx, &temp, FR_CZ_RX_RSS_IPV6_REG3);
+ memcpy(efx->rx_hash_key + 2 * sizeof(temp), &temp,
+ FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH / 8);
+ efx_farch_rx_pull_indir_table(efx);
+ return 0;
+}
+
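The copy sizes above add up as follows, assuming a 128-bit efx_oword_t and a 64-bit TKEY_HI field as the register names suggest: two full registers plus the high half of the third make the 40 bytes the loop writes into rx_hash_key. A standalone arithmetic check:

#include <assert.h>

int main(void)
{
	const int oword_bytes = 128 / 8;	/* sizeof(efx_oword_t) */
	const int tkey_hi_bytes = 64 / 8;	/* IPV6_TKEY_HI_WIDTH / 8 */

	assert(oword_bytes + oword_bytes + tkey_hi_bytes == 40);
	return 0;
}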
static int siena_rx_push_rss_config(struct efx_nic *efx, bool user,
- const u32 *rx_indir_table)
+ const u32 *rx_indir_table, const u8 *key)
{
efx_oword_t temp;
/* Set hash key for IPv4 */
+ if (key)
+ memcpy(efx->rx_hash_key, key, sizeof(temp));
memcpy(&temp, efx->rx_hash_key, sizeof(temp));
efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY);
@@ -402,7 +423,7 @@ static int siena_init_nic(struct efx_nic *efx)
EFX_RX_USR_BUF_SIZE >> 5);
efx_writeo(efx, &temp, FR_AZ_RX_CFG);
- siena_rx_push_rss_config(efx, false, efx->rx_indir_table);
+ siena_rx_push_rss_config(efx, false, efx->rx_indir_table, NULL);
efx->rss_active = true;
/* Enable event logging */
@@ -979,6 +1000,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.tx_write = efx_farch_tx_write,
.tx_limit_len = efx_farch_tx_limit_len,
.rx_push_rss_config = siena_rx_push_rss_config,
+ .rx_pull_rss_config = siena_rx_pull_rss_config,
.rx_probe = efx_farch_rx_probe,
.rx_init = efx_farch_rx_init,
.rx_remove = efx_farch_rx_remove,
@@ -1044,6 +1066,7 @@ const struct efx_nic_type siena_a0_nic_type = {
.rx_hash_offset = FS_BZ_RX_PREFIX_HASH_OFST,
.rx_buffer_padding = 0,
.can_rx_scatter = true,
+ .option_descriptors = false,
.max_interrupt_mode = EFX_INT_MODE_MSIX,
.timer_period_max = 1 << FRF_CZ_TC_TIMER_VAL_WIDTH,
.offload_features = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -1053,4 +1076,5 @@ const struct efx_nic_type siena_a0_nic_type = {
.hwtstamp_filters = (1 << HWTSTAMP_FILTER_NONE |
1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT |
1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT),
+ .rx_hash_key_size = 16,
};
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index 3c01514..ff88d60 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -28,7 +28,6 @@
#ifdef EFX_USE_PIO
-#define EFX_PIOBUF_SIZE_MAX ER_DZ_TX_PIOBUF_SIZE
#define EFX_PIOBUF_SIZE_DEF ALIGN(256, L1_CACHE_BYTES)
unsigned int efx_piobuf_size __read_mostly = EFX_PIOBUF_SIZE_DEF;
@@ -817,6 +816,7 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
tx_queue->insert_count = 0;
tx_queue->write_count = 0;
+ tx_queue->packet_write_count = 0;
tx_queue->old_write_count = 0;
tx_queue->read_count = 0;
tx_queue->old_read_count = 0;
diff --git a/drivers/net/ethernet/smsc/smc91c92_cs.c b/drivers/net/ethernet/smsc/smc91c92_cs.c
index 6715462..97280da 100644
--- a/drivers/net/ethernet/smsc/smc91c92_cs.c
+++ b/drivers/net/ethernet/smsc/smc91c92_cs.c
@@ -113,6 +113,7 @@ struct smc_private {
struct mii_if_info mii_if;
int duplex;
int rx_ovrn;
+ unsigned long last_rx;
};
/* Special definitions for Megahertz multifunction cards */
@@ -1491,6 +1492,7 @@ static void smc_rx(struct net_device *dev)
if (!(rx_status & RS_ERRORS)) {
/* do stuff to make a new packet */
struct sk_buff *skb;
+ struct smc_private *smc = netdev_priv(dev);
/* Note: packet_length adds 5 or 6 extra bytes here! */
skb = netdev_alloc_skb(dev, packet_length+2);
@@ -1509,7 +1511,7 @@ static void smc_rx(struct net_device *dev)
skb->protocol = eth_type_trans(skb, dev);
netif_rx(skb);
- dev->last_rx = jiffies;
+ smc->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += packet_length;
if (rx_status & RS_MULTICAST)
@@ -1790,7 +1792,7 @@ static void media_check(u_long arg)
}
/* Ignore collisions unless we've had no rx's recently */
- if (time_after(jiffies, dev->last_rx + HZ)) {
+ if (time_after(jiffies, smc->last_rx + HZ)) {
if (smc->tx_err || (smc->media_status & EPH_16COL))
media |= EPH_16COL;
}
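With dev->last_rx on its way out of struct net_device, the driver keeps its own timestamp in private state and compares it using the kernel's wraparound-safe time_after() semantics. A standalone sketch of that comparison:

#include <stdio.h>

typedef unsigned long jiffies_t;

/* same signed-difference trick as the kernel's time_after() */
#define time_after(a, b) ((long)((b) - (a)) < 0)

int main(void)
{
	jiffies_t hz = 100, jiffies = 1000, last_rx = 850;

	/* quiet for over a "second"? 1000 is after 850 + 100, so yes */
	printf("%d\n", time_after(jiffies, last_rx + hz));	/* 1 */
	return 0;
}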
diff --git a/drivers/net/ethernet/smsc/smsc9420.c b/drivers/net/ethernet/smsc/smsc9420.c
index 3174aeb..2fa3c1d 100644
--- a/drivers/net/ethernet/smsc/smsc9420.c
+++ b/drivers/net/ethernet/smsc/smsc9420.c
@@ -861,7 +861,7 @@ static int smsc9420_rx_poll(struct napi_struct *napi, int budget)
smsc9420_pci_flush_write(pd);
if (work_done < budget) {
- napi_complete(&pd->napi);
+ napi_complete_done(&pd->napi, work_done);
/* re-enable RX DMA interrupts */
dma_intr_ena = smsc9420_reg_read(pd, DMAC_INTR_ENA);
diff --git a/drivers/net/ethernet/stmicro/Kconfig b/drivers/net/ethernet/stmicro/Kconfig
index 1c1157d..ecd7a5e 100644
--- a/drivers/net/ethernet/stmicro/Kconfig
+++ b/drivers/net/ethernet/stmicro/Kconfig
@@ -7,7 +7,8 @@ config NET_VENDOR_STMICRO
default y
depends on HAS_IOMEM
---help---
- If you have a network (Ethernet) card belonging to this class, say Y.
+ If you have a network (Ethernet) card based on Synopsys Ethernet IP
+ Cores, say Y.
Note that the answer to this question doesn't directly affect the
kernel: saying N will just cause the configurator to skip all
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 99594e3..cfbe363 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -1,5 +1,5 @@
config STMMAC_ETH
- tristate "STMicroelectronics 10/100/1000 Ethernet driver"
+ tristate "STMicroelectronics 10/100/1000/EQOS Ethernet driver"
depends on HAS_IOMEM && HAS_DMA
select MII
select PHYLIB
@@ -7,9 +7,8 @@ config STMMAC_ETH
imply PTP_1588_CLOCK
select RESET_CONTROLLER
---help---
- This is the driver for the Ethernet IPs are built around a
- Synopsys IP Core and only tested on the STMicroelectronics
- platforms.
+ This is the driver for the Ethernet IPs built around a
+ Synopsys IP Core.
if STMMAC_ETH
@@ -152,11 +151,11 @@ config STMMAC_PCI
tristate "STMMAC PCI bus support"
depends on STMMAC_ETH && PCI
---help---
- This is to select the Synopsys DWMAC available on PCI devices,
- if you have a controller with this interface, say Y or M here.
+ This selects the PCI bus support for the stmmac driver.
+ This driver was tested on XLINX XC2V3000 FF1152AMT0221
+ D1215994A VIRTEX FPGA board and SNPS QoS IPK Prototyping Kit.
- This PCI support is tested on XLINX XC2V3000 FF1152AMT0221
- D1215994A VIRTEX FPGA board.
+ If you have a controller with this interface, say Y or M here.
If unsure, say N.
endif
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
index ffaed1f..8840a36 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c
@@ -35,10 +35,6 @@
#define PRG_ETH0_TXDLY_SHIFT 5
#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5)
-#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT)
-#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT)
/* divider for the result of m250_sel */
#define PRG_ETH0_CLK_M250_DIV_SHIFT 7
@@ -69,6 +65,8 @@ struct meson8b_dwmac {
struct clk_divider m25_div;
struct clk *m25_div_clk;
+
+ u32 tx_delay_ns;
};
static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg,
@@ -179,6 +177,7 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
{
int ret;
unsigned long clk_rate;
+ u8 tx_dly_val;
switch (dwmac->phy_mode) {
case PHY_INTERFACE_MODE_RGMII:
@@ -196,9 +195,13 @@ static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac)
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0,
PRG_ETH0_INVERTED_RMII_CLK, 0);
- /* TX clock delay - all known boards use a 1/4 cycle delay */
+ /* TX clock delay in ns = "8ns / 4 * tx_dly_val" (where
+ * 8ns are exactly one cycle of the 125MHz RGMII TX clock):
+ * 0ns = 0x0, 2ns = 0x1, 4ns = 0x2, 6ns = 0x3
+ */
+ tx_dly_val = dwmac->tx_delay_ns >> 1;
meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK,
- PRG_ETH0_TXDLY_QUARTER);
+ tx_dly_val << PRG_ETH0_TXDLY_SHIFT);
break;
case PHY_INTERFACE_MODE_RMII:
@@ -284,6 +287,11 @@ static int meson8b_dwmac_probe(struct platform_device *pdev)
goto err_remove_config_dt;
}
+ /* use 2ns as fallback since this value was previously hardcoded */
+ if (of_property_read_u32(pdev->dev.of_node, "amlogic,tx-delay-ns",
+ &dwmac->tx_delay_ns))
+ dwmac->tx_delay_ns = 2;
+
ret = meson8b_init_clk(dwmac);
if (ret)
goto err_remove_config_dt;
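The delay encoding introduced above is simple arithmetic: one register step is a quarter of the 8 ns RGMII clock period, i.e. 2 ns, so converting nanoseconds to the field value is a right shift by one. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned ns;

	/* mirrors tx_dly_val = tx_delay_ns >> 1 for the documented values */
	for (ns = 0; ns <= 6; ns += 2)
		printf("%u ns -> 0x%x\n", ns, ns >> 1);	/* 0x0..0x3 */
	return 0;
}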
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
index 834f40f..202216c 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac4_core.c
@@ -184,7 +184,7 @@ static void dwmac4_set_eee_pls(struct mac_device_info *hw, int link)
static void dwmac4_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
{
void __iomem *ioaddr = hw->pcsr;
- int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
+ int value = ((tw & 0xffff)) | ((ls & 0x3ff) << 16);
/* Program the timers in the LPI timer control register:
* LS: minimum time (ms) for which the link
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index d481c5f..26a2185 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2684,7 +2684,7 @@ static int stmmac_poll(struct napi_struct *napi, int budget)
work_done = stmmac_rx(priv, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
stmmac_enable_dma_irq(priv);
}
return work_done;
@@ -3305,9 +3305,9 @@ int stmmac_dvr_probe(struct device *device,
(priv->plat->maxmtu >= ndev->min_mtu))
ndev->max_mtu = priv->plat->maxmtu;
else if (priv->plat->maxmtu < ndev->min_mtu)
- netdev_warn(priv->dev,
- "%s: warning: maxmtu having invalid value (%d)\n",
- __func__, priv->plat->maxmtu);
+ dev_warn(priv->device,
+ "%s: warning: maxmtu having invalid value (%d)\n",
+ __func__, priv->plat->maxmtu);
if (flow_ctrl)
priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
@@ -3319,7 +3319,8 @@ int stmmac_dvr_probe(struct device *device,
*/
if ((priv->synopsys_id >= DWMAC_CORE_3_50) && (!priv->plat->riwt_off)) {
priv->use_riwt = 1;
- netdev_info(priv->dev, "Enable RX Mitigation via HW Watchdog Timer\n");
+ dev_info(priv->device,
+ "Enable RX Mitigation via HW Watchdog Timer\n");
}
netif_napi_add(ndev, &priv->napi, stmmac_poll, 64);
@@ -3345,17 +3346,17 @@ int stmmac_dvr_probe(struct device *device,
/* MDIO bus Registration */
ret = stmmac_mdio_register(ndev);
if (ret < 0) {
- netdev_err(priv->dev,
- "%s: MDIO bus (id: %d) registration failed",
- __func__, priv->plat->bus_id);
+ dev_err(priv->device,
+ "%s: MDIO bus (id: %d) registration failed",
+ __func__, priv->plat->bus_id);
goto error_mdio_register;
}
}
ret = register_netdev(ndev);
if (ret) {
- netdev_err(priv->dev, "%s: ERROR %i registering the device\n",
- __func__, ret);
+ dev_err(priv->device, "%s: ERROR %i registering the device\n",
+ __func__, ret);
goto error_netdev_register;
}
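The napi_complete() to napi_complete_done() conversions in this file and in the sun drivers further down all follow the same idiom; a minimal sketch with hypothetical helper names (my_rx() and my_enable_rx_irq() stand in for the per-driver routines):

/* Illustrative poll handler: passing work_done to napi_complete_done()
 * lets the core feed the figure into adaptive interrupt moderation and
 * busy polling rather than discarding it, which is the point of these
 * conversions.
 */
static int my_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_rx(napi, budget); /* hypothetical RX processing */

	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		my_enable_rx_irq(napi); /* hypothetical IRQ re-enable */
	}
	return work_done;
}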
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index ac32f9e..460f94f 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -191,7 +191,7 @@ static int stmmac_dt_phy(struct plat_stmmacenet_data *plat,
for_each_child_of_node(np, plat->mdio_node) {
if (of_device_is_compatible(plat->mdio_node,
"snps,dwmac-mdio"))
- break;
+ break;
}
}
@@ -409,6 +409,7 @@ void stmmac_remove_config_dt(struct platform_device *pdev,
if (of_phy_is_fixed_link(np))
of_phy_deregister_fixed_link(np);
of_node_put(plat->phy_node);
+ of_node_put(plat->mdio_node);
}
#else
struct plat_stmmacenet_data *
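The added of_node_put() balances the reference that the probe-side loop in the first hunk of this file keeps when it breaks out; sketched in isolation (names as in the hunks above):

/* Illustrative only: for_each_child_of_node() takes a reference on each
 * child it visits, and breaking out of the loop leaves that reference
 * held on the matching node. It must be dropped on teardown, which is
 * what the new of_node_put(plat->mdio_node) in the remove path does.
 */
struct device_node *child;

for_each_child_of_node(np, child) {
	if (of_device_is_compatible(child, "snps,dwmac-mdio"))
		break; /* 'child' keeps its reference */
}
/* ... later, when tearing down ... */
of_node_put(child); /* balances the reference held across the break */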
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index e557a32..5797805 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3786,7 +3786,7 @@ static int niu_poll(struct napi_struct *napi, int budget)
work_done = niu_poll_core(np, lp, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
niu_ldg_rearm(np, lp, 1);
}
return work_done;
diff --git a/drivers/net/ethernet/sun/sungem.c b/drivers/net/ethernet/sun/sungem.c
index d277e41..5c5952e 100644
--- a/drivers/net/ethernet/sun/sungem.c
+++ b/drivers/net/ethernet/sun/sungem.c
@@ -922,7 +922,7 @@ static int gem_poll(struct napi_struct *napi, int budget)
gp->status = readl(gp->regs + GREG_STAT);
} while (gp->status & GREG_STAT_NAPI);
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
gem_enable_ints(gp);
return work_done;
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index 8878b75..191c8ad 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -860,7 +860,7 @@ int sunvnet_poll_common(struct napi_struct *napi, int budget)
int processed = vnet_event_napi(port, budget);
if (processed < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, processed);
port->rx_event &= ~LDC_EVENT_DATA_READY;
vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
}
diff --git a/drivers/net/ethernet/synopsys/Kconfig b/drivers/net/ethernet/synopsys/Kconfig
deleted file mode 100644
index 8276ee5..0000000
--- a/drivers/net/ethernet/synopsys/Kconfig
+++ /dev/null
@@ -1,27 +0,0 @@
-#
-# Synopsys network device configuration
-#
-
-config NET_VENDOR_SYNOPSYS
- bool "Synopsys devices"
- default y
- ---help---
- If you have a network (Ethernet) device belonging to this class, say Y.
-
- Note that the answer to this question doesn't directly affect the
- kernel: saying N will just cause the configurator to skip all
- the questions about Synopsys devices. If you say Y, you will be asked
- for your specific device in the following questions.
-
-if NET_VENDOR_SYNOPSYS
-
-config SYNOPSYS_DWC_ETH_QOS
- tristate "Sypnopsys DWC Ethernet QOS v4.10a support"
- select PHYLIB
- select CRC32
- select MII
- depends on OF && HAS_DMA
- ---help---
- This driver supports the DWC Ethernet QoS from Synopsys
-
-endif # NET_VENDOR_SYNOPSYS
diff --git a/drivers/net/ethernet/synopsys/Makefile b/drivers/net/ethernet/synopsys/Makefile
deleted file mode 100644
index 7a37572..0000000
--- a/drivers/net/ethernet/synopsys/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the Synopsys network device drivers.
-#
-
-obj-$(CONFIG_SYNOPSYS_DWC_ETH_QOS) += dwc_eth_qos.o
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
deleted file mode 100644
index 467dcc5..0000000
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ /dev/null
@@ -1,2996 +0,0 @@
-/* Synopsys DWC Ethernet Quality-of-Service v4.10a linux driver
- *
- * This is a driver for the Synopsys DWC Ethernet QoS IP version 4.10a (GMAC).
- * This version introduced a lot of changes which break backwards
- * compatibility with the non-QoS IP from Synopsys (used in the ST Micro drivers).
- * Some fields differ between version 4.00a and 4.10a, mainly the interrupt
- * bit fields. The driver could be made compatible with 4.00a if all relevant
- * HW errata are handled.
- *
- * The GMAC is highly configurable at synthesis time. This driver has been
- * developed for a subset of the total available feature set. Currently
- * it supports:
- * - TSO
- * - Checksum offload for RX and TX.
- * - Energy efficient ethernet.
- * - GMII phy interface.
- * - The statistics module.
- * - Single RX and TX queue.
- *
- * Copyright (C) 2015 Axis Communications AB.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- */
-
-#include <linux/clk.h>
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/ethtool.h>
-#include <linux/stat.h>
-#include <linux/types.h>
-
-#include <linux/slab.h>
-#include <linux/delay.h>
-#include <linux/mm.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/platform_device.h>
-
-#include <linux/phy.h>
-#include <linux/mii.h>
-#include <linux/dma-mapping.h>
-#include <linux/vmalloc.h>
-
-#include <linux/device.h>
-#include <linux/bitrev.h>
-#include <linux/crc32.h>
-
-#include <linux/of.h>
-#include <linux/interrupt.h>
-#include <linux/clocksource.h>
-#include <linux/net_tstamp.h>
-#include <linux/pm_runtime.h>
-#include <linux/of_net.h>
-#include <linux/of_address.h>
-#include <linux/of_mdio.h>
-#include <linux/timer.h>
-#include <linux/tcp.h>
-
-#define DRIVER_NAME "dwceqos"
-#define DRIVER_DESCRIPTION "Synopsys DWC Ethernet QoS driver"
-#define DRIVER_VERSION "0.9"
-
-#define DWCEQOS_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \
- NETIF_MSG_LINK | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP)
-
-#define DWCEQOS_TX_TIMEOUT 5 /* Seconds */
-
-#define DWCEQOS_LPI_TIMER_MIN 8
-#define DWCEQOS_LPI_TIMER_MAX ((1 << 20) - 1)
-
-#define DWCEQOS_RX_BUF_SIZE 2048
-
-#define DWCEQOS_RX_DCNT 256
-#define DWCEQOS_TX_DCNT 256
-
-#define DWCEQOS_HASH_TABLE_SIZE 64
-
-/* The size field in the DMA descriptor is 14 bits */
-#define BYTES_PER_DMA_DESC 16376
-
-/* Hardware registers */
-#define START_MAC_REG_OFFSET 0x0000
-#define MAX_MAC_REG_OFFSET 0x0bd0
-#define START_MTL_REG_OFFSET 0x0c00
-#define MAX_MTL_REG_OFFSET 0x0d7c
-#define START_DMA_REG_OFFSET 0x1000
-#define MAX_DMA_REG_OFFSET 0x117C
-
-#define REG_SPACE_SIZE 0x1800
-
-/* DMA */
-#define REG_DWCEQOS_DMA_MODE 0x1000
-#define REG_DWCEQOS_DMA_SYSBUS_MODE 0x1004
-#define REG_DWCEQOS_DMA_IS 0x1008
-#define REG_DWCEQOS_DMA_DEBUG_ST0 0x100c
-
-/* DMA channel registers */
-#define REG_DWCEQOS_DMA_CH0_CTRL 0x1100
-#define REG_DWCEQOS_DMA_CH0_TX_CTRL 0x1104
-#define REG_DWCEQOS_DMA_CH0_RX_CTRL 0x1108
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LIST 0x1114
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LIST 0x111c
-#define REG_DWCEQOS_DMA_CH0_TXDESC_TAIL 0x1120
-#define REG_DWCEQOS_DMA_CH0_RXDESC_TAIL 0x1128
-#define REG_DWCEQOS_DMA_CH0_TXDESC_LEN 0x112c
-#define REG_DWCEQOS_DMA_CH0_RXDESC_LEN 0x1130
-#define REG_DWCEQOS_DMA_CH0_IE 0x1134
-#define REG_DWCEQOS_DMA_CH0_CUR_TXDESC 0x1144
-#define REG_DWCEQOS_DMA_CH0_CUR_RXDESC 0x114c
-#define REG_DWCEQOS_DMA_CH0_CUR_TXBUF 0x1154
-#define REG_DWCEQOS_DMA_CH0_CUR_RXBUG 0x115c
-#define REG_DWCEQOS_DMA_CH0_STA 0x1160
-
-#define DWCEQOS_DMA_MODE_TXPR BIT(11)
-#define DWCEQOS_DMA_MODE_DA BIT(1)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_EN_LPI BIT(31)
-#define DWCEQOS_DMA_SYSBUS_MODE_FB BIT(0)
-#define DWCEQOS_DMA_SYSBUS_MODE_AAL BIT(12)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(x) \
- (((x) << 16) & 0x000F0000)
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT 3
-#define DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_MASK GENMASK(19, 16)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(x) \
- (((x) << 24) & 0x0F000000)
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT 3
-#define DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_MASK GENMASK(27, 24)
-
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK GENMASK(7, 1)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST(x) \
- (((x) << 1) & DWCEQOS_DMA_SYSBUS_MODE_BURST_MASK)
-#define DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT GENMASK(3, 1)
-
-#define DWCEQOS_DMA_CH_CTRL_PBLX8 BIT(16)
-#define DWCEQOS_DMA_CH_CTRL_DSL(x) ((x) << 18)
-
-#define DWCEQOS_DMA_CH_CTRL_PBL(x) ((x) << 16)
-#define DWCEQOS_DMA_CH_CTRL_START BIT(0)
-#define DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(x) ((x) << 1)
-#define DWCEQOS_DMA_CH_TX_OSP BIT(4)
-#define DWCEQOS_DMA_CH_TX_TSE BIT(12)
-
-#define DWCEQOS_DMA_CH0_IE_NIE BIT(15)
-#define DWCEQOS_DMA_CH0_IE_AIE BIT(14)
-#define DWCEQOS_DMA_CH0_IE_RIE BIT(6)
-#define DWCEQOS_DMA_CH0_IE_TIE BIT(0)
-#define DWCEQOS_DMA_CH0_IE_FBEE BIT(12)
-#define DWCEQOS_DMA_CH0_IE_RBUE BIT(7)
-
-#define DWCEQOS_DMA_IS_DC0IS BIT(0)
-#define DWCEQOS_DMA_IS_MTLIS BIT(16)
-#define DWCEQOS_DMA_IS_MACIS BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_TI BIT(0)
-#define DWCEQOS_DMA_CH0_IS_RI BIT(6)
-#define DWCEQOS_DMA_CH0_IS_RBU BIT(7)
-#define DWCEQOS_DMA_CH0_IS_FBE BIT(12)
-#define DWCEQOS_DMA_CH0_IS_CDE BIT(13)
-#define DWCEQOS_DMA_CH0_IS_AIS BIT(14)
-
-#define DWCEQOS_DMA_CH0_IS_TEB GENMASK(18, 16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_READ BIT(16)
-#define DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR BIT(17)
-
-#define DWCEQOS_DMA_CH0_IS_REB GENMASK(21, 19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_READ BIT(19)
-#define DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR BIT(20)
-
-/* DMA descriptor bits for RX normal descriptor (read format) */
-#define DWCEQOS_DMA_RDES3_OWN BIT(31)
-#define DWCEQOS_DMA_RDES3_INTE BIT(30)
-#define DWCEQOS_DMA_RDES3_BUF2V BIT(25)
-#define DWCEQOS_DMA_RDES3_BUF1V BIT(24)
-
-/* DMA descriptor bits for RX normal descriptor (write back format) */
-#define DWCEQOS_DMA_RDES1_IPCE BIT(7)
-#define DWCEQOS_DMA_RDES3_ES BIT(15)
-#define DWCEQOS_DMA_RDES3_E_JT BIT(14)
-#define DWCEQOS_DMA_RDES3_PL(x) ((x) & 0x7fff)
-#define DWCEQOS_DMA_RDES1_PT 0x00000007
-#define DWCEQOS_DMA_RDES1_PT_UDP BIT(0)
-#define DWCEQOS_DMA_RDES1_PT_TCP BIT(1)
-#define DWCEQOS_DMA_RDES1_PT_ICMP 0x00000003
-
-/* DMA descriptor bits for TX normal descriptor (read format) */
-#define DWCEQOS_DMA_TDES2_IOC BIT(31)
-#define DWCEQOS_DMA_TDES3_OWN BIT(31)
-#define DWCEQOS_DMA_TDES3_CTXT BIT(30)
-#define DWCEQOS_DMA_TDES3_FD BIT(29)
-#define DWCEQOS_DMA_TDES3_LD BIT(28)
-#define DWCEQOS_DMA_TDES3_CIPH BIT(16)
-#define DWCEQOS_DMA_TDES3_CIPP BIT(17)
-#define DWCEQOS_DMA_TDES3_CA 0x00030000
-#define DWCEQOS_DMA_TDES3_TSE BIT(18)
-#define DWCEQOS_DMA_DES3_THL(x) ((x) << 19)
-#define DWCEQOS_DMA_DES2_B2L(x) ((x) << 16)
-
-#define DWCEQOS_DMA_TDES3_TCMSSV BIT(26)
-
-/* DMA channel states */
-#define DMA_TX_CH_STOPPED 0
-#define DMA_TX_CH_SUSPENDED 6
-
-#define DMA_GET_TX_STATE_CH0(status0) ((status0 & 0xF000) >> 12)
-
-/* MTL */
-#define REG_DWCEQOS_MTL_OPER 0x0c00
-#define REG_DWCEQOS_MTL_DEBUG_ST 0x0c0c
-#define REG_DWCEQOS_MTL_TXQ0_DEBUG_ST 0x0d08
-#define REG_DWCEQOS_MTL_RXQ0_DEBUG_ST 0x0d38
-
-#define REG_DWCEQOS_MTL_IS 0x0c20
-#define REG_DWCEQOS_MTL_TXQ0_OPER 0x0d00
-#define REG_DWCEQOS_MTL_RXQ0_OPER 0x0d30
-#define REG_DWCEQOS_MTL_RXQ0_MIS_CNT 0x0d34
-#define REG_DWCEQOS_MTL_RXQ0_CTRL 0x0d3c
-
-#define REG_DWCEQOS_MTL_Q0_ISCTRL 0x0d2c
-
-#define DWCEQOS_MTL_SCHALG_STRICT 0x00000060
-
-#define DWCEQOS_MTL_TXQ_TXQEN BIT(3)
-#define DWCEQOS_MTL_TXQ_TSF BIT(1)
-#define DWCEQOS_MTL_TXQ_FTQ BIT(0)
-#define DWCEQOS_MTL_TXQ_TTC512 0x00000070
-
-#define DWCEQOS_MTL_TXQ_SIZE(x) ((((x) - 256) & 0xff00) << 8)
-
-#define DWCEQOS_MTL_RXQ_SIZE(x) ((((x) - 256) & 0xff00) << 12)
-#define DWCEQOS_MTL_RXQ_EHFC BIT(7)
-#define DWCEQOS_MTL_RXQ_DIS_TCP_EF BIT(6)
-#define DWCEQOS_MTL_RXQ_FEP BIT(4)
-#define DWCEQOS_MTL_RXQ_FUP BIT(3)
-#define DWCEQOS_MTL_RXQ_RSF BIT(5)
-#define DWCEQOS_MTL_RXQ_RTC32 BIT(0)
-
-/* MAC */
-#define REG_DWCEQOS_MAC_CFG 0x0000
-#define REG_DWCEQOS_MAC_EXT_CFG 0x0004
-#define REG_DWCEQOS_MAC_PKT_FILT 0x0008
-#define REG_DWCEQOS_MAC_WD_TO 0x000c
-#define REG_DWCEQOS_HASTABLE_LO 0x0010
-#define REG_DWCEQOS_HASTABLE_HI 0x0014
-#define REG_DWCEQOS_MAC_IS 0x00b0
-#define REG_DWCEQOS_MAC_IE 0x00b4
-#define REG_DWCEQOS_MAC_STAT 0x00b8
-#define REG_DWCEQOS_MAC_MDIO_ADDR 0x0200
-#define REG_DWCEQOS_MAC_MDIO_DATA 0x0204
-#define REG_DWCEQOS_MAC_MAC_ADDR0_HI 0x0300
-#define REG_DWCEQOS_MAC_MAC_ADDR0_LO 0x0304
-#define REG_DWCEQOS_MAC_RXQ0_CTRL0 0x00a0
-#define REG_DWCEQOS_MAC_HW_FEATURE0 0x011c
-#define REG_DWCEQOS_MAC_HW_FEATURE1 0x0120
-#define REG_DWCEQOS_MAC_HW_FEATURE2 0x0124
-#define REG_DWCEQOS_MAC_HASHTABLE_LO 0x0010
-#define REG_DWCEQOS_MAC_HASHTABLE_HI 0x0014
-#define REG_DWCEQOS_MAC_LPI_CTRL_STATUS 0x00d0
-#define REG_DWCEQOS_MAC_LPI_TIMERS_CTRL 0x00d4
-#define REG_DWCEQOS_MAC_LPI_ENTRY_TIMER 0x00d8
-#define REG_DWCEQOS_MAC_1US_TIC_COUNTER 0x00dc
-#define REG_DWCEQOS_MAC_RX_FLOW_CTRL 0x0090
-#define REG_DWCEQOS_MAC_Q0_TX_FLOW 0x0070
-
-#define DWCEQOS_MAC_CFG_ACS BIT(20)
-#define DWCEQOS_MAC_CFG_JD BIT(17)
-#define DWCEQOS_MAC_CFG_JE BIT(16)
-#define DWCEQOS_MAC_CFG_PS BIT(15)
-#define DWCEQOS_MAC_CFG_FES BIT(14)
-#define DWCEQOS_MAC_CFG_DM BIT(13)
-#define DWCEQOS_MAC_CFG_DO BIT(10)
-#define DWCEQOS_MAC_CFG_TE BIT(1)
-#define DWCEQOS_MAC_CFG_IPC BIT(27)
-#define DWCEQOS_MAC_CFG_RE BIT(0)
-
-#define DWCEQOS_ADDR_HIGH(reg) (0x00000300 + (reg * 8))
-#define DWCEQOS_ADDR_LOW(reg) (0x00000304 + (reg * 8))
-
-#define DWCEQOS_MAC_IS_LPI_INT BIT(5)
-#define DWCEQOS_MAC_IS_MMC_INT BIT(8)
-
-#define DWCEQOS_MAC_RXQ_EN BIT(1)
-#define DWCEQOS_MAC_MAC_ADDR_HI_EN BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_RA BIT(31)
-#define DWCEQOS_MAC_PKT_FILT_HPF BIT(10)
-#define DWCEQOS_MAC_PKT_FILT_SAF BIT(9)
-#define DWCEQOS_MAC_PKT_FILT_SAIF BIT(8)
-#define DWCEQOS_MAC_PKT_FILT_DBF BIT(5)
-#define DWCEQOS_MAC_PKT_FILT_PM BIT(4)
-#define DWCEQOS_MAC_PKT_FILT_DAIF BIT(3)
-#define DWCEQOS_MAC_PKT_FILT_HMC BIT(2)
-#define DWCEQOS_MAC_PKT_FILT_HUC BIT(1)
-#define DWCEQOS_MAC_PKT_FILT_PR BIT(0)
-
-#define DWCEQOS_MAC_MDIO_ADDR_CR(x) (((x & 15)) << 8)
-#define DWCEQOS_MAC_MDIO_ADDR_CR_20 2
-#define DWCEQOS_MAC_MDIO_ADDR_CR_35 3
-#define DWCEQOS_MAC_MDIO_ADDR_CR_60 0
-#define DWCEQOS_MAC_MDIO_ADDR_CR_100 1
-#define DWCEQOS_MAC_MDIO_ADDR_CR_150 4
-#define DWCEQOS_MAC_MDIO_ADDR_CR_250 5
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_READ 0x0000000c
-#define DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE BIT(2)
-#define DWCEQOS_MAC_MDIO_ADDR_GB BIT(0)
-
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEN BIT(0)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIEX BIT(1)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEN BIT(2)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIEX BIT(3)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST BIT(8)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST BIT(9)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN BIT(16)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLS BIT(17)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_PLSEN BIT(18)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA BIT(19)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE BIT(20)
-#define DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE BIT(21)
-
-#define DWCEQOS_MAC_1US_TIC_COUNTER_VAL(x) ((x) & GENMASK(11, 0))
-
-#define DWCEQOS_LPI_CTRL_ENABLE_EEE (DWCEQOS_MAC_LPI_CTRL_STATUS_LPITE | \
- DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA | \
- DWCEQOS_MAC_LPI_CTRL_STATUS_LPIEN)
-
-#define DWCEQOS_MAC_RX_FLOW_CTRL_RFE BIT(0)
-
-#define DWCEQOS_MAC_Q0_TX_FLOW_TFE BIT(1)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PT(time) ((time) << 16)
-#define DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS (0 << 4)
-
-/* Features */
-#define DWCEQOS_MAC_HW_FEATURE0_RXCOESEL BIT(16)
-#define DWCEQOS_MAC_HW_FEATURE0_TXCOESEL BIT(14)
-#define DWCEQOS_MAC_HW_FEATURE0_HDSEL BIT(2)
-#define DWCEQOS_MAC_HW_FEATURE0_EEESEL BIT(13)
-#define DWCEQOS_MAC_HW_FEATURE0_GMIISEL BIT(1)
-#define DWCEQOS_MAC_HW_FEATURE0_MIISEL BIT(0)
-
-#define DWCEQOS_MAC_HW_FEATURE1_TSOEN BIT(18)
-#define DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(x) ((128 << ((x) & 0x7c0)) >> 6)
-#define DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(x) (128 << ((x) & 0x1f))
-
-#define DWCEQOS_MAX_PERFECT_ADDRESSES(feature1) \
- (1 + (((feature1) & 0x1fc0000) >> 18))
-
-#define DWCEQOS_MDIO_PHYADDR(x) (((x) & 0x1f) << 21)
-#define DWCEQOS_MDIO_PHYREG(x) (((x) & 0x1f) << 16)
-
-#define DWCEQOS_DMA_MODE_SWR BIT(0)
-
-#define DWCEQOS_DWCEQOS_RX_BUF_SIZE 2048
-
-/* Mac Management Counters */
-#define REG_DWCEQOS_MMC_CTRL 0x0700
-#define REG_DWCEQOS_MMC_RXIRQ 0x0704
-#define REG_DWCEQOS_MMC_TXIRQ 0x0708
-#define REG_DWCEQOS_MMC_RXIRQMASK 0x070c
-#define REG_DWCEQOS_MMC_TXIRQMASK 0x0710
-
-#define DWCEQOS_MMC_CTRL_CNTRST BIT(0)
-#define DWCEQOS_MMC_CTRL_RSTONRD BIT(2)
-
-#define DWC_MMC_TXLPITRANSCNTR 0x07F0
-#define DWC_MMC_TXLPIUSCNTR 0x07EC
-#define DWC_MMC_TXOVERSIZE_G 0x0778
-#define DWC_MMC_TXVLANPACKETS_G 0x0774
-#define DWC_MMC_TXPAUSEPACKETS 0x0770
-#define DWC_MMC_TXEXCESSDEF 0x076C
-#define DWC_MMC_TXPACKETCOUNT_G 0x0768
-#define DWC_MMC_TXOCTETCOUNT_G 0x0764
-#define DWC_MMC_TXCARRIERERROR 0x0760
-#define DWC_MMC_TXEXCESSCOL 0x075C
-#define DWC_MMC_TXLATECOL 0x0758
-#define DWC_MMC_TXDEFERRED 0x0754
-#define DWC_MMC_TXMULTICOL_G 0x0750
-#define DWC_MMC_TXSINGLECOL_G 0x074C
-#define DWC_MMC_TXUNDERFLOWERROR 0x0748
-#define DWC_MMC_TXBROADCASTPACKETS_GB 0x0744
-#define DWC_MMC_TXMULTICASTPACKETS_GB 0x0740
-#define DWC_MMC_TXUNICASTPACKETS_GB 0x073C
-#define DWC_MMC_TX1024TOMAXOCTETS_GB 0x0738
-#define DWC_MMC_TX512TO1023OCTETS_GB 0x0734
-#define DWC_MMC_TX256TO511OCTETS_GB 0x0730
-#define DWC_MMC_TX128TO255OCTETS_GB 0x072C
-#define DWC_MMC_TX65TO127OCTETS_GB 0x0728
-#define DWC_MMC_TX64OCTETS_GB 0x0724
-#define DWC_MMC_TXMULTICASTPACKETS_G 0x0720
-#define DWC_MMC_TXBROADCASTPACKETS_G 0x071C
-#define DWC_MMC_TXPACKETCOUNT_GB 0x0718
-#define DWC_MMC_TXOCTETCOUNT_GB 0x0714
-
-#define DWC_MMC_RXLPITRANSCNTR 0x07F8
-#define DWC_MMC_RXLPIUSCNTR 0x07F4
-#define DWC_MMC_RXCTRLPACKETS_G 0x07E4
-#define DWC_MMC_RXRCVERROR 0x07E0
-#define DWC_MMC_RXWATCHDOG 0x07DC
-#define DWC_MMC_RXVLANPACKETS_GB 0x07D8
-#define DWC_MMC_RXFIFOOVERFLOW 0x07D4
-#define DWC_MMC_RXPAUSEPACKETS 0x07D0
-#define DWC_MMC_RXOUTOFRANGETYPE 0x07CC
-#define DWC_MMC_RXLENGTHERROR 0x07C8
-#define DWC_MMC_RXUNICASTPACKETS_G 0x07C4
-#define DWC_MMC_RX1024TOMAXOCTETS_GB 0x07C0
-#define DWC_MMC_RX512TO1023OCTETS_GB 0x07BC
-#define DWC_MMC_RX256TO511OCTETS_GB 0x07B8
-#define DWC_MMC_RX128TO255OCTETS_GB 0x07B4
-#define DWC_MMC_RX65TO127OCTETS_GB 0x07B0
-#define DWC_MMC_RX64OCTETS_GB 0x07AC
-#define DWC_MMC_RXOVERSIZE_G 0x07A8
-#define DWC_MMC_RXUNDERSIZE_G 0x07A4
-#define DWC_MMC_RXJABBERERROR 0x07A0
-#define DWC_MMC_RXRUNTERROR 0x079C
-#define DWC_MMC_RXALIGNMENTERROR 0x0798
-#define DWC_MMC_RXCRCERROR 0x0794
-#define DWC_MMC_RXMULTICASTPACKETS_G 0x0790
-#define DWC_MMC_RXBROADCASTPACKETS_G 0x078C
-#define DWC_MMC_RXOCTETCOUNT_G 0x0788
-#define DWC_MMC_RXOCTETCOUNT_GB 0x0784
-#define DWC_MMC_RXPACKETCOUNT_GB 0x0780
-
-static int debug = -1;
-module_param(debug, int, 0);
-MODULE_PARM_DESC(debug, "DWC_eth_qos debug level (0=none,...,16=all)");
-
-/* DMA ring descriptor. These are used as support descriptors for the HW DMA */
-struct ring_desc {
- struct sk_buff *skb;
- dma_addr_t mapping;
- size_t len;
-};
-
-/* DMA hardware descriptor */
-struct dwceqos_dma_desc {
- u32 des0;
- u32 des1;
- u32 des2;
- u32 des3;
-} ____cacheline_aligned;
-
-struct dwceqos_mmc_counters {
- __u64 txlpitranscntr;
- __u64 txpiuscntr;
- __u64 txoversize_g;
- __u64 txvlanpackets_g;
- __u64 txpausepackets;
- __u64 txexcessdef;
- __u64 txpacketcount_g;
- __u64 txoctetcount_g;
- __u64 txcarriererror;
- __u64 txexcesscol;
- __u64 txlatecol;
- __u64 txdeferred;
- __u64 txmulticol_g;
- __u64 txsinglecol_g;
- __u64 txunderflowerror;
- __u64 txbroadcastpackets_gb;
- __u64 txmulticastpackets_gb;
- __u64 txunicastpackets_gb;
- __u64 tx1024tomaxoctets_gb;
- __u64 tx512to1023octets_gb;
- __u64 tx256to511octets_gb;
- __u64 tx128to255octets_gb;
- __u64 tx65to127octets_gb;
- __u64 tx64octets_gb;
- __u64 txmulticastpackets_g;
- __u64 txbroadcastpackets_g;
- __u64 txpacketcount_gb;
- __u64 txoctetcount_gb;
-
- __u64 rxlpitranscntr;
- __u64 rxlpiuscntr;
- __u64 rxctrlpackets_g;
- __u64 rxrcverror;
- __u64 rxwatchdog;
- __u64 rxvlanpackets_gb;
- __u64 rxfifooverflow;
- __u64 rxpausepackets;
- __u64 rxoutofrangetype;
- __u64 rxlengtherror;
- __u64 rxunicastpackets_g;
- __u64 rx1024tomaxoctets_gb;
- __u64 rx512to1023octets_gb;
- __u64 rx256to511octets_gb;
- __u64 rx128to255octets_gb;
- __u64 rx65to127octets_gb;
- __u64 rx64octets_gb;
- __u64 rxoversize_g;
- __u64 rxundersize_g;
- __u64 rxjabbererror;
- __u64 rxrunterror;
- __u64 rxalignmenterror;
- __u64 rxcrcerror;
- __u64 rxmulticastpackets_g;
- __u64 rxbroadcastpackets_g;
- __u64 rxoctetcount_g;
- __u64 rxoctetcount_gb;
- __u64 rxpacketcount_gb;
-};
-
-/* Ethtool statistics */
-
-struct dwceqos_stat {
- const char stat_name[ETH_GSTRING_LEN];
- int offset;
-};
-
-#define STAT_ITEM(name, var) \
- {\
- name,\
- offsetof(struct dwceqos_mmc_counters, var),\
- }
-
-static const struct dwceqos_stat dwceqos_ethtool_stats[] = {
- STAT_ITEM("tx_bytes", txoctetcount_gb),
- STAT_ITEM("tx_packets", txpacketcount_gb),
- STAT_ITEM("tx_unicst_packets", txunicastpackets_gb),
- STAT_ITEM("tx_broadcast_packets", txbroadcastpackets_gb),
- STAT_ITEM("tx_multicast_packets", txmulticastpackets_gb),
- STAT_ITEM("tx_pause_packets", txpausepackets),
- STAT_ITEM("tx_up_to_64_byte_packets", tx64octets_gb),
- STAT_ITEM("tx_65_to_127_byte_packets", tx65to127octets_gb),
- STAT_ITEM("tx_128_to_255_byte_packets", tx128to255octets_gb),
- STAT_ITEM("tx_256_to_511_byte_packets", tx256to511octets_gb),
- STAT_ITEM("tx_512_to_1023_byte_packets", tx512to1023octets_gb),
- STAT_ITEM("tx_1024_to_maxsize_packets", tx1024tomaxoctets_gb),
- STAT_ITEM("tx_underflow_errors", txunderflowerror),
- STAT_ITEM("tx_lpi_count", txlpitranscntr),
-
- STAT_ITEM("rx_bytes", rxoctetcount_gb),
- STAT_ITEM("rx_packets", rxpacketcount_gb),
- STAT_ITEM("rx_unicast_packets", rxunicastpackets_g),
- STAT_ITEM("rx_broadcast_packets", rxbroadcastpackets_g),
- STAT_ITEM("rx_multicast_packets", rxmulticastpackets_g),
- STAT_ITEM("rx_vlan_packets", rxvlanpackets_gb),
- STAT_ITEM("rx_pause_packets", rxpausepackets),
- STAT_ITEM("rx_up_to_64_byte_packets", rx64octets_gb),
- STAT_ITEM("rx_65_to_127_byte_packets", rx65to127octets_gb),
- STAT_ITEM("rx_128_to_255_byte_packets", rx128to255octets_gb),
- STAT_ITEM("rx_256_to_511_byte_packets", rx256to511octets_gb),
- STAT_ITEM("rx_512_to_1023_byte_packets", rx512to1023octets_gb),
- STAT_ITEM("rx_1024_to_maxsize_packets", rx1024tomaxoctets_gb),
- STAT_ITEM("rx_fifo_overflow_errors", rxfifooverflow),
- STAT_ITEM("rx_oversize_packets", rxoversize_g),
- STAT_ITEM("rx_undersize_packets", rxundersize_g),
- STAT_ITEM("rx_jabbers", rxjabbererror),
- STAT_ITEM("rx_align_errors", rxalignmenterror),
- STAT_ITEM("rx_crc_errors", rxcrcerror),
- STAT_ITEM("rx_lpi_count", rxlpitranscntr),
-};
-
-/* Configuration of AXI bus parameters.
- * These values depend on the parameters set on the MAC core as well
- * as the AXI interconnect.
- */
-struct dwceqos_bus_cfg {
- /* Enable AXI low-power interface. */
- bool en_lpi;
- /* Limit on number of outstanding AXI write requests. */
- u32 write_requests;
- /* Limit on number of outstanding AXI read requests. */
- u32 read_requests;
- /* Bitmap of allowed AXI burst lengths, 4-256 beats. */
- u32 burst_map;
- /* DMA Programmable burst length */
- u32 tx_pbl;
- u32 rx_pbl;
-};
-
-struct dwceqos_flowcontrol {
- int autoneg;
- int rx;
- int rx_current;
- int tx;
- int tx_current;
-};
-
-struct net_local {
- void __iomem *baseaddr;
- struct clk *phy_ref_clk;
- struct clk *apb_pclk;
-
- struct device_node *phy_node;
- struct net_device *ndev;
- struct platform_device *pdev;
-
- u32 msg_enable;
-
- struct tasklet_struct tx_bdreclaim_tasklet;
- struct workqueue_struct *txtimeout_handler_wq;
- struct work_struct txtimeout_reinit;
-
- phy_interface_t phy_interface;
- struct mii_bus *mii_bus;
-
- unsigned int link;
- unsigned int speed;
- unsigned int duplex;
-
- struct napi_struct napi;
-
- /* DMA Descriptor Areas */
- struct ring_desc *rx_skb;
- struct ring_desc *tx_skb;
-
- struct dwceqos_dma_desc *tx_descs;
- struct dwceqos_dma_desc *rx_descs;
-
- /* DMA Mapped Descriptor areas */
- dma_addr_t tx_descs_addr;
- dma_addr_t rx_descs_addr;
- dma_addr_t tx_descs_tail_addr;
- dma_addr_t rx_descs_tail_addr;
-
- size_t tx_free;
- size_t tx_next;
- size_t rx_cur;
- size_t tx_cur;
-
- /* Spinlocks for accessing DMA Descriptors */
- spinlock_t tx_lock;
-
- /* Spinlock for register read-modify-writes. */
- spinlock_t hw_lock;
-
- u32 feature0;
- u32 feature1;
- u32 feature2;
-
- struct dwceqos_bus_cfg bus_cfg;
- bool en_tx_lpi_clockgating;
-
- int eee_enabled;
- int eee_active;
- int csr_val;
- u32 gso_size;
-
- struct dwceqos_mmc_counters mmc_counters;
- /* Protect the mmc_counter updates. */
- spinlock_t stats_lock;
- u32 mmc_rx_counters_mask;
- u32 mmc_tx_counters_mask;
-
- struct dwceqos_flowcontrol flowcontrol;
-
- /* Tracks the intermediate state of phy started but hardware
- * init not finished yet.
- */
- bool phy_defer;
-};
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
- u32 tx_mask);
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
- unsigned int reg_n);
-static int dwceqos_stop(struct net_device *ndev);
-static int dwceqos_open(struct net_device *ndev);
-static void dwceqos_tx_poll_demand(struct net_local *lp);
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable);
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable);
-
-static void dwceqos_reset_state(struct net_local *lp);
-
-#define dwceqos_read(lp, reg) \
- readl_relaxed(((void __iomem *)((lp)->baseaddr)) + (reg))
-#define dwceqos_write(lp, reg, val) \
- writel_relaxed((val), ((void __iomem *)((lp)->baseaddr)) + (reg))
-
-static void dwceqos_reset_state(struct net_local *lp)
-{
- lp->link = 0;
- lp->speed = 0;
- lp->duplex = DUPLEX_UNKNOWN;
- lp->flowcontrol.rx_current = 0;
- lp->flowcontrol.tx_current = 0;
- lp->eee_active = 0;
- lp->eee_enabled = 0;
-}
-
-static void print_descriptor(struct net_local *lp, int index, int tx)
-{
- struct dwceqos_dma_desc *dd;
-
- if (tx)
- dd = (struct dwceqos_dma_desc *)&lp->tx_descs[index];
- else
- dd = (struct dwceqos_dma_desc *)&lp->rx_descs[index];
-
- pr_info("%s DMA Descriptor #%d@%p Contents:\n", tx ? "TX" : "RX",
- index, dd);
- pr_info("0x%08x 0x%08x 0x%08x 0x%08x\n", dd->des0, dd->des1, dd->des2,
- dd->des3);
-}
-
-static void print_status(struct net_local *lp)
-{
- size_t desci, i;
-
- pr_info("tx_free %zu, tx_cur %zu, tx_next %zu\n", lp->tx_free,
- lp->tx_cur, lp->tx_next);
-
- print_descriptor(lp, lp->rx_cur, 0);
-
- for (desci = (lp->tx_cur - 10) % DWCEQOS_TX_DCNT, i = 0;
- i < DWCEQOS_TX_DCNT;
- ++i) {
- print_descriptor(lp, desci, 1);
- desci = (desci + 1) % DWCEQOS_TX_DCNT;
- }
-
- pr_info("DMA_Debug_Status0: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0));
- pr_info("DMA_CH0_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_IS));
- pr_info("DMA_CH0_Current_App_TxDesc: 0x%08x\n",
- dwceqos_read(lp, 0x1144));
- pr_info("DMA_CH0_Current_App_TxBuff: 0x%08x\n",
- dwceqos_read(lp, 0x1154));
- pr_info("MTL_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_DEBUG_ST));
- pr_info("MTL_TXQ0_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_TXQ0_DEBUG_ST));
- pr_info("MTL_RXQ0_Debug_Status: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_DEBUG_ST));
- pr_info("Current TX DMA: 0x%08x, RX DMA: 0x%08x\n",
- dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_TXDESC),
- dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_CUR_RXDESC));
-}
-
-static void dwceqos_mdio_set_csr(struct net_local *lp)
-{
- int rate = clk_get_rate(lp->apb_pclk);
-
- if (rate <= 20000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_20;
- else if (rate <= 35000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_35;
- else if (rate <= 60000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_60;
- else if (rate <= 100000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_100;
- else if (rate <= 150000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_150;
- else if (rate <= 250000000)
- lp->csr_val = DWCEQOS_MAC_MDIO_ADDR_CR_250;
-}
-
-/* Simple MDIO functions implementing mii_bus */
-static int dwceqos_mdio_read(struct mii_bus *bus, int mii_id, int phyreg)
-{
- struct net_local *lp = bus->priv;
- u32 regval;
- int i;
- int data;
-
- regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
- DWCEQOS_MDIO_PHYREG(phyreg) |
- DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
- DWCEQOS_MAC_MDIO_ADDR_GB |
- DWCEQOS_MAC_MDIO_ADDR_GOC_READ;
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
- for (i = 0; i < 5; ++i) {
- usleep_range(64, 128);
- if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
- DWCEQOS_MAC_MDIO_ADDR_GB))
- break;
- }
-
- data = dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_DATA);
- if (i == 5) {
- netdev_warn(lp->ndev, "MDIO read timed out\n");
- data = 0xffff;
- }
-
- return data & 0xffff;
-}
-
-static int dwceqos_mdio_write(struct mii_bus *bus, int mii_id, int phyreg,
- u16 value)
-{
- struct net_local *lp = bus->priv;
- u32 regval;
- int i;
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_DATA, value);
-
- regval = DWCEQOS_MDIO_PHYADDR(mii_id) |
- DWCEQOS_MDIO_PHYREG(phyreg) |
- DWCEQOS_MAC_MDIO_ADDR_CR(lp->csr_val) |
- DWCEQOS_MAC_MDIO_ADDR_GB |
- DWCEQOS_MAC_MDIO_ADDR_GOC_WRITE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_MDIO_ADDR, regval);
-
- for (i = 0; i < 5; ++i) {
- usleep_range(64, 128);
- if (!(dwceqos_read(lp, REG_DWCEQOS_MAC_MDIO_ADDR) &
- DWCEQOS_MAC_MDIO_ADDR_GB))
- break;
- }
- if (i == 5)
- netdev_warn(lp->ndev, "MDIO write timed out\n");
- return 0;
-}
-
-static int dwceqos_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
-
- if (!netif_running(ndev))
- return -EINVAL;
-
- if (!phydev)
- return -ENODEV;
-
- switch (cmd) {
- case SIOCGMIIPHY:
- case SIOCGMIIREG:
- case SIOCSMIIREG:
- return phy_mii_ioctl(phydev, rq, cmd);
- default:
- dev_info(&lp->pdev->dev, "ioctl %X not implemented.\n", cmd);
- return -EOPNOTSUPP;
- }
-}
-
-static void dwceqos_link_down(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- /* Indicate link down to the LPI state machine */
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_link_up(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- u32 regval;
- unsigned long flags;
-
- /* Indicate link up to the LPI state machine */
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_PLS;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-
- lp->eee_active = !phy_init_eee(ndev->phydev, 0);
-
- /* Check for changed EEE capability */
- if (!lp->eee_active && lp->eee_enabled) {
- lp->eee_enabled = 0;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- }
-}
-
-static void dwceqos_set_speed(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- struct phy_device *phydev = ndev->phydev;
- u32 regval;
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- regval &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES |
- DWCEQOS_MAC_CFG_DM);
-
- if (phydev->duplex)
- regval |= DWCEQOS_MAC_CFG_DM;
- if (phydev->speed == SPEED_10) {
- regval |= DWCEQOS_MAC_CFG_PS;
- } else if (phydev->speed == SPEED_100) {
- regval |= DWCEQOS_MAC_CFG_PS |
- DWCEQOS_MAC_CFG_FES;
- } else if (phydev->speed != SPEED_1000) {
- netdev_err(lp->ndev,
- "unknown PHY speed %d\n",
- phydev->speed);
- return;
- }
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, regval);
-}
-
-static void dwceqos_adjust_link(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = ndev->phydev;
- int status_change = 0;
-
- if (lp->phy_defer)
- return;
-
- if (phydev->link) {
- if ((lp->speed != phydev->speed) ||
- (lp->duplex != phydev->duplex)) {
- dwceqos_set_speed(lp);
-
- lp->speed = phydev->speed;
- lp->duplex = phydev->duplex;
- status_change = 1;
- }
-
- if (lp->flowcontrol.autoneg) {
- lp->flowcontrol.rx = phydev->pause ||
- phydev->asym_pause;
- lp->flowcontrol.tx = phydev->pause ||
- phydev->asym_pause;
- }
-
- if (lp->flowcontrol.rx != lp->flowcontrol.rx_current) {
- if (netif_msg_link(lp))
- netdev_dbg(ndev, "set rx flow to %d\n",
- lp->flowcontrol.rx);
- dwceqos_set_rx_flowcontrol(lp, lp->flowcontrol.rx);
- lp->flowcontrol.rx_current = lp->flowcontrol.rx;
- }
- if (lp->flowcontrol.tx != lp->flowcontrol.tx_current) {
- if (netif_msg_link(lp))
- netdev_dbg(ndev, "set tx flow to %d\n",
- lp->flowcontrol.tx);
- dwceqos_set_tx_flowcontrol(lp, lp->flowcontrol.tx);
- lp->flowcontrol.tx_current = lp->flowcontrol.tx;
- }
- }
-
- if (phydev->link != lp->link) {
- lp->link = phydev->link;
- status_change = 1;
- }
-
- if (status_change) {
- if (phydev->link) {
- netif_trans_update(lp->ndev);
- dwceqos_link_up(lp);
- } else {
- dwceqos_link_down(lp);
- }
- phy_print_status(phydev);
- }
-}
-
-static int dwceqos_mii_probe(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct phy_device *phydev = NULL;
-
- if (lp->phy_node) {
- phydev = of_phy_connect(lp->ndev,
- lp->phy_node,
- &dwceqos_adjust_link,
- 0,
- lp->phy_interface);
-
- if (!phydev) {
- netdev_err(ndev, "no PHY found\n");
- return -1;
- }
- } else {
- netdev_err(ndev, "no PHY configured\n");
- return -ENODEV;
- }
-
- if (netif_msg_probe(lp))
- phy_attached_info(phydev);
-
- phydev->supported &= PHY_GBIT_FEATURES | SUPPORTED_Pause |
- SUPPORTED_Asym_Pause;
-
- lp->link = 0;
- lp->speed = 0;
- lp->duplex = DUPLEX_UNKNOWN;
- lp->flowcontrol.autoneg = AUTONEG_ENABLE;
-
- return 0;
-}
-
-static void dwceqos_alloc_rxring_desc(struct net_local *lp, int index)
-{
- struct sk_buff *new_skb;
- dma_addr_t new_skb_baddr = 0;
-
- new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
- if (!new_skb) {
- netdev_err(lp->ndev, "alloc_skb error for desc %d\n", index);
- goto err_out;
- }
-
- new_skb_baddr = dma_map_single(lp->ndev->dev.parent,
- new_skb->data, DWCEQOS_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
- netdev_err(lp->ndev, "DMA map error\n");
- dev_kfree_skb(new_skb);
- new_skb = NULL;
- goto err_out;
- }
-
- lp->rx_descs[index].des0 = new_skb_baddr;
- lp->rx_descs[index].des1 = 0;
- lp->rx_descs[index].des2 = 0;
- lp->rx_descs[index].des3 = DWCEQOS_DMA_RDES3_INTE |
- DWCEQOS_DMA_RDES3_BUF1V |
- DWCEQOS_DMA_RDES3_OWN;
-
- lp->rx_skb[index].mapping = new_skb_baddr;
- lp->rx_skb[index].len = DWCEQOS_RX_BUF_SIZE;
-
-err_out:
- lp->rx_skb[index].skb = new_skb;
-}
-
-static void dwceqos_clean_rings(struct net_local *lp)
-{
- int i;
-
- if (lp->rx_skb) {
- for (i = 0; i < DWCEQOS_RX_DCNT; i++) {
- if (lp->rx_skb[i].skb) {
- dma_unmap_single(lp->ndev->dev.parent,
- lp->rx_skb[i].mapping,
- lp->rx_skb[i].len,
- DMA_FROM_DEVICE);
-
- dev_kfree_skb(lp->rx_skb[i].skb);
- lp->rx_skb[i].skb = NULL;
- lp->rx_skb[i].mapping = 0;
- }
- }
- }
-
- if (lp->tx_skb) {
- for (i = 0; i < DWCEQOS_TX_DCNT; i++) {
- if (lp->tx_skb[i].skb) {
- dev_kfree_skb(lp->tx_skb[i].skb);
- lp->tx_skb[i].skb = NULL;
- }
- if (lp->tx_skb[i].mapping) {
- dma_unmap_single(lp->ndev->dev.parent,
- lp->tx_skb[i].mapping,
- lp->tx_skb[i].len,
- DMA_TO_DEVICE);
- lp->tx_skb[i].mapping = 0;
- }
- }
- }
-}
-
-static void dwceqos_descriptor_free(struct net_local *lp)
-{
- int size;
-
- dwceqos_clean_rings(lp);
-
- kfree(lp->tx_skb);
- lp->tx_skb = NULL;
- kfree(lp->rx_skb);
- lp->rx_skb = NULL;
-
- size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
- if (lp->rx_descs) {
- dma_free_coherent(lp->ndev->dev.parent, size,
- (void *)(lp->rx_descs), lp->rx_descs_addr);
- lp->rx_descs = NULL;
- }
-
- size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
- if (lp->tx_descs) {
- dma_free_coherent(lp->ndev->dev.parent, size,
- (void *)(lp->tx_descs), lp->tx_descs_addr);
- lp->tx_descs = NULL;
- }
-}
-
-static int dwceqos_descriptor_init(struct net_local *lp)
-{
- int size;
- u32 i;
-
- lp->gso_size = 0;
-
- lp->tx_skb = NULL;
- lp->rx_skb = NULL;
- lp->rx_descs = NULL;
- lp->tx_descs = NULL;
-
- /* Reset the DMA indexes */
- lp->rx_cur = 0;
- lp->tx_cur = 0;
- lp->tx_next = 0;
- lp->tx_free = DWCEQOS_TX_DCNT;
-
- /* Allocate Ring descriptors */
- size = DWCEQOS_RX_DCNT * sizeof(struct ring_desc);
- lp->rx_skb = kzalloc(size, GFP_KERNEL);
- if (!lp->rx_skb)
- goto err_out;
-
- size = DWCEQOS_TX_DCNT * sizeof(struct ring_desc);
- lp->tx_skb = kzalloc(size, GFP_KERNEL);
- if (!lp->tx_skb)
- goto err_out;
-
- /* Allocate DMA descriptors */
- size = DWCEQOS_RX_DCNT * sizeof(struct dwceqos_dma_desc);
- lp->rx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->rx_descs_addr, GFP_KERNEL);
- if (!lp->rx_descs)
- goto err_out;
- lp->rx_descs_tail_addr = lp->rx_descs_addr +
- sizeof(struct dwceqos_dma_desc) * DWCEQOS_RX_DCNT;
-
- size = DWCEQOS_TX_DCNT * sizeof(struct dwceqos_dma_desc);
- lp->tx_descs = dma_alloc_coherent(lp->ndev->dev.parent, size,
- &lp->tx_descs_addr, GFP_KERNEL);
- if (!lp->tx_descs)
- goto err_out;
- lp->tx_descs_tail_addr = lp->tx_descs_addr +
- sizeof(struct dwceqos_dma_desc) * DWCEQOS_TX_DCNT;
-
- /* Initialize RX Ring Descriptors and buffers */
- for (i = 0; i < DWCEQOS_RX_DCNT; ++i) {
- dwceqos_alloc_rxring_desc(lp, i);
- if (!(lp->rx_skb[lp->rx_cur].skb))
- goto err_out;
- }
-
- /* Initialize TX Descriptors */
- for (i = 0; i < DWCEQOS_TX_DCNT; ++i) {
- lp->tx_descs[i].des0 = 0;
- lp->tx_descs[i].des1 = 0;
- lp->tx_descs[i].des2 = 0;
- lp->tx_descs[i].des3 = 0;
- }
-
- /* Make descriptor writes visible to the DMA. */
- wmb();
-
- return 0;
-
-err_out:
- dwceqos_descriptor_free(lp);
- return -ENOMEM;
-}
-
-static int dwceqos_packet_avail(struct net_local *lp)
-{
- return !(lp->rx_descs[lp->rx_cur].des3 & DWCEQOS_DMA_RDES3_OWN);
-}
-
-static void dwceqos_get_hwfeatures(struct net_local *lp)
-{
- lp->feature0 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE0);
- lp->feature1 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE1);
- lp->feature2 = dwceqos_read(lp, REG_DWCEQOS_MAC_HW_FEATURE2);
-}
-
-static void dwceqos_dma_enable_txirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval |= DWCEQOS_DMA_CH0_IE_TIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_txirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval &= ~DWCEQOS_DMA_CH0_IE_TIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_enable_rxirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval |= DWCEQOS_DMA_CH0_IE_RIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_dma_disable_rxirq(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_IE);
- regval &= ~DWCEQOS_DMA_CH0_IE_RIE;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_enable_mmc_interrupt(struct net_local *lp)
-{
- dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, 0);
- dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, 0);
-}
-
-static int dwceqos_mii_init(struct net_local *lp)
-{
- int ret = -ENXIO;
- struct resource res;
- struct device_node *mdionode;
-
- mdionode = of_get_child_by_name(lp->pdev->dev.of_node, "mdio");
-
- if (!mdionode)
- return 0;
-
- lp->mii_bus = mdiobus_alloc();
- if (!lp->mii_bus) {
- ret = -ENOMEM;
- goto err_out;
- }
-
- lp->mii_bus->name = "DWCEQOS MII bus";
- lp->mii_bus->read = &dwceqos_mdio_read;
- lp->mii_bus->write = &dwceqos_mdio_write;
- lp->mii_bus->priv = lp;
- lp->mii_bus->parent = &lp->pdev->dev;
-
- of_address_to_resource(lp->pdev->dev.of_node, 0, &res);
- snprintf(lp->mii_bus->id, MII_BUS_ID_SIZE, "%.8llx",
- (unsigned long long)res.start);
- if (of_mdiobus_register(lp->mii_bus, mdionode))
- goto err_out_free_mdiobus;
-
- return 0;
-
-err_out_free_mdiobus:
- mdiobus_free(lp->mii_bus);
-err_out:
- of_node_put(mdionode);
- return ret;
-}
-
-/* DMA reset. When issued, this also resets all MTL and MAC registers */
-static void dwceqos_reset_hw(struct net_local *lp)
-{
- /* Wait (at most) 0.5 seconds for DMA reset */
- int i = 5000;
- u32 reg;
-
- /* Force gigabit to guarantee a TX clock for GMII. */
- reg = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- reg &= ~(DWCEQOS_MAC_CFG_PS | DWCEQOS_MAC_CFG_FES);
- reg |= DWCEQOS_MAC_CFG_DM;
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, reg);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_MODE, DWCEQOS_DMA_MODE_SWR);
-
- do {
- udelay(100);
- i--;
- reg = dwceqos_read(lp, REG_DWCEQOS_DMA_MODE);
- } while ((reg & DWCEQOS_DMA_MODE_SWR) && i);
- /* We might experience a timeout if the chip clock mux is broken */
- if (!i)
- netdev_err(lp->ndev, "DMA reset timed out!\n");
-}
-
-static void dwceqos_fatal_bus_error(struct net_local *lp, u32 dma_status)
-{
- if (dma_status & DWCEQOS_DMA_CH0_IS_TEB) {
- netdev_err(lp->ndev, "txdma bus error %s %s (status=%08x)\n",
- dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_READ ?
- "read" : "write",
- dma_status & DWCEQOS_DMA_CH0_IS_TX_ERR_DESCR ?
- "descr" : "data",
- dma_status);
-
- print_status(lp);
- }
- if (dma_status & DWCEQOS_DMA_CH0_IS_REB) {
- netdev_err(lp->ndev, "rxdma bus error %s %s (status=%08x)\n",
- dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_READ ?
- "read" : "write",
- dma_status & DWCEQOS_DMA_CH0_IS_RX_ERR_DESCR ?
- "descr" : "data",
- dma_status);
-
- print_status(lp);
- }
-}
-
-static void dwceqos_mmc_interrupt(struct net_local *lp)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
-
- /* A latched MMC interrupt cannot be masked, so we must read
- * all the counters while an interrupt is pending.
- */
- dwceqos_read_mmc_counters(lp,
- dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQ),
- dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQ));
-
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-}
-
-static void dwceqos_mac_interrupt(struct net_local *lp)
-{
- u32 cause;
-
- cause = dwceqos_read(lp, REG_DWCEQOS_MAC_IS);
-
- if (cause & DWCEQOS_MAC_IS_MMC_INT)
- dwceqos_mmc_interrupt(lp);
-}
-
-static irqreturn_t dwceqos_interrupt(int irq, void *dev_id)
-{
- struct net_device *ndev = dev_id;
- struct net_local *lp = netdev_priv(ndev);
-
- u32 cause;
- u32 dma_status;
- irqreturn_t ret = IRQ_NONE;
-
- cause = dwceqos_read(lp, REG_DWCEQOS_DMA_IS);
- /* DMA Channel 0 Interrupt */
- if (cause & DWCEQOS_DMA_IS_DC0IS) {
- dma_status = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_STA);
-
- /* Transmit Interrupt */
- if (dma_status & DWCEQOS_DMA_CH0_IS_TI) {
- tasklet_schedule(&lp->tx_bdreclaim_tasklet);
- dwceqos_dma_disable_txirq(lp);
- }
-
- /* Receive Interrupt */
- if (dma_status & DWCEQOS_DMA_CH0_IS_RI) {
- /* Disable RX IRQs */
- dwceqos_dma_disable_rxirq(lp);
- napi_schedule(&lp->napi);
- }
-
- /* Fatal Bus Error interrupt */
- if (unlikely(dma_status & DWCEQOS_DMA_CH0_IS_FBE)) {
- dwceqos_fatal_bus_error(lp, dma_status);
-
- /* errata 9000831707 */
- dma_status |= DWCEQOS_DMA_CH0_IS_TEB |
- DWCEQOS_DMA_CH0_IS_REB;
- }
-
- /* Ack all DMA Channel 0 IRQs */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, dma_status);
- ret = IRQ_HANDLED;
- }
-
- if (cause & DWCEQOS_DMA_IS_MTLIS) {
- u32 val = dwceqos_read(lp, REG_DWCEQOS_MTL_Q0_ISCTRL);
-
- dwceqos_write(lp, REG_DWCEQOS_MTL_Q0_ISCTRL, val);
- ret = IRQ_HANDLED;
- }
-
- if (cause & DWCEQOS_DMA_IS_MACIS) {
- dwceqos_mac_interrupt(lp);
- ret = IRQ_HANDLED;
- }
- return ret;
-}
-
-static void dwceqos_set_rx_flowcontrol(struct net_local *lp, bool enable)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL);
- if (enable)
- regval |= DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
- else
- regval &= ~DWCEQOS_MAC_RX_FLOW_CTRL_RFE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_RX_FLOW_CTRL, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_set_tx_flowcontrol(struct net_local *lp, bool enable)
-{
- u32 regval;
- unsigned long flags;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- /* MTL flow control */
- regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
- if (enable)
- regval |= DWCEQOS_MTL_RXQ_EHFC;
- else
- regval &= ~DWCEQOS_MTL_RXQ_EHFC;
-
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
- /* MAC flow control */
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW);
- if (enable)
- regval |= DWCEQOS_MAC_Q0_TX_FLOW_TFE;
- else
- regval &= ~DWCEQOS_MAC_Q0_TX_FLOW_TFE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
-
-static void dwceqos_configure_flow_control(struct net_local *lp)
-{
- u32 regval;
- unsigned long flags;
- int RQS, RFD, RFA;
-
- spin_lock_irqsave(&lp->hw_lock, flags);
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MTL_RXQ0_OPER);
-
- /* The queue size is in units of 256 bytes. We want 512 bytes units for
- * the threshold fields.
- */
- RQS = ((regval >> 20) & 0x3FF) + 1;
- RQS /= 2;
-
- /* The thresholds are relative to a full queue, with a bias
- * of 1 KiByte below full.
- */
- RFD = RQS / 2 - 2;
- RFA = RQS / 8 - 2;
-
- regval = (regval & 0xFFF000FF) | (RFD << 14) | (RFA << 8);
-
- if (RFD >= 0 && RFA >= 0) {
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
- } else {
- netdev_warn(lp->ndev,
- "FIFO too small for flow control.");
- }
-
- regval = DWCEQOS_MAC_Q0_TX_FLOW_PT(256) |
- DWCEQOS_MAC_Q0_TX_FLOW_PLT_4_SLOTS;
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_Q0_TX_FLOW, regval);
-
- spin_unlock_irqrestore(&lp->hw_lock, flags);
-}
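/* Worked example (illustrative, not part of the deleted file): for a
 * 16 KiB RX FIFO the arithmetic above gives RQS field = 63, so
 * RQS = 64 units of 256 B = 32 units of 512 B, and then
 * RFD = 32 / 2 - 2 = 14 and RFA = 32 / 8 - 2 = 2; both are
 * non-negative, so the thresholds are programmed. A 4 KiB FIFO yields
 * RQS = 8, RFD = 2, RFA = -1, and the "FIFO too small for flow
 * control." warning fires instead.
 */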
-
-static void dwceqos_configure_clock(struct net_local *lp)
-{
- unsigned long rate_mhz = clk_get_rate(lp->apb_pclk) / 1000000;
-
- BUG_ON(!rate_mhz);
-
- dwceqos_write(lp,
- REG_DWCEQOS_MAC_1US_TIC_COUNTER,
- DWCEQOS_MAC_1US_TIC_COUNTER_VAL(rate_mhz - 1));
-}
-
-static void dwceqos_configure_bus(struct net_local *lp)
-{
- u32 sysbus_reg;
-
- /* N.B. We do not support the Fixed Burst mode because it
- * opens a race window by making HW access to DMA descriptors
- * non-atomic.
- */
-
- sysbus_reg = DWCEQOS_DMA_SYSBUS_MODE_AAL;
-
- if (lp->bus_cfg.en_lpi)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_EN_LPI;
-
- if (lp->bus_cfg.burst_map)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
- lp->bus_cfg.burst_map);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_BURST(
- DWCEQOS_DMA_SYSBUS_MODE_BURST_DEFAULT);
-
- if (lp->bus_cfg.read_requests)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
- lp->bus_cfg.read_requests - 1);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT(
- DWCEQOS_DMA_SYSBUS_MODE_RD_OSR_LIMIT_DEFAULT);
-
- if (lp->bus_cfg.write_requests)
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
- lp->bus_cfg.write_requests - 1);
- else
- sysbus_reg |= DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT(
- DWCEQOS_DMA_SYSBUS_MODE_WR_OSR_LIMIT_DEFAULT);
-
- if (netif_msg_hw(lp))
- netdev_dbg(lp->ndev, "SysbusMode %#X\n", sysbus_reg);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_SYSBUS_MODE, sysbus_reg);
-}
-
-static void dwceqos_init_hw(struct net_local *lp)
-{
- struct net_device *ndev = lp->ndev;
- u32 regval;
- u32 buswidth;
- u32 dma_skip;
-
- /* Software reset */
- dwceqos_reset_hw(lp);
-
- dwceqos_configure_bus(lp);
-
- /* Probe data bus width, 32/64/128 bits. */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL, 0xF);
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL);
- buswidth = (regval ^ 0xF) + 1;
-
- /* Cache-align dma descriptors. */
- dma_skip = (sizeof(struct dwceqos_dma_desc) - 16) / buswidth;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_CTRL,
- DWCEQOS_DMA_CH_CTRL_DSL(dma_skip) |
- DWCEQOS_DMA_CH_CTRL_PBLX8);
-
- /* Initialize DMA Channel 0 */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LEN, DWCEQOS_TX_DCNT - 1);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LEN, DWCEQOS_RX_DCNT - 1);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_LIST,
- (u32)lp->tx_descs_addr);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_LIST,
- (u32)lp->rx_descs_addr);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
- lp->tx_descs_tail_addr);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
- lp->rx_descs_tail_addr);
-
- if (lp->bus_cfg.tx_pbl)
- regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.tx_pbl);
- else
- regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
- /* Enable TSO if the HW supports it */
- if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
- regval |= DWCEQOS_DMA_CH_TX_TSE;
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL, regval);
-
- if (lp->bus_cfg.rx_pbl)
- regval = DWCEQOS_DMA_CH_CTRL_PBL(lp->bus_cfg.rx_pbl);
- else
- regval = DWCEQOS_DMA_CH_CTRL_PBL(2);
-
- regval |= DWCEQOS_DMA_CH_RX_CTRL_BUFSIZE(DWCEQOS_DWCEQOS_RX_BUF_SIZE);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
- regval |= DWCEQOS_DMA_CH_CTRL_START;
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RX_CTRL, regval);
-
- /* Initialize MTL Queues */
- regval = DWCEQOS_MTL_SCHALG_STRICT;
- dwceqos_write(lp, REG_DWCEQOS_MTL_OPER, regval);
-
- regval = DWCEQOS_MTL_TXQ_SIZE(
- DWCEQOS_MAC_HW_FEATURE1_TXFIFOSIZE(lp->feature1)) |
- DWCEQOS_MTL_TXQ_TXQEN | DWCEQOS_MTL_TXQ_TSF |
- DWCEQOS_MTL_TXQ_TTC512;
- dwceqos_write(lp, REG_DWCEQOS_MTL_TXQ0_OPER, regval);
-
- regval = DWCEQOS_MTL_RXQ_SIZE(
- DWCEQOS_MAC_HW_FEATURE1_RXFIFOSIZE(lp->feature1)) |
- DWCEQOS_MTL_RXQ_FUP | DWCEQOS_MTL_RXQ_FEP | DWCEQOS_MTL_RXQ_RSF;
- dwceqos_write(lp, REG_DWCEQOS_MTL_RXQ0_OPER, regval);
-
- dwceqos_configure_flow_control(lp);
-
- /* Initialize MAC */
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
- lp->eee_enabled = 0;
-
- dwceqos_configure_clock(lp);
-
- /* MMC counters */
-
- /* probe implemented counters */
- dwceqos_write(lp, REG_DWCEQOS_MMC_RXIRQMASK, ~0u);
- dwceqos_write(lp, REG_DWCEQOS_MMC_TXIRQMASK, ~0u);
- lp->mmc_rx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_RXIRQMASK);
- lp->mmc_tx_counters_mask = dwceqos_read(lp, REG_DWCEQOS_MMC_TXIRQMASK);
-
- dwceqos_write(lp, REG_DWCEQOS_MMC_CTRL, DWCEQOS_MMC_CTRL_CNTRST |
- DWCEQOS_MMC_CTRL_RSTONRD);
- dwceqos_enable_mmc_interrupt(lp);
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE, 0);
- dwceqos_write(lp, REG_DWCEQOS_MAC_IE, 0);
-
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG, DWCEQOS_MAC_CFG_IPC |
- DWCEQOS_MAC_CFG_DM | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
- /* Start TX DMA */
- regval = dwceqos_read(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL);
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TX_CTRL,
- regval | DWCEQOS_DMA_CH_CTRL_START);
-
- /* Enable MAC TX/RX */
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_CFG);
- dwceqos_write(lp, REG_DWCEQOS_MAC_CFG,
- regval | DWCEQOS_MAC_CFG_TE | DWCEQOS_MAC_CFG_RE);
-
- lp->phy_defer = false;
- mutex_lock(&ndev->phydev->lock);
- phy_read_status(ndev->phydev);
- dwceqos_adjust_link(lp->ndev);
- mutex_unlock(&ndev->phydev->lock);
-}
-
-static void dwceqos_tx_reclaim(unsigned long data)
-{
- struct net_device *ndev = (struct net_device *)data;
- struct net_local *lp = netdev_priv(ndev);
- unsigned int tx_bytes = 0;
- unsigned int tx_packets = 0;
-
- spin_lock(&lp->tx_lock);
-
- while (lp->tx_free < DWCEQOS_TX_DCNT) {
- struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_cur];
- struct ring_desc *rd = &lp->tx_skb[lp->tx_cur];
-
- /* Descriptor still being held by DMA? */
- if (dd->des3 & DWCEQOS_DMA_TDES3_OWN)
- break;
-
- if (rd->mapping)
- dma_unmap_single(ndev->dev.parent, rd->mapping, rd->len,
- DMA_TO_DEVICE);
-
- if (unlikely(rd->skb)) {
- ++tx_packets;
- tx_bytes += rd->skb->len;
- dev_consume_skb_any(rd->skb);
- }
-
- rd->skb = NULL;
- rd->mapping = 0;
- lp->tx_free++;
- lp->tx_cur = (lp->tx_cur + 1) % DWCEQOS_TX_DCNT;
-
- if ((dd->des3 & DWCEQOS_DMA_TDES3_LD) &&
- (dd->des3 & DWCEQOS_DMA_RDES3_ES)) {
- if (netif_msg_tx_err(lp))
- netdev_err(ndev, "TX Error, TDES3 = 0x%x\n",
- dd->des3);
- if (netif_msg_hw(lp))
- print_status(lp);
- }
- }
- spin_unlock(&lp->tx_lock);
-
- netdev_completed_queue(ndev, tx_packets, tx_bytes);
-
- dwceqos_dma_enable_txirq(lp);
- netif_wake_queue(ndev);
-}
-
-static int dwceqos_rx(struct net_local *lp, int budget)
-{
- struct sk_buff *skb;
- u32 tot_size = 0;
- unsigned int n_packets = 0;
- unsigned int n_descs = 0;
- u32 len;
-
- struct dwceqos_dma_desc *dd;
- struct sk_buff *new_skb;
- dma_addr_t new_skb_baddr = 0;
-
- while (n_descs < budget) {
- if (!dwceqos_packet_avail(lp))
- break;
-
- new_skb = netdev_alloc_skb(lp->ndev, DWCEQOS_RX_BUF_SIZE);
- if (!new_skb) {
- netdev_err(lp->ndev, "no memory for new sk_buff\n");
- break;
- }
-
- /* Get dma handle of skb->data */
- new_skb_baddr = (u32)dma_map_single(lp->ndev->dev.parent,
- new_skb->data,
- DWCEQOS_RX_BUF_SIZE,
- DMA_FROM_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, new_skb_baddr)) {
- netdev_err(lp->ndev, "DMA map error\n");
- dev_kfree_skb(new_skb);
- break;
- }
-
- /* Read descriptor data after reading owner bit. */
- dma_rmb();
-
- dd = &lp->rx_descs[lp->rx_cur];
- len = DWCEQOS_DMA_RDES3_PL(dd->des3);
- skb = lp->rx_skb[lp->rx_cur].skb;
-
- /* Unmap old buffer */
- dma_unmap_single(lp->ndev->dev.parent,
- lp->rx_skb[lp->rx_cur].mapping,
- lp->rx_skb[lp->rx_cur].len, DMA_FROM_DEVICE);
-
- /* Discard packet on reception error or bad checksum */
- if ((dd->des3 & DWCEQOS_DMA_RDES3_ES) ||
- (dd->des1 & DWCEQOS_DMA_RDES1_IPCE)) {
- dev_kfree_skb(skb);
- skb = NULL;
- } else {
- skb_put(skb, len);
- skb->protocol = eth_type_trans(skb, lp->ndev);
- switch (dd->des1 & DWCEQOS_DMA_RDES1_PT) {
- case DWCEQOS_DMA_RDES1_PT_UDP:
- case DWCEQOS_DMA_RDES1_PT_TCP:
- case DWCEQOS_DMA_RDES1_PT_ICMP:
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- break;
- default:
- skb->ip_summed = CHECKSUM_NONE;
- break;
- }
- }
-
- if (unlikely(!skb)) {
- if (netif_msg_rx_err(lp))
- netdev_dbg(lp->ndev, "rx error: des3=%X\n",
- lp->rx_descs[lp->rx_cur].des3);
- } else {
- tot_size += skb->len;
- n_packets++;
-
- netif_receive_skb(skb);
- }
-
- lp->rx_descs[lp->rx_cur].des0 = new_skb_baddr;
- lp->rx_descs[lp->rx_cur].des1 = 0;
- lp->rx_descs[lp->rx_cur].des2 = 0;
- /* The DMA must observe des0/1/2 written before des3. */
- wmb();
- lp->rx_descs[lp->rx_cur].des3 = DWCEQOS_DMA_RDES3_INTE |
- DWCEQOS_DMA_RDES3_OWN |
- DWCEQOS_DMA_RDES3_BUF1V;
-
- lp->rx_skb[lp->rx_cur].mapping = new_skb_baddr;
- lp->rx_skb[lp->rx_cur].len = DWCEQOS_RX_BUF_SIZE;
- lp->rx_skb[lp->rx_cur].skb = new_skb;
-
- n_descs++;
- lp->rx_cur = (lp->rx_cur + 1) % DWCEQOS_RX_DCNT;
- }
-
- /* Make sure any ownership update is written to the descriptors before
- * DMA wakeup.
- */
- wmb();
-
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_STA, DWCEQOS_DMA_CH0_IS_RI);
- /* Wake up RX by writing tail pointer */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_RXDESC_TAIL,
- lp->rx_descs_tail_addr);
-
- return n_descs;
-}
-
-static int dwceqos_rx_poll(struct napi_struct *napi, int budget)
-{
- struct net_local *lp = container_of(napi, struct net_local, napi);
- int work_done = 0;
-
- work_done = dwceqos_rx(lp, budget - work_done);
-
- if (!dwceqos_packet_avail(lp) && work_done < budget) {
- napi_complete(napi);
- dwceqos_dma_enable_rxirq(lp);
- } else {
- work_done = budget;
- }
-
- return work_done;
-}
-
-/* Reinitialize function if a TX timed out */
-static void dwceqos_reinit_for_txtimeout(struct work_struct *data)
-{
- struct net_local *lp = container_of(data, struct net_local,
- txtimeout_reinit);
-
- netdev_err(lp->ndev, "transmit timeout %d s, resetting...\n",
- DWCEQOS_TX_TIMEOUT);
-
- if (netif_msg_hw(lp))
- print_status(lp);
-
- rtnl_lock();
- dwceqos_stop(lp->ndev);
- dwceqos_open(lp->ndev);
- rtnl_unlock();
-}
-
-/* DT Probing function called by main probe */
-static inline int dwceqos_probe_config_dt(struct platform_device *pdev)
-{
- struct net_device *ndev;
- struct net_local *lp;
- const void *mac_address;
- struct dwceqos_bus_cfg *bus_cfg;
- struct device_node *np = pdev->dev.of_node;
-
- ndev = platform_get_drvdata(pdev);
- lp = netdev_priv(ndev);
- bus_cfg = &lp->bus_cfg;
-
- /* Set the MAC address. */
- mac_address = of_get_mac_address(pdev->dev.of_node);
- if (mac_address)
- ether_addr_copy(ndev->dev_addr, mac_address);
-
- /* These are all optional parameters */
- lp->en_tx_lpi_clockgating = of_property_read_bool(np,
- "snps,en-tx-lpi-clockgating");
- bus_cfg->en_lpi = of_property_read_bool(np, "snps,en-lpi");
- of_property_read_u32(np, "snps,write-requests",
- &bus_cfg->write_requests);
- of_property_read_u32(np, "snps,read-requests", &bus_cfg->read_requests);
- of_property_read_u32(np, "snps,burst-map", &bus_cfg->burst_map);
- of_property_read_u32(np, "snps,txpbl", &bus_cfg->tx_pbl);
- of_property_read_u32(np, "snps,rxpbl", &bus_cfg->rx_pbl);
-
- netdev_dbg(ndev, "BusCfg: lpi:%u wr:%u rr:%u bm:%X rxpbl:%u txpbl:%d\n",
- bus_cfg->en_lpi,
- bus_cfg->write_requests,
- bus_cfg->read_requests,
- bus_cfg->burst_map,
- bus_cfg->rx_pbl,
- bus_cfg->tx_pbl);
-
- return 0;
-}
-
-static int dwceqos_open(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- int res;
-
- dwceqos_reset_state(lp);
- res = dwceqos_descriptor_init(lp);
- if (res) {
- netdev_err(ndev, "Unable to allocate DMA memory, rc %d\n", res);
- return res;
- }
- netdev_reset_queue(ndev);
-
- /* The dwceqos reset state machine requires all phy clocks to complete,
- * hence the unusual init order with phy_start first.
- */
- lp->phy_defer = true;
- phy_start(ndev->phydev);
- dwceqos_init_hw(lp);
- napi_enable(&lp->napi);
-
- netif_start_queue(ndev);
- tasklet_enable(&lp->tx_bdreclaim_tasklet);
-
- /* Enable Interrupts -- do this only after we enable NAPI and the
- * tasklet.
- */
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_IE,
- DWCEQOS_DMA_CH0_IE_NIE |
- DWCEQOS_DMA_CH0_IE_RIE | DWCEQOS_DMA_CH0_IE_TIE |
- DWCEQOS_DMA_CH0_IE_AIE |
- DWCEQOS_DMA_CH0_IE_FBEE);
-
- return 0;
-}
-
-static bool dwceqos_is_tx_dma_suspended(struct net_local *lp)
-{
- u32 reg;
-
- reg = dwceqos_read(lp, REG_DWCEQOS_DMA_DEBUG_ST0);
- reg = DMA_GET_TX_STATE_CH0(reg);
-
- return reg == DMA_TX_CH_SUSPENDED;
-}
-
-static void dwceqos_drain_dma(struct net_local *lp)
-{
- /* Wait for all pending TX buffers to be sent. Upper limit based
- * on max frame size on a 10 Mbit link: a 1518-byte frame takes
- * ~1.2 ms there, and each poll below sleeps 100-200 us, so allow
- * about 12 polls per ring entry.
- */
- size_t limit = (DWCEQOS_TX_DCNT * 1250) / 100;
-
- while (!dwceqos_is_tx_dma_suspended(lp) && limit--)
- usleep_range(100, 200);
-}
-
-static int dwceqos_stop(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- tasklet_disable(&lp->tx_bdreclaim_tasklet);
- napi_disable(&lp->napi);
-
- /* Stop all tx before we drain the tx dma. */
- netif_tx_lock_bh(lp->ndev);
- netif_stop_queue(ndev);
- netif_tx_unlock_bh(lp->ndev);
-
- dwceqos_drain_dma(lp);
- dwceqos_reset_hw(lp);
- phy_stop(ndev->phydev);
-
- dwceqos_descriptor_free(lp);
-
- return 0;
-}
-
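-/* Emit a TSO context descriptor that loads a new MSS (gso_size)
- * into the DMA engine.
- */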
-static void dwceqos_dmadesc_set_ctx(struct net_local *lp,
- unsigned short gso_size)
-{
- struct dwceqos_dma_desc *dd = &lp->tx_descs[lp->tx_next];
-
- dd->des0 = 0;
- dd->des1 = 0;
- dd->des2 = gso_size;
- dd->des3 = DWCEQOS_DMA_TDES3_CTXT | DWCEQOS_DMA_TDES3_TCMSSV;
-
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-}
-
-static void dwceqos_tx_poll_demand(struct net_local *lp)
-{
- dwceqos_write(lp, REG_DWCEQOS_DMA_CH0_TXDESC_TAIL,
- lp->tx_descs_tail_addr);
-}
-
-struct dwceqos_tx {
- size_t nr_descriptors;
- size_t initial_descriptor;
- size_t last_descriptor;
- size_t prev_gso_size;
- size_t network_header_len;
-};
-
-static void dwceqos_tx_prepare(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- size_t n = 1;
- size_t i;
-
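- /* One descriptor for the linear part, plus a context descriptor
- * when the MSS differs from the previous GSO packet.
- */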
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size)
- ++n;
-
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
-
- n += (skb_frag_size(frag) + BYTES_PER_DMA_DESC - 1) /
- BYTES_PER_DMA_DESC;
- }
-
- tx->nr_descriptors = n;
- tx->initial_descriptor = lp->tx_next;
- tx->last_descriptor = lp->tx_next;
- tx->prev_gso_size = lp->gso_size;
-
- tx->network_header_len = skb_transport_offset(skb);
- if (skb_is_gso(skb))
- tx->network_header_len += tcp_hdrlen(skb);
-}
-
-static int dwceqos_tx_linear(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- struct ring_desc *rd;
- struct dwceqos_dma_desc *dd;
- size_t payload_len;
- dma_addr_t dma_handle;
-
- if (skb_is_gso(skb) && skb_shinfo(skb)->gso_size != lp->gso_size) {
- dwceqos_dmadesc_set_ctx(lp, skb_shinfo(skb)->gso_size);
- lp->gso_size = skb_shinfo(skb)->gso_size;
- }
-
- dma_handle = dma_map_single(lp->ndev->dev.parent, skb->data,
- skb_headlen(skb), DMA_TO_DEVICE);
-
- if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
- netdev_err(lp->ndev, "TX DMA Mapping error\n");
- return -ENOMEM;
- }
-
- rd = &lp->tx_skb[lp->tx_next];
- dd = &lp->tx_descs[lp->tx_next];
-
- rd->skb = NULL;
- rd->len = skb_headlen(skb);
- rd->mapping = dma_handle;
-
- /* Set up DMA Descriptor */
- dd->des0 = dma_handle;
-
- if (skb_is_gso(skb)) {
- payload_len = skb_headlen(skb) - tx->network_header_len;
-
- if (payload_len)
- dd->des1 = dma_handle + tx->network_header_len;
- dd->des2 = tx->network_header_len |
- DWCEQOS_DMA_DES2_B2L(payload_len);
- dd->des3 = DWCEQOS_DMA_TDES3_TSE |
- DWCEQOS_DMA_DES3_THL((tcp_hdrlen(skb) / 4)) |
- (skb->len - tx->network_header_len);
- } else {
- dd->des1 = 0;
- dd->des2 = skb_headlen(skb);
- dd->des3 = skb->len;
-
- switch (skb->ip_summed) {
- case CHECKSUM_PARTIAL:
- dd->des3 |= DWCEQOS_DMA_TDES3_CA;
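- /* Fall through */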
- case CHECKSUM_NONE:
- case CHECKSUM_UNNECESSARY:
- case CHECKSUM_COMPLETE:
- default:
- break;
- }
- }
-
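- /* First data descriptor of the frame. OWN for the chain's initial
- * ring entry is deferred to dwceqos_tx_finalize() so the DMA
- * cannot start on a half-built chain.
- */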
- dd->des3 |= DWCEQOS_DMA_TDES3_FD;
- if (lp->tx_next != tx->initial_descriptor)
- dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- tx->last_descriptor = lp->tx_next;
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
-
- return 0;
-}
-
-static int dwceqos_tx_frags(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- struct ring_desc *rd = NULL;
- struct dwceqos_dma_desc *dd;
- dma_addr_t dma_handle;
- size_t i;
-
- /* Setup more ring and DMA descriptor if the packet is fragmented */
- for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) {
- skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
- size_t frag_size;
- size_t consumed_size;
-
- /* Map DMA Area */
- dma_handle = skb_frag_dma_map(lp->ndev->dev.parent, frag, 0,
- skb_frag_size(frag),
- DMA_TO_DEVICE);
- if (dma_mapping_error(lp->ndev->dev.parent, dma_handle)) {
- netdev_err(lp->ndev, "DMA Mapping error\n");
- return -ENOMEM;
- }
-
- /* order-3 fragments span more than one descriptor. */
- frag_size = skb_frag_size(frag);
- consumed_size = 0;
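- /* A descriptor buffer holds at most 16376 bytes; split larger
- * fragments.
- */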
- while (consumed_size < frag_size) {
- size_t dma_size = min_t(size_t, 16376,
- frag_size - consumed_size);
-
- rd = &lp->tx_skb[lp->tx_next];
- memset(rd, 0, sizeof(*rd));
-
- dd = &lp->tx_descs[lp->tx_next];
-
- /* Set DMA Descriptor fields */
- dd->des0 = dma_handle + consumed_size;
- dd->des1 = 0;
- dd->des2 = dma_size;
-
- if (skb_is_gso(skb))
- dd->des3 = (skb->len - tx->network_header_len);
- else
- dd->des3 = skb->len;
-
- dd->des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- tx->last_descriptor = lp->tx_next;
- lp->tx_next = (lp->tx_next + 1) % DWCEQOS_TX_DCNT;
- consumed_size += dma_size;
- }
-
- rd->len = skb_frag_size(frag);
- rd->mapping = dma_handle;
- }
-
- return 0;
-}
-
-static void dwceqos_tx_finalize(struct sk_buff *skb, struct net_local *lp,
- struct dwceqos_tx *tx)
-{
- lp->tx_descs[tx->last_descriptor].des3 |= DWCEQOS_DMA_TDES3_LD;
- lp->tx_descs[tx->last_descriptor].des2 |= DWCEQOS_DMA_TDES2_IOC;
-
- lp->tx_skb[tx->last_descriptor].skb = skb;
-
- /* Make all descriptor updates visible to the DMA before setting the
- * owner bit.
- */
- wmb();
-
- lp->tx_descs[tx->initial_descriptor].des3 |= DWCEQOS_DMA_TDES3_OWN;
-
- /* Make the owner bit visible before TX wakeup. */
- wmb();
-
- dwceqos_tx_poll_demand(lp);
-}
-
-static void dwceqos_tx_rollback(struct net_local *lp, struct dwceqos_tx *tx)
-{
- size_t i = tx->initial_descriptor;
-
- while (i != lp->tx_next) {
- if (lp->tx_skb[i].mapping)
- dma_unmap_single(lp->ndev->dev.parent,
- lp->tx_skb[i].mapping,
- lp->tx_skb[i].len,
- DMA_TO_DEVICE);
-
- lp->tx_skb[i].mapping = 0;
- lp->tx_skb[i].skb = NULL;
-
- memset(&lp->tx_descs[i], 0, sizeof(lp->tx_descs[i]));
-
- i = (i + 1) % DWCEQOS_TX_DCNT;
- }
-
- lp->tx_next = tx->initial_descriptor;
- lp->gso_size = tx->prev_gso_size;
-}
-
-static int dwceqos_start_xmit(struct sk_buff *skb, struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct dwceqos_tx trans;
- int err;
-
- dwceqos_tx_prepare(skb, lp, &trans);
- if (lp->tx_free < trans.nr_descriptors) {
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
- }
-
- err = dwceqos_tx_linear(skb, lp, &trans);
- if (err)
- goto tx_error;
-
- err = dwceqos_tx_frags(skb, lp, &trans);
- if (err)
- goto tx_error;
-
- WARN_ON(lp->tx_next !=
- ((trans.initial_descriptor + trans.nr_descriptors) %
- DWCEQOS_TX_DCNT));
-
- spin_lock_bh(&lp->tx_lock);
- lp->tx_free -= trans.nr_descriptors;
- dwceqos_tx_finalize(skb, lp, &trans);
- netdev_sent_queue(ndev, skb->len);
- spin_unlock_bh(&lp->tx_lock);
-
- netif_trans_update(ndev);
- return 0;
-
-tx_error:
- dwceqos_tx_rollback(lp, &trans);
- dev_kfree_skb_any(skb);
- return 0;
-}
-
-/* Set MAC address and then update HW accordingly */
-static int dwceqos_set_mac_address(struct net_device *ndev, void *addr)
-{
- struct net_local *lp = netdev_priv(ndev);
- struct sockaddr *hwaddr = (struct sockaddr *)addr;
-
- if (netif_running(ndev))
- return -EBUSY;
-
- if (!is_valid_ether_addr(hwaddr->sa_data))
- return -EADDRNOTAVAIL;
-
- memcpy(ndev->dev_addr, hwaddr->sa_data, ndev->addr_len);
-
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
- return 0;
-}
-
-static void dwceqos_tx_timeout(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- queue_work(lp->txtimeout_handler_wq, &lp->txtimeout_reinit);
-}
-
-static void dwceqos_set_umac_addr(struct net_local *lp, unsigned char *addr,
- unsigned int reg_n)
-{
- unsigned long data;
-
- data = (addr[5] << 8) | addr[4];
- dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n),
- data | DWCEQOS_MAC_MAC_ADDR_HI_EN);
- data = (addr[3] << 24) | (addr[2] << 16) | (addr[1] << 8) | addr[0];
- dwceqos_write(lp, DWCEQOS_ADDR_LOW(reg_n), data);
-}
-
-static void dwceqos_disable_umac_addr(struct net_local *lp, unsigned int reg_n)
-{
- /* Do not disable MAC address 0 */
- if (reg_n != 0)
- dwceqos_write(lp, DWCEQOS_ADDR_HIGH(reg_n), 0);
-}
-
-static void dwceqos_set_rx_mode(struct net_device *ndev)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 regval = 0;
- u32 mc_filter[2];
- int reg = 1;
- struct netdev_hw_addr *ha;
- unsigned int max_mac_addr;
-
- max_mac_addr = DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1);
-
- if (ndev->flags & IFF_PROMISC) {
- regval = DWCEQOS_MAC_PKT_FILT_PR;
- } else if (((netdev_mc_count(ndev) > DWCEQOS_HASH_TABLE_SIZE) ||
- (ndev->flags & IFF_ALLMULTI))) {
- regval = DWCEQOS_MAC_PKT_FILT_PM;
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, 0xffffffff);
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, 0xffffffff);
- } else if (!netdev_mc_empty(ndev)) {
- regval = DWCEQOS_MAC_PKT_FILT_HMC;
- memset(mc_filter, 0, sizeof(mc_filter));
- netdev_for_each_mc_addr(ha, ndev) {
- /* The upper 6 bits of the calculated CRC are used to
- * index the contents of the hash table.
- */
- int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
- /* The most significant bit determines the register
- * to use (H/L) while the other 5 bits determine
- * the bit within the register.
- */
- mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
- }
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_LO, mc_filter[0]);
- dwceqos_write(lp, REG_DWCEQOS_HASTABLE_HI, mc_filter[1]);
- }
- if (netdev_uc_count(ndev) > max_mac_addr) {
- regval |= DWCEQOS_MAC_PKT_FILT_PR;
- } else {
- netdev_for_each_uc_addr(ha, ndev) {
- dwceqos_set_umac_addr(lp, ha->addr, reg);
- reg++;
- }
- for (; reg < DWCEQOS_MAX_PERFECT_ADDRESSES(lp->feature1); reg++)
- dwceqos_disable_umac_addr(lp, reg);
- }
- dwceqos_write(lp, REG_DWCEQOS_MAC_PKT_FILT, regval);
-}
-
-#ifdef CONFIG_NET_POLL_CONTROLLER
-static void dwceqos_poll_controller(struct net_device *ndev)
-{
- disable_irq(ndev->irq);
- dwceqos_interrupt(ndev->irq, ndev);
- enable_irq(ndev->irq);
-}
-#endif
-
-static void dwceqos_read_mmc_counters(struct net_local *lp, u32 rx_mask,
- u32 tx_mask)
-{
- if (tx_mask & BIT(27))
- lp->mmc_counters.txlpitranscntr +=
- dwceqos_read(lp, DWC_MMC_TXLPITRANSCNTR);
- if (tx_mask & BIT(26))
- lp->mmc_counters.txpiuscntr +=
- dwceqos_read(lp, DWC_MMC_TXLPIUSCNTR);
- if (tx_mask & BIT(25))
- lp->mmc_counters.txoversize_g +=
- dwceqos_read(lp, DWC_MMC_TXOVERSIZE_G);
- if (tx_mask & BIT(24))
- lp->mmc_counters.txvlanpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXVLANPACKETS_G);
- if (tx_mask & BIT(23))
- lp->mmc_counters.txpausepackets +=
- dwceqos_read(lp, DWC_MMC_TXPAUSEPACKETS);
- if (tx_mask & BIT(22))
- lp->mmc_counters.txexcessdef +=
- dwceqos_read(lp, DWC_MMC_TXEXCESSDEF);
- if (tx_mask & BIT(21))
- lp->mmc_counters.txpacketcount_g +=
- dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_G);
- if (tx_mask & BIT(20))
- lp->mmc_counters.txoctetcount_g +=
- dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_G);
- if (tx_mask & BIT(19))
- lp->mmc_counters.txcarriererror +=
- dwceqos_read(lp, DWC_MMC_TXCARRIERERROR);
- if (tx_mask & BIT(18))
- lp->mmc_counters.txexcesscol +=
- dwceqos_read(lp, DWC_MMC_TXEXCESSCOL);
- if (tx_mask & BIT(17))
- lp->mmc_counters.txlatecol +=
- dwceqos_read(lp, DWC_MMC_TXLATECOL);
- if (tx_mask & BIT(16))
- lp->mmc_counters.txdeferred +=
- dwceqos_read(lp, DWC_MMC_TXDEFERRED);
- if (tx_mask & BIT(15))
- lp->mmc_counters.txmulticol_g +=
- dwceqos_read(lp, DWC_MMC_TXMULTICOL_G);
- if (tx_mask & BIT(14))
- lp->mmc_counters.txsinglecol_g +=
- dwceqos_read(lp, DWC_MMC_TXSINGLECOL_G);
- if (tx_mask & BIT(13))
- lp->mmc_counters.txunderflowerror +=
- dwceqos_read(lp, DWC_MMC_TXUNDERFLOWERROR);
- if (tx_mask & BIT(12))
- lp->mmc_counters.txbroadcastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_GB);
- if (tx_mask & BIT(11))
- lp->mmc_counters.txmulticastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_GB);
- if (tx_mask & BIT(10))
- lp->mmc_counters.txunicastpackets_gb +=
- dwceqos_read(lp, DWC_MMC_TXUNICASTPACKETS_GB);
- if (tx_mask & BIT(9))
- lp->mmc_counters.tx1024tomaxoctets_gb +=
- dwceqos_read(lp, DWC_MMC_TX1024TOMAXOCTETS_GB);
- if (tx_mask & BIT(8))
- lp->mmc_counters.tx512to1023octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX512TO1023OCTETS_GB);
- if (tx_mask & BIT(7))
- lp->mmc_counters.tx256to511octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX256TO511OCTETS_GB);
- if (tx_mask & BIT(6))
- lp->mmc_counters.tx128to255octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX128TO255OCTETS_GB);
- if (tx_mask & BIT(5))
- lp->mmc_counters.tx65to127octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX65TO127OCTETS_GB);
- if (tx_mask & BIT(4))
- lp->mmc_counters.tx64octets_gb +=
- dwceqos_read(lp, DWC_MMC_TX64OCTETS_GB);
- if (tx_mask & BIT(3))
- lp->mmc_counters.txmulticastpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXMULTICASTPACKETS_G);
- if (tx_mask & BIT(2))
- lp->mmc_counters.txbroadcastpackets_g +=
- dwceqos_read(lp, DWC_MMC_TXBROADCASTPACKETS_G);
- if (tx_mask & BIT(1))
- lp->mmc_counters.txpacketcount_gb +=
- dwceqos_read(lp, DWC_MMC_TXPACKETCOUNT_GB);
- if (tx_mask & BIT(0))
- lp->mmc_counters.txoctetcount_gb +=
- dwceqos_read(lp, DWC_MMC_TXOCTETCOUNT_GB);
-
- if (rx_mask & BIT(27))
- lp->mmc_counters.rxlpitranscntr +=
- dwceqos_read(lp, DWC_MMC_RXLPITRANSCNTR);
- if (rx_mask & BIT(26))
- lp->mmc_counters.rxlpiuscntr +=
- dwceqos_read(lp, DWC_MMC_RXLPIUSCNTR);
- if (rx_mask & BIT(25))
- lp->mmc_counters.rxctrlpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXCTRLPACKETS_G);
- if (rx_mask & BIT(24))
- lp->mmc_counters.rxrcverror +=
- dwceqos_read(lp, DWC_MMC_RXRCVERROR);
- if (rx_mask & BIT(23))
- lp->mmc_counters.rxwatchdog +=
- dwceqos_read(lp, DWC_MMC_RXWATCHDOG);
- if (rx_mask & BIT(22))
- lp->mmc_counters.rxvlanpackets_gb +=
- dwceqos_read(lp, DWC_MMC_RXVLANPACKETS_GB);
- if (rx_mask & BIT(21))
- lp->mmc_counters.rxfifooverflow +=
- dwceqos_read(lp, DWC_MMC_RXFIFOOVERFLOW);
- if (rx_mask & BIT(20))
- lp->mmc_counters.rxpausepackets +=
- dwceqos_read(lp, DWC_MMC_RXPAUSEPACKETS);
- if (rx_mask & BIT(19))
- lp->mmc_counters.rxoutofrangetype +=
- dwceqos_read(lp, DWC_MMC_RXOUTOFRANGETYPE);
- if (rx_mask & BIT(18))
- lp->mmc_counters.rxlengtherror +=
- dwceqos_read(lp, DWC_MMC_RXLENGTHERROR);
- if (rx_mask & BIT(17))
- lp->mmc_counters.rxunicastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXUNICASTPACKETS_G);
- if (rx_mask & BIT(16))
- lp->mmc_counters.rx1024tomaxoctets_gb +=
- dwceqos_read(lp, DWC_MMC_RX1024TOMAXOCTETS_GB);
- if (rx_mask & BIT(15))
- lp->mmc_counters.rx512to1023octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX512TO1023OCTETS_GB);
- if (rx_mask & BIT(14))
- lp->mmc_counters.rx256to511octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX256TO511OCTETS_GB);
- if (rx_mask & BIT(13))
- lp->mmc_counters.rx128to255octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX128TO255OCTETS_GB);
- if (rx_mask & BIT(12))
- lp->mmc_counters.rx65to127octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX65TO127OCTETS_GB);
- if (rx_mask & BIT(11))
- lp->mmc_counters.rx64octets_gb +=
- dwceqos_read(lp, DWC_MMC_RX64OCTETS_GB);
- if (rx_mask & BIT(10))
- lp->mmc_counters.rxoversize_g +=
- dwceqos_read(lp, DWC_MMC_RXOVERSIZE_G);
- if (rx_mask & BIT(9))
- lp->mmc_counters.rxundersize_g +=
- dwceqos_read(lp, DWC_MMC_RXUNDERSIZE_G);
- if (rx_mask & BIT(8))
- lp->mmc_counters.rxjabbererror +=
- dwceqos_read(lp, DWC_MMC_RXJABBERERROR);
- if (rx_mask & BIT(7))
- lp->mmc_counters.rxrunterror +=
- dwceqos_read(lp, DWC_MMC_RXRUNTERROR);
- if (rx_mask & BIT(6))
- lp->mmc_counters.rxalignmenterror +=
- dwceqos_read(lp, DWC_MMC_RXALIGNMENTERROR);
- if (rx_mask & BIT(5))
- lp->mmc_counters.rxcrcerror +=
- dwceqos_read(lp, DWC_MMC_RXCRCERROR);
- if (rx_mask & BIT(4))
- lp->mmc_counters.rxmulticastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXMULTICASTPACKETS_G);
- if (rx_mask & BIT(3))
- lp->mmc_counters.rxbroadcastpackets_g +=
- dwceqos_read(lp, DWC_MMC_RXBROADCASTPACKETS_G);
- if (rx_mask & BIT(2))
- lp->mmc_counters.rxoctetcount_g +=
- dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_G);
- if (rx_mask & BIT(1))
- lp->mmc_counters.rxoctetcount_gb +=
- dwceqos_read(lp, DWC_MMC_RXOCTETCOUNT_GB);
- if (rx_mask & BIT(0))
- lp->mmc_counters.rxpacketcount_gb +=
- dwceqos_read(lp, DWC_MMC_RXPACKETCOUNT_GB);
-}
-
-static void
-dwceqos_get_stats64(struct net_device *ndev, struct rtnl_link_stats64 *s)
-{
- unsigned long flags;
- struct net_local *lp = netdev_priv(ndev);
- struct dwceqos_mmc_counters *hwstats = &lp->mmc_counters;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
- dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
- lp->mmc_tx_counters_mask);
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-
- s->rx_packets = hwstats->rxpacketcount_gb;
- s->rx_bytes = hwstats->rxoctetcount_gb;
- s->rx_errors = hwstats->rxpacketcount_gb -
- hwstats->rxbroadcastpackets_g -
- hwstats->rxmulticastpackets_g -
- hwstats->rxunicastpackets_g;
- s->multicast = hwstats->rxmulticastpackets_g;
- s->rx_length_errors = hwstats->rxlengtherror;
- s->rx_crc_errors = hwstats->rxcrcerror;
- s->rx_fifo_errors = hwstats->rxfifooverflow;
-
- s->tx_packets = hwstats->txpacketcount_gb;
- s->tx_bytes = hwstats->txoctetcount_gb;
-
- if (lp->mmc_tx_counters_mask & BIT(21))
- s->tx_errors = hwstats->txpacketcount_gb -
- hwstats->txpacketcount_g;
- else
- s->tx_errors = hwstats->txunderflowerror +
- hwstats->txcarriererror;
-}
-
-static void
-dwceqos_get_drvinfo(struct net_device *ndev, struct ethtool_drvinfo *ed)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- strlcpy(ed->driver, lp->pdev->dev.driver->name,
- sizeof(ed->driver));
- strlcpy(ed->version, DRIVER_VERSION, sizeof(ed->version));
-}
-
-static void dwceqos_get_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pp)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- pp->autoneg = lp->flowcontrol.autoneg;
- pp->tx_pause = lp->flowcontrol.tx;
- pp->rx_pause = lp->flowcontrol.rx;
-}
-
-static int dwceqos_set_pauseparam(struct net_device *ndev,
- struct ethtool_pauseparam *pp)
-{
- struct net_local *lp = netdev_priv(ndev);
- int ret = 0;
-
- lp->flowcontrol.autoneg = pp->autoneg;
- if (pp->autoneg) {
- ndev->phydev->advertising |= ADVERTISED_Pause;
- ndev->phydev->advertising |= ADVERTISED_Asym_Pause;
- } else {
- ndev->phydev->advertising &= ~ADVERTISED_Pause;
- ndev->phydev->advertising &= ~ADVERTISED_Asym_Pause;
- lp->flowcontrol.rx = pp->rx_pause;
- lp->flowcontrol.tx = pp->tx_pause;
- }
-
- if (netif_running(ndev))
- ret = phy_start_aneg(ndev->phydev);
-
- return ret;
-}
-
-static void dwceqos_get_strings(struct net_device *ndev, u32 stringset,
- u8 *data)
-{
- size_t i;
-
- if (stringset != ETH_SS_STATS)
- return;
-
- for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
- memcpy(data, dwceqos_ethtool_stats[i].stat_name,
- ETH_GSTRING_LEN);
- data += ETH_GSTRING_LEN;
- }
-}
-
-static void dwceqos_get_ethtool_stats(struct net_device *ndev,
- struct ethtool_stats *stats, u64 *data)
-{
- struct net_local *lp = netdev_priv(ndev);
- unsigned long flags;
- size_t i;
- u8 *mmcstat = (u8 *)&lp->mmc_counters;
-
- spin_lock_irqsave(&lp->stats_lock, flags);
- dwceqos_read_mmc_counters(lp, lp->mmc_rx_counters_mask,
- lp->mmc_tx_counters_mask);
- spin_unlock_irqrestore(&lp->stats_lock, flags);
-
- for (i = 0; i < ARRAY_SIZE(dwceqos_ethtool_stats); ++i) {
- memcpy(data,
- mmcstat + dwceqos_ethtool_stats[i].offset,
- sizeof(u64));
- data++;
- }
-}
-
-static int dwceqos_get_sset_count(struct net_device *ndev, int sset)
-{
- if (sset == ETH_SS_STATS)
- return ARRAY_SIZE(dwceqos_ethtool_stats);
-
- return -EOPNOTSUPP;
-}
-
-static void dwceqos_get_regs(struct net_device *dev, struct ethtool_regs *regs,
- void *space)
-{
- const struct net_local *lp = netdev_priv(dev);
- u32 *reg_space = (u32 *)space;
- int reg_offset;
- int reg_ix = 0;
-
- /* MAC registers */
- for (reg_offset = START_MAC_REG_OFFSET;
- reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
- /* MTL registers */
- for (reg_offset = START_MTL_REG_OFFSET;
- reg_offset <= MAX_MTL_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
-
- /* DMA registers */
- for (reg_offset = START_DMA_REG_OFFSET;
- reg_offset <= MAX_DMA_REG_OFFSET; reg_offset += 4) {
- reg_space[reg_ix] = dwceqos_read(lp, reg_offset);
- reg_ix++;
- }
-
- BUG_ON(4 * reg_ix > REG_SPACE_SIZE);
-}
-
-static int dwceqos_get_regs_len(struct net_device *dev)
-{
- return REG_SPACE_SIZE;
-}
-
-static inline const char *dwceqos_get_rx_lpi_state(u32 lpi_ctrl)
-{
- return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_RLPIST) ? "on" : "off";
-}
-
-static inline const char *dwceqos_get_tx_lpi_state(u32 lpi_ctrl)
-{
- return (lpi_ctrl & DWCEQOS_MAC_LPI_CTRL_STATUS_TLPIST) ? "on" : "off";
-}
-
-static int dwceqos_get_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 lpi_status;
- u32 lpi_enabled;
-
- if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
- return -EOPNOTSUPP;
-
- edata->eee_active = lp->eee_active;
- edata->eee_enabled = lp->eee_enabled;
- edata->tx_lpi_timer = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER);
- lpi_status = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- lpi_enabled = !!(lpi_status & DWCEQOS_MAC_LPI_CTRL_STATUS_LIPTXA);
- edata->tx_lpi_enabled = lpi_enabled;
-
- if (netif_msg_hw(lp)) {
- u32 regval;
-
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
-
- netdev_info(lp->ndev, "MAC LPI State: RX:%s TX:%s\n",
- dwceqos_get_rx_lpi_state(regval),
- dwceqos_get_tx_lpi_state(regval));
- }
-
- return phy_ethtool_get_eee(ndev->phydev, edata);
-}
-
-static int dwceqos_set_eee(struct net_device *ndev, struct ethtool_eee *edata)
-{
- struct net_local *lp = netdev_priv(ndev);
- u32 regval;
- unsigned long flags;
-
- if (!(lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_EEESEL))
- return -EOPNOTSUPP;
-
- if (edata->eee_enabled && !lp->eee_active)
- return -EOPNOTSUPP;
-
- if (edata->tx_lpi_enabled) {
- if (edata->tx_lpi_timer < DWCEQOS_LPI_TIMER_MIN ||
- edata->tx_lpi_timer > DWCEQOS_LPI_TIMER_MAX)
- return -EINVAL;
- }
-
- lp->eee_enabled = edata->eee_enabled;
-
- if (edata->eee_enabled && edata->tx_lpi_enabled) {
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_ENTRY_TIMER,
- edata->tx_lpi_timer);
-
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval |= DWCEQOS_LPI_CTRL_ENABLE_EEE;
- if (lp->en_tx_lpi_clockgating)
- regval |= DWCEQOS_MAC_LPI_CTRL_STATUS_LPITCSE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- } else {
- spin_lock_irqsave(&lp->hw_lock, flags);
- regval = dwceqos_read(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS);
- regval &= ~DWCEQOS_LPI_CTRL_ENABLE_EEE;
- dwceqos_write(lp, REG_DWCEQOS_MAC_LPI_CTRL_STATUS, regval);
- spin_unlock_irqrestore(&lp->hw_lock, flags);
- }
-
- return phy_ethtool_set_eee(ndev->phydev, edata);
-}
-
-static u32 dwceqos_get_msglevel(struct net_device *ndev)
-{
- const struct net_local *lp = netdev_priv(ndev);
-
- return lp->msg_enable;
-}
-
-static void dwceqos_set_msglevel(struct net_device *ndev, u32 msglevel)
-{
- struct net_local *lp = netdev_priv(ndev);
-
- lp->msg_enable = msglevel;
-}
-
-static const struct ethtool_ops dwceqos_ethtool_ops = {
- .get_drvinfo = dwceqos_get_drvinfo,
- .get_link = ethtool_op_get_link,
- .get_pauseparam = dwceqos_get_pauseparam,
- .set_pauseparam = dwceqos_set_pauseparam,
- .get_strings = dwceqos_get_strings,
- .get_ethtool_stats = dwceqos_get_ethtool_stats,
- .get_sset_count = dwceqos_get_sset_count,
- .get_regs = dwceqos_get_regs,
- .get_regs_len = dwceqos_get_regs_len,
- .get_eee = dwceqos_get_eee,
- .set_eee = dwceqos_set_eee,
- .get_msglevel = dwceqos_get_msglevel,
- .set_msglevel = dwceqos_set_msglevel,
- .get_link_ksettings = phy_ethtool_get_link_ksettings,
- .set_link_ksettings = phy_ethtool_set_link_ksettings,
-};
-
-static const struct net_device_ops netdev_ops = {
- .ndo_open = dwceqos_open,
- .ndo_stop = dwceqos_stop,
- .ndo_start_xmit = dwceqos_start_xmit,
- .ndo_set_rx_mode = dwceqos_set_rx_mode,
- .ndo_set_mac_address = dwceqos_set_mac_address,
-#ifdef CONFIG_NET_POLL_CONTROLLER
- .ndo_poll_controller = dwceqos_poll_controller,
-#endif
- .ndo_do_ioctl = dwceqos_ioctl,
- .ndo_tx_timeout = dwceqos_tx_timeout,
- .ndo_get_stats64 = dwceqos_get_stats64,
-};
-
-static const struct of_device_id dwceq_of_match[] = {
- { .compatible = "snps,dwc-qos-ethernet-4.10", },
- {}
-};
-MODULE_DEVICE_TABLE(of, dwceq_of_match);
-
-static int dwceqos_probe(struct platform_device *pdev)
-{
- struct resource *r_mem = NULL;
- struct net_device *ndev;
- struct net_local *lp;
- int ret = -ENXIO;
-
- r_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!r_mem) {
- dev_err(&pdev->dev, "no IO resource defined.\n");
- return -ENXIO;
- }
-
- ndev = alloc_etherdev(sizeof(*lp));
- if (!ndev) {
- dev_err(&pdev->dev, "etherdev allocation failed.\n");
- return -ENOMEM;
- }
-
- SET_NETDEV_DEV(ndev, &pdev->dev);
-
- lp = netdev_priv(ndev);
- lp->ndev = ndev;
- lp->pdev = pdev;
- lp->msg_enable = netif_msg_init(debug, DWCEQOS_MSG_DEFAULT);
-
- spin_lock_init(&lp->tx_lock);
- spin_lock_init(&lp->hw_lock);
- spin_lock_init(&lp->stats_lock);
-
- lp->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
- if (IS_ERR(lp->apb_pclk)) {
- dev_err(&pdev->dev, "apb_pclk clock not found.\n");
- ret = PTR_ERR(lp->apb_pclk);
- goto err_out_free_netdev;
- }
-
- ret = clk_prepare_enable(lp->apb_pclk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable APER clock.\n");
- goto err_out_free_netdev;
- }
-
- lp->baseaddr = devm_ioremap_resource(&pdev->dev, r_mem);
- if (IS_ERR(lp->baseaddr)) {
- dev_err(&pdev->dev, "failed to map baseaddress.\n");
- ret = PTR_ERR(lp->baseaddr);
- goto err_out_clk_dis_aper;
- }
-
- ndev->irq = platform_get_irq(pdev, 0);
- ndev->watchdog_timeo = DWCEQOS_TX_TIMEOUT * HZ;
- ndev->netdev_ops = &netdev_ops;
- ndev->ethtool_ops = &dwceqos_ethtool_ops;
- ndev->base_addr = r_mem->start;
-
- dwceqos_get_hwfeatures(lp);
- dwceqos_mdio_set_csr(lp);
-
- ndev->hw_features = NETIF_F_SG;
-
- if (lp->feature1 & DWCEQOS_MAC_HW_FEATURE1_TSOEN)
- ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
-
- if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_TXCOESEL)
- ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
-
- if (lp->feature0 & DWCEQOS_MAC_HW_FEATURE0_RXCOESEL)
- ndev->hw_features |= NETIF_F_RXCSUM;
-
- ndev->features = ndev->hw_features;
-
- lp->phy_ref_clk = devm_clk_get(&pdev->dev, "phy_ref_clk");
- if (IS_ERR(lp->phy_ref_clk)) {
- dev_err(&pdev->dev, "phy_ref_clk clock not found.\n");
- ret = PTR_ERR(lp->phy_ref_clk);
- goto err_out_clk_dis_aper;
- }
-
- ret = clk_prepare_enable(lp->phy_ref_clk);
- if (ret) {
- dev_err(&pdev->dev, "Unable to enable device clock.\n");
- goto err_out_clk_dis_aper;
- }
-
- lp->phy_node = of_parse_phandle(lp->pdev->dev.of_node,
- "phy-handle", 0);
- if (!lp->phy_node && of_phy_is_fixed_link(lp->pdev->dev.of_node)) {
- ret = of_phy_register_fixed_link(lp->pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&pdev->dev, "invalid fixed-link");
- goto err_out_clk_dis_phy;
- }
-
- lp->phy_node = of_node_get(lp->pdev->dev.of_node);
- }
-
- ret = of_get_phy_mode(lp->pdev->dev.of_node);
- if (ret < 0) {
- dev_err(&lp->pdev->dev, "error in getting phy i/f\n");
- goto err_out_deregister_fixed_link;
- }
-
- lp->phy_interface = ret;
-
- ret = dwceqos_mii_init(lp);
- if (ret) {
- dev_err(&lp->pdev->dev, "error in dwceqos_mii_init\n");
- goto err_out_deregister_fixed_link;
- }
-
- ret = dwceqos_mii_probe(ndev);
- if (ret != 0) {
- netdev_err(ndev, "mii_probe fail.\n");
- ret = -ENXIO;
- goto err_out_deregister_fixed_link;
- }
-
- dwceqos_set_umac_addr(lp, lp->ndev->dev_addr, 0);
-
- tasklet_init(&lp->tx_bdreclaim_tasklet, dwceqos_tx_reclaim,
- (unsigned long)ndev);
- tasklet_disable(&lp->tx_bdreclaim_tasklet);
-
- lp->txtimeout_handler_wq = alloc_workqueue(DRIVER_NAME,
- WQ_MEM_RECLAIM, 0);
- INIT_WORK(&lp->txtimeout_reinit, dwceqos_reinit_for_txtimeout);
-
- platform_set_drvdata(pdev, ndev);
- ret = dwceqos_probe_config_dt(pdev);
- if (ret) {
- dev_err(&lp->pdev->dev, "Unable to retrieve DT, error %d\n",
- ret);
- goto err_out_deregister_fixed_link;
- }
- dev_info(&lp->pdev->dev, "pdev->id %d, baseaddr 0x%08lx, irq %d\n",
- pdev->id, ndev->base_addr, ndev->irq);
-
- ret = devm_request_irq(&pdev->dev, ndev->irq, &dwceqos_interrupt, 0,
- ndev->name, ndev);
- if (ret) {
- dev_err(&lp->pdev->dev, "Unable to request IRQ %d, error %d\n",
- ndev->irq, ret);
- goto err_out_deregister_fixed_link;
- }
-
- if (netif_msg_probe(lp))
- netdev_dbg(ndev, "net_local@%p\n", lp);
-
- netif_napi_add(ndev, &lp->napi, dwceqos_rx_poll, NAPI_POLL_WEIGHT);
-
- ret = register_netdev(ndev);
- if (ret) {
- dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
- goto err_out_deregister_fixed_link;
- }
-
- return 0;
-
-err_out_deregister_fixed_link:
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
-err_out_clk_dis_phy:
- clk_disable_unprepare(lp->phy_ref_clk);
-err_out_clk_dis_aper:
- clk_disable_unprepare(lp->apb_pclk);
-err_out_free_netdev:
- of_node_put(lp->phy_node);
- free_netdev(ndev);
- platform_set_drvdata(pdev, NULL);
- return ret;
-}
-
-static int dwceqos_remove(struct platform_device *pdev)
-{
- struct net_device *ndev = platform_get_drvdata(pdev);
- struct net_local *lp;
-
- if (ndev) {
- lp = netdev_priv(ndev);
-
- if (ndev->phydev) {
- phy_disconnect(ndev->phydev);
- if (of_phy_is_fixed_link(pdev->dev.of_node))
- of_phy_deregister_fixed_link(pdev->dev.of_node);
- }
- mdiobus_unregister(lp->mii_bus);
- mdiobus_free(lp->mii_bus);
-
- unregister_netdev(ndev);
-
- clk_disable_unprepare(lp->phy_ref_clk);
- clk_disable_unprepare(lp->apb_pclk);
-
- free_netdev(ndev);
- }
-
- return 0;
-}
-
-static struct platform_driver dwceqos_driver = {
- .probe = dwceqos_probe,
- .remove = dwceqos_remove,
- .driver = {
- .name = DRIVER_NAME,
- .of_match_table = dwceq_of_match,
- },
-};
-
-module_platform_driver(dwceqos_driver);
-
-MODULE_DESCRIPTION("DWC Ethernet QoS v4.10a driver");
-MODULE_LICENSE("GPL v2");
-MODULE_AUTHOR("Andreas Irestaal <andreas.irestal@axis.com>");
-MODULE_AUTHOR("Lars Persson <lars.persson@axis.com>");
diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c
index baa3e4a..f864fd0 100644
--- a/drivers/net/ethernet/tehuti/tehuti.c
+++ b/drivers/net/ethernet/tehuti/tehuti.c
@@ -303,7 +303,7 @@ static int bdx_poll(struct napi_struct *napi, int budget)
* device lock and allow waiting tasks (eg rmmod) to advance */
priv->napi_stop = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
bdx_enable_interrupts(priv);
}
return work_done;
diff --git a/drivers/net/ethernet/ti/cpmac.c b/drivers/net/ethernet/ti/cpmac.c
index 77c88fc..9b8a30b 100644
--- a/drivers/net/ethernet/ti/cpmac.c
+++ b/drivers/net/ethernet/ti/cpmac.c
@@ -1210,7 +1210,7 @@ int cpmac_init(void)
goto fail_alloc;
}
-#warning FIXME: unhardcode gpio&reset bits
+ /* FIXME: unhardcode gpio&reset bits */
ar7_gpio_disable(26);
ar7_gpio_disable(27);
ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 02b03ee..35a95dc 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -357,7 +357,6 @@ struct cpsw_slave {
struct phy_device *phy;
struct net_device *ndev;
u32 port_vlan;
- u32 open_stat;
};
static inline u32 slave_read(struct cpsw_slave *slave, u32 offset)
@@ -672,6 +671,18 @@ static void cpsw_intr_disable(struct cpsw_common *cpsw)
return;
}
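+/* Count the slave netdevs that are currently up; shared resources
+ * are torn down when this count reaches zero.
+ */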
+static int cpsw_get_usage_count(struct cpsw_common *cpsw)
+{
+ u32 i;
+ u32 usage_count = 0;
+
+ for (i = 0; i < cpsw->data.slaves; i++)
+ if (cpsw->slaves[i].ndev && netif_running(cpsw->slaves[i].ndev))
+ usage_count++;
+
+ return usage_count;
+}
+
static void cpsw_tx_handler(void *token, int len, int status)
{
struct netdev_queue *txq;
@@ -704,18 +715,10 @@ static void cpsw_rx_handler(void *token, int len, int status)
cpsw_dual_emac_src_port_detect(cpsw, status, ndev, skb);
if (unlikely(status < 0) || unlikely(!netif_running(ndev))) {
- bool ndev_status = false;
- struct cpsw_slave *slave = cpsw->slaves;
- int n;
-
- if (cpsw->data.dual_emac) {
- /* In dual emac mode check for all interfaces */
- for (n = cpsw->data.slaves; n; n--, slave++)
- if (netif_running(slave->ndev))
- ndev_status = true;
- }
-
- if (ndev_status && (status >= 0)) {
+ /* In dual emac mode check for all interfaces */
+ if (cpsw->data.dual_emac &&
+ cpsw_get_usage_count(cpsw) &&
+ (status >= 0)) {
/* The packet received is for the interface which
* is already down and the other interface is up
* and running, instead of freeing which results
@@ -939,7 +942,7 @@ static int cpsw_rx_poll(struct napi_struct *napi_rx, int budget)
}
if (num_rx < budget) {
- napi_complete(napi_rx);
+ napi_complete_done(napi_rx, num_rx);
writel(0xff, &cpsw->wr_regs->rx_en);
if (cpsw->quirk_irq && cpsw->rx_irq_disabled) {
cpsw->rx_irq_disabled = false;
@@ -1235,21 +1238,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
}
}
-static int cpsw_common_res_usage_state(struct cpsw_common *cpsw)
-{
- u32 i;
- u32 usage_count = 0;
-
- if (!cpsw->data.dual_emac)
- return 0;
-
- for (i = 0; i < cpsw->data.slaves; i++)
- if (cpsw->slaves[i].open_stat)
- usage_count++;
-
- return usage_count;
-}
-
static inline int cpsw_tx_packet_submit(struct cpsw_priv *priv,
struct sk_buff *skb,
struct cpdma_chan *txch)
@@ -1483,8 +1471,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
return ret;
}
- if (!cpsw_common_res_usage_state(cpsw))
- cpsw_intr_disable(cpsw);
netif_carrier_off(ndev);
/* Notify the stack of the actual queue counts. */
@@ -1506,8 +1492,11 @@ static int cpsw_ndo_open(struct net_device *ndev)
CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
CPSW_RTL_VERSION(reg));
- /* initialize host and slave ports */
- if (!cpsw_common_res_usage_state(cpsw))
+ /* Initialize host and slave ports.
+ * The given ndev is already marked as opened, so init the port
+ * only if this is the first ndev to be opened.
+ */
+ if (cpsw_get_usage_count(cpsw) < 2)
cpsw_init_host_port(priv);
for_each_slave(priv, cpsw_slave_open, priv);
@@ -1518,7 +1507,10 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpsw_ale_add_vlan(cpsw->ale, cpsw->data.default_vlan,
ALE_ALL_PORTS, ALE_ALL_PORTS, 0, 0);
- if (!cpsw_common_res_usage_state(cpsw)) {
+ /* The given ndev is already marked as opened, so if other ndevs
+ * are open as well, the shared resources need no init.
+ */
+ if (cpsw_get_usage_count(cpsw) < 2) {
/* disable priority elevation */
__raw_writel(0, &cpsw->regs->ptype);
@@ -1561,9 +1553,6 @@ static int cpsw_ndo_open(struct net_device *ndev)
cpdma_ctlr_start(cpsw->dma);
cpsw_intr_enable(cpsw);
- if (cpsw->data.dual_emac)
- cpsw->slaves[priv->emac_port].open_stat = true;
-
return 0;
err_cleanup:
@@ -1583,7 +1572,10 @@ static int cpsw_ndo_stop(struct net_device *ndev)
netif_tx_stop_all_queues(priv->ndev);
netif_carrier_off(priv->ndev);
- if (cpsw_common_res_usage_state(cpsw) <= 1) {
+ /* The given ndev is already marked as closed, so disable the
+ * shared resources once no devices remain open.
+ */
+ if (!cpsw_get_usage_count(cpsw)) {
napi_disable(&cpsw->napi_rx);
napi_disable(&cpsw->napi_tx);
cpts_unregister(cpsw->cpts);
@@ -1597,8 +1589,6 @@ static int cpsw_ndo_stop(struct net_device *ndev)
cpsw_split_res(ndev);
pm_runtime_put_sync(cpsw->dev);
- if (cpsw->data.dual_emac)
- cpsw->slaves[priv->emac_port].open_stat = false;
return 0;
}
@@ -2368,17 +2358,11 @@ static int cpsw_update_channels(struct cpsw_priv *priv,
return 0;
}
-static int cpsw_set_channels(struct net_device *ndev,
- struct ethtool_channels *chs)
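+/* Quiesce packet flow: disable interrupts, stop the TX queues, mark
+ * slaves dormant and halt cpdma.
+ */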
+static void cpsw_suspend_data_pass(struct net_device *ndev)
{
- struct cpsw_priv *priv = netdev_priv(ndev);
- struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_common *cpsw = ndev_to_cpsw(ndev);
struct cpsw_slave *slave;
- int i, ret;
-
- ret = cpsw_check_ch_settings(cpsw, chs);
- if (ret < 0)
- return ret;
+ int i;
/* Disable NAPI scheduling */
cpsw_intr_disable(cpsw);
@@ -2396,6 +2380,51 @@ static int cpsw_set_channels(struct net_device *ndev,
/* Handle rest of tx packets and stop cpdma channels */
cpdma_ctlr_stop(cpsw->dma);
+}
+
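+/* Restart packet flow after reconfiguration: clear dormant state,
+ * refill the RX channels, restart cpdma and wake the TX queues.
+ */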
+static int cpsw_resume_data_pass(struct net_device *ndev)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ /* Allow rx packets handling */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_dormant_off(slave->ndev);
+
+ /* After this receive is started */
+ if (cpsw_get_usage_count(cpsw)) {
+ ret = cpsw_fill_rx_channels(priv);
+ if (ret)
+ return ret;
+
+ cpdma_ctlr_start(cpsw->dma);
+ cpsw_intr_enable(cpsw);
+ }
+
+ /* Resume transmit for every affected interface */
+ for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++)
+ if (slave->ndev && netif_running(slave->ndev))
+ netif_tx_start_all_queues(slave->ndev);
+
+ return 0;
+}
+
+static int cpsw_set_channels(struct net_device *ndev,
+ struct ethtool_channels *chs)
+{
+ struct cpsw_priv *priv = netdev_priv(ndev);
+ struct cpsw_common *cpsw = priv->cpsw;
+ struct cpsw_slave *slave;
+ int i, ret;
+
+ ret = cpsw_check_ch_settings(cpsw, chs);
+ if (ret < 0)
+ return ret;
+
+ cpsw_suspend_data_pass(ndev);
ret = cpsw_update_channels(priv, chs);
if (ret)
goto err;
@@ -2418,30 +2447,14 @@ static int cpsw_set_channels(struct net_device *ndev,
dev_err(priv->dev, "cannot set real number of rx queues\n");
goto err;
}
-
- /* Enable rx packets handling */
- netif_dormant_off(slave->ndev);
}
- if (cpsw_common_res_usage_state(cpsw)) {
- ret = cpsw_fill_rx_channels(priv);
- if (ret)
- goto err;
-
+ if (cpsw_get_usage_count(cpsw))
cpsw_split_res(ndev);
- /* After this receive is started */
- cpdma_ctlr_start(cpsw->dma);
- cpsw_intr_enable(cpsw);
- }
-
- /* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
- netif_tx_start_all_queues(slave->ndev);
- }
- return 0;
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
err:
dev_err(priv->dev, "cannot update channels number, closing device\n");
dev_close(ndev);
@@ -2502,8 +2515,7 @@ static int cpsw_set_ringparam(struct net_device *ndev,
{
struct cpsw_priv *priv = netdev_priv(ndev);
struct cpsw_common *cpsw = priv->cpsw;
- struct cpsw_slave *slave;
- int i, ret;
+ int ret;
/* ignore ering->tx_pending - only rx_pending adjustment is supported */
@@ -2515,54 +2527,18 @@ static int cpsw_set_ringparam(struct net_device *ndev,
if (ering->rx_pending == cpdma_get_num_rx_descs(cpsw->dma))
return 0;
- /* Disable NAPI scheduling */
- cpsw_intr_disable(cpsw);
-
- /* Stop all transmit queues for every network device.
- * Disable re-using rx descriptors with dormant_on.
- */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- netif_tx_stop_all_queues(slave->ndev);
- netif_dormant_on(slave->ndev);
- }
-
- /* Handle rest of tx packets and stop cpdma channels */
- cpdma_ctlr_stop(cpsw->dma);
+ cpsw_suspend_data_pass(ndev);
cpdma_set_num_rx_descs(cpsw->dma, ering->rx_pending);
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
-
- /* Enable rx packets handling */
- netif_dormant_off(slave->ndev);
- }
-
- if (cpsw_common_res_usage_state(cpsw)) {
+ if (cpsw_get_usage_count(cpsw))
cpdma_chan_split_pool(cpsw->dma);
- ret = cpsw_fill_rx_channels(priv);
- if (ret)
- goto err;
-
- /* After this receive is started */
- cpdma_ctlr_start(cpsw->dma);
- cpsw_intr_enable(cpsw);
- }
+ ret = cpsw_resume_data_pass(ndev);
+ if (!ret)
+ return 0;
- /* Resume transmit for every affected interface */
- for (i = cpsw->data.slaves, slave = cpsw->slaves; i; i--, slave++) {
- if (!(slave->ndev && netif_running(slave->ndev)))
- continue;
- netif_tx_start_all_queues(slave->ndev);
- }
- return 0;
-err:
- dev_err(priv->dev, "cannot set ring params, closing device\n");
+ dev_err(&ndev->dev, "cannot set ring params, closing device\n");
dev_close(ndev);
return ret;
}
diff --git a/drivers/net/ethernet/ti/davinci_cpdma.c b/drivers/net/ethernet/ti/davinci_cpdma.c
index d80bff1..7ecc6b7 100644
--- a/drivers/net/ethernet/ti/davinci_cpdma.c
+++ b/drivers/net/ethernet/ti/davinci_cpdma.c
@@ -835,8 +835,8 @@ EXPORT_SYMBOL_GPL(cpdma_chan_get_min_rate);
*/
int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
{
- struct cpdma_ctlr *ctlr = ch->ctlr;
unsigned long flags, ch_flags;
+ struct cpdma_ctlr *ctlr;
int ret, prio_mode;
u32 rmask;
@@ -846,6 +846,7 @@ int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
if (ch->rate == rate)
return rate;
+ ctlr = ch->ctlr;
spin_lock_irqsave(&ctlr->lock, flags);
spin_lock_irqsave(&ch->lock, ch_flags);
diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c
index 481c7bf..64d5527 100644
--- a/drivers/net/ethernet/ti/davinci_emac.c
+++ b/drivers/net/ethernet/ti/davinci_emac.c
@@ -1295,7 +1295,7 @@ static int emac_poll(struct napi_struct *napi, int budget)
&emac_rxhost_errcodes[cause][0], ch);
}
} else if (num_rx_pkts < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, num_rx_pkts);
emac_int_enable(priv);
}
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 68a75cc..ebab1473 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -969,7 +969,7 @@ static int netcp_rx_poll(struct napi_struct *napi, int budget)
netcp_rxpool_refill(netcp);
if (packets < budget) {
- napi_complete(&netcp->rx_napi);
+ napi_complete_done(&netcp->rx_napi, packets);
knav_queue_enable_notify(netcp->rx_queue);
}
@@ -1909,7 +1909,7 @@ static int netcp_setup_tc(struct net_device *dev, u32 handle, __be16 proto,
return 0;
}
-static struct rtnl_link_stats64 *
+static void
netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
{
struct netcp_intf *netcp = netdev_priv(ndev);
@@ -1938,8 +1938,6 @@ netcp_get_stats(struct net_device *ndev, struct rtnl_link_stats64 *stats)
stats->rx_errors = p->rx_errors;
stats->rx_dropped = p->rx_dropped;
stats->tx_dropped = p->tx_dropped;
-
- return stats;
}
static const struct net_device_ops netcp_netdev_ops = {
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 2255f9a..7c634bc 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -681,7 +681,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
/* There are no packets left. */
- napi_complete(&info_mpipe->napi);
+ napi_complete_done(&info_mpipe->napi, work);
md = &mpipe_data[instance];
/* Re-enable hypervisor interrupts. */
diff --git a/drivers/net/ethernet/tile/tilepro.c b/drivers/net/ethernet/tile/tilepro.c
index 30cfea6..49ccee4 100644
--- a/drivers/net/ethernet/tile/tilepro.c
+++ b/drivers/net/ethernet/tile/tilepro.c
@@ -842,7 +842,7 @@ static int tile_net_poll(struct napi_struct *napi, int budget)
}
}
- napi_complete(&info->napi);
+ napi_complete_done(&info->napi, work);
if (!priv->active)
goto done;
@@ -2090,12 +2090,8 @@ static void tile_net_get_stats64(struct net_device *dev,
stats->tx_bytes = tx_bytes;
stats->rx_errors = rx_errors;
stats->rx_dropped = rx_dropped;
-
- return stats;
}
-
-
/*
* Change the Ethernet Address of the NIC.
*
diff --git a/drivers/net/ethernet/toshiba/ps3_gelic_net.c b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
index 345316c..7201331 100644
--- a/drivers/net/ethernet/toshiba/ps3_gelic_net.c
+++ b/drivers/net/ethernet/toshiba/ps3_gelic_net.c
@@ -1109,7 +1109,7 @@ static int gelic_net_poll(struct napi_struct *napi, int budget)
}
if (packets_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, packets_done);
gelic_card_rx_irq_on(card);
}
return packets_done;
diff --git a/drivers/net/ethernet/toshiba/spider_net.c b/drivers/net/ethernet/toshiba/spider_net.c
index cb341df..cec9e70 100644
--- a/drivers/net/ethernet/toshiba/spider_net.c
+++ b/drivers/net/ethernet/toshiba/spider_net.c
@@ -1270,7 +1270,7 @@ static int spider_net_poll(struct napi_struct *napi, int budget)
/* if all packets are in the stack, enable interrupts and return 0 */
/* if not, return 1 */
if (packets_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, packets_done);
spider_net_rx_irq_on(card);
card->ignore_rx_ramfull = 0;
}
diff --git a/drivers/net/ethernet/toshiba/tc35815.c b/drivers/net/ethernet/toshiba/tc35815.c
index 3be61ed..a45f98f 100644
--- a/drivers/net/ethernet/toshiba/tc35815.c
+++ b/drivers/net/ethernet/toshiba/tc35815.c
@@ -1638,7 +1638,7 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
spin_unlock(&lp->rx_lock);
if (received < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, received);
/* enable interrupts */
tc_writel(tc_readl(&tr->DMA_Ctl) & ~DMA_IntMask, &tr->DMA_Ctl);
}
diff --git a/drivers/net/ethernet/tundra/tsi108_eth.c b/drivers/net/ethernet/tundra/tsi108_eth.c
index f153ad7..c558399 100644
--- a/drivers/net/ethernet/tundra/tsi108_eth.c
+++ b/drivers/net/ethernet/tundra/tsi108_eth.c
@@ -887,7 +887,7 @@ static int tsi108_poll(struct napi_struct *napi, int budget)
if (num_received < budget) {
data->rxpending = 0;
- napi_complete(napi);
+ napi_complete_done(napi, num_received);
TSI_WRITE(TSI108_EC_INTMASK,
TSI_READ(TSI108_EC_INTMASK)
diff --git a/drivers/net/ethernet/via/via-rhine.c b/drivers/net/ethernet/via/via-rhine.c
index 453a1fa..c068c58 100644
--- a/drivers/net/ethernet/via/via-rhine.c
+++ b/drivers/net/ethernet/via/via-rhine.c
@@ -861,7 +861,7 @@ static int rhine_napipoll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
iowrite16(enable_mask, ioaddr + IntrEnable);
mmiowb();
}
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index 4716e60..d088788 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -2160,7 +2160,7 @@ static int velocity_poll(struct napi_struct *napi, int budget)
velocity_tx_srv(vptr);
/* If budget not fully consumed, exit the polling mode */
if (rx_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_done);
mac_enable_int(vptr->mac_regs);
}
spin_unlock_irqrestore(&vptr->lock, flags);
diff --git a/drivers/net/ethernet/wiznet/w5100.c b/drivers/net/ethernet/wiznet/w5100.c
index e1296ef..f90267f 100644
--- a/drivers/net/ethernet/wiznet/w5100.c
+++ b/drivers/net/ethernet/wiznet/w5100.c
@@ -915,7 +915,7 @@ static int w5100_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
w5100_enable_intr(priv);
}
diff --git a/drivers/net/ethernet/wiznet/w5300.c b/drivers/net/ethernet/wiznet/w5300.c
index 724fabd..56ae573 100644
--- a/drivers/net/ethernet/wiznet/w5300.c
+++ b/drivers/net/ethernet/wiznet/w5300.c
@@ -417,7 +417,7 @@ static int w5300_napi_poll(struct napi_struct *napi, int budget)
}
if (rx_count < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
w5300_write(priv, W5300_IMR, IR_S0);
mmiowb();
}
diff --git a/drivers/net/ethernet/xilinx/xilinx_emaclite.c b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
index 97dcc0bd5..e3070fd8 100644
--- a/drivers/net/ethernet/xilinx/xilinx_emaclite.c
+++ b/drivers/net/ethernet/xilinx/xilinx_emaclite.c
@@ -1051,7 +1051,7 @@ static bool get_bool(struct platform_device *ofdev, const char *s)
}
}
-static struct net_device_ops xemaclite_netdev_ops;
+static const struct net_device_ops xemaclite_netdev_ops;
/**
* xemaclite_of_probe - Probe method for the Emaclite device.
@@ -1205,7 +1205,7 @@ xemaclite_poll_controller(struct net_device *ndev)
}
#endif
-static struct net_device_ops xemaclite_netdev_ops = {
+static const struct net_device_ops xemaclite_netdev_ops = {
.ndo_open = xemaclite_open,
.ndo_stop = xemaclite_close,
.ndo_start_xmit = xemaclite_send,
diff --git a/drivers/net/fjes/fjes_main.c b/drivers/net/fjes/fjes_main.c
index 5028001..b75d9cd 100644
--- a/drivers/net/fjes/fjes_main.c
+++ b/drivers/net/fjes/fjes_main.c
@@ -1155,7 +1155,7 @@ static int fjes_poll(struct napi_struct *napi, int budget)
}
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
if (adapter->unset_rx_last) {
adapter->rx_last_jiffies = jiffies;
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index 8b6810b..bda0c64 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -69,7 +69,6 @@ struct gtp_dev {
struct socket *sock0;
struct socket *sock1u;
- struct net *net;
struct net_device *dev;
unsigned int hash_size;
@@ -184,7 +183,6 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
sizeof(struct gtp0_header);
struct gtp0_header *gtp0;
struct pdp_ctx *pctx;
- int ret = 0;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -197,26 +195,19 @@ static int gtp0_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
if (gtp0->type != GTP_TPDU)
return 1;
- rcu_read_lock();
pctx = gtp0_pdp_find(gtp, be64_to_cpu(gtp0->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- ret = -1;
- goto out_rcu;
+ return 1;
}
if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- ret = -1;
- goto out_rcu;
+ return 1;
}
- rcu_read_unlock();
/* Get rid of the GTP + UDP headers. */
return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
- rcu_read_unlock();
- return ret;
}
static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
@@ -226,7 +217,6 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
sizeof(struct gtp1_header);
struct gtp1_header *gtp1;
struct pdp_ctx *pctx;
- int ret = 0;
if (!pskb_may_pull(skb, hdrlen))
return -1;
@@ -254,26 +244,19 @@ static int gtp1u_udp_encap_recv(struct gtp_dev *gtp, struct sk_buff *skb,
gtp1 = (struct gtp1_header *)(skb->data + sizeof(struct udphdr));
- rcu_read_lock();
pctx = gtp1_pdp_find(gtp, ntohl(gtp1->tid));
if (!pctx) {
netdev_dbg(gtp->dev, "No PDP ctx to decap skb=%p\n", skb);
- ret = -1;
- goto out_rcu;
+ return 1;
}
if (!gtp_check_src_ms(skb, pctx, hdrlen)) {
netdev_dbg(gtp->dev, "No PDP ctx for this MS\n");
- ret = -1;
- goto out_rcu;
+ return 1;
}
- rcu_read_unlock();
/* Get rid of the GTP + UDP headers. */
return iptunnel_pull_header(skb, hdrlen, skb->protocol, xnet);
-out_rcu:
- rcu_read_unlock();
- return ret;
}
static void gtp_encap_disable(struct gtp_dev *gtp)
@@ -316,7 +299,7 @@ static int gtp_encap_recv(struct sock *sk, struct sk_buff *skb)
netdev_dbg(gtp->dev, "encap_recv sk=%p\n", sk);
- xnet = !net_eq(gtp->net, dev_net(gtp->dev));
+ xnet = !net_eq(sock_net(sk), dev_net(gtp->dev));
switch (udp_sk(sk)->encap_type) {
case UDP_ENCAP_GTP0:
@@ -612,7 +595,7 @@ static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
pktinfo.fl4.saddr, pktinfo.fl4.daddr,
pktinfo.iph->tos,
ip4_dst_hoplimit(&pktinfo.rt->dst),
- htons(IP_DF),
+ 0,
pktinfo.gtph_port, pktinfo.gtph_port,
true, false);
break;
@@ -658,7 +641,7 @@ static void gtp_link_setup(struct net_device *dev)
static int gtp_hashtable_new(struct gtp_dev *gtp, int hsize);
static void gtp_hashtable_free(struct gtp_dev *gtp);
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net);
+ int fd_gtp0, int fd_gtp1);
static int gtp_newlink(struct net *src_net, struct net_device *dev,
struct nlattr *tb[], struct nlattr *data[])
@@ -675,7 +658,7 @@ static int gtp_newlink(struct net *src_net, struct net_device *dev,
fd0 = nla_get_u32(data[IFLA_GTP_FD0]);
fd1 = nla_get_u32(data[IFLA_GTP_FD1]);
- err = gtp_encap_enable(dev, gtp, fd0, fd1, src_net);
+ err = gtp_encap_enable(dev, gtp, fd0, fd1);
if (err < 0)
goto out_err;
@@ -821,7 +804,7 @@ static void gtp_hashtable_free(struct gtp_dev *gtp)
}
static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
- int fd_gtp0, int fd_gtp1, struct net *src_net)
+ int fd_gtp0, int fd_gtp1)
{
struct udp_tunnel_sock_cfg tuncfg = {NULL};
struct socket *sock0, *sock1u;
@@ -858,7 +841,6 @@ static int gtp_encap_enable(struct net_device *dev, struct gtp_dev *gtp,
gtp->sock0 = sock0;
gtp->sock1u = sock1u;
- gtp->net = src_net;
tuncfg.sk_user_data = gtp;
tuncfg.encap_rcv = gtp_encap_recv;
@@ -1376,3 +1358,4 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_DESCRIPTION("Interface driver for GTP encapsulated traffic");
MODULE_ALIAS_RTNL_LINK("gtp");
+MODULE_ALIAS_GENL_FAMILY("gtp");
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 3958ada..d3e73ac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -34,6 +34,7 @@
#define NDIS_OBJECT_TYPE_RSS_CAPABILITIES 0x88
#define NDIS_OBJECT_TYPE_RSS_PARAMETERS 0x89
+#define NDIS_OBJECT_TYPE_OFFLOAD 0xa7
#define NDIS_RECEIVE_SCALE_CAPABILITIES_REVISION_2 2
#define NDIS_RECEIVE_SCALE_PARAMETERS_REVISION_2 2
@@ -118,6 +119,7 @@ struct ndis_recv_scale_param { /* NDIS_RECEIVE_SCALE_PARAMETERS */
/* Fwd declaration */
struct ndis_tcp_ip_checksum_info;
+struct ndis_pkt_8021q_info;
/*
* Represent netvsc packet which contains 1 RNDIS and 1 ethernet frame
@@ -135,8 +137,10 @@ struct hv_netvsc_packet {
u8 page_buf_cnt;
u16 q_idx;
- u32 send_buf_index;
+ u16 total_packets;
+ u32 total_bytes;
+ u32 send_buf_index;
u32 total_data_buflen;
};
@@ -155,6 +159,8 @@ enum rndis_device_state {
RNDIS_DEV_DATAINITIALIZED,
};
+#define NETVSC_HASH_KEYLEN 40
+
struct rndis_device {
struct net_device *ndev;
@@ -165,14 +171,17 @@ struct rndis_device {
spinlock_t request_lock;
struct list_head req_list;
- unsigned char hw_mac_adr[ETH_ALEN];
+ u8 hw_mac_adr[ETH_ALEN];
+ u8 rss_key[NETVSC_HASH_KEYLEN];
+ u16 ind_table[ITAB_NUM];
};
/* Interface */
struct rndis_message;
struct netvsc_device;
-int netvsc_device_add(struct hv_device *device, void *additional_info);
+int netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *info);
void netvsc_device_remove(struct hv_device *device);
int netvsc_send(struct hv_device *device,
struct hv_netvsc_packet *packet,
@@ -181,22 +190,25 @@ int netvsc_send(struct hv_device *device,
struct sk_buff *skb);
void netvsc_linkstatus_callback(struct hv_device *device_obj,
struct rndis_message *resp);
-int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet,
- void **data,
- struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci);
+int netvsc_recv_callback(struct net_device *net,
+ struct vmbus_channel *channel,
+ void *data, u32 len,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan);
void netvsc_channel_cb(void *context);
int rndis_filter_open(struct netvsc_device *nvdev);
int rndis_filter_close(struct netvsc_device *nvdev);
int rndis_filter_device_add(struct hv_device *dev,
- void *additional_info);
-void rndis_filter_device_remove(struct hv_device *dev);
-int rndis_filter_receive(struct hv_device *dev,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel);
+ struct netvsc_device_info *info);
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *nvdev);
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *key, int num_queue);
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct hv_device *dev,
+ struct vmbus_channel *channel,
+ void *data, u32 buflen);
int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter);
int rndis_filter_set_device_mac(struct net_device *ndev, char *mac);
@@ -622,6 +634,7 @@ struct nvsp_message {
#define VRSS_SEND_TAB_SIZE 16
#define VRSS_CHANNEL_MAX 64
+#define VRSS_CHANNEL_DEFAULT 8
#define RNDIS_MAX_PKT_DEFAULT 8
#define RNDIS_PKT_ALIGN_DEFAULT 8
@@ -685,8 +698,7 @@ struct net_device_context {
struct work_struct work;
u32 msg_enable; /* debug level */
- struct netvsc_stats __percpu *tx_stats;
- struct netvsc_stats __percpu *rx_stats;
+ u32 tx_checksum_mask;
/* Ethtool settings */
u8 duplex;
@@ -705,11 +717,21 @@ struct net_device_context {
u32 vf_serial;
};
+/* Per channel data */
+struct netvsc_channel {
+ struct vmbus_channel *channel;
+ struct multi_send_data msd;
+ struct multi_recv_comp mrc;
+ atomic_t queue_sends;
+
+ struct netvsc_stats tx_stats;
+ struct netvsc_stats rx_stats;
+};
+
/* Per netvsc device */
struct netvsc_device {
u32 nvsp_version;
- atomic_t num_outstanding_sends;
wait_queue_head_t wait_drain;
bool destroy;
@@ -735,32 +757,25 @@ struct netvsc_device {
struct nvsp_message revoke_packet;
- struct vmbus_channel *chn_table[VRSS_CHANNEL_MAX];
u32 send_table[VRSS_SEND_TAB_SIZE];
u32 max_chn;
u32 num_chn;
spinlock_t sc_lock; /* Protects num_sc_offered variable */
u32 num_sc_offered;
- atomic_t queue_sends[VRSS_CHANNEL_MAX];
/* Holds rndis device info */
void *extension;
int ring_size;
- /* The primary channel callback buffer */
- unsigned char *cb_buffer;
- /* The sub channel callback buffer */
- unsigned char *sub_cb_buf;
-
- struct multi_send_data msd[VRSS_CHANNEL_MAX];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
- struct multi_recv_comp mrc[VRSS_CHANNEL_MAX];
atomic_t num_outstanding_recvs;
atomic_t open_cnt;
+
+ struct netvsc_channel chan_table[VRSS_CHANNEL_MAX];
};
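The per-queue fields that used to live in parallel [VRSS_CHANNEL_MAX] arrays (chn_table, queue_sends, msd, mrc) are now grouped into one struct netvsc_channel per queue, so a single pointer reaches all of a channel's state. A sketch of the resulting access pattern; netvsc_channel_of() is a hypothetical helper, not part of the patch:

static inline struct netvsc_channel *
netvsc_channel_of(struct netvsc_device *nvdev, u16 q_idx)
{
	/* one dereference now reaches channel, stats, msd and mrc */
	return &nvdev->chan_table[q_idx];
}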
static inline struct netvsc_device *
@@ -939,7 +954,7 @@ struct ndis_pkt_8021q_info {
};
};
-struct ndis_oject_header {
+struct ndis_object_header {
u8 type;
u8 revision;
u16 size;
@@ -947,6 +962,9 @@ struct ndis_oject_header {
#define NDIS_OBJECT_TYPE_DEFAULT 0x80
#define NDIS_OFFLOAD_PARAMETERS_REVISION_3 3
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_2 2
+#define NDIS_OFFLOAD_PARAMETERS_REVISION_1 1
+
#define NDIS_OFFLOAD_PARAMETERS_NO_CHANGE 0
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_DISABLED 1
#define NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED 2
@@ -973,8 +991,135 @@ struct ndis_oject_header {
#define OID_TCP_CONNECTION_OFFLOAD_HARDWARE_CAPABILITIES 0xFC01020F /* query */
#define OID_OFFLOAD_ENCAPSULATION 0x0101010A /* set/query */
+/*
+ * OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES
+ * ndis_type: NDIS_OBJTYPE_OFFLOAD
+ * (i.e. NDIS_OBJECT_TYPE_OFFLOAD as defined above)
+ */
+
+#define NDIS_OFFLOAD_ENCAP_NONE 0x0000
+#define NDIS_OFFLOAD_ENCAP_NULL 0x0001
+#define NDIS_OFFLOAD_ENCAP_8023 0x0002
+#define NDIS_OFFLOAD_ENCAP_8023PQ 0x0004
+#define NDIS_OFFLOAD_ENCAP_8023PQ_OOB 0x0008
+#define NDIS_OFFLOAD_ENCAP_RFC1483 0x0010
+
+struct ndis_csum_offload {
+ u32 ip4_txenc;
+ u32 ip4_txcsum;
+#define NDIS_TXCSUM_CAP_IP4OPT 0x001
+#define NDIS_TXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP4 0x010
+#define NDIS_TXCSUM_CAP_UDP4 0x040
+#define NDIS_TXCSUM_CAP_IP4 0x100
+
+#define NDIS_TXCSUM_ALL_TCP4 (NDIS_TXCSUM_CAP_TCP4 | NDIS_TXCSUM_CAP_TCP4OPT)
+
+ u32 ip4_rxenc;
+ u32 ip4_rxcsum;
+#define NDIS_RXCSUM_CAP_IP4OPT 0x001
+#define NDIS_RXCSUM_CAP_TCP4OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP4 0x010
+#define NDIS_RXCSUM_CAP_UDP4 0x040
+#define NDIS_RXCSUM_CAP_IP4 0x100
+ u32 ip6_txenc;
+ u32 ip6_txcsum;
+#define NDIS_TXCSUM_CAP_IP6EXT 0x001
+#define NDIS_TXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_TXCSUM_CAP_TCP6 0x010
+#define NDIS_TXCSUM_CAP_UDP6 0x040
+ u32 ip6_rxenc;
+ u32 ip6_rxcsum;
+#define NDIS_RXCSUM_CAP_IP6EXT 0x001
+#define NDIS_RXCSUM_CAP_TCP6OPT 0x004
+#define NDIS_RXCSUM_CAP_TCP6 0x010
+#define NDIS_RXCSUM_CAP_UDP6 0x040
+
+#define NDIS_TXCSUM_ALL_TCP6 (NDIS_TXCSUM_CAP_TCP6 | \
+ NDIS_TXCSUM_CAP_TCP6OPT | \
+ NDIS_TXCSUM_CAP_IP6EXT)
+};
+
+struct ndis_lsov1_offload {
+ u32 encap;
+ u32 maxsize;
+ u32 minsegs;
+ u32 opts;
+};
+
+struct ndis_ipsecv1_offload {
+ u32 encap;
+ u32 ah_esp;
+ u32 xport_tun;
+ u32 ip4_opts;
+ u32 flags;
+ u32 ip4_ah;
+ u32 ip4_esp;
+};
+
+struct ndis_lsov2_offload {
+ u32 ip4_encap;
+ u32 ip4_maxsz;
+ u32 ip4_minsg;
+ u32 ip6_encap;
+ u32 ip6_maxsz;
+ u32 ip6_minsg;
+ u32 ip6_opts;
+#define NDIS_LSOV2_CAP_IP6EXT 0x001
+#define NDIS_LSOV2_CAP_TCP6OPT 0x004
+
+#define NDIS_LSOV2_CAP_IP6 (NDIS_LSOV2_CAP_IP6EXT | \
+ NDIS_LSOV2_CAP_TCP6OPT)
+};
+
+struct ndis_ipsecv2_offload {
+ u32 encap;
+ u16 ip6;
+ u16 ip4opt;
+ u16 ip6ext;
+ u16 ah;
+ u16 esp;
+ u16 ah_esp;
+ u16 xport;
+ u16 tun;
+ u16 xport_tun;
+ u16 lso;
+ u16 extseq;
+ u32 udp_esp;
+ u32 auth;
+ u32 crypto;
+ u32 sa_caps;
+};
+
+struct ndis_rsc_offload {
+ u16 ip4;
+ u16 ip6;
+};
+
+struct ndis_encap_offload {
+ u32 flags;
+ u32 maxhdr;
+};
+
+struct ndis_offload {
+ struct ndis_object_header header;
+ struct ndis_csum_offload csum;
+ struct ndis_lsov1_offload lsov1;
+ struct ndis_ipsecv1_offload ipsecv1;
+ struct ndis_lsov2_offload lsov2;
+ u32 flags;
+ /* NDIS >= 6.1 */
+ struct ndis_ipsecv2_offload ipsecv2;
+ /* NDIS >= 6.30 */
+ struct ndis_rsc_offload rsc;
+ struct ndis_encap_offload encap_gre;
+};
+
+#define NDIS_OFFLOAD_SIZE sizeof(struct ndis_offload)
+#define NDIS_OFFLOAD_SIZE_6_0 offsetof(struct ndis_offload, ipsecv2)
+#define NDIS_OFFLOAD_SIZE_6_1 offsetof(struct ndis_offload, rsc)
+
struct ndis_offload_params {
- struct ndis_oject_header header;
+ struct ndis_object_header header;
u8 ip_v4_csum;
u8 tcp_ip_v4_csum;
u8 udp_ip_v4_csum;
@@ -1301,15 +1446,10 @@ struct rndis_message {
#define NDIS_PACKET_TYPE_FUNCTIONAL 0x00000400
#define NDIS_PACKET_TYPE_MAC_FRAME 0x00000800
-#define INFO_IPV4 2
-#define INFO_IPV6 4
-#define INFO_TCP 2
-#define INFO_UDP 4
-
#define TRANSPORT_INFO_NOT_IP 0
-#define TRANSPORT_INFO_IPV4_TCP ((INFO_IPV4 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV4_UDP ((INFO_IPV4 << 16) | INFO_UDP)
-#define TRANSPORT_INFO_IPV6_TCP ((INFO_IPV6 << 16) | INFO_TCP)
-#define TRANSPORT_INFO_IPV6_UDP ((INFO_IPV6 << 16) | INFO_UDP)
+#define TRANSPORT_INFO_IPV4_TCP 0x01
+#define TRANSPORT_INFO_IPV4_UDP 0x02
+#define TRANSPORT_INFO_IPV6_TCP 0x10
+#define TRANSPORT_INFO_IPV6_UDP 0x20
#endif /* _HYPERV_NET_H */
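The NDIS_OFFLOAD_SIZE_6_x macros use offsetof() to truncate struct ndis_offload at the field where each older NDIS revision ends. A sketch of the version-gated sizing these macros enable; the NVSP checks mirror the rndis_filter.c hunk later in this patch:

static size_t ndis_offload_size(u32 nvsp_version)
{
	if (nvsp_version >= NVSP_PROTOCOL_VERSION_5)
		return NDIS_OFFLOAD_SIZE;	/* full NDIS 6.30 object */
	if (nvsp_version >= NVSP_PROTOCOL_VERSION_4)
		return NDIS_OFFLOAD_SIZE_6_1;	/* stops before 'rsc' */
	return NDIS_OFFLOAD_SIZE_6_0;		/* stops before 'ipsecv2' */
}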
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 5a1cc08..5cfdb1a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -67,14 +67,8 @@ static struct netvsc_device *alloc_net_device(void)
if (!net_device)
return NULL;
- net_device->cb_buffer = kzalloc(NETVSC_PACKET_SIZE, GFP_KERNEL);
- if (!net_device->cb_buffer) {
- kfree(net_device);
- return NULL;
- }
-
- net_device->mrc[0].buf = vzalloc(NETVSC_RECVSLOT_MAX *
- sizeof(struct recv_comp_data));
+ net_device->chan_table[0].mrc.buf
+ = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
@@ -91,35 +85,28 @@ static void free_netvsc_device(struct netvsc_device *nvdev)
int i;
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- vfree(nvdev->mrc[i].buf);
+ vfree(nvdev->chan_table[i].mrc.buf);
- kfree(nvdev->cb_buffer);
kfree(nvdev);
}
-static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
-{
- struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
- if (net_device && net_device->destroy)
- net_device = NULL;
+static inline bool netvsc_channel_idle(const struct netvsc_device *net_device,
+ u16 q_idx)
+{
+ const struct netvsc_channel *nvchan = &net_device->chan_table[q_idx];
- return net_device;
+ return atomic_read(&net_device->num_outstanding_recvs) == 0 &&
+ atomic_read(&nvchan->queue_sends) == 0;
}
-static struct netvsc_device *get_inbound_net_device(struct hv_device *device)
+static struct netvsc_device *get_outbound_net_device(struct hv_device *device)
{
struct netvsc_device *net_device = hv_device_to_netvsc_device(device);
- if (!net_device)
- goto get_in_err;
-
- if (net_device->destroy &&
- atomic_read(&net_device->num_outstanding_sends) == 0 &&
- atomic_read(&net_device->num_outstanding_recvs) == 0)
+ if (net_device && net_device->destroy)
net_device = NULL;
-get_in_err:
return net_device;
}
@@ -584,7 +571,6 @@ void netvsc_device_remove(struct hv_device *device)
vmbus_close(device->channel);
/* Release all resources */
- vfree(net_device->sub_cb_buf);
free_netvsc_device(net_device);
}
@@ -620,29 +606,35 @@ static void netvsc_send_tx_complete(struct netvsc_device *net_device,
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct vmbus_channel *channel = device->channel;
- int num_outstanding_sends;
u16 q_idx = 0;
int queue_sends;
/* Notify the layer above us */
if (likely(skb)) {
- struct hv_netvsc_packet *nvsc_packet
+ const struct hv_netvsc_packet *packet
= (struct hv_netvsc_packet *)skb->cb;
- u32 send_index = nvsc_packet->send_buf_index;
+ u32 send_index = packet->send_buf_index;
+ struct netvsc_stats *tx_stats;
if (send_index != NETVSC_INVALID_INDEX)
netvsc_free_send_slot(net_device, send_index);
- q_idx = nvsc_packet->q_idx;
+ q_idx = packet->q_idx;
channel = incoming_channel;
+ tx_stats = &net_device->chan_table[q_idx].tx_stats;
+
+ u64_stats_update_begin(&tx_stats->syncp);
+ tx_stats->packets += packet->total_packets;
+ tx_stats->bytes += packet->total_bytes;
+ u64_stats_update_end(&tx_stats->syncp);
+
dev_consume_skb_any(skb);
}
- num_outstanding_sends =
- atomic_dec_return(&net_device->num_outstanding_sends);
- queue_sends = atomic_dec_return(&net_device->queue_sends[q_idx]);
+ queue_sends =
+ atomic_dec_return(&net_device->chan_table[q_idx].queue_sends);
- if (net_device->destroy && num_outstanding_sends == 0)
+ if (net_device->destroy && queue_sends == 0)
wake_up(&net_device->wait_drain);
if (netif_tx_queue_stopped(netdev_get_tx_queue(ndev, q_idx)) &&
@@ -688,27 +680,15 @@ static void netvsc_send_completion(struct netvsc_device *net_device,
static u32 netvsc_get_next_send_section(struct netvsc_device *net_device)
{
- unsigned long index;
- u32 max_words = net_device->map_words;
- unsigned long *map_addr = (unsigned long *)net_device->send_section_map;
- u32 section_cnt = net_device->send_section_cnt;
- int ret_val = NETVSC_INVALID_INDEX;
- int i;
- int prev_val;
-
- for (i = 0; i < max_words; i++) {
- if (!~(map_addr[i]))
- continue;
- index = ffz(map_addr[i]);
- prev_val = sync_test_and_set_bit(index, &map_addr[i]);
- if (prev_val)
- continue;
- if ((index + (i * BITS_PER_LONG)) >= section_cnt)
- break;
- ret_val = (index + (i * BITS_PER_LONG));
- break;
+ unsigned long *map_addr = net_device->send_section_map;
+ unsigned int i;
+
+ for_each_clear_bit(i, map_addr, net_device->map_words) {
+ if (sync_test_and_set_bit(i, map_addr) == 0)
+ return i;
}
- return ret_val;
+
+ return NETVSC_INVALID_INDEX;
}
static u32 netvsc_copy_to_send_buf(struct netvsc_device *net_device,
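The rewritten netvsc_get_next_send_section() is an instance of the lock-free bitmap allocator idiom: scan for a clear bit, claim it with an atomic test-and-set, and keep scanning if another CPU wins the race. A generic sketch of the claim/release pair (hypothetical names):

#include <linux/bitops.h>

static int claim_slot(unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for_each_clear_bit(i, map, nbits) {
		/* a racing claimer just makes us continue the scan */
		if (!test_and_set_bit(i, map))
			return i;
	}
	return -1;	/* every slot busy */
}

static void release_slot(unsigned long *map, unsigned int i)
{
	clear_bit(i, map);	/* atomic, pairs with the claim above */
}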
@@ -765,9 +745,11 @@ static inline int netvsc_send_pkt(
struct sk_buff *skb)
{
struct nvsp_message nvmsg;
- u16 q_idx = packet->q_idx;
- struct vmbus_channel *out_channel = net_device->chn_table[q_idx];
+ struct netvsc_channel *nvchan
+ = &net_device->chan_table[packet->q_idx];
+ struct vmbus_channel *out_channel = nvchan->channel;
struct net_device *ndev = hv_get_drvdata(device);
+ struct netdev_queue *txq = netdev_get_tx_queue(ndev, packet->q_idx);
u64 req_id;
int ret;
struct hv_page_buffer *pgbuf;
@@ -827,23 +809,14 @@ static inline int netvsc_send_pkt(
}
if (ret == 0) {
- atomic_inc(&net_device->num_outstanding_sends);
- atomic_inc(&net_device->queue_sends[q_idx]);
-
- if (ring_avail < RING_AVAIL_PERCENT_LOWATER) {
- netif_tx_stop_queue(netdev_get_tx_queue(ndev, q_idx));
+ atomic_inc_return(&nvchan->queue_sends);
- if (atomic_read(&net_device->
- queue_sends[q_idx]) < 1)
- netif_tx_wake_queue(netdev_get_tx_queue(
- ndev, q_idx));
- }
+ if (ring_avail < RING_AVAIL_PERCENT_LOWATER)
+ netif_tx_stop_queue(txq);
} else if (ret == -EAGAIN) {
- netif_tx_stop_queue(netdev_get_tx_queue(
- ndev, q_idx));
- if (atomic_read(&net_device->queue_sends[q_idx]) < 1) {
- netif_tx_wake_queue(netdev_get_tx_queue(
- ndev, q_idx));
+ netif_tx_stop_queue(txq);
+ if (atomic_read(&nvchan->queue_sends) < 1) {
+ netif_tx_wake_queue(txq);
ret = -ENOSPC;
}
} else {
@@ -874,8 +847,7 @@ int netvsc_send(struct hv_device *device,
{
struct netvsc_device *net_device;
int ret = 0;
- struct vmbus_channel *out_channel;
- u16 q_idx = packet->q_idx;
+ struct netvsc_channel *nvchan;
u32 pktlen = packet->total_data_buflen, msd_len = 0;
unsigned int section_index = NETVSC_INVALID_INDEX;
struct multi_send_data *msdp;
@@ -895,8 +867,7 @@ int netvsc_send(struct hv_device *device,
if (!net_device->send_section_map)
return -EAGAIN;
- out_channel = net_device->chn_table[q_idx];
-
+ nvchan = &net_device->chan_table[packet->q_idx];
packet->send_buf_index = NETVSC_INVALID_INDEX;
packet->cp_partial = false;
@@ -908,9 +879,8 @@ int netvsc_send(struct hv_device *device,
goto send_now;
}
- msdp = &net_device->msd[q_idx];
-
/* batch packets in send buffer if possible */
+ msdp = &nvchan->msd;
if (msdp->pkt)
msd_len = msdp->pkt->total_data_buflen;
@@ -950,6 +920,11 @@ int netvsc_send(struct hv_device *device,
packet->total_data_buflen += msd_len;
}
+ if (msdp->pkt) {
+ packet->total_packets += msdp->pkt->total_packets;
+ packet->total_bytes += msdp->pkt->total_bytes;
+ }
+
if (msdp->skb)
dev_consume_skb_any(msdp->skb);
@@ -1011,8 +986,9 @@ static int netvsc_send_recv_completion(struct vmbus_channel *channel,
static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
u32 *filled, u32 *avail)
{
- u32 first = nvdev->mrc[q_idx].first;
- u32 next = nvdev->mrc[q_idx].next;
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
+ u32 first = mrc->first;
+ u32 next = mrc->next;
*filled = (first > next) ? NETVSC_RECVSLOT_MAX - first + next :
next - first;
@@ -1024,26 +1000,26 @@ static inline void count_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx,
static inline struct recv_comp_data *read_recv_comp_slot(struct netvsc_device
*nvdev, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail;
- if (!nvdev->mrc[q_idx].buf)
+ if (unlikely(!mrc->buf))
return NULL;
count_recv_comp_slot(nvdev, q_idx, &filled, &avail);
if (!filled)
return NULL;
- return nvdev->mrc[q_idx].buf + nvdev->mrc[q_idx].first *
- sizeof(struct recv_comp_data);
+ return mrc->buf + mrc->first * sizeof(struct recv_comp_data);
}
/* Put the first filled slot back to available pool */
static inline void put_recv_comp_slot(struct netvsc_device *nvdev, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
int num_recv;
- nvdev->mrc[q_idx].first = (nvdev->mrc[q_idx].first + 1) %
- NETVSC_RECVSLOT_MAX;
+ mrc->first = (mrc->first + 1) % NETVSC_RECVSLOT_MAX;
num_recv = atomic_dec_return(&nvdev->num_outstanding_recvs);
@@ -1078,13 +1054,14 @@ static void netvsc_chk_recv_comp(struct netvsc_device *nvdev,
static inline struct recv_comp_data *get_recv_comp_slot(
struct netvsc_device *nvdev, struct vmbus_channel *channel, u16 q_idx)
{
+ struct multi_recv_comp *mrc = &nvdev->chan_table[q_idx].mrc;
u32 filled, avail, next;
struct recv_comp_data *rcd;
- if (!nvdev->recv_section)
+ if (unlikely(!nvdev->recv_section))
return NULL;
- if (!nvdev->mrc[q_idx].buf)
+ if (unlikely(!mrc->buf))
return NULL;
if (atomic_read(&nvdev->num_outstanding_recvs) >
@@ -1095,60 +1072,44 @@ static inline struct recv_comp_data *get_recv_comp_slot(
if (!avail)
return NULL;
- next = nvdev->mrc[q_idx].next;
- rcd = nvdev->mrc[q_idx].buf + next * sizeof(struct recv_comp_data);
- nvdev->mrc[q_idx].next = (next + 1) % NETVSC_RECVSLOT_MAX;
+ next = mrc->next;
+ rcd = mrc->buf + next * sizeof(struct recv_comp_data);
+ mrc->next = (next + 1) % NETVSC_RECVSLOT_MAX;
atomic_inc(&nvdev->num_outstanding_recvs);
return rcd;
}
-static void netvsc_receive(struct netvsc_device *net_device,
- struct vmbus_channel *channel,
- struct hv_device *device,
- struct vmpacket_descriptor *packet)
+static void netvsc_receive(struct net_device *ndev,
+ struct netvsc_device *net_device,
+ struct net_device_context *net_device_ctx,
+ struct hv_device *device,
+ struct vmbus_channel *channel,
+ struct vmtransfer_page_packet_header *vmxferpage_packet,
+ struct nvsp_message *nvsp)
{
- struct vmtransfer_page_packet_header *vmxferpage_packet;
- struct nvsp_message *nvsp_packet;
- struct hv_netvsc_packet nv_pkt;
- struct hv_netvsc_packet *netvsc_packet = &nv_pkt;
+ char *recv_buf = net_device->recv_buf;
u32 status = NVSP_STAT_SUCCESS;
int i;
int count = 0;
- struct net_device *ndev = hv_get_drvdata(device);
- void *data;
int ret;
struct recv_comp_data *rcd;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
- /*
- * All inbound packets other than send completion should be xfer page
- * packet
- */
- if (packet->type != VM_PKT_DATA_USING_XFER_PAGES) {
- netdev_err(ndev, "Unknown packet type received - %d\n",
- packet->type);
- return;
- }
-
- nvsp_packet = (struct nvsp_message *)((unsigned long)packet +
- (packet->offset8 << 3));
-
/* Make sure this is a valid nvsp packet */
- if (nvsp_packet->hdr.msg_type !=
- NVSP_MSG1_TYPE_SEND_RNDIS_PKT) {
- netdev_err(ndev, "Unknown nvsp packet type received-"
- " %d\n", nvsp_packet->hdr.msg_type);
+ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Unknown nvsp packet type received %u\n",
+ nvsp->hdr.msg_type);
return;
}
- vmxferpage_packet = (struct vmtransfer_page_packet_header *)packet;
-
- if (vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID) {
- netdev_err(ndev, "Invalid xfer page set id - "
- "expecting %x got %x\n", NETVSC_RECEIVE_BUFFER_ID,
- vmxferpage_packet->xfer_pageset_id);
+ if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "Invalid xfer page set id - expecting %x got %x\n",
+ NETVSC_RECEIVE_BUFFER_ID,
+ vmxferpage_packet->xfer_pageset_id);
return;
}
@@ -1156,18 +1117,16 @@ static void netvsc_receive(struct netvsc_device *net_device,
/* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */
for (i = 0; i < count; i++) {
- /* Initialize the netvsc packet */
- data = (void *)((unsigned long)net_device->
- recv_buf + vmxferpage_packet->ranges[i].byte_offset);
- netvsc_packet->total_data_buflen =
- vmxferpage_packet->ranges[i].byte_count;
+ void *data = recv_buf
+ + vmxferpage_packet->ranges[i].byte_offset;
+ u32 buflen = vmxferpage_packet->ranges[i].byte_count;
/* Pass it to the upper layer */
- status = rndis_filter_receive(device, netvsc_packet, &data,
- channel);
+ status = rndis_filter_receive(ndev, net_device, device,
+ channel, data, buflen);
}
- if (!net_device->mrc[q_idx].buf) {
+ if (!net_device->chan_table[q_idx].mrc.buf) {
ret = netvsc_send_recv_completion(channel,
vmxferpage_packet->d.trans_id,
status);
@@ -1243,11 +1202,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
u64 request_id,
struct vmpacket_descriptor *desc)
{
- struct nvsp_message *nvmsg;
struct net_device_context *net_device_ctx = netdev_priv(ndev);
-
- nvmsg = (struct nvsp_message *)((unsigned long)
- desc + (desc->offset8 << 3));
+ struct nvsp_message *nvmsg
+ = (struct nvsp_message *)((unsigned long)desc
+ + (desc->offset8 << 3));
switch (desc->type) {
case VM_PKT_COMP:
@@ -1255,7 +1213,10 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
break;
case VM_PKT_DATA_USING_XFER_PAGES:
- netvsc_receive(net_device, channel, device, desc);
+ netvsc_receive(ndev, net_device, net_device_ctx,
+ device, channel,
+ (struct vmtransfer_page_packet_header *)desc,
+ nvmsg);
break;
case VM_PKT_DATA_INBAND:
@@ -1271,16 +1232,11 @@ static void netvsc_process_raw_pkt(struct hv_device *device,
void netvsc_channel_cb(void *context)
{
- int ret;
- struct vmbus_channel *channel = (struct vmbus_channel *)context;
+ struct vmbus_channel *channel = context;
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct hv_device *device;
struct netvsc_device *net_device;
- u32 bytes_recvd;
- u64 request_id;
struct vmpacket_descriptor *desc;
- unsigned char *buffer;
- int bufferlen = NETVSC_PACKET_SIZE;
struct net_device *ndev;
bool need_to_commit = false;
@@ -1289,68 +1245,25 @@ void netvsc_channel_cb(void *context)
else
device = channel->device_obj;
- net_device = get_inbound_net_device(device);
- if (!net_device)
- return;
ndev = hv_get_drvdata(device);
- buffer = get_per_channel_state(channel);
-
- do {
- desc = get_next_pkt_raw(channel);
- if (desc != NULL) {
- netvsc_process_raw_pkt(device,
- channel,
- net_device,
- ndev,
- desc->trans_id,
- desc);
-
- put_pkt_raw(channel, desc);
- need_to_commit = true;
- continue;
- }
- if (need_to_commit) {
- need_to_commit = false;
- commit_rd_index(channel);
- }
+ if (unlikely(!ndev))
+ return;
- ret = vmbus_recvpacket_raw(channel, buffer, bufferlen,
- &bytes_recvd, &request_id);
- if (ret == 0) {
- if (bytes_recvd > 0) {
- desc = (struct vmpacket_descriptor *)buffer;
- netvsc_process_raw_pkt(device,
- channel,
- net_device,
- ndev,
- request_id,
- desc);
- } else {
- /*
- * We are done for this pass.
- */
- break;
- }
-
- } else if (ret == -ENOBUFS) {
- if (bufferlen > NETVSC_PACKET_SIZE)
- kfree(buffer);
- /* Handle large packet */
- buffer = kmalloc(bytes_recvd, GFP_ATOMIC);
- if (buffer == NULL) {
- /* Try again next time around */
- netdev_err(ndev,
- "unable to allocate buffer of size "
- "(%d)!!\n", bytes_recvd);
- break;
- }
-
- bufferlen = bytes_recvd;
- }
- } while (1);
+ net_device = net_device_to_netvsc_device(ndev);
+ if (unlikely(net_device->destroy) &&
+ netvsc_channel_idle(net_device, q_idx))
+ return;
+
+ while ((desc = get_next_pkt_raw(channel)) != NULL) {
+ netvsc_process_raw_pkt(device, channel, net_device,
+ ndev, desc->trans_id, desc);
- if (bufferlen > NETVSC_PACKET_SIZE)
- kfree(buffer);
+ put_pkt_raw(channel, desc);
+ need_to_commit = true;
+ }
+
+ if (need_to_commit)
+ commit_rd_index(channel);
netvsc_chk_recv_comp(net_device, channel, q_idx);
}
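The callback now drains the VMBus ring with the zero-copy raw accessors instead of copying every packet into a per-channel buffer (which is why cb_buffer and sub_cb_buf could be deleted). The shape of the new loop, with a hypothetical handle_desc() standing in for netvsc_process_raw_pkt():

static void drain_channel(struct vmbus_channel *chan)
{
	struct vmpacket_descriptor *desc;
	bool commit = false;

	while ((desc = get_next_pkt_raw(chan)) != NULL) {
		handle_desc(chan, desc);	/* parse in ring memory */
		put_pkt_raw(chan, desc);	/* advance private index */
		commit = true;
	}

	/* publish the read index (and signal the host) only once */
	if (commit)
		commit_rd_index(chan);
}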
@@ -1359,11 +1272,11 @@ void netvsc_channel_cb(void *context)
* netvsc_device_add - Callback when the device belonging to this
* driver is added
*/
-int netvsc_device_add(struct hv_device *device, void *additional_info)
+int netvsc_device_add(struct hv_device *device,
+ const struct netvsc_device_info *device_info)
{
int i, ret = 0;
- int ring_size =
- ((struct netvsc_device_info *)additional_info)->ring_size;
+ int ring_size = device_info->ring_size;
struct netvsc_device *net_device;
struct net_device *ndev = hv_get_drvdata(device);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1374,8 +1287,6 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
net_device->ring_size = ring_size;
- set_per_channel_state(device->channel, net_device->cb_buffer);
-
/* Open the channel */
ret = vmbus_open(device->channel, ring_size * PAGE_SIZE,
ring_size * PAGE_SIZE, NULL, 0,
@@ -1394,7 +1305,7 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
* opened.
*/
for (i = 0; i < VRSS_CHANNEL_MAX; i++)
- net_device->chn_table[i] = device->channel;
+ net_device->chan_table[i].channel = device->channel;
/* Writing nvdev pointer unlocks netvsc_send(), make sure chn_table is
* populated.
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 05374fc..72b0c1f 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -42,21 +42,11 @@
#define RING_SIZE_MIN 64
#define LINKCHANGE_INT (2 * HZ)
-#define NETVSC_HW_FEATURES (NETIF_F_RXCSUM | \
- NETIF_F_SG | \
- NETIF_F_TSO | \
- NETIF_F_TSO6 | \
- NETIF_F_HW_CSUM)
-
-/* Restrict GSO size to account for NVGRE */
-#define NETVSC_GSO_MAX_SIZE 62768
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
-static int max_num_vrss_chns = 8;
-
static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
NETIF_MSG_LINK | NETIF_MSG_IFUP |
NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
@@ -145,7 +135,7 @@ static int netvsc_close(struct net_device *net)
while (true) {
aread = 0;
for (i = 0; i < nvdev->num_chn; i++) {
- chn = nvdev->chn_table[i];
+ chn = nvdev->chan_table[i].channel;
if (!chn)
continue;
@@ -201,22 +191,41 @@ static void *init_ppi_data(struct rndis_message *msg, u32 ppi_size,
return ppi;
}
+/*
+ * Select queue for transmit.
+ *
+ * If a valid queue has already been assigned, then use that.
+ * Otherwise compute tx queue based on hash and the send table.
+ *
+ * This is basically similar to default (__netdev_pick_tx) with the added step
+ * of using the host send_table when no other queue has been assigned.
+ *
+ * TODO support XPS - but get_xps_queue not exported
+ */
static u16 netvsc_select_queue(struct net_device *ndev, struct sk_buff *skb,
void *accel_priv, select_queue_fallback_t fallback)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
struct netvsc_device *nvsc_dev = net_device_ctx->nvdev;
- u32 hash;
- u16 q_idx = 0;
+ struct sock *sk = skb->sk;
+ int q_idx = sk_tx_queue_get(sk);
- if (nvsc_dev == NULL || ndev->real_num_tx_queues <= 1)
- return 0;
+ if (q_idx < 0 || skb->ooo_okay ||
+ q_idx >= ndev->real_num_tx_queues) {
+ u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);
+ int new_idx;
+
+ new_idx = nvsc_dev->send_table[hash]
+ % nvsc_dev->num_chn;
+
+ if (q_idx != new_idx && sk &&
+ sk_fullsock(sk) && rcu_access_pointer(sk->sk_dst_cache))
+ sk_tx_queue_set(sk, new_idx);
- hash = skb_get_hash(skb);
- q_idx = nvsc_dev->send_table[hash % VRSS_SEND_TAB_SIZE] %
- ndev->real_num_tx_queues;
+ q_idx = new_idx;
+ }
- if (!nvsc_dev->chn_table[q_idx])
+ if (unlikely(!nvsc_dev->chan_table[q_idx].channel))
q_idx = 0;
return q_idx;
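Stripped of the sk_tx_queue_get()/sk_tx_queue_set() caching, the selection above boils down to two table lookups; a sketch:

static u16 pick_tx_queue(struct net_device *ndev, struct sk_buff *skb,
			 const struct netvsc_device *nvdev)
{
	/* hash the flow into the host-published send table... */
	u16 hash = __skb_tx_hash(ndev, skb, VRSS_SEND_TAB_SIZE);

	/* ...then fold the entry into the active channel count */
	return nvdev->send_table[hash] % nvdev->num_chn;
}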
@@ -323,33 +332,25 @@ static int netvsc_get_slots(struct sk_buff *skb)
return slots + frag_slots;
}
-static u32 get_net_transport_info(struct sk_buff *skb, u32 *trans_off)
+static u32 net_checksum_info(struct sk_buff *skb)
{
- u32 ret_val = TRANSPORT_INFO_NOT_IP;
-
- if ((eth_hdr(skb)->h_proto != htons(ETH_P_IP)) &&
- (eth_hdr(skb)->h_proto != htons(ETH_P_IPV6))) {
- goto not_ip;
- }
+ if (skb->protocol == htons(ETH_P_IP)) {
+ struct iphdr *ip = ip_hdr(skb);
- *trans_off = skb_transport_offset(skb);
-
- if ((eth_hdr(skb)->h_proto == htons(ETH_P_IP))) {
- struct iphdr *iphdr = ip_hdr(skb);
-
- if (iphdr->protocol == IPPROTO_TCP)
- ret_val = TRANSPORT_INFO_IPV4_TCP;
- else if (iphdr->protocol == IPPROTO_UDP)
- ret_val = TRANSPORT_INFO_IPV4_UDP;
+ if (ip->protocol == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV4_TCP;
+ else if (ip->protocol == IPPROTO_UDP)
+ return TRANSPORT_INFO_IPV4_UDP;
} else {
- if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
- ret_val = TRANSPORT_INFO_IPV6_TCP;
+ struct ipv6hdr *ip6 = ipv6_hdr(skb);
+
+ if (ip6->nexthdr == IPPROTO_TCP)
+ return TRANSPORT_INFO_IPV6_TCP;
else if (ipv6_hdr(skb)->nexthdr == IPPROTO_UDP)
- ret_val = TRANSPORT_INFO_IPV6_UDP;
+ return TRANSPORT_INFO_IPV6_UDP;
}
-not_ip:
- return ret_val;
+ return TRANSPORT_INFO_NOT_IP;
}
static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
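net_checksum_info() returns a single TRANSPORT_INFO_* bit, which the CHECKSUM_PARTIAL path below ANDs against the new net_device_ctx->tx_checksum_mask. The mask itself is built from the host's reported capabilities; a sketch of that construction (an assumption about code outside this hunk, using the capability bits defined in hyperv_net.h):

static u32 build_tx_checksum_mask(const struct ndis_offload *hwcaps)
{
	u32 mask = 0;

	if (hwcaps->csum.ip4_txcsum & NDIS_TXCSUM_CAP_TCP4)
		mask |= TRANSPORT_INFO_IPV4_TCP;
	if (hwcaps->csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4)
		mask |= TRANSPORT_INFO_IPV4_UDP;
	if (hwcaps->csum.ip6_txcsum & NDIS_TXCSUM_CAP_TCP6)
		mask |= TRANSPORT_INFO_IPV6_TCP;
	if (hwcaps->csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6)
		mask |= TRANSPORT_INFO_IPV6_UDP;

	return mask;
}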
@@ -362,11 +363,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
struct rndis_packet *rndis_pkt;
u32 rndis_msg_size;
struct rndis_per_packet_info *ppi;
- struct ndis_tcp_ip_checksum_info *csum_info;
- int hdr_offset;
- u32 net_trans_info;
u32 hash;
- u32 skb_length;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
struct hv_page_buffer *pb = page_buf;
@@ -376,7 +373,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
* more pages we try linearizing it.
*/
- skb_length = skb->len;
num_data_pgs = netvsc_get_slots(skb) + 2;
if (unlikely(num_data_pgs > MAX_PAGE_BUFFER_COUNT)) {
@@ -409,6 +405,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
packet->q_idx = skb_get_queue_mapping(skb);
packet->total_data_buflen = skb->len;
+ packet->total_bytes = skb->len;
+ packet->total_packets = 1;
rndis_msg = (struct rndis_message *)skb->head;
@@ -445,13 +443,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
VLAN_PRIO_SHIFT;
}
- net_trans_info = get_net_transport_info(skb, &hdr_offset);
-
- /*
- * Setup the sendside checksum offload only if this is not a
- * GSO packet.
- */
- if ((net_trans_info & (INFO_TCP | INFO_UDP)) && skb_is_gso(skb)) {
+ if (skb_is_gso(skb)) {
struct ndis_tcp_lso_info *lso_info;
rndis_msg_size += NDIS_LSO_PPI_SIZE;
@@ -462,7 +454,7 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
ppi->ppi_offset);
lso_info->lso_v2_transmit.type = NDIS_TCP_LARGE_SEND_OFFLOAD_V2_TYPE;
- if (net_trans_info & (INFO_IPV4 << 16)) {
+ if (skb->protocol == htons(ETH_P_IP)) {
lso_info->lso_v2_transmit.ip_version =
NDIS_TCP_LARGE_SEND_OFFLOAD_IPV4;
ip_hdr(skb)->tot_len = 0;
@@ -478,10 +470,12 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0);
}
- lso_info->lso_v2_transmit.tcp_header_offset = hdr_offset;
+ lso_info->lso_v2_transmit.tcp_header_offset = skb_transport_offset(skb);
lso_info->lso_v2_transmit.mss = skb_shinfo(skb)->gso_size;
} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
- if (net_trans_info & INFO_TCP) {
+ if (net_checksum_info(skb) & net_device_ctx->tx_checksum_mask) {
+ struct ndis_tcp_ip_checksum_info *csum_info;
+
rndis_msg_size += NDIS_CSUM_PPI_SIZE;
ppi = init_ppi_data(rndis_msg, NDIS_CSUM_PPI_SIZE,
TCPIP_CHKSUM_PKTINFO);
@@ -489,15 +483,25 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
csum_info = (struct ndis_tcp_ip_checksum_info *)((void *)ppi +
ppi->ppi_offset);
- if (net_trans_info & (INFO_IPV4 << 16))
+ csum_info->transmit.tcp_header_offset = skb_transport_offset(skb);
+
+ if (skb->protocol == htons(ETH_P_IP)) {
csum_info->transmit.is_ipv4 = 1;
- else
+
+ if (ip_hdr(skb)->protocol == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ } else {
csum_info->transmit.is_ipv6 = 1;
- csum_info->transmit.tcp_checksum = 1;
- csum_info->transmit.tcp_header_offset = hdr_offset;
+ if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
+ csum_info->transmit.tcp_checksum = 1;
+ else
+ csum_info->transmit.udp_checksum = 1;
+ }
} else {
- /* UDP checksum (and other) offload is not supported. */
+ /* Can't do offload of this type of checksum */
if (skb_checksum_help(skb))
goto drop;
}
@@ -513,15 +517,8 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
skb_tx_timestamp(skb);
ret = netvsc_send(net_device_ctx->device_ctx, packet,
rndis_msg, &pb, skb);
- if (likely(ret == 0)) {
- struct netvsc_stats *tx_stats = this_cpu_ptr(net_device_ctx->tx_stats);
-
- u64_stats_update_begin(&tx_stats->syncp);
- tx_stats->packets++;
- tx_stats->bytes += skb_length;
- u64_stats_update_end(&tx_stats->syncp);
+ if (likely(ret == 0))
return NETDEV_TX_OK;
- }
if (ret == -EAGAIN) {
++net_device_ctx->eth_stats.tx_busy;
@@ -541,7 +538,6 @@ no_memory:
++net_device_ctx->eth_stats.tx_no_memory;
goto drop;
}
-
/*
* netvsc_linkstatus_callback - Link up/down notification
*/
@@ -593,13 +589,13 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
}
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
- struct hv_netvsc_packet *packet,
- struct ndis_tcp_ip_checksum_info *csum_info,
- void *data, u16 vlan_tci)
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan,
+ void *data, u32 buflen)
{
struct sk_buff *skb;
- skb = netdev_alloc_skb_ip_align(net, packet->total_data_buflen);
+ skb = netdev_alloc_skb_ip_align(net, buflen);
if (!skb)
return skb;
@@ -607,8 +603,7 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* Copy to skb. This copy is needed here since the memory pointed by
* hv_netvsc_packet cannot be deallocated
*/
- memcpy(skb_put(skb, packet->total_data_buflen), data,
- packet->total_data_buflen);
+ memcpy(skb_put(skb, buflen), data, buflen);
skb->protocol = eth_type_trans(skb, net);
@@ -625,9 +620,12 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
- if (vlan_tci & VLAN_TAG_PRESENT)
+ if (vlan) {
+ u16 vlan_tci = vlan->vlanid | (vlan->pri << VLAN_PRIO_SHIFT);
+
__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
vlan_tci);
+ }
return skb;
}
@@ -636,18 +634,19 @@ static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
* netvsc_recv_callback - Callback when we receive a packet from the
* "wire" on the specified device.
*/
-int netvsc_recv_callback(struct hv_device *device_obj,
- struct hv_netvsc_packet *packet,
- void **data,
- struct ndis_tcp_ip_checksum_info *csum_info,
- struct vmbus_channel *channel,
- u16 vlan_tci)
+int netvsc_recv_callback(struct net_device *net,
+ struct vmbus_channel *channel,
+ void *data, u32 len,
+ const struct ndis_tcp_ip_checksum_info *csum_info,
+ const struct ndis_pkt_8021q_info *vlan)
{
- struct net_device *net = hv_get_drvdata(device_obj);
struct net_device_context *net_device_ctx = netdev_priv(net);
+ struct netvsc_device *net_device = net_device_ctx->nvdev;
struct net_device *vf_netdev;
struct sk_buff *skb;
struct netvsc_stats *rx_stats;
+ u16 q_idx = channel->offermsg.offer.sub_channel_index;
+
if (net->reg_state != NETREG_REGISTERED)
return NVSP_STAT_FAIL;
@@ -659,30 +658,31 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* policy filters on the host). Deliver these via the VF
* interface in the guest.
*/
+ rcu_read_lock();
vf_netdev = rcu_dereference(net_device_ctx->vf_netdev);
if (vf_netdev && (vf_netdev->flags & IFF_UP))
net = vf_netdev;
/* Allocate a skb - TODO direct I/O to pages? */
- skb = netvsc_alloc_recv_skb(net, packet, csum_info, *data, vlan_tci);
+ skb = netvsc_alloc_recv_skb(net, csum_info, vlan, data, len);
if (unlikely(!skb)) {
++net->stats.rx_dropped;
+ rcu_read_unlock();
return NVSP_STAT_FAIL;
}
if (net != vf_netdev)
- skb_record_rx_queue(skb,
- channel->offermsg.offer.sub_channel_index);
+ skb_record_rx_queue(skb, q_idx);
/*
* Even if injecting the packet, record the statistics
* on the synthetic device because modifying the VF device
* statistics will not work correctly.
*/
- rx_stats = this_cpu_ptr(net_device_ctx->rx_stats);
+ rx_stats = &net_device->chan_table[q_idx].rx_stats;
u64_stats_update_begin(&rx_stats->syncp);
rx_stats->packets++;
- rx_stats->bytes += packet->total_data_buflen;
+ rx_stats->bytes += len;
if (skb->pkt_type == PACKET_BROADCAST)
++rx_stats->broadcast;
@@ -695,7 +695,8 @@ int netvsc_recv_callback(struct hv_device *device_obj,
* is done.
* TODO - use NAPI?
*/
- netif_rx(skb);
+ netif_receive_skb(skb);
+ rcu_read_unlock();
return 0;
}
@@ -719,102 +720,76 @@ static void netvsc_get_channels(struct net_device *net,
}
}
+static int netvsc_set_queues(struct net_device *net, struct hv_device *dev,
+ u32 num_chn)
+{
+ struct netvsc_device_info device_info;
+ int ret;
+
+ memset(&device_info, 0, sizeof(device_info));
+ device_info.num_chn = num_chn;
+ device_info.ring_size = ring_size;
+ device_info.max_num_vrss_chns = num_chn;
+
+ ret = rndis_filter_device_add(dev, &device_info);
+ if (ret)
+ return ret;
+
+ ret = netif_set_real_num_tx_queues(net, num_chn);
+ if (ret)
+ return ret;
+
+ ret = netif_set_real_num_rx_queues(net, num_chn);
+
+ return ret;
+}
+
static int netvsc_set_channels(struct net_device *net,
struct ethtool_channels *channels)
{
struct net_device_context *net_device_ctx = netdev_priv(net);
struct hv_device *dev = net_device_ctx->device_ctx;
struct netvsc_device *nvdev = net_device_ctx->nvdev;
- struct netvsc_device_info device_info;
- u32 num_chn;
- u32 max_chn;
- int ret = 0;
- bool recovering = false;
+ unsigned int count = channels->combined_count;
+ int ret;
+
+ /* We do not support separate count for rx, tx, or other */
+ if (count == 0 ||
+ channels->rx_count || channels->tx_count || channels->other_count)
+ return -EINVAL;
+
+ if (count > net->num_tx_queues || count > net->num_rx_queues)
+ return -EINVAL;
if (net_device_ctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
- num_chn = nvdev->num_chn;
- max_chn = min_t(u32, nvdev->max_chn, num_online_cpus());
-
- if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5) {
- pr_info("vRSS unsupported before NVSP Version 5\n");
+ if (nvdev->nvsp_version < NVSP_PROTOCOL_VERSION_5)
return -EINVAL;
- }
- /* We do not support rx, tx, or other */
- if (!channels ||
- channels->rx_count ||
- channels->tx_count ||
- channels->other_count ||
- (channels->combined_count < 1))
+ if (count > nvdev->max_chn)
return -EINVAL;
- if (channels->combined_count > max_chn) {
- pr_info("combined channels too high, using %d\n", max_chn);
- channels->combined_count = max_chn;
- }
-
ret = netvsc_close(net);
if (ret)
- goto out;
+ return ret;
- do_set:
net_device_ctx->start_remove = true;
- rndis_filter_device_remove(dev);
-
- nvdev->num_chn = channels->combined_count;
-
- memset(&device_info, 0, sizeof(device_info));
- device_info.num_chn = nvdev->num_chn; /* passed to RNDIS */
- device_info.ring_size = ring_size;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ rndis_filter_device_remove(dev, nvdev);
- ret = rndis_filter_device_add(dev, &device_info);
- if (ret) {
- if (recovering) {
- netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
-
- nvdev = net_device_ctx->nvdev;
-
- ret = netif_set_real_num_tx_queues(net, nvdev->num_chn);
- if (ret) {
- if (recovering) {
- netdev_err(net, "could not set tx queue count (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
-
- ret = netif_set_real_num_rx_queues(net, nvdev->num_chn);
- if (ret) {
- if (recovering) {
- netdev_err(net, "could not set rx queue count (ret %d)\n", ret);
- return ret;
- }
- goto recover;
- }
+ ret = netvsc_set_queues(net, dev, count);
+ if (ret == 0)
+ nvdev->num_chn = count;
+ else
+ netvsc_set_queues(net, dev, nvdev->num_chn);
- out:
netvsc_open(net);
net_device_ctx->start_remove = false;
+
/* We may have missed link change notifications */
schedule_delayed_work(&net_device_ctx->dwork, 0);
return ret;
-
- recover:
- /* If the above failed, we attempt to recover through the same
- * process but with the original number of channels.
- */
- netdev_err(net, "could not set channels, recovering\n");
- recovering = true;
- channels->combined_count = num_chn;
- goto do_set;
}
static bool netvsc_validate_ethtool_ss_cmd(const struct ethtool_cmd *cmd)
@@ -875,8 +850,7 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
struct netvsc_device *nvdev = ndevctx->nvdev;
struct hv_device *hdev = ndevctx->device_ctx;
struct netvsc_device_info device_info;
- u32 num_chn;
- int ret = 0;
+ int ret;
if (ndevctx->start_remove || !nvdev || nvdev->destroy)
return -ENODEV;
@@ -885,17 +859,15 @@ static int netvsc_change_mtu(struct net_device *ndev, int mtu)
if (ret)
goto out;
- num_chn = nvdev->num_chn;
-
ndevctx->start_remove = true;
- rndis_filter_device_remove(hdev);
+ rndis_filter_device_remove(hdev, nvdev);
ndev->mtu = mtu;
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.num_chn = num_chn;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ device_info.num_chn = nvdev->num_chn;
+ device_info.max_num_vrss_chns = nvdev->num_chn;
rndis_filter_device_add(hdev, &device_info);
out:
@@ -912,34 +884,39 @@ static void netvsc_get_stats64(struct net_device *net,
struct rtnl_link_stats64 *t)
{
struct net_device_context *ndev_ctx = netdev_priv(net);
- int cpu;
-
- for_each_possible_cpu(cpu) {
- struct netvsc_stats *tx_stats = per_cpu_ptr(ndev_ctx->tx_stats,
- cpu);
- struct netvsc_stats *rx_stats = per_cpu_ptr(ndev_ctx->rx_stats,
- cpu);
- u64 tx_packets, tx_bytes, rx_packets, rx_bytes, rx_multicast;
+ struct netvsc_device *nvdev = ndev_ctx->nvdev;
+ int i;
+
+ if (!nvdev)
+ return;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+ const struct netvsc_stats *stats;
+ u64 packets, bytes, multicast;
unsigned int start;
+ stats = &nvchan->tx_stats;
do {
- start = u64_stats_fetch_begin_irq(&tx_stats->syncp);
- tx_packets = tx_stats->packets;
- tx_bytes = tx_stats->bytes;
- } while (u64_stats_fetch_retry_irq(&tx_stats->syncp, start));
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+ t->tx_bytes += bytes;
+ t->tx_packets += packets;
+
+ stats = &nvchan->rx_stats;
do {
- start = u64_stats_fetch_begin_irq(&rx_stats->syncp);
- rx_packets = rx_stats->packets;
- rx_bytes = rx_stats->bytes;
- rx_multicast = rx_stats->multicast + rx_stats->broadcast;
- } while (u64_stats_fetch_retry_irq(&rx_stats->syncp, start));
-
- t->tx_bytes += tx_bytes;
- t->tx_packets += tx_packets;
- t->rx_bytes += rx_bytes;
- t->rx_packets += rx_packets;
- t->multicast += rx_multicast;
+ start = u64_stats_fetch_begin_irq(&stats->syncp);
+ packets = stats->packets;
+ bytes = stats->bytes;
+ multicast = stats->multicast + stats->broadcast;
+ } while (u64_stats_fetch_retry_irq(&stats->syncp, start));
+
+ t->rx_bytes += bytes;
+ t->rx_packets += packets;
+ t->multicast += multicast;
}
t->tx_dropped = net->stats.tx_dropped;
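Each accumulation above follows the u64_stats seqcount protocol: re-read until the writer-side sequence is stable, so the packets/bytes pair stays consistent even on 32-bit SMP. The reader distilled into a helper (hypothetical name):

static void fetch_stats(const struct netvsc_stats *s,
			u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_irq(&s->syncp, start));
}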
@@ -984,11 +961,19 @@ static const struct {
{ "tx_busy", offsetof(struct netvsc_ethtool_stats, tx_busy) },
};
+#define NETVSC_GLOBAL_STATS_LEN ARRAY_SIZE(netvsc_stats)
+
+/* 4 statistics per queue (rx/tx packets/bytes) */
+#define NETVSC_QUEUE_STATS_LEN(dev) ((dev)->num_chn * 4)
+
static int netvsc_get_sset_count(struct net_device *dev, int string_set)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+
switch (string_set) {
case ETH_SS_STATS:
- return ARRAY_SIZE(netvsc_stats);
+ return NETVSC_GLOBAL_STATS_LEN + NETVSC_QUEUE_STATS_LEN(nvdev);
default:
return -EINVAL;
}
@@ -998,26 +983,109 @@ static void netvsc_get_ethtool_stats(struct net_device *dev,
struct ethtool_stats *stats, u64 *data)
{
struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
const void *nds = &ndc->eth_stats;
- int i;
+ const struct netvsc_stats *qstats;
+ unsigned int start;
+ u64 packets, bytes;
+ int i, j;
- for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
+ for (i = 0; i < NETVSC_GLOBAL_STATS_LEN; i++)
data[i] = *(unsigned long *)(nds + netvsc_stats[i].offset);
+
+ for (j = 0; j < nvdev->num_chn; j++) {
+ qstats = &nvdev->chan_table[j].tx_stats;
+
+ do {
+ start = u64_stats_fetch_begin_irq(&qstats->syncp);
+ packets = qstats->packets;
+ bytes = qstats->bytes;
+ } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+
+ qstats = &nvdev->chan_table[j].rx_stats;
+ do {
+ start = u64_stats_fetch_begin_irq(&qstats->syncp);
+ packets = qstats->packets;
+ bytes = qstats->bytes;
+ } while (u64_stats_fetch_retry_irq(&qstats->syncp, start));
+ data[i++] = packets;
+ data[i++] = bytes;
+ }
}
static void netvsc_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+ u8 *p = data;
int i;
switch (stringset) {
case ETH_SS_STATS:
for (i = 0; i < ARRAY_SIZE(netvsc_stats); i++)
- memcpy(data + i * ETH_GSTRING_LEN,
+ memcpy(p + i * ETH_GSTRING_LEN,
netvsc_stats[i].name, ETH_GSTRING_LEN);
+
+ p += i * ETH_GSTRING_LEN;
+ for (i = 0; i < nvdev->num_chn; i++) {
+ sprintf(p, "tx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "tx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_packets", i);
+ p += ETH_GSTRING_LEN;
+ sprintf(p, "rx_queue_%u_bytes", i);
+ p += ETH_GSTRING_LEN;
+ }
+
break;
}
}
+static int
+netvsc_get_rss_hash_opts(struct netvsc_device *nvdev,
+ struct ethtool_rxnfc *info)
+{
+ info->data = RXH_IP_SRC | RXH_IP_DST;
+
+ switch (info->flow_type) {
+ case TCP_V4_FLOW:
+ case TCP_V6_FLOW:
+ info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
+ /* fallthrough */
+ case UDP_V4_FLOW:
+ case UDP_V6_FLOW:
+ case IPV4_FLOW:
+ case IPV6_FLOW:
+ break;
+ default:
+ info->data = 0;
+ break;
+ }
+
+ return 0;
+}
+
+static int
+netvsc_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
+ u32 *rules)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *nvdev = ndc->nvdev;
+
+ switch (info->cmd) {
+ case ETHTOOL_GRXRINGS:
+ info->data = nvdev->num_chn;
+ return 0;
+
+ case ETHTOOL_GRXFH:
+ return netvsc_get_rss_hash_opts(nvdev, info);
+ }
+ return -EOPNOTSUPP;
+}
+
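From userspace, the new get_rxnfc hook answers the SIOCETHTOOL ioctl. A small sketch querying which header fields feed the RSS hash for TCP/IPv4; "eth0" is a placeholder interface name:

#include <linux/ethtool.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>

int main(void)
{
	struct ethtool_rxnfc nfc = {
		.cmd = ETHTOOL_GRXFH,
		.flow_type = TCP_V4_FLOW,
	};
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (char *)&nfc;

	/* driver fills nfc.data with RXH_* bits, e.g. RXH_IP_SRC |
	 * RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3 for TCP flows */
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("hash fields: %#llx\n",
		       (unsigned long long)nfc.data);
	return 0;
}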
#ifdef CONFIG_NET_POLL_CONTROLLER
static void netvsc_poll_controller(struct net_device *net)
{
@@ -1027,6 +1095,68 @@ static void netvsc_poll_controller(struct net_device *net)
}
#endif
+static u32 netvsc_get_rxfh_key_size(struct net_device *dev)
+{
+ return NETVSC_HASH_KEYLEN;
+}
+
+static u32 netvsc_rss_indir_size(struct net_device *dev)
+{
+ return ITAB_NUM;
+}
+
+static int netvsc_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
+ u8 *hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = ndc->nvdev;
+ struct rndis_device *rndis_dev = ndev->extension;
+ int i;
+
+ if (hfunc)
+ *hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */
+
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ indir[i] = rndis_dev->ind_table[i];
+ }
+
+ if (key)
+ memcpy(key, rndis_dev->rss_key, NETVSC_HASH_KEYLEN);
+
+ return 0;
+}
+
+static int netvsc_set_rxfh(struct net_device *dev, const u32 *indir,
+ const u8 *key, const u8 hfunc)
+{
+ struct net_device_context *ndc = netdev_priv(dev);
+ struct netvsc_device *ndev = ndc->nvdev;
+ struct rndis_device *rndis_dev = ndev->extension;
+ int i;
+
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
+ return -EOPNOTSUPP;
+
+ if (indir) {
+ for (i = 0; i < ITAB_NUM; i++)
+ if (indir[i] >= dev->num_rx_queues)
+ return -EINVAL;
+
+ for (i = 0; i < ITAB_NUM; i++)
+ rndis_dev->ind_table[i] = indir[i];
+ }
+
+ if (!key) {
+ if (!indir)
+ return 0;
+
+ key = rndis_dev->rss_key;
+ }
+
+ return rndis_filter_set_rss_param(rndis_dev, key, ndev->num_chn);
+}
+
static const struct ethtool_ops ethtool_ops = {
.get_drvinfo = netvsc_get_drvinfo,
.get_link = ethtool_op_get_link,
@@ -1038,6 +1168,11 @@ static const struct ethtool_ops ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info,
.get_settings = netvsc_get_settings,
.set_settings = netvsc_set_settings,
+ .get_rxnfc = netvsc_get_rxnfc,
+ .get_rxfh_key_size = netvsc_get_rxfh_key_size,
+ .get_rxfh_indir_size = netvsc_rss_indir_size,
+ .get_rxfh = netvsc_get_rxfh,
+ .set_rxfh = netvsc_set_rxfh,
};
static const struct net_device_ops device_ops = {
@@ -1158,15 +1293,6 @@ out_unlock:
rtnl_unlock();
}
-static void netvsc_free_netdev(struct net_device *netdev)
-{
- struct net_device_context *net_device_ctx = netdev_priv(netdev);
-
- free_percpu(net_device_ctx->tx_stats);
- free_percpu(net_device_ctx->rx_stats);
- free_netdev(netdev);
-}
-
static struct net_device *get_netvsc_bymac(const u8 *mac)
{
struct net_device *dev;
@@ -1303,7 +1429,6 @@ static int netvsc_vf_down(struct net_device *vf_netdev)
static int netvsc_unregister_vf(struct net_device *vf_netdev)
{
struct net_device *ndev;
- struct netvsc_device *netvsc_dev;
struct net_device_context *net_device_ctx;
ndev = get_netvsc_byref(vf_netdev);
@@ -1311,7 +1436,6 @@ static int netvsc_unregister_vf(struct net_device *vf_netdev)
return NOTIFY_DONE;
net_device_ctx = netdev_priv(ndev);
- netvsc_dev = net_device_ctx->nvdev;
netdev_info(ndev, "VF unregistering: %s\n", vf_netdev->name);
@@ -1331,7 +1455,7 @@ static int netvsc_probe(struct hv_device *dev,
int ret;
net = alloc_etherdev_mq(sizeof(struct net_device_context),
- num_online_cpus());
+ VRSS_CHANNEL_MAX);
if (!net)
return -ENOMEM;
@@ -1346,18 +1470,6 @@ static int netvsc_probe(struct hv_device *dev,
netdev_dbg(net, "netvsc msg_enable: %d\n",
net_device_ctx->msg_enable);
- net_device_ctx->tx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
- if (!net_device_ctx->tx_stats) {
- free_netdev(net);
- return -ENOMEM;
- }
- net_device_ctx->rx_stats = netdev_alloc_pcpu_stats(struct netvsc_stats);
- if (!net_device_ctx->rx_stats) {
- free_percpu(net_device_ctx->tx_stats);
- free_netdev(net);
- return -ENOMEM;
- }
-
hv_set_drvdata(dev, net);
net_device_ctx->start_remove = false;
@@ -1369,10 +1481,6 @@ static int netvsc_probe(struct hv_device *dev,
INIT_LIST_HEAD(&net_device_ctx->reconfig_events);
net->netdev_ops = &device_ops;
-
- net->hw_features = NETVSC_HW_FEATURES;
- net->features = NETVSC_HW_FEATURES | NETIF_F_HW_VLAN_CTAG_TX;
-
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
@@ -1382,20 +1490,26 @@ static int netvsc_probe(struct hv_device *dev,
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
- device_info.max_num_vrss_chns = max_num_vrss_chns;
+ device_info.max_num_vrss_chns = min_t(u32, VRSS_CHANNEL_DEFAULT,
+ num_online_cpus());
ret = rndis_filter_device_add(dev, &device_info);
if (ret != 0) {
netdev_err(net, "unable to add netvsc device (ret %d)\n", ret);
- netvsc_free_netdev(net);
+ free_netdev(net);
hv_set_drvdata(dev, NULL);
return ret;
}
memcpy(net->dev_addr, device_info.mac_adr, ETH_ALEN);
+ /* hw_features computed in rndis_filter_device_add */
+ net->features = net->hw_features |
+ NETIF_F_HIGHDMA | NETIF_F_SG |
+ NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+ net->vlan_features = net->features;
+
nvdev = net_device_ctx->nvdev;
netif_set_real_num_tx_queues(net, nvdev->num_chn);
netif_set_real_num_rx_queues(net, nvdev->num_chn);
- netif_set_gso_max_size(net, NETVSC_GSO_MAX_SIZE);
/* MTU range: 68 - 1500 or 65521 */
net->min_mtu = NETVSC_MTU_MIN;
@@ -1407,8 +1521,8 @@ static int netvsc_probe(struct hv_device *dev,
ret = register_netdev(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
- rndis_filter_device_remove(dev);
- netvsc_free_netdev(net);
+ rndis_filter_device_remove(dev, nvdev);
+ free_netdev(net);
}
return ret;
@@ -1418,7 +1532,6 @@ static int netvsc_remove(struct hv_device *dev)
{
struct net_device *net;
struct net_device_context *ndev_ctx;
- struct netvsc_device *net_device;
net = hv_get_drvdata(dev);
@@ -1428,7 +1541,6 @@ static int netvsc_remove(struct hv_device *dev)
}
ndev_ctx = netdev_priv(net);
- net_device = ndev_ctx->nvdev;
/* Avoid racing with netvsc_change_mtu()/netvsc_set_channels()
* removing the device.
@@ -1449,11 +1561,11 @@ static int netvsc_remove(struct hv_device *dev)
* Call to the vsc driver to let it know that the device is being
* removed
*/
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, ndev_ctx->nvdev);
hv_set_drvdata(dev, NULL);
- netvsc_free_netdev(net);
+ free_netdev(net);
return 0;
}
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 8d90904..19356f5 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -57,6 +57,14 @@ struct rndis_request {
u8 request_ext[RNDIS_EXT_LEN];
};
+static const u8 netvsc_hash_key[NETVSC_HASH_KEYLEN] = {
+ 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
+ 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
+ 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
+ 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
+ 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
+};
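This is the de-facto standard Toeplitz key (the bytes Microsoft publishes for RSS verification). For reference, a sketch of the standard Toeplitz hash the host computes with it over the flow tuple — the textbook algorithm, not code from this patch; valid for inputs up to 36 bytes:

static u32 toeplitz_hash(const u8 key[NETVSC_HASH_KEYLEN],
			 const u8 *data, size_t len)
{
	u32 hash = 0;
	/* window holds key bits [i, i + 32) for the current bit i */
	u32 window = (u32)key[0] << 24 | (u32)key[1] << 16 |
		     (u32)key[2] << 8 | key[3];
	size_t i;

	for (i = 0; i < len * 8; i++) {
		if (data[i / 8] & (0x80 >> (i % 8)))
			hash ^= window;
		/* slide the window left, pulling in key bit i + 32 */
		window <<= 1;
		if ((key[(i + 32) / 8] << ((i + 32) % 8)) & 0x80)
			window |= 1;
	}

	return hash;
}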
+
static struct rndis_device *get_rndis_device(void)
{
struct rndis_device *device;
@@ -124,7 +132,7 @@ static void put_rndis_request(struct rndis_device *dev,
}
static void dump_rndis_message(struct hv_device *hv_dev,
- struct rndis_message *rndis_msg)
+ const struct rndis_message *rndis_msg)
{
struct net_device *netdev = hv_get_drvdata(hv_dev);
@@ -339,102 +347,78 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, u32 type)
return NULL;
}
-static int rndis_filter_receive_data(struct rndis_device *dev,
- struct rndis_message *msg,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel)
+static int rndis_filter_receive_data(struct net_device *ndev,
+ struct rndis_device *dev,
+ struct rndis_message *msg,
+ struct vmbus_channel *channel,
+ void *data, u32 data_buflen)
{
- struct rndis_packet *rndis_pkt;
+ struct rndis_packet *rndis_pkt = &msg->msg.pkt;
+ const struct ndis_tcp_ip_checksum_info *csum_info;
+ const struct ndis_pkt_8021q_info *vlan;
u32 data_offset;
- struct ndis_pkt_8021q_info *vlan;
- struct ndis_tcp_ip_checksum_info *csum_info;
- u16 vlan_tci = 0;
- struct net_device_context *net_device_ctx = netdev_priv(dev->ndev);
-
- rndis_pkt = &msg->msg.pkt;
/* Remove the rndis header and pass it back up the stack */
data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset;
- pkt->total_data_buflen -= data_offset;
+ data_buflen -= data_offset;
/*
* Make sure we got a valid RNDIS message, now total_data_buflen
* should be the data packet size plus the trailer padding size
*/
- if (pkt->total_data_buflen < rndis_pkt->data_len) {
+ if (unlikely(data_buflen < rndis_pkt->data_len)) {
netdev_err(dev->ndev, "rndis message buffer "
"overflow detected (got %u, min %u)"
"...dropping this message!\n",
- pkt->total_data_buflen, rndis_pkt->data_len);
+ data_buflen, rndis_pkt->data_len);
return NVSP_STAT_FAIL;
}
+ vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
+
/*
* Remove the rndis trailer padding from rndis packet message
* rndis_pkt->data_len tell us the real data length, we only copy
* the data packet to the stack, without the rndis trailer padding
*/
- pkt->total_data_buflen = rndis_pkt->data_len;
- *data = (void *)((unsigned long)(*data) + data_offset);
-
- vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO);
- if (vlan) {
- vlan_tci = VLAN_TAG_PRESENT | vlan->vlanid |
- (vlan->pri << VLAN_PRIO_SHIFT);
- }
-
+ data = (void *)((unsigned long)data + data_offset);
csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO);
- return netvsc_recv_callback(net_device_ctx->device_ctx, pkt, data,
- csum_info, channel, vlan_tci);
+ return netvsc_recv_callback(ndev, channel,
+ data, rndis_pkt->data_len,
+ csum_info, vlan);
}
-int rndis_filter_receive(struct hv_device *dev,
- struct hv_netvsc_packet *pkt,
- void **data,
- struct vmbus_channel *channel)
+int rndis_filter_receive(struct net_device *ndev,
+ struct netvsc_device *net_dev,
+ struct hv_device *dev,
+ struct vmbus_channel *channel,
+ void *data, u32 buflen)
{
- struct net_device *ndev = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(ndev);
- struct netvsc_device *net_dev = net_device_ctx->nvdev;
- struct rndis_device *rndis_dev;
- struct rndis_message *rndis_msg;
- int ret = 0;
-
- if (!net_dev) {
- ret = NVSP_STAT_FAIL;
- goto exit;
- }
+ struct rndis_device *rndis_dev = net_dev->extension;
+ struct rndis_message *rndis_msg = data;
/* Make sure the rndis device state is initialized */
- if (!net_dev->extension) {
- netdev_err(ndev, "got rndis message but no rndis device - "
- "dropping this message!\n");
- ret = NVSP_STAT_FAIL;
- goto exit;
+ if (unlikely(!rndis_dev)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "got rndis message but no rndis device!\n");
+ return NVSP_STAT_FAIL;
}
- rndis_dev = (struct rndis_device *)net_dev->extension;
- if (rndis_dev->state == RNDIS_DEV_UNINITIALIZED) {
- netdev_err(ndev, "got rndis message but rndis device "
- "uninitialized...dropping this message!\n");
- ret = NVSP_STAT_FAIL;
- goto exit;
+ if (unlikely(rndis_dev->state == RNDIS_DEV_UNINITIALIZED)) {
+ netif_err(net_device_ctx, rx_err, ndev,
+ "got rndis message uninitialized\n");
+ return NVSP_STAT_FAIL;
}
- rndis_msg = *data;
-
- if (netif_msg_rx_err(net_device_ctx))
+ if (netif_msg_rx_status(net_device_ctx))
dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
- /* data msg */
- ret = rndis_filter_receive_data(rndis_dev, rndis_msg, pkt,
- data, channel);
- break;
-
+ return rndis_filter_receive_data(ndev, rndis_dev, rndis_msg,
+ channel, data, buflen);
case RNDIS_MSG_INIT_C:
case RNDIS_MSG_QUERY_C:
case RNDIS_MSG_SET_C:
@@ -454,8 +438,7 @@ int rndis_filter_receive(struct hv_device *dev,
break;
}
-exit:
- return ret;
+ return 0;
}
static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
@@ -485,7 +468,35 @@ static int rndis_filter_query_device(struct rndis_device *dev, u32 oid,
query->info_buflen = 0;
query->dev_vc_handle = 0;
- if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
+ if (oid == OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES) {
+ struct net_device_context *ndevctx = netdev_priv(dev->ndev);
+ struct netvsc_device *nvdev = ndevctx->nvdev;
+ struct ndis_offload *hwcaps;
+ u32 nvsp_version = nvdev->nvsp_version;
+ u8 ndis_rev;
+ size_t size;
+
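+ /* Pick the NDIS offload structure revision and size that match
+  * the negotiated NVSP protocol version.
+  */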
+ if (nvsp_version >= NVSP_PROTOCOL_VERSION_5) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_3;
+ size = NDIS_OFFLOAD_SIZE;
+ } else if (nvsp_version >= NVSP_PROTOCOL_VERSION_4) {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_2;
+ size = NDIS_OFFLOAD_SIZE_6_1;
+ } else {
+ ndis_rev = NDIS_OFFLOAD_PARAMETERS_REVISION_1;
+ size = NDIS_OFFLOAD_SIZE_6_0;
+ }
+
+ request->request_msg.msg_len += size;
+ query->info_buflen = size;
+ hwcaps = (struct ndis_offload *)
+ ((unsigned long)query + query->info_buf_offset);
+
+ hwcaps->header.type = NDIS_OBJECT_TYPE_OFFLOAD;
+ hwcaps->header.revision = ndis_rev;
+ hwcaps->header.size = size;
+
+ } else if (oid == OID_GEN_RECEIVE_SCALE_CAPABILITIES) {
struct ndis_recv_scale_cap *cap;
request->request_msg.msg_len +=
@@ -526,6 +537,44 @@ cleanup:
return ret;
}
+/* Get the hardware offload capabilities */
+static int
+rndis_query_hwcaps(struct rndis_device *dev, struct ndis_offload *caps)
+{
+ u32 caps_len = sizeof(*caps);
+ int ret;
+
+ memset(caps, 0, sizeof(*caps));
+
+ ret = rndis_filter_query_device(dev,
+ OID_TCP_OFFLOAD_HARDWARE_CAPABILITIES,
+ caps, &caps_len);
+ if (ret)
+ return ret;
+
+ if (caps->header.type != NDIS_OBJECT_TYPE_OFFLOAD) {
+ netdev_warn(dev->ndev, "invalid NDIS objtype %#x\n",
+ caps->header.type);
+ return -EINVAL;
+ }
+
+ if (caps->header.revision < NDIS_OFFLOAD_PARAMETERS_REVISION_1) {
+ netdev_warn(dev->ndev, "invalid NDIS objrev %x\n",
+ caps->header.revision);
+ return -EINVAL;
+ }
+
+ if (caps->header.size > caps_len ||
+ caps->header.size < NDIS_OFFLOAD_SIZE_6_0) {
+ netdev_warn(dev->ndev,
+ "invalid NDIS objsize %u, data size %u\n",
+ caps->header.size, caps_len);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
static int rndis_filter_query_device_mac(struct rndis_device *dev)
{
u32 size = ETH_ALEN;
@@ -663,23 +712,15 @@ cleanup:
return ret;
}
-static const u8 netvsc_hash_key[] = {
- 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
- 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
- 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
- 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
- 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
-};
-#define HASH_KEYLEN ARRAY_SIZE(netvsc_hash_key)
-
-static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
+int rndis_filter_set_rss_param(struct rndis_device *rdev,
+ const u8 *rss_key, int num_queue)
{
struct net_device *ndev = rdev->ndev;
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
u32 extlen = sizeof(struct ndis_recv_scale_param) +
- 4*ITAB_NUM + HASH_KEYLEN;
+ 4 * ITAB_NUM + NETVSC_HASH_KEYLEN;
struct ndis_recv_scale_param *rssp;
u32 *itab;
u8 *keyp;
@@ -707,19 +748,18 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
NDIS_HASH_TCP_IPV6;
rssp->indirect_tabsize = 4*ITAB_NUM;
rssp->indirect_taboffset = sizeof(struct ndis_recv_scale_param);
- rssp->hashkey_size = HASH_KEYLEN;
+ rssp->hashkey_size = NETVSC_HASH_KEYLEN;
rssp->kashkey_offset = rssp->indirect_taboffset +
rssp->indirect_tabsize;
/* Set indirection table entries */
itab = (u32 *)(rssp + 1);
for (i = 0; i < ITAB_NUM; i++)
- itab[i] = i % num_queue;
+ itab[i] = rdev->ind_table[i];
/* Set hash key values */
keyp = (u8 *)((unsigned long)rssp + rssp->kashkey_offset);
- for (i = 0; i < HASH_KEYLEN; i++)
- keyp[i] = netvsc_hash_key[i];
+ memcpy(keyp, rss_key, NETVSC_HASH_KEYLEN);
ret = rndis_filter_send_request(rdev, request);
if (ret != 0)
@@ -727,7 +767,9 @@ static int rndis_filter_set_rss_param(struct rndis_device *rdev, int num_queue)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- if (set_complete->status != RNDIS_STATUS_SUCCESS) {
+ if (set_complete->status == RNDIS_STATUS_SUCCESS)
+ memcpy(rdev->rss_key, rss_key, NETVSC_HASH_KEYLEN);
+ else {
netdev_err(ndev, "Fail to set RSS parameters:0x%x\n",
set_complete->status);
ret = -EINVAL;
@@ -778,7 +820,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
struct rndis_request *request;
struct rndis_set_request *set;
struct rndis_set_complete *set_complete;
- u32 status;
int ret;
request = get_rndis_request(dev, RNDIS_MSG_SET,
@@ -805,8 +846,6 @@ int rndis_filter_set_packet_filter(struct rndis_device *dev, u32 new_filter)
wait_for_completion(&request->wait_event);
set_complete = &request->response_msg.msg.set_complete;
- status = set_complete->status;
-
cleanup:
if (request)
put_rndis_request(dev, request);
@@ -864,6 +903,23 @@ cleanup:
return ret;
}
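+/* A netvsc device is idle once no receive completions are outstanding
+ * and every channel's send queue has drained.
+ */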
+static bool netvsc_device_idle(const struct netvsc_device *nvdev)
+{
+ int i;
+
+ if (atomic_read(&nvdev->num_outstanding_recvs) > 0)
+ return false;
+
+ for (i = 0; i < nvdev->num_chn; i++) {
+ const struct netvsc_channel *nvchan = &nvdev->chan_table[i];
+
+ if (atomic_read(&nvchan->queue_sends) > 0)
+ return false;
+ }
+
+ return true;
+}
+
static void rndis_filter_halt_device(struct rndis_device *dev)
{
struct rndis_request *request;
@@ -894,9 +950,7 @@ cleanup:
spin_unlock_irqrestore(&hdev->channel->inbound_lock, flags);
/* Wait for all send completions */
- wait_event(nvdev->wait_drain,
- atomic_read(&nvdev->num_outstanding_sends) == 0 &&
- atomic_read(&nvdev->num_outstanding_recvs) == 0);
+ wait_event(nvdev->wait_drain, netvsc_device_idle(nvdev));
if (request)
put_rndis_request(dev, request);
@@ -948,18 +1002,15 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
if (chn_index >= nvscdev->num_chn)
return;
- set_per_channel_state(new_sc, nvscdev->sub_cb_buf + (chn_index - 1) *
- NETVSC_PACKET_SIZE);
-
- nvscdev->mrc[chn_index].buf = vzalloc(NETVSC_RECVSLOT_MAX *
- sizeof(struct recv_comp_data));
+ nvscdev->chan_table[chn_index].mrc.buf
+ = vzalloc(NETVSC_RECVSLOT_MAX * sizeof(struct recv_comp_data));
ret = vmbus_open(new_sc, nvscdev->ring_size * PAGE_SIZE,
nvscdev->ring_size * PAGE_SIZE, NULL, 0,
netvsc_channel_cb, new_sc);
if (ret == 0)
- nvscdev->chn_table[chn_index] = new_sc;
+ nvscdev->chan_table[chn_index].channel = new_sc;
spin_lock_irqsave(&nvscdev->sc_lock, flags);
nvscdev->num_sc_offered--;
@@ -969,24 +1020,25 @@ static void netvsc_sc_open(struct vmbus_channel *new_sc)
}
int rndis_filter_device_add(struct hv_device *dev,
- void *additional_info)
+ struct netvsc_device_info *device_info)
{
- int ret;
struct net_device *net = hv_get_drvdata(dev);
struct net_device_context *net_device_ctx = netdev_priv(net);
struct netvsc_device *net_device;
struct rndis_device *rndis_device;
- struct netvsc_device_info *device_info = additional_info;
+ struct ndis_offload hwcaps;
struct ndis_offload_params offloads;
struct nvsp_message *init_packet;
struct ndis_recv_scale_cap rsscap;
u32 rsscap_size = sizeof(struct ndis_recv_scale_cap);
+ unsigned int gso_max_size = GSO_MAX_SIZE;
u32 mtu, size;
u32 num_rss_qs;
u32 sc_delta;
const struct cpumask *node_cpu_mask;
u32 num_possible_rss_qs;
unsigned long flags;
+ int i, ret;
rndis_device = get_rndis_device();
if (!rndis_device)
@@ -997,7 +1049,7 @@ int rndis_filter_device_add(struct hv_device *dev,
* NOTE! Once the channel is created, we may get a receive callback
* (RndisFilterOnReceive()) before this call is completed
*/
- ret = netvsc_device_add(dev, additional_info);
+ ret = netvsc_device_add(dev, device_info);
if (ret != 0) {
kfree(rndis_device);
return ret;
@@ -1016,7 +1068,7 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Send the rndis initialization message */
ret = rndis_filter_init_device(rndis_device);
if (ret != 0) {
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
@@ -1031,25 +1083,71 @@ int rndis_filter_device_add(struct hv_device *dev,
/* Get the mac address */
ret = rndis_filter_query_device_mac(rndis_device);
if (ret != 0) {
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
memcpy(device_info->mac_adr, rndis_device->hw_mac_adr, ETH_ALEN);
- /* Turn on the offloads; the host supports all of the relevant
- * offloads.
- */
+ /* Find HW offload capabilities */
+ ret = rndis_query_hwcaps(rndis_device, &hwcaps);
+ if (ret != 0) {
+ rndis_filter_device_remove(dev, net_device);
+ return ret;
+ }
+
+ /* A value of zero means "no change"; now turn on what we want. */
memset(&offloads, 0, sizeof(struct ndis_offload_params));
- /* A value of zero means "no change"; now turn on what we
- * want.
- */
- offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
- offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+
+ /* The Linux stack always verifies IP header checksums itself, so
+  * leave IPv4 header checksum offload disabled.
+  */
+ offloads.ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_DISABLED;
+
+ /* Compute tx offload settings based on hw capabilities */
+ net->hw_features = NETIF_F_RXCSUM;
+
+ if ((hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_ALL_TCP4) == NDIS_TXCSUM_ALL_TCP4) {
+ /* Can checksum TCP */
+ net->hw_features |= NETIF_F_IP_CSUM;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_TCP;
+
+ offloads.tcp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+
+ if (hwcaps.lsov2.ip4_encap & NDIS_OFFLOAD_ENCAP_8023) {
+ offloads.lso_v2_ipv4 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO;
+
+ if (hwcaps.lsov2.ip4_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip4_maxsz;
+ }
+
+ if (hwcaps.csum.ip4_txcsum & NDIS_TXCSUM_CAP_UDP4) {
+ offloads.udp_ip_v4_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV4_UDP;
+ }
+ }
+
+ if ((hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_ALL_TCP6) == NDIS_TXCSUM_ALL_TCP6) {
+ net->hw_features |= NETIF_F_IPV6_CSUM;
+
+ offloads.tcp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_TCP;
+
+ if ((hwcaps.lsov2.ip6_encap & NDIS_OFFLOAD_ENCAP_8023) &&
+ (hwcaps.lsov2.ip6_opts & NDIS_LSOV2_CAP_IP6) == NDIS_LSOV2_CAP_IP6) {
+ offloads.lso_v2_ipv6 = NDIS_OFFLOAD_PARAMETERS_LSOV2_ENABLED;
+ net->hw_features |= NETIF_F_TSO6;
+
+ if (hwcaps.lsov2.ip6_maxsz < gso_max_size)
+ gso_max_size = hwcaps.lsov2.ip6_maxsz;
+ }
+
+ if (hwcaps.csum.ip6_txcsum & NDIS_TXCSUM_CAP_UDP6) {
+ offloads.udp_ip_v6_csum = NDIS_OFFLOAD_PARAMETERS_TX_RX_ENABLED;
+ net_device_ctx->tx_checksum_mask |= TRANSPORT_INFO_IPV6_UDP;
+ }
+ }
+
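+ /* Limit GSO to the smallest LSOv2 size the host advertises */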
+ netif_set_gso_max_size(net, gso_max_size);
ret = rndis_filter_set_offload_params(net, &offloads);
if (ret)
@@ -1094,19 +1192,16 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = min(num_possible_rss_qs, num_rss_qs);
num_rss_qs = net_device->num_chn - 1;
+
+ for (i = 0; i < ITAB_NUM; i++)
+ rndis_device->ind_table[i] = ethtool_rxfh_indir_default(i,
+ net_device->num_chn);
+
net_device->num_sc_offered = num_rss_qs;
if (net_device->num_chn == 1)
goto out;
- net_device->sub_cb_buf = vzalloc((net_device->num_chn - 1) *
- NETVSC_PACKET_SIZE);
- if (!net_device->sub_cb_buf) {
- net_device->num_chn = 1;
- dev_info(&dev->device, "No memory for subchannels.\n");
- goto out;
- }
-
vmbus_set_sc_create_callback(dev->channel, netvsc_sc_open);
init_packet = &net_device->channel_init_pkt;
@@ -1132,7 +1227,8 @@ int rndis_filter_device_add(struct hv_device *dev,
net_device->num_chn = 1 +
init_packet->msg.v5_msg.subchn_comp.num_subchannels;
- ret = rndis_filter_set_rss_param(rndis_device, net_device->num_chn);
+ ret = rndis_filter_set_rss_param(rndis_device, netvsc_hash_key,
+ net_device->num_chn);
/*
* Set the number of sub-channels to be received.
@@ -1152,13 +1248,13 @@ out:
return 0; /* return 0 because primary channel can be used alone */
err_dev_remv:
- rndis_filter_device_remove(dev);
+ rndis_filter_device_remove(dev, net_device);
return ret;
}
-void rndis_filter_device_remove(struct hv_device *dev)
+void rndis_filter_device_remove(struct hv_device *dev,
+ struct netvsc_device *net_dev)
{
- struct netvsc_device *net_dev = hv_device_to_netvsc_device(dev);
struct rndis_device *rndis_dev = net_dev->extension;
/* If not all subchannel offers are complete, wait for them until
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 46d53a6..76ba7ec 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1715,9 +1715,9 @@ static int at86rf230_probe(struct spi_device *spi)
/* Reset */
if (gpio_is_valid(rstn)) {
udelay(1);
- gpio_set_value(rstn, 0);
+ gpio_set_value_cansleep(rstn, 0);
udelay(1);
- gpio_set_value(rstn, 1);
+ gpio_set_value_cansleep(rstn, 1);
usleep_range(120, 240);
}
diff --git a/drivers/net/ieee802154/atusb.c b/drivers/net/ieee802154/atusb.c
index 1253f86..ef68851 100644
--- a/drivers/net/ieee802154/atusb.c
+++ b/drivers/net/ieee802154/atusb.c
@@ -117,13 +117,26 @@ static int atusb_read_reg(struct atusb *atusb, uint8_t reg)
{
struct usb_device *usb_dev = atusb->usb_dev;
int ret;
+ uint8_t *buffer;
uint8_t value;
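+ /* USB control messages must not use stack memory; allocate the
+  * transfer buffer from the heap so it is DMA-capable.
+  */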
+ buffer = kmalloc(1, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
dev_dbg(&usb_dev->dev, "atusb: reg = 0x%x\n", reg);
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_REG_READ, ATUSB_REQ_FROM_DEV,
- 0, reg, &value, 1, 1000);
- return ret >= 0 ? value : ret;
+ 0, reg, buffer, 1, 1000);
+
+ if (ret >= 0) {
+ value = buffer[0];
+ kfree(buffer);
+ return value;
+ } else {
+ kfree(buffer);
+ return ret;
+ }
}
static int atusb_write_subreg(struct atusb *atusb, uint8_t reg, uint8_t mask,
@@ -549,13 +562,6 @@ static int
atusb_set_frame_retries(struct ieee802154_hw *hw, s8 retries)
{
struct atusb *atusb = hw->priv;
- struct device *dev = &atusb->usb_dev->dev;
-
- if (atusb->fw_ver_maj == 0 && atusb->fw_ver_min < 3) {
- dev_info(dev, "Automatic frame retransmission is only available from "
- "firmware version 0.3. Please update if you want this feature.");
- return -EINVAL;
- }
return atusb_write_subreg(atusb, SR_MAX_FRAME_RETRIES, retries);
}
@@ -608,9 +614,13 @@ static const struct ieee802154_ops atusb_ops = {
static int atusb_get_and_show_revision(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[3];
+ unsigned char *buffer;
int ret;
+ buffer = kmalloc(3, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Get a couple of the ATMega Firmware values */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_ID, ATUSB_REQ_FROM_DEV, 0, 0,
@@ -631,15 +641,20 @@ static int atusb_get_and_show_revision(struct atusb *atusb)
dev_info(&usb_dev->dev, "Please update to version 0.2 or newer");
}
+ kfree(buffer);
return ret;
}
static int atusb_get_and_show_build(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- char build[ATUSB_BUILD_SIZE + 1];
+ char *build;
int ret;
+ build = kmalloc(ATUSB_BUILD_SIZE + 1, GFP_KERNEL);
+ if (!build)
+ return -ENOMEM;
+
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_BUILD, ATUSB_REQ_FROM_DEV, 0, 0,
build, ATUSB_BUILD_SIZE, 1000);
@@ -648,6 +663,7 @@ static int atusb_get_and_show_build(struct atusb *atusb)
dev_info(&usb_dev->dev, "Firmware: build %s\n", build);
}
+ kfree(build);
return ret;
}
@@ -698,7 +714,7 @@ fail:
static int atusb_set_extended_addr(struct atusb *atusb)
{
struct usb_device *usb_dev = atusb->usb_dev;
- unsigned char buffer[IEEE802154_EXTENDED_ADDR_LEN];
+ unsigned char *buffer;
__le64 extended_addr;
u64 addr;
int ret;
@@ -710,12 +726,20 @@ static int atusb_set_extended_addr(struct atusb *atusb)
return 0;
}
+ buffer = kmalloc(IEEE802154_EXTENDED_ADDR_LEN, GFP_KERNEL);
+ if (!buffer)
+ return -ENOMEM;
+
/* Firmware is new enough so we fetch the address from EEPROM */
ret = atusb_control_msg(atusb, usb_rcvctrlpipe(usb_dev, 0),
ATUSB_EUI64_READ, ATUSB_REQ_FROM_DEV, 0, 0,
buffer, IEEE802154_EXTENDED_ADDR_LEN, 1000);
- if (ret < 0)
- dev_err(&usb_dev->dev, "failed to fetch extended address\n");
+ if (ret < 0) {
+ dev_err(&usb_dev->dev, "failed to fetch extended address, random address set\n");
+ ieee802154_random_extended_addr(&atusb->hw->phy->perm_extended_addr);
+ kfree(buffer);
+ return ret;
+ }
memcpy(&extended_addr, buffer, IEEE802154_EXTENDED_ADDR_LEN);
/* Check if read address is not empty and the unicast bit is set correctly */
@@ -729,6 +753,7 @@ static int atusb_set_extended_addr(struct atusb *atusb)
&addr);
}
+ kfree(buffer);
return ret;
}
@@ -770,8 +795,7 @@ static int atusb_probe(struct usb_interface *interface,
hw->parent = &usb_dev->dev;
hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT |
- IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS |
- IEEE802154_HW_FRAME_RETRIES;
+ IEEE802154_HW_PROMISCUOUS | IEEE802154_HW_CSMA_PARAMS;
hw->phy->flags = WPAN_PHY_FLAG_TXPOWER | WPAN_PHY_FLAG_CCA_ED_LEVEL |
WPAN_PHY_FLAG_CCA_MODE;
@@ -800,6 +824,9 @@ static int atusb_probe(struct usb_interface *interface,
atusb_get_and_show_build(atusb);
atusb_set_extended_addr(atusb);
+ if (atusb->fw_ver_maj > 0 ||
+     (atusb->fw_ver_maj == 0 && atusb->fw_ver_min >= 3))
+ hw->flags |= IEEE802154_HW_FRAME_RETRIES;
+
ret = atusb_get_and_clear_error(atusb);
if (ret) {
dev_err(&atusb->usb_dev->dev,
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 92b221a..95b18f4 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -102,8 +102,8 @@ static int ipvlan_port_create(struct net_device *dev)
return -EINVAL;
}
- if (netif_is_macvlan_port(dev)) {
- netdev_err(dev, "Master is a macvlan port.\n");
+ if (netdev_is_rx_handler_busy(dev)) {
+ netdev_err(dev, "Device is already in use.\n");
return -EBUSY;
}
@@ -550,6 +550,9 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
err = ida_simple_get(&port->ida, port->dev_id_start, 0xFFFE,
GFP_KERNEL);
if (err < 0)
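+ /* Allocation from dev_id_start failed; wrap around and retry in
+  * the range [1, dev_id_start).
+  */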
+ err = ida_simple_get(&port->ida, 0x1, port->dev_id_start,
+ GFP_KERNEL);
+ if (err < 0)
goto destroy_ipvlan_port;
dev->dev_id = err;
/* Increment id-base to the next slot for the future assignment */
diff --git a/drivers/net/irda/bfin_sir.c b/drivers/net/irda/bfin_sir.c
index be5bb0b..3151b58 100644
--- a/drivers/net/irda/bfin_sir.c
+++ b/drivers/net/irda/bfin_sir.c
@@ -22,7 +22,7 @@ static int max_rate = 57600;
static int max_rate = 115200;
#endif
-static void turnaround_delay(unsigned long last_jif, int mtt)
+static void turnaround_delay(int mtt)
{
long ticks;
@@ -209,7 +209,6 @@ static void bfin_sir_rx_chars(struct net_device *dev)
UART_CLEAR_LSR(port);
ch = UART_GET_CHAR(port);
async_unwrap_char(dev, &self->stats, &self->rx_buff, ch);
- dev->last_rx = jiffies;
}
static irqreturn_t bfin_sir_rx_int(int irq, void *dev_id)
@@ -510,7 +509,7 @@ static void bfin_sir_send_work(struct work_struct *work)
int tx_cnt = 10;
while (bfin_sir_is_receiving(dev) && --tx_cnt)
- turnaround_delay(dev->last_rx, self->mtt);
+ turnaround_delay(self->mtt);
bfin_sir_stop_rx(port);
diff --git a/drivers/net/irda/sh_sir.c b/drivers/net/irda/sh_sir.c
index e3fe9a2..fede686 100644
--- a/drivers/net/irda/sh_sir.c
+++ b/drivers/net/irda/sh_sir.c
@@ -547,7 +547,6 @@ static void sh_sir_rx(struct sh_sir_self *self)
async_unwrap_char(self->ndev, &self->ndev->stats,
&self->rx_buff, (u8)data);
- self->ndev->last_rx = jiffies;
if (EOFD & sh_sir_read(self, IRIF_SIR_FRM))
continue;
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 440ab3d..cbfc1be 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -1110,7 +1110,7 @@ static int macvlan_port_create(struct net_device *dev)
if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK)
return -EINVAL;
- if (netif_is_ipvlan_port(dev))
+ if (netdev_is_rx_handler_busy(dev))
return -EBUSY;
port = kzalloc(sizeof(*port), GFP_KERNEL);
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 5c26653..4026185 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -825,7 +825,7 @@ static ssize_t macvtap_put_user(struct macvtap_queue *q,
return -EINVAL;
if (virtio_net_hdr_from_skb(skb, &vnet_hdr,
- macvtap_is_little_endian(q)))
+ macvtap_is_little_endian(q), true))
BUG();
if (copy_to_iter(&vnet_hdr, sizeof(vnet_hdr), iter) !=
diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig
index d361835..8dbd59b 100644
--- a/drivers/net/phy/Kconfig
+++ b/drivers/net/phy/Kconfig
@@ -279,6 +279,7 @@ config MARVELL_PHY
config MESON_GXL_PHY
tristate "Amlogic Meson GXL Internal PHY"
+ depends on ARCH_MESON || COMPILE_TEST
---help---
Currently has a driver for the Amlogic Meson GXL Internal PHY
diff --git a/drivers/net/phy/bcm63xx.c b/drivers/net/phy/bcm63xx.c
index e741bf6..b0492ef 100644
--- a/drivers/net/phy/bcm63xx.c
+++ b/drivers/net/phy/bcm63xx.c
@@ -21,6 +21,23 @@ MODULE_DESCRIPTION("Broadcom 63xx internal PHY driver");
MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
MODULE_LICENSE("GPL");
+static int bcm63xx_config_intr(struct phy_device *phydev)
+{
+ int reg, err;
+
+ reg = phy_read(phydev, MII_BCM63XX_IR);
+ if (reg < 0)
+ return reg;
+
+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
+ reg &= ~MII_BCM63XX_IR_GMASK;
+ else
+ reg |= MII_BCM63XX_IR_GMASK;
+
+ err = phy_write(phydev, MII_BCM63XX_IR, reg);
+ return err;
+}
+
static int bcm63xx_config_init(struct phy_device *phydev)
{
int reg, err;
@@ -55,7 +72,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
}, {
/* same phy as above, with just a different OUI */
.phy_id = 0x002bdc00,
@@ -67,7 +84,7 @@ static struct phy_driver bcm63xx_driver[] = {
.config_aneg = genphy_config_aneg,
.read_status = genphy_read_status,
.ack_interrupt = bcm_phy_ack_intr,
- .config_intr = bcm_phy_config_intr,
+ .config_intr = bcm63xx_config_intr,
} };
module_phy_driver(bcm63xx_driver);
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index 264b085..aa01020 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -167,6 +167,31 @@ static int bcm7xxx_28nm_e0_plus_afe_config_init(struct phy_device *phydev)
return 0;
}
+static int bcm7xxx_28nm_a0_patch_afe_config_init(struct phy_device *phydev)
+{
+ /* +1 RC_CAL codes for RL centering for both LT and HT conditions */
+ bcm_phy_write_misc(phydev, AFE_RXCONFIG_2, 0xd003);
+
+ /* Cut master bias current by 2% to compensate for RC_CAL offset */
+ bcm_phy_write_misc(phydev, DSP_TAP10, 0x791b);
+
+ /* Improve hybrid leakage */
+ bcm_phy_write_misc(phydev, AFE_HPF_TRIM_OTHERS, 0x10e3);
+
+ /* Change rx_on_tune 8 to 0xf */
+ bcm_phy_write_misc(phydev, 0x21, 0x2, 0x87f6);
+
+ /* Change 100Tx EEE bandwidth */
+ bcm_phy_write_misc(phydev, 0x22, 0x2, 0x017d);
+
+ /* Enable ffe zero detection for Vitesse interoperability */
+ bcm_phy_write_misc(phydev, 0x26, 0x2, 0x0015);
+
+ r_rc_cal_reset(phydev);
+
+ return 0;
+}
+
static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
{
u8 rev = PHY_BRCM_7XXX_REV(phydev->dev_flags);
@@ -174,6 +199,12 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
u8 count;
int ret = 0;
+ /* Newer devices have moved the revision information back into a
+ * standard location in MII_PHYS_ID[23]
+ */
+ if (rev == 0)
+ rev = phydev->phy_id & ~phydev->drv->phy_id_mask;
+
pr_info_once("%s: %s PHY revision: 0x%02x, patch: %d\n",
phydev_name(phydev), phydev->drv->name, rev, patch);
@@ -197,6 +228,9 @@ static int bcm7xxx_28nm_config_init(struct phy_device *phydev)
case 0x10:
ret = bcm7xxx_28nm_e0_plus_afe_config_init(phydev);
break;
+ case 0x01:
+ ret = bcm7xxx_28nm_a0_patch_afe_config_init(phydev);
+ break;
default:
break;
}
@@ -416,6 +450,7 @@ static int bcm7xxx_28nm_probe(struct phy_device *phydev)
static struct phy_driver bcm7xxx_driver[] = {
BCM7XXX_28NM_GPHY(PHY_ID_BCM7250, "Broadcom BCM7250"),
+ BCM7XXX_28NM_GPHY(PHY_ID_BCM7278, "Broadcom BCM7278"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7364, "Broadcom BCM7364"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7366, "Broadcom BCM7366"),
BCM7XXX_28NM_GPHY(PHY_ID_BCM7439, "Broadcom BCM7439"),
@@ -430,6 +465,7 @@ static struct phy_driver bcm7xxx_driver[] = {
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
{ PHY_ID_BCM7250, 0xfffffff0, },
+ { PHY_ID_BCM7278, 0xfffffff0, },
{ PHY_ID_BCM7364, 0xfffffff0, },
{ PHY_ID_BCM7366, 0xfffffff0, },
{ PHY_ID_BCM7346, 0xfffffff0, },
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 4223e35..794b9ec 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -30,6 +30,22 @@ MODULE_DESCRIPTION("Broadcom PHY driver");
MODULE_AUTHOR("Maciej W. Rozycki");
MODULE_LICENSE("GPL");
+static int bcm54210e_config_init(struct phy_device *phydev)
+{
+ int val;
+
+ val = bcm54xx_auxctl_read(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
+ val &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
+ val |= MII_BCM54XX_AUXCTL_MISC_WREN;
+ bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC, val);
+
+ val = bcm_phy_read_shadow(phydev, BCM54810_SHD_CLK_CTL);
+ val &= ~BCM54810_SHD_CLK_CTL_GTXCLK_EN;
+ bcm_phy_write_shadow(phydev, BCM54810_SHD_CLK_CTL, val);
+
+ return 0;
+}
+
static int bcm54810_config(struct phy_device *phydev)
{
int rc, val;
@@ -230,7 +246,11 @@ static int bcm54xx_config_init(struct phy_device *phydev)
(phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
bcm54xx_adjust_rxrefclk(phydev);
- if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
+ if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54210E) {
+ err = bcm54210e_config_init(phydev);
+ if (err)
+ return err;
+ } else if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM54810) {
err = bcm54810_config(phydev);
if (err)
return err;
@@ -395,12 +415,10 @@ static int bcm54612e_config_aneg(struct phy_device *phydev)
(phydev->interface != PHY_INTERFACE_MODE_RGMII_RXID)) {
u16 reg;
- /* Errata: reads require filling in the write selector field */
- bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
- MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC);
- reg = phy_read(phydev, MII_BCM54XX_AUX_CTL);
+ reg = bcm54xx_auxctl_read(phydev,
+ MII_BCM54XX_AUXCTL_SHDWSEL_MISC);
/* Disable RXD to RXC delay (default set) */
- reg &= ~MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW;
+ reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN;
/* Clear shadow selector field */
reg &= ~MII_BCM54XX_AUXCTL_SHDWSEL_MASK;
bcm54xx_auxctl_write(phydev, MII_BCM54XX_AUXCTL_SHDWSEL_MISC,
@@ -544,6 +562,17 @@ static struct phy_driver broadcom_drivers[] = {
.ack_interrupt = bcm_phy_ack_intr,
.config_intr = bcm_phy_config_intr,
}, {
+ .phy_id = PHY_ID_BCM54210E,
+ .phy_id_mask = 0xfffffff0,
+ .name = "Broadcom BCM54210E",
+ .features = PHY_GBIT_FEATURES,
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = bcm54xx_config_init,
+ .config_aneg = genphy_config_aneg,
+ .read_status = genphy_read_status,
+ .ack_interrupt = bcm_phy_ack_intr,
+ .config_intr = bcm_phy_config_intr,
+}, {
.phy_id = PHY_ID_BCM5461,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM5461",
@@ -682,6 +711,7 @@ module_phy_driver(broadcom_drivers);
static struct mdio_device_id __maybe_unused broadcom_tbl[] = {
{ PHY_ID_BCM5411, 0xfffffff0 },
{ PHY_ID_BCM5421, 0xfffffff0 },
+ { PHY_ID_BCM54210E, 0xfffffff0 },
{ PHY_ID_BCM5461, 0xfffffff0 },
{ PHY_ID_BCM54612E, 0xfffffff0 },
{ PHY_ID_BCM54616S, 0xfffffff0 },
diff --git a/drivers/net/phy/dp83848.c b/drivers/net/phy/dp83848.c
index 800b39f..a10d0e7 100644
--- a/drivers/net/phy/dp83848.c
+++ b/drivers/net/phy/dp83848.c
@@ -17,6 +17,7 @@
#include <linux/phy.h>
#define TI_DP83848C_PHY_ID 0x20005ca0
+#define TI_DP83620_PHY_ID 0x20005ce0
#define NS_DP83848C_PHY_ID 0x20005c90
#define TLK10X_PHY_ID 0x2000a210
#define TI_DP83822_PHY_ID 0x2000a240
@@ -77,6 +78,7 @@ static int dp83848_config_intr(struct phy_device *phydev)
static struct mdio_device_id __maybe_unused dp83848_tbl[] = {
{ TI_DP83848C_PHY_ID, 0xfffffff0 },
{ NS_DP83848C_PHY_ID, 0xfffffff0 },
+ { TI_DP83620_PHY_ID, 0xfffffff0 },
{ TLK10X_PHY_ID, 0xfffffff0 },
{ TI_DP83822_PHY_ID, 0xfffffff0 },
{ }
@@ -106,6 +108,7 @@ MODULE_DEVICE_TABLE(mdio, dp83848_tbl);
static struct phy_driver dp83848_driver[] = {
DP83848_PHY_DRIVER(TI_DP83848C_PHY_ID, "TI DP83848C 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(NS_DP83848C_PHY_ID, "NS DP83848C 10/100 Mbps PHY"),
+ DP83848_PHY_DRIVER(TI_DP83620_PHY_ID, "TI DP83620 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TLK10X_PHY_ID, "TI TLK10X 10/100 Mbps PHY"),
DP83848_PHY_DRIVER(TI_DP83822_PHY_ID, "TI DP83822 10/100 Mbps PHY"),
};
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index e84ae08..ca1b462 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -132,12 +132,16 @@ static int dp83867_of_init(struct phy_device *phydev)
ret = of_property_read_u32(of_node, "ti,rx-internal-delay",
&dp83867->rx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_RXID))
return ret;
ret = of_property_read_u32(of_node, "ti,tx-internal-delay",
&dp83867->tx_id_delay);
- if (ret)
+ if (ret &&
+ (phydev->interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ phydev->interface == PHY_INTERFACE_MODE_RGMII_TXID))
return ret;
return of_property_read_u32(of_node, "ti,fifo-depth",
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e269262..a3e3733 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -17,8 +17,10 @@
*/
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/unistd.h>
+#include <linux/hwmon.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/delay.h>
@@ -90,6 +92,17 @@
#define MII_88E1121_PHY_MSCR_TX_DELAY BIT(4)
#define MII_88E1121_PHY_MSCR_DELAY_MASK (~(0x3 << 4))
+#define MII_88E1121_MISC_TEST 0x1a
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK 0x1f00
+#define MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT 8
+#define MII_88E1510_MISC_TEST_TEMP_IRQ_EN BIT(7)
+#define MII_88E1510_MISC_TEST_TEMP_IRQ BIT(6)
+#define MII_88E1121_MISC_TEST_TEMP_SENSOR_EN BIT(5)
+#define MII_88E1121_MISC_TEST_TEMP_MASK 0x1f
+
+#define MII_88E1510_TEMP_SENSOR 0x1b
+#define MII_88E1510_TEMP_SENSOR_MASK 0xff
+
#define MII_88E1318S_PHY_MSCR1_REG 16
#define MII_88E1318S_PHY_MSCR1_PAD_ODD BIT(6)
@@ -172,6 +185,8 @@ static struct marvell_hw_stat marvell_hw_stats[] = {
struct marvell_priv {
u64 stats[ARRAY_SIZE(marvell_hw_stats)];
+ char *hwmon_name;
+ struct device *hwmon_dev;
};
static int marvell_ack_interrupt(struct phy_device *phydev)
@@ -1192,7 +1207,8 @@ static int marvell_read_status(struct phy_device *phydev)
int err;
/* Check the fiber mode first */
- if (phydev->supported & SUPPORTED_FIBRE) {
+ if (phydev->supported & SUPPORTED_FIBRE &&
+ phydev->interface != PHY_INTERFACE_MODE_SGMII) {
err = phy_write(phydev, MII_MARVELL_PHY_PAGE, MII_M1111_FIBER);
if (err < 0)
goto error;
@@ -1467,6 +1483,371 @@ static void marvell_get_stats(struct phy_device *phydev,
data[i] = marvell_get_stat(phydev, i);
}
+#ifdef CONFIG_HWMON
+static int m88e1121_get_temp(struct phy_device *phydev, long *temp)
+{
+ int ret;
+ int val;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ /* Enable temperature sensor */
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret | MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ if (ret < 0)
+ goto error;
+
+ /* Wait for temperature to stabilize */
+ usleep_range(10000, 12000);
+
+ val = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (val < 0) {
+ ret = val;
+ goto error;
+ }
+
+ /* Disable temperature sensor */
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ ret & ~MII_88E1121_MISC_TEST_TEMP_SENSOR_EN);
+ if (ret < 0)
+ goto error;
+
+ *temp = ((val & MII_88E1121_MISC_TEST_TEMP_MASK) - 5) * 5000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int m88e1121_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e1121_get_temp(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static umode_t m88e1121_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ return 0444;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e1121_hwmon_chip_config[] = {
+ HWMON_C_REGISTER_TZ,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_chip = {
+ .type = hwmon_chip,
+ .config = m88e1121_hwmon_chip_config,
+};
+
+static u32 m88e1121_hwmon_temp_config[] = {
+ HWMON_T_INPUT,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1121_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e1121_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1121_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e1121_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e1121_hwmon_hwmon_ops = {
+ .is_visible = m88e1121_hwmon_is_visible,
+ .read = m88e1121_hwmon_read,
+};
+
+static const struct hwmon_chip_info m88e1121_hwmon_chip_info = {
+ .ops = &m88e1121_hwmon_hwmon_ops,
+ .info = m88e1121_hwmon_info,
+};
+
+static int m88e1510_get_temp(struct phy_device *phydev, long *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1510_TEMP_SENSOR);
+ if (ret < 0)
+ goto error;
+
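+ /* Sensor value is degrees C offset by 25; convert to millidegrees */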
+ *temp = ((ret & MII_88E1510_TEMP_SENSOR_MASK) - 25) * 1000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+int m88e1510_get_temp_critical(struct phy_device *phydev, long *temp)
+{
+ int ret;
+
+ *temp = 0;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
+ *temp = (((ret & MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) >>
+ MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT) * 5) - 25;
+ /* convert to mC */
+ *temp *= 1000;
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+int m88e1510_set_temp_critical(struct phy_device *phydev, long temp)
+{
+ int ret;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+
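+ /* The threshold is stored in 5 degree C steps with a -25 degree C
+  * offset; round to the nearest step and clamp to the 5-bit field.
+  */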
+ temp = temp / 1000;
+ temp = clamp_val(DIV_ROUND_CLOSEST(temp, 5) + 5, 0, 0x1f);
+ ret = phy_write(phydev, MII_88E1121_MISC_TEST,
+ (ret & ~MII_88E1510_MISC_TEST_TEMP_THRESHOLD_MASK) |
+ (temp << MII_88E1510_MISC_TEST_TEMP_THRESHOLD_SHIFT));
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+int m88e1510_get_temp_alarm(struct phy_device *phydev, long *alarm)
+{
+ int ret;
+
+ *alarm = false;
+
+ mutex_lock(&phydev->lock);
+
+ ret = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x6);
+ if (ret < 0)
+ goto error;
+
+ ret = phy_read(phydev, MII_88E1121_MISC_TEST);
+ if (ret < 0)
+ goto error;
+ *alarm = !!(ret & MII_88E1510_MISC_TEST_TEMP_IRQ);
+
+error:
+ phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, 0x0);
+ mutex_unlock(&phydev->lock);
+
+ return ret;
+}
+
+static int m88e1510_hwmon_read(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long *temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ err = m88e1510_get_temp(phydev, temp);
+ break;
+ case hwmon_temp_crit:
+ err = m88e1510_get_temp_critical(phydev, temp);
+ break;
+ case hwmon_temp_max_alarm:
+ err = m88e1510_get_temp_alarm(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+
+ return err;
+}
+
+static int m88e1510_hwmon_write(struct device *dev,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel, long temp)
+{
+ struct phy_device *phydev = dev_get_drvdata(dev);
+ int err;
+
+ switch (attr) {
+ case hwmon_temp_crit:
+ err = m88e1510_set_temp_critical(phydev, temp);
+ break;
+ default:
+ return -EOPNOTSUPP;
+ }
+ return err;
+}
+
+static umode_t m88e1510_hwmon_is_visible(const void *data,
+ enum hwmon_sensor_types type,
+ u32 attr, int channel)
+{
+ if (type != hwmon_temp)
+ return 0;
+
+ switch (attr) {
+ case hwmon_temp_input:
+ case hwmon_temp_max_alarm:
+ return 0444;
+ case hwmon_temp_crit:
+ return 0644;
+ default:
+ return 0;
+ }
+}
+
+static u32 m88e1510_hwmon_temp_config[] = {
+ HWMON_T_INPUT | HWMON_T_CRIT | HWMON_T_MAX_ALARM,
+ 0
+};
+
+static const struct hwmon_channel_info m88e1510_hwmon_temp = {
+ .type = hwmon_temp,
+ .config = m88e1510_hwmon_temp_config,
+};
+
+static const struct hwmon_channel_info *m88e1510_hwmon_info[] = {
+ &m88e1121_hwmon_chip,
+ &m88e1510_hwmon_temp,
+ NULL
+};
+
+static const struct hwmon_ops m88e1510_hwmon_hwmon_ops = {
+ .is_visible = m88e1510_hwmon_is_visible,
+ .read = m88e1510_hwmon_read,
+ .write = m88e1510_hwmon_write,
+};
+
+static const struct hwmon_chip_info m88e1510_hwmon_chip_info = {
+ .ops = &m88e1510_hwmon_hwmon_ops,
+ .info = m88e1510_hwmon_info,
+};
+
+static int marvell_hwmon_name(struct phy_device *phydev)
+{
+ struct marvell_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ const char *devname = dev_name(dev);
+ size_t len = strlen(devname);
+ int i, j;
+
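+ /* hwmon device names may contain only alphanumerics, so strip any
+  * other characters from the MDIO device name.
+  */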
+ priv->hwmon_name = devm_kzalloc(dev, len + 1, GFP_KERNEL);
+ if (!priv->hwmon_name)
+ return -ENOMEM;
+
+ for (i = j = 0; i < len && devname[i]; i++) {
+ if (isalnum(devname[i]))
+ priv->hwmon_name[j++] = devname[i];
+ }
+
+ return 0;
+}
+
+static int marvell_hwmon_probe(struct phy_device *phydev,
+ const struct hwmon_chip_info *chip)
+{
+ struct marvell_priv *priv = phydev->priv;
+ struct device *dev = &phydev->mdio.dev;
+ int err;
+
+ err = marvell_hwmon_name(phydev);
+ if (err)
+ return err;
+
+ priv->hwmon_dev = devm_hwmon_device_register_with_info(
+ dev, priv->hwmon_name, phydev, chip, NULL);
+
+ return PTR_ERR_OR_ZERO(priv->hwmon_dev);
+}
+
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e1121_hwmon_chip_info);
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+ return marvell_hwmon_probe(phydev, &m88e1510_hwmon_chip_info);
+}
+#else
+static int m88e1121_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+
+static int m88e1510_hwmon_probe(struct phy_device *phydev)
+{
+ return 0;
+}
+#endif
+
static int marvell_probe(struct phy_device *phydev)
{
struct marvell_priv *priv;
@@ -1480,14 +1861,47 @@ static int marvell_probe(struct phy_device *phydev)
return 0;
}
+static int m88e1121_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e1121_hwmon_probe(phydev);
+}
+
+static int m88e1510_probe(struct phy_device *phydev)
+{
+ int err;
+
+ err = marvell_probe(phydev);
+ if (err)
+ return err;
+
+ return m88e1510_hwmon_probe(phydev);
+}
+
+static void marvell_remove(struct phy_device *phydev)
+{
+#ifdef CONFIG_HWMON
+
+ struct marvell_priv *priv = phydev->priv;
+
+ if (priv && priv->hwmon_dev)
+ hwmon_device_unregister(priv->hwmon_dev);
+#endif
+}
+
static struct phy_driver marvell_drivers[] = {
{
.phy_id = MARVELL_PHY_ID_88E1101,
.phy_id_mask = MARVELL_PHY_ID_MASK,
.name = "Marvell 88E1101",
.features = PHY_GBIT_FEATURES,
- .probe = marvell_probe,
.flags = PHY_HAS_INTERRUPT,
+ .probe = marvell_probe,
.config_init = &marvell_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
@@ -1559,7 +1973,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1121R",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = &m88e1121_probe,
+ .remove = &marvell_remove,
.config_init = &m88e1121_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
@@ -1671,13 +2086,16 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1510",
.features = PHY_GBIT_FEATURES | SUPPORTED_FIBRE,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = &m88e1510_probe,
+ .remove = &marvell_remove,
.config_init = &m88e1510_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
.config_intr = &marvell_config_intr,
.did_interrupt = &m88e1121_did_interrupt,
+ .get_wol = &m88e1318_get_wol,
+ .set_wol = &m88e1318_set_wol,
.resume = &marvell_resume,
.suspend = &marvell_suspend,
.get_sset_count = marvell_get_sset_count,
@@ -1690,7 +2108,8 @@ static struct phy_driver marvell_drivers[] = {
.name = "Marvell 88E1540",
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .probe = marvell_probe,
+ .probe = m88e1510_probe,
+ .remove = &marvell_remove,
.config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 27ab630..7faa79b 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -32,8 +32,7 @@
struct mdio_gpio_info {
struct mdiobb_ctrl ctrl;
- int mdc, mdio, mdo;
- int mdc_active_low, mdio_active_low, mdo_active_low;
+ struct gpio_desc *mdc, *mdio, *mdo;
};
static void *mdio_gpio_of_get_data(struct platform_device *pdev)
@@ -80,16 +79,14 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpio_set_value_cansleep(bitbang->mdo,
- 1 ^ bitbang->mdo_active_low);
+ gpiod_set_value(bitbang->mdo, 1);
return;
}
if (dir)
- gpio_direction_output(bitbang->mdio,
- 1 ^ bitbang->mdio_active_low);
+ gpiod_direction_output(bitbang->mdio, 1);
else
- gpio_direction_input(bitbang->mdio);
+ gpiod_direction_input(bitbang->mdio);
}
static int mdio_get(struct mdiobb_ctrl *ctrl)
@@ -97,8 +94,7 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value_cansleep(bitbang->mdio) ^
- bitbang->mdio_active_low;
+ return gpiod_get_value(bitbang->mdio);
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -107,11 +103,9 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpio_set_value_cansleep(bitbang->mdo,
- what ^ bitbang->mdo_active_low);
+ gpiod_set_value(bitbang->mdo, what);
else
- gpio_set_value_cansleep(bitbang->mdio,
- what ^ bitbang->mdio_active_low);
+ gpiod_set_value(bitbang->mdio, what);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -119,7 +113,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
+ gpiod_set_value(bitbang->mdc, what);
}
static struct mdiobb_ops mdio_gpio_ops = {
@@ -137,6 +131,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
struct mii_bus *new_bus;
struct mdio_gpio_info *bitbang;
int i;
+ int mdc, mdio, mdo;
+ unsigned long mdc_flags = GPIOF_OUT_INIT_LOW;
+ unsigned long mdio_flags = GPIOF_DIR_IN;
+ unsigned long mdo_flags = GPIOF_OUT_INIT_HIGH;
bitbang = devm_kzalloc(dev, sizeof(*bitbang), GFP_KERNEL);
if (!bitbang)
@@ -144,12 +142,20 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
bitbang->ctrl.ops = &mdio_gpio_ops;
bitbang->ctrl.reset = pdata->reset;
- bitbang->mdc = pdata->mdc;
- bitbang->mdc_active_low = pdata->mdc_active_low;
- bitbang->mdio = pdata->mdio;
- bitbang->mdio_active_low = pdata->mdio_active_low;
- bitbang->mdo = pdata->mdo;
- bitbang->mdo_active_low = pdata->mdo_active_low;
+ mdc = pdata->mdc;
+ bitbang->mdc = gpio_to_desc(mdc);
+ if (pdata->mdc_active_low)
+ mdc_flags = GPIOF_OUT_INIT_HIGH | GPIOF_ACTIVE_LOW;
+ mdio = pdata->mdio;
+ bitbang->mdio = gpio_to_desc(mdio);
+ if (pdata->mdio_active_low)
+ mdio_flags |= GPIOF_ACTIVE_LOW;
+ mdo = pdata->mdo;
+ if (mdo) {
+ bitbang->mdo = gpio_to_desc(mdo);
+ if (pdata->mdo_active_low)
+ mdo_flags = GPIOF_OUT_INIT_LOW | GPIOF_ACTIVE_LOW;
+ }
new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
if (!new_bus)
@@ -174,20 +180,14 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
else
strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
- if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
+ if (devm_gpio_request_one(dev, mdc, mdc_flags, "mdc"))
goto out_free_bus;
- if (devm_gpio_request(dev, bitbang->mdio, "mdio"))
+ if (devm_gpio_request_one(dev, mdio, mdio_flags, "mdio"))
goto out_free_bus;
- if (bitbang->mdo) {
- if (devm_gpio_request(dev, bitbang->mdo, "mdo"))
- goto out_free_bus;
- gpio_direction_output(bitbang->mdo, 1);
- gpio_direction_input(bitbang->mdio);
- }
-
- gpio_direction_output(bitbang->mdc, 0);
+ if (mdo && devm_gpio_request_one(dev, mdo, mdo_flags, "mdo"))
+ goto out_free_bus;
dev_set_drvdata(dev, new_bus);
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 9a77289..e55809c 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -1008,6 +1008,20 @@ static struct phy_driver ksphy_driver[] = {
.get_stats = kszphy_get_stats,
.suspend = genphy_suspend,
.resume = genphy_resume,
+}, {
+ .phy_id = PHY_ID_KSZ8795,
+ .phy_id_mask = MICREL_PHY_ID_MASK,
+ .name = "Micrel KSZ8795",
+ .features = (SUPPORTED_Pause | SUPPORTED_Asym_Pause),
+ .flags = PHY_HAS_MAGICANEG | PHY_HAS_INTERRUPT,
+ .config_init = kszphy_config_init,
+ .config_aneg = ksz8873mll_config_aneg,
+ .read_status = ksz8873mll_read_status,
+ .get_sset_count = kszphy_get_sset_count,
+ .get_strings = kszphy_get_strings,
+ .get_stats = kszphy_get_stats,
+ .suspend = genphy_suspend,
+ .resume = genphy_resume,
} };
module_phy_driver(ksphy_driver);
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 48da6e9..7cc1b7d 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -29,6 +29,7 @@
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mdio.h>
@@ -649,14 +650,18 @@ void phy_start_machine(struct phy_device *phydev)
* phy_trigger_machine - trigger the state machine to run
*
* @phydev: the phy_device struct
+ * @sync: indicate whether we should wait for the workqueue cancelation
*
* Description: There has been a change in state which requires that the
* state machine runs.
*/
-static void phy_trigger_machine(struct phy_device *phydev)
+static void phy_trigger_machine(struct phy_device *phydev, bool sync)
{
- cancel_delayed_work_sync(&phydev->state_queue);
+ if (sync)
+ cancel_delayed_work_sync(&phydev->state_queue);
+ else
+ cancel_delayed_work(&phydev->state_queue);
queue_delayed_work(system_power_efficient_wq, &phydev->state_queue, 0);
}
@@ -693,7 +698,7 @@ static void phy_error(struct phy_device *phydev)
phydev->state = PHY_HALTED;
mutex_unlock(&phydev->lock);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, false);
}
/**
@@ -840,7 +845,7 @@ void phy_change(struct phy_device *phydev)
}
/* reschedule state queue work to run as soon as possible */
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
return;
ignore:
@@ -942,7 +947,7 @@ void phy_start(struct phy_device *phydev)
if (do_resume)
phy_resume(phydev);
- phy_trigger_machine(phydev);
+ phy_trigger_machine(phydev, true);
}
EXPORT_SYMBOL(phy_start);
diff --git a/drivers/net/phy/phy_led_triggers.c b/drivers/net/phy/phy_led_triggers.c
index fa62bdf..94ca42e 100644
--- a/drivers/net/phy/phy_led_triggers.c
+++ b/drivers/net/phy/phy_led_triggers.c
@@ -12,6 +12,7 @@
*/
#include <linux/leds.h>
#include <linux/phy.h>
+#include <linux/phy_led_triggers.h>
#include <linux/netdevice.h>
static struct phy_led_trigger *phy_speed_to_led_trigger(struct phy_device *phy,
@@ -102,8 +103,10 @@ int phy_led_triggers_register(struct phy_device *phy)
sizeof(struct phy_led_trigger) *
phy->phy_num_led_triggers,
GFP_KERNEL);
- if (!phy->phy_led_triggers)
- return -ENOMEM;
+ if (!phy->phy_led_triggers) {
+ err = -ENOMEM;
+ goto out_clear;
+ }
for (i = 0; i < phy->phy_num_led_triggers; i++) {
err = phy_led_trigger_register(phy, &phy->phy_led_triggers[i],
@@ -120,6 +123,8 @@ out_unreg:
while (i--)
phy_led_trigger_unregister(&phy->phy_led_triggers[i]);
devm_kfree(&phy->mdio.dev, phy->phy_led_triggers);
+out_clear:
+ phy->phy_num_led_triggers = 0;
return err;
}
EXPORT_SYMBOL_GPL(phy_led_triggers_register);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 8c1d3bd..8a7d6b9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -218,6 +218,7 @@ struct tun_struct {
struct list_head disabled;
void *security;
u32 flow_count;
+ u32 rx_batched;
struct tun_pcpu_stats __percpu *pcpu_stats;
};
@@ -522,6 +523,7 @@ static void tun_queue_purge(struct tun_file *tfile)
while ((skb = skb_array_consume(&tfile->tx_array)) != NULL)
kfree_skb(skb);
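+ /* Also drop any packets still sitting in the rx batching queue */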
+ skb_queue_purge(&tfile->sk.sk_write_queue);
skb_queue_purge(&tfile->sk.sk_error_queue);
}
@@ -1139,10 +1141,46 @@ static struct sk_buff *tun_alloc_skb(struct tun_file *tfile,
return skb;
}
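+/* Batch received packets on sk_write_queue and flush them to the stack
+ * once rx_batched frames have accumulated or the sender stops signalling
+ * MSG_MORE; deliver immediately when batching is disabled.
+ */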
+static void tun_rx_batched(struct tun_struct *tun, struct tun_file *tfile,
+ struct sk_buff *skb, int more)
+{
+ struct sk_buff_head *queue = &tfile->sk.sk_write_queue;
+ struct sk_buff_head process_queue;
+ u32 rx_batched = tun->rx_batched;
+ bool rcv = false;
+
+ if (!rx_batched || (!more && skb_queue_empty(queue))) {
+ local_bh_disable();
+ netif_receive_skb(skb);
+ local_bh_enable();
+ return;
+ }
+
+ spin_lock(&queue->lock);
+ if (!more || skb_queue_len(queue) == rx_batched) {
+ __skb_queue_head_init(&process_queue);
+ skb_queue_splice_tail_init(queue, &process_queue);
+ rcv = true;
+ } else {
+ __skb_queue_tail(queue, skb);
+ }
+ spin_unlock(&queue->lock);
+
+ if (rcv) {
+ struct sk_buff *nskb;
+
+ local_bh_disable();
+ while ((nskb = __skb_dequeue(&process_queue)))
+ netif_receive_skb(nskb);
+ netif_receive_skb(skb);
+ local_bh_enable();
+ }
+}
+
/* Get packet from user space buffer */
static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
void *msg_control, struct iov_iter *from,
- int noblock)
+ int noblock, bool more)
{
struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
struct sk_buff *skb;
@@ -1283,9 +1321,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
rxhash = skb_get_hash(skb);
#ifndef CONFIG_4KSTACKS
- local_bh_disable();
- netif_receive_skb(skb);
- local_bh_enable();
+ tun_rx_batched(tun, tfile, skb, more);
#else
netif_rx_ni(skb);
#endif
@@ -1311,7 +1347,8 @@ static ssize_t tun_chr_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!tun)
return -EBADFD;
- result = tun_get_user(tun, tfile, NULL, from, file->f_flags & O_NONBLOCK);
+ result = tun_get_user(tun, tfile, NULL, from,
+ file->f_flags & O_NONBLOCK, false);
tun_put(tun);
return result;
@@ -1359,7 +1396,7 @@ static ssize_t tun_put_user(struct tun_struct *tun,
return -EINVAL;
if (virtio_net_hdr_from_skb(skb, &gso,
- tun_is_little_endian(tun))) {
+ tun_is_little_endian(tun), true)) {
struct skb_shared_info *sinfo = skb_shinfo(skb);
pr_err("unexpected GSO type: "
"0x%x, gso_size %d, hdr_len %d\n",
@@ -1569,7 +1606,8 @@ static int tun_sendmsg(struct socket *sock, struct msghdr *m, size_t total_len)
return -EBADFD;
ret = tun_get_user(tun, tfile, m->msg_control, &m->msg_iter,
- m->msg_flags & MSG_DONTWAIT);
+ m->msg_flags & MSG_DONTWAIT,
+ m->msg_flags & MSG_MORE);
tun_put(tun);
return ret;
}
@@ -1770,6 +1808,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
tun->align = NET_SKB_PAD;
tun->filter_attached = false;
tun->sndbuf = tfile->socket.sk->sk_sndbuf;
+ tun->rx_batched = 0;
tun->pcpu_stats = netdev_alloc_pcpu_stats(struct tun_pcpu_stats);
if (!tun->pcpu_stats) {
@@ -2438,6 +2477,29 @@ static void tun_set_msglevel(struct net_device *dev, u32 value)
#endif
}
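+/* rx_batched is exposed through the standard ethtool coalescing API as
+ * rx-frames; e.g. "ethtool -C tun0 rx-frames 32" (device name here is
+ * just an example) enables batching of up to 32 frames, and
+ * "rx-frames 0" disables it.
+ */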
+static int tun_get_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
+ ec->rx_max_coalesced_frames = tun->rx_batched;
+
+ return 0;
+}
+
+static int tun_set_coalesce(struct net_device *dev,
+ struct ethtool_coalesce *ec)
+{
+ struct tun_struct *tun = netdev_priv(dev);
+
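+ /* Bound the batch size by NAPI_POLL_WEIGHT so a single flush stays
+  * reasonably short.
+  */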
+ if (ec->rx_max_coalesced_frames > NAPI_POLL_WEIGHT)
+ tun->rx_batched = NAPI_POLL_WEIGHT;
+ else
+ tun->rx_batched = ec->rx_max_coalesced_frames;
+
+ return 0;
+}
+
static const struct ethtool_ops tun_ethtool_ops = {
.get_settings = tun_get_settings,
.get_drvinfo = tun_get_drvinfo,
@@ -2445,6 +2507,8 @@ static const struct ethtool_ops tun_ethtool_ops = {
.set_msglevel = tun_set_msglevel,
.get_link = ethtool_op_get_link,
.get_ts_info = ethtool_op_get_ts_info,
+ .get_coalesce = tun_get_coalesce,
+ .set_coalesce = tun_set_coalesce,
};
static int tun_queue_resize(struct tun_struct *tun)
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index fe7b288..f5552aa 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -466,7 +466,7 @@ static int usbnet_cdc_zte_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
* connected. This causes the link state to be incorrect. Work around this by
* always setting the state to off, then on.
*/
-void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
+static void usbnet_cdc_zte_status(struct usbnet *dev, struct urb *urb)
{
struct usb_cdc_notification *event;
@@ -531,6 +531,7 @@ static const struct driver_info wwan_info = {
#define SAMSUNG_VENDOR_ID 0x04e8
#define LENOVO_VENDOR_ID 0x17ef
#define NVIDIA_VENDOR_ID 0x0955
+#define HP_VENDOR_ID 0x03f0
static const struct usb_device_id products[] = {
/* BLACKLIST !!
@@ -677,6 +678,13 @@ static const struct usb_device_id products[] = {
.driver_info = 0,
},
+/* HP lt2523 (Novatel E371) - handled by qmi_wwan */
+{
+ USB_DEVICE_AND_INTERFACE_INFO(HP_VENDOR_ID, 0x421d, USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+ .driver_info = 0,
+},
+
/* AnyDATA ADU960S - handled by qmi_wwan */
{
USB_DEVICE_AND_INTERFACE_INFO(0x16d5, 0x650a, USB_CLASS_COMM,
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 6fe1cdb..24d5272 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -654,6 +654,13 @@ static const struct usb_device_id products[] = {
USB_CDC_PROTO_NONE),
.driver_info = (unsigned long)&qmi_wwan_info,
},
+ { /* HP lt2523 (Novatel E371) */
+ USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x421d,
+ USB_CLASS_COMM,
+ USB_CDC_SUBCLASS_ETHERNET,
+ USB_CDC_PROTO_NONE),
+ .driver_info = (unsigned long)&qmi_wwan_info,
+ },
{ /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7dc6122..986243c 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -32,7 +32,7 @@
#define NETNEXT_VERSION "08"
/* Information for net */
-#define NET_VERSION "6"
+#define NET_VERSION "8"
#define DRIVER_VERSION "v1." NETNEXT_VERSION "." NET_VERSION
#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
@@ -1730,7 +1730,7 @@ static u8 r8152_rx_csum(struct r8152 *tp, struct rx_desc *rx_desc)
u8 checksum = CHECKSUM_NONE;
u32 opts2, opts3;
- if (tp->version == RTL_VER_01 || tp->version == RTL_VER_02)
+ if (!(tp->netdev->features & NETIF_F_RXCSUM))
goto return_result;
opts2 = le32_to_cpu(rx_desc->opts2);
@@ -1936,6 +1936,9 @@ static int r8152_poll(struct napi_struct *napi, int budget)
napi_complete(napi);
if (!list_empty(&tp->rx_done))
napi_schedule(napi);
+ else if (!skb_queue_empty(&tp->tx_queue) &&
+ !list_empty(&tp->tx_free))
+ napi_schedule(napi);
}
return work_done;
@@ -3155,10 +3158,13 @@ static void set_carrier(struct r8152 *tp)
if (!netif_carrier_ok(netdev)) {
tp->rtl_ops.enable(tp);
set_bit(RTL8152_SET_RX_MODE, &tp->flags);
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
netif_carrier_on(netdev);
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ netif_info(tp, link, netdev, "carrier on\n");
}
} else {
if (netif_carrier_ok(netdev)) {
@@ -3166,6 +3172,7 @@ static void set_carrier(struct r8152 *tp)
napi_disable(&tp->napi);
tp->rtl_ops.disable(tp);
napi_enable(&tp->napi);
+ netif_info(tp, link, netdev, "carrier off\n");
}
}
}
@@ -3515,12 +3522,12 @@ static int rtl8152_pre_reset(struct usb_interface *intf)
if (!netif_running(netdev))
return 0;
+ netif_stop_queue(netdev);
napi_disable(&tp->napi);
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
cancel_delayed_work_sync(&tp->schedule);
if (netif_carrier_ok(netdev)) {
- netif_stop_queue(netdev);
mutex_lock(&tp->control);
tp->rtl_ops.disable(tp);
mutex_unlock(&tp->control);
@@ -3545,12 +3552,17 @@ static int rtl8152_post_reset(struct usb_interface *intf)
if (netif_carrier_ok(netdev)) {
mutex_lock(&tp->control);
tp->rtl_ops.enable(tp);
+ rtl_start_rx(tp);
rtl8152_set_rx_mode(netdev);
mutex_unlock(&tp->control);
- netif_wake_queue(netdev);
}
napi_enable(&tp->napi);
+ netif_wake_queue(netdev);
+ usb_submit_urb(tp->intr_urb, GFP_KERNEL);
+
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
return 0;
}
@@ -3572,43 +3584,98 @@ static bool delay_autosuspend(struct r8152 *tp)
*/
if (!sw_linking && tp->rtl_ops.in_nway(tp))
return true;
+ else if (!skb_queue_empty(&tp->tx_queue))
+ return true;
else
return false;
}
-static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+static int rtl8152_runtime_suspend(struct r8152 *tp)
{
- struct r8152 *tp = usb_get_intfdata(intf);
struct net_device *netdev = tp->netdev;
int ret = 0;
- mutex_lock(&tp->control);
+ set_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+
+ if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
+ u32 rcr = 0;
- if (PMSG_IS_AUTO(message)) {
- if (netif_running(netdev) && delay_autosuspend(tp)) {
+ if (delay_autosuspend(tp)) {
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
ret = -EBUSY;
goto out1;
}
- set_bit(SELECTIVE_SUSPEND, &tp->flags);
- } else {
- netif_device_detach(netdev);
+ if (netif_carrier_ok(netdev)) {
+ u32 ocp_data;
+
+ rcr = ocp_read_dword(tp, MCU_TYPE_PLA, PLA_RCR);
+ ocp_data = rcr & ~RCR_ACPT_ALL;
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, ocp_data);
+ rxdy_gated_en(tp, true);
+ ocp_data = ocp_read_byte(tp, MCU_TYPE_PLA,
+ PLA_OOB_CTRL);
+ if (!(ocp_data & RXFIFO_EMPTY)) {
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ ret = -EBUSY;
+ goto out1;
+ }
+ }
+
+ clear_bit(WORK_ENABLE, &tp->flags);
+ usb_kill_urb(tp->intr_urb);
+
+ tp->rtl_ops.autosuspend_en(tp, true);
+
+ if (netif_carrier_ok(netdev)) {
+ napi_disable(&tp->napi);
+ rtl_stop_rx(tp);
+ rxdy_gated_en(tp, false);
+ ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RCR, rcr);
+ napi_enable(&tp->napi);
+ }
}
+out1:
+ return ret;
+}
+
+static int rtl8152_system_suspend(struct r8152 *tp)
+{
+ struct net_device *netdev = tp->netdev;
+ int ret = 0;
+
+ netif_device_detach(netdev);
+
if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
clear_bit(WORK_ENABLE, &tp->flags);
usb_kill_urb(tp->intr_urb);
napi_disable(&tp->napi);
- if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
- rtl_stop_rx(tp);
- tp->rtl_ops.autosuspend_en(tp, true);
- } else {
- cancel_delayed_work_sync(&tp->schedule);
- tp->rtl_ops.down(tp);
- }
+ cancel_delayed_work_sync(&tp->schedule);
+ tp->rtl_ops.down(tp);
napi_enable(&tp->napi);
}
-out1:
+
+ return ret;
+}
+
+static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
+{
+ struct r8152 *tp = usb_get_intfdata(intf);
+ int ret;
+
+ mutex_lock(&tp->control);
+
+ if (PMSG_IS_AUTO(message))
+ ret = rtl8152_runtime_suspend(tp);
+ else
+ ret = rtl8152_system_suspend(tp);
+
mutex_unlock(&tp->control);
return ret;
@@ -3629,12 +3696,15 @@ static int rtl8152_resume(struct usb_interface *intf)
if (netif_running(tp->netdev) && tp->netdev->flags & IFF_UP) {
if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
tp->rtl_ops.autosuspend_en(tp, false);
- clear_bit(SELECTIVE_SUSPEND, &tp->flags);
napi_disable(&tp->napi);
set_bit(WORK_ENABLE, &tp->flags);
if (netif_carrier_ok(tp->netdev))
rtl_start_rx(tp);
napi_enable(&tp->napi);
+ clear_bit(SELECTIVE_SUSPEND, &tp->flags);
+ smp_mb__after_atomic();
+ if (!list_empty(&tp->rx_done))
+ napi_schedule(&tp->napi);
} else {
tp->rtl_ops.up(tp);
netif_carrier_off(tp->netdev);
@@ -4308,6 +4378,11 @@ static int rtl8152_probe(struct usb_interface *intf,
NETIF_F_HIGHDMA | NETIF_F_FRAGLIST |
NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+ if (tp->version == RTL_VER_01) {
+ netdev->features &= ~NETIF_F_RXCSUM;
+ netdev->hw_features &= ~NETIF_F_RXCSUM;
+ }
+
netdev->ethtool_ops = &ops;
netif_set_gso_max_size(netdev, RTL_LIMITED_TSO_SIZE);
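The suspend rework above splits one handler into runtime and system paths, and the runtime path publishes SELECTIVE_SUSPEND before re-checking for pending work, rolling back with -EBUSY when the RX FIFO turns out not to be empty. A userspace analog of that publish/verify/roll-back shape, using C11 atomics in place of set_bit() and smp_mb__after_atomic() (all names illustrative):

    /* Hedged analog of the runtime-suspend pattern: publish the flag
     * first, re-check for pending work, and roll the flag back with
     * -EBUSY when the device turns out to be busy. */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <errno.h>
    #include <stdio.h>

    static atomic_bool suspending;
    static atomic_int pending_rx;

    static int runtime_suspend(void)
    {
        atomic_store(&suspending, true);       /* like set_bit + barrier */
        if (atomic_load(&pending_rx)) {        /* like the RXFIFO_EMPTY check */
            atomic_store(&suspending, false);  /* roll back */
            return -EBUSY;
        }
        return 0;
    }

    int main(void)
    {
        atomic_store(&pending_rx, 1);
        printf("busy path: %d\n", runtime_suspend());  /* -EBUSY */
        atomic_store(&pending_rx, 0);
        printf("idle path: %d\n", runtime_suspend());  /* 0 */
        return 0;
    }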
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 37db91d..bd22cf3 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -23,6 +23,7 @@
#include <linux/virtio.h>
#include <linux/virtio_net.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>
@@ -48,8 +49,16 @@ module_param(gso, bool, 0444);
*/
DECLARE_EWMA(pkt_len, 1, 64)
+/* With mergeable buffers we align buffer address and use the low bits to
+ * encode its true size. Buffer size is up to 1 page so we need to align to
+ * square root of page size to ensure we reserve enough bits to encode the true
+ * size.
+ */
+#define MERGEABLE_BUFFER_MIN_ALIGN_SHIFT ((PAGE_SHIFT + 1) / 2)
+
/* Minimum alignment for mergeable packet buffers. */
-#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, 256)
+#define MERGEABLE_BUFFER_ALIGN max(L1_CACHE_BYTES, \
+ 1 << MERGEABLE_BUFFER_MIN_ALIGN_SHIFT)
#define VIRTNET_DRIVER_VERSION "1.0.0"
@@ -330,7 +339,7 @@ static struct sk_buff *page_to_skb(struct virtnet_info *vi,
return skb;
}
-static void virtnet_xdp_xmit(struct virtnet_info *vi,
+static bool virtnet_xdp_xmit(struct virtnet_info *vi,
struct receive_queue *rq,
struct send_queue *sq,
struct xdp_buff *xdp,
@@ -382,10 +391,12 @@ static void virtnet_xdp_xmit(struct virtnet_info *vi,
put_page(page);
} else /* small buffer */
kfree_skb(data);
- return; // On error abort to avoid unnecessary kick
+ /* On error abort to avoid unnecessary kick */
+ return false;
}
virtqueue_kick(sq->vq);
+ return true;
}
static u32 do_xdp_prog(struct virtnet_info *vi,
@@ -421,11 +432,14 @@ static u32 do_xdp_prog(struct virtnet_info *vi,
vi->xdp_queue_pairs +
smp_processor_id();
xdp.data = buf;
- virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp, data);
+ if (unlikely(!virtnet_xdp_xmit(vi, rq, &vi->sq[qp], &xdp,
+ data)))
+ trace_xdp_exception(vi->dev, xdp_prog, act);
return XDP_TX;
default:
bpf_warn_invalid_xdp_action(act);
case XDP_ABORTED:
+ trace_xdp_exception(vi->dev, xdp_prog, act);
case XDP_DROP:
return XDP_DROP;
}
@@ -1104,7 +1118,7 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
hdr = skb_vnet_hdr(skb);
if (virtio_net_hdr_from_skb(skb, &hdr->hdr,
- virtio_is_little_endian(vi->vdev)))
+ virtio_is_little_endian(vi->vdev), false))
BUG();
if (vi->mergeable_rx_bufs)
@@ -1704,6 +1718,11 @@ static int virtnet_xdp_set(struct net_device *dev, struct bpf_prog *prog)
u16 xdp_qp = 0, curr_qp;
int i, err;
+ if (prog && prog->xdp_adjust_head) {
+ netdev_warn(dev, "Does not support bpf_xdp_adjust_head()\n");
+ return -EOPNOTSUPP;
+ }
+
if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO4) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_TSO6) ||
virtio_has_feature(vi->vdev, VIRTIO_NET_F_GUEST_ECN) ||
@@ -1887,8 +1906,12 @@ static void free_receive_page_frags(struct virtnet_info *vi)
put_page(vi->rq[i].alloc_frag.page);
}
-static bool is_xdp_queue(struct virtnet_info *vi, int q)
+static bool is_xdp_raw_buffer_queue(struct virtnet_info *vi, int q)
{
+ /* For small receive mode always use kfree_skb variants */
+ if (!vi->mergeable_rx_bufs)
+ return false;
+
if (q < (vi->curr_queue_pairs - vi->xdp_queue_pairs))
return false;
else if (q < vi->curr_queue_pairs)
@@ -1905,7 +1928,7 @@ static void free_unused_bufs(struct virtnet_info *vi)
for (i = 0; i < vi->max_queue_pairs; i++) {
struct virtqueue *vq = vi->sq[i].vq;
while ((buf = virtqueue_detach_unused_buf(vq)) != NULL) {
- if (!is_xdp_queue(vi, i))
+ if (!is_xdp_raw_buffer_queue(vi, i))
dev_kfree_skb(buf);
else
put_page(virt_to_head_page(buf));
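The new MERGEABLE_BUFFER_MIN_ALIGN_SHIFT comment describes a classic pointer-tagging trick: align each buffer to 2^shift bytes so the low shift bits are guaranteed zero, then store a size token in those bits. A standalone sketch assuming a 4 KiB page, so (PAGE_SHIFT + 1) / 2 == 6:

    /* Hedged sketch of stashing a size token in the low bits of an
     * aligned pointer and recovering both later.  Constants assume a
     * 4 KiB page. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define SHIFT 6                 /* (PAGE_SHIFT + 1) / 2 for 4 KiB pages */
    #define ALIGN (1UL << SHIFT)

    int main(void)
    {
        void *buf = aligned_alloc(ALIGN, 4096);
        uintptr_t token = 33;       /* encoded size, must fit in SHIFT bits */
        uintptr_t tagged = (uintptr_t)buf | token;

        printf("ptr   %p\n", (void *)(tagged & ~(uintptr_t)(ALIGN - 1)));
        printf("token %lu\n", (unsigned long)(tagged & (ALIGN - 1)));
        free(buf);
        return 0;
    }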
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index e34b129..25bc764 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -1851,7 +1851,7 @@ vmxnet3_poll(struct napi_struct *napi, int budget)
rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
if (rxd_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxd_done);
vmxnet3_enable_all_intrs(rx_queue->adapter);
}
return rxd_done;
@@ -1882,7 +1882,7 @@ vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
if (rxd_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, rxd_done);
vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
}
return rxd_done;
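vmxnet3 is one of several drivers in this series (hd64572, fsl_ucc_hdlc, ath10k, wil6210, xen-netback, xen-netfront) converted from napi_complete() to napi_complete_done(), which feeds the actual work count into the core for busy-poll and gro_flush_timeout accounting. The common shape, sketched with stub declarations so it stands alone:

    /* Hedged shape of the recurring conversion: report the work actually
     * done, not just completion.  Types below are stubs; real drivers use
     * <linux/netdevice.h>. */
    struct napi_struct;                     /* stub */
    int budget_poll_work(void);             /* stub: packets processed */
    void napi_complete_done(struct napi_struct *n, int work_done);
    void reenable_device_interrupts(void);  /* stub for per-driver unmask */

    static int example_poll(struct napi_struct *napi, int budget)
    {
        int work_done = budget_poll_work();

        if (work_done < budget) {
            /* pass the real count, not merely "done" */
            napi_complete_done(napi, work_done);
            reenable_device_interrupts();
        }
        return work_done;
    }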
diff --git a/drivers/net/vrf.c b/drivers/net/vrf.c
index 895e3e2..264fc15 100644
--- a/drivers/net/vrf.c
+++ b/drivers/net/vrf.c
@@ -262,7 +262,9 @@ static netdev_tx_t vrf_process_v4_outbound(struct sk_buff *skb,
.flowi4_iif = LOOPBACK_IFINDEX,
.flowi4_tos = RT_TOS(ip4h->tos),
.flowi4_flags = FLOWI_FLAG_ANYSRC | FLOWI_FLAG_SKIP_NH_OIF,
+ .flowi4_proto = ip4h->protocol,
.daddr = ip4h->daddr,
+ .saddr = ip4h->saddr,
};
struct net *net = dev_net(vrf_dev);
struct rtable *rt;
@@ -1249,6 +1251,8 @@ static int vrf_newlink(struct net *src_net, struct net_device *dev,
return -EINVAL;
vrf->tb_id = nla_get_u32(data[IFLA_VRF_TABLE]);
+ if (vrf->tb_id == RT_TABLE_UNSPEC)
+ return -EINVAL;
dev->priv_flags |= IFF_L3MDEV_MASTER;
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index bb70dd5..2e48ce2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1798,7 +1798,7 @@ static int vxlan_build_skb(struct sk_buff *skb, struct dst_entry *dst,
static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device *dev,
struct vxlan_sock *sock4,
struct sk_buff *skb, int oif, u8 tos,
- __be32 daddr, __be32 *saddr,
+ __be32 daddr, __be32 *saddr, __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1824,6 +1824,8 @@ static struct rtable *vxlan_get_route(struct vxlan_dev *vxlan, struct net_device
fl4.flowi4_proto = IPPROTO_UDP;
fl4.daddr = daddr;
fl4.saddr = *saddr;
+ fl4.fl4_dport = dport;
+ fl4.fl4_sport = sport;
rt = ip_route_output_key(vxlan->net, &fl4);
if (likely(!IS_ERR(rt))) {
@@ -1851,6 +1853,7 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
__be32 label,
const struct in6_addr *daddr,
struct in6_addr *saddr,
+ __be16 dport, __be16 sport,
struct dst_cache *dst_cache,
const struct ip_tunnel_info *info)
{
@@ -1877,6 +1880,8 @@ static struct dst_entry *vxlan6_get_route(struct vxlan_dev *vxlan,
fl6.flowlabel = ip6_make_flowinfo(RT_TOS(tos), label);
fl6.flowi6_mark = skb->mark;
fl6.flowi6_proto = IPPROTO_UDP;
+ fl6.fl6_dport = dport;
+ fl6.fl6_sport = sport;
err = ipv6_stub->ipv6_dst_lookup(vxlan->net,
sock6->sock->sk,
@@ -1946,7 +1951,7 @@ static void vxlan_encap_bypass(struct sk_buff *skb, struct vxlan_dev *src_vxlan,
static int encap_bypass_if_local(struct sk_buff *skb, struct net_device *dev,
struct vxlan_dev *vxlan, union vxlan_addr *daddr,
- __be32 dst_port, __be32 vni, struct dst_entry *dst,
+ __be16 dst_port, __be32 vni, struct dst_entry *dst,
u32 rt_flags)
{
#if IS_ENABLED(CONFIG_IPV6)
@@ -2068,6 +2073,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
dst->sin.sin_addr.s_addr,
&src->sin.sin_addr.s_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(rt)) {
err = PTR_ERR(rt);
@@ -2104,6 +2110,7 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
rdst ? rdst->remote_ifindex : 0, tos,
label, &dst->sin6.sin6_addr,
&src->sin6.sin6_addr,
+ dst_port, src_port,
dst_cache, info);
if (IS_ERR(ndst)) {
err = PTR_ERR(ndst);
@@ -2261,7 +2268,7 @@ static void vxlan_cleanup(unsigned long arg)
= container_of(p, struct vxlan_fdb, hlist);
unsigned long timeout;
- if (f->state & NUD_PERMANENT)
+ if (f->state & (NUD_PERMANENT | NUD_NOARP))
continue;
timeout = f->used + vxlan->cfg.age_interval * HZ;
@@ -2347,7 +2354,7 @@ static int vxlan_open(struct net_device *dev)
}
/* Purge the forwarding table */
-static void vxlan_flush(struct vxlan_dev *vxlan)
+static void vxlan_flush(struct vxlan_dev *vxlan, bool do_all)
{
unsigned int h;
@@ -2357,6 +2364,8 @@ static void vxlan_flush(struct vxlan_dev *vxlan)
hlist_for_each_safe(p, n, &vxlan->fdb_head[h]) {
struct vxlan_fdb *f
= container_of(p, struct vxlan_fdb, hlist);
+ if (!do_all && (f->state & (NUD_PERMANENT | NUD_NOARP)))
+ continue;
/* the all_zeros_mac entry is deleted at vxlan_uninit */
if (!is_zero_ether_addr(f->eth_addr))
vxlan_fdb_destroy(vxlan, f);
@@ -2378,7 +2387,7 @@ static int vxlan_stop(struct net_device *dev)
del_timer_sync(&vxlan->age_timer);
- vxlan_flush(vxlan);
+ vxlan_flush(vxlan, false);
vxlan_sock_release(vxlan);
return ret;
@@ -2430,7 +2439,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
rt = vxlan_get_route(vxlan, dev, sock4, skb, 0, info->key.tos,
info->key.u.ipv4.dst,
- &info->key.u.ipv4.src, NULL, info);
+ &info->key.u.ipv4.src, dport, sport, NULL, info);
if (IS_ERR(rt))
return PTR_ERR(rt);
ip_rt_put(rt);
@@ -2441,7 +2450,7 @@ static int vxlan_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
ndst = vxlan6_get_route(vxlan, dev, sock6, skb, 0, info->key.tos,
info->key.label, &info->key.u.ipv6.dst,
- &info->key.u.ipv6.src, NULL, info);
+ &info->key.u.ipv6.src, dport, sport, NULL, info);
if (IS_ERR(ndst))
return PTR_ERR(ndst);
dst_release(ndst);
@@ -2883,7 +2892,7 @@ static int vxlan_dev_configure(struct net *src_net, struct net_device *dev,
memcpy(&vxlan->cfg, conf, sizeof(*conf));
if (!vxlan->cfg.dst_port) {
if (conf->flags & VXLAN_F_GPE)
- vxlan->cfg.dst_port = 4790; /* IANA assigned VXLAN-GPE port */
+ vxlan->cfg.dst_port = htons(4790); /* IANA VXLAN-GPE port */
else
vxlan->cfg.dst_port = default_port;
}
@@ -3051,6 +3060,8 @@ static void vxlan_dellink(struct net_device *dev, struct list_head *head)
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
+ vxlan_flush(vxlan, true);
+
spin_lock(&vn->sock_lock);
if (!hlist_unhashed(&vxlan->hlist))
hlist_del_rcu(&vxlan->hlist);
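Threading dport/sport through vxlan_get_route()/vxlan6_get_route() completes the flow key: route lookups (multipath hashing, ip rules matching on ports) now see the tunnel's full UDP 5-tuple instead of a ports-less key. Separately, vxlan_flush() now keeps PERMANENT/NOARP entries across vxlan_stop() and purges everything only at dellink. A stand-in sketch of the flow-key idea (the struct only mimics flowi4):

    /* Hedged sketch: a route lookup keyed on the full 5-tuple.  The
     * struct is a stand-in for the kernel's flowi4. */
    #include <stdint.h>

    struct flow_key {
        uint32_t saddr, daddr;
        uint8_t  proto;
        uint16_t sport, dport;
    };

    static struct flow_key vxlan_flow(uint32_t saddr, uint32_t daddr,
                                      uint16_t sport, uint16_t dport)
    {
        struct flow_key fl = {
            .saddr = saddr,
            .daddr = daddr,
            .proto = 17,        /* IPPROTO_UDP */
            .sport = sport,     /* was missing from the key before */
            .dport = dport,     /* ditto */
        };
        return fl;
    }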
diff --git a/drivers/net/wan/fsl_ucc_hdlc.c b/drivers/net/wan/fsl_ucc_hdlc.c
index e38ce4d..d869533 100644
--- a/drivers/net/wan/fsl_ucc_hdlc.c
+++ b/drivers/net/wan/fsl_ucc_hdlc.c
@@ -573,7 +573,7 @@ static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
howmany += hdlc_rx_done(priv, budget - howmany);
if (howmany < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, howmany);
qe_setbits32(priv->uccf->p_uccm,
(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
diff --git a/drivers/net/wan/hd64572.c b/drivers/net/wan/hd64572.c
index 7ef49da..cff0cfa 100644
--- a/drivers/net/wan/hd64572.c
+++ b/drivers/net/wan/hd64572.c
@@ -341,7 +341,7 @@ static int sca_poll(struct napi_struct *napi, int budget)
received = sca_rx_done(port, budget);
if (received < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, received);
enable_intr(port);
}
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 12e67c4..2743a9b 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -2458,8 +2458,7 @@ bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);
- ar->tgt_oper_chan =
- __ieee80211_get_channel(ar->hw->wiphy, freq);
+ ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt chan change freq %u phymode %s\n",
freq, ath10k_wmi_phymode_str(phymode));
diff --git a/drivers/net/wireless/ath/ath10k/pci.c b/drivers/net/wireless/ath/ath10k/pci.c
index d2aa9e5..79e6145 100644
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -2799,7 +2799,7 @@ static int ath10k_pci_napi_poll(struct napi_struct *ctx, int budget)
done = ath10k_htt_txrx_compl_task(ar, budget);
if (done < budget) {
- napi_complete(ctx);
+ napi_complete_done(ctx, done);
/* In case of MSI, it is possible that interrupts are received
* while NAPI poll is in progress. So pending interrupts that are
* received after processing all copy engine pipes by NAPI poll
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 1843d98..708facd 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -69,7 +69,7 @@ static int wil6210_netdev_poll_rx(struct napi_struct *napi, int budget)
done = budget - quota;
if (done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, done);
wil6210_unmask_irq_rx(wil);
wil_dbg_txrx(wil, "NAPI RX complete\n");
}
diff --git a/drivers/net/wireless/marvell/mwifiex/cfg80211.c b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
index 145cc4b..1e3bd43 100644
--- a/drivers/net/wireless/marvell/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/marvell/mwifiex/cfg80211.c
@@ -2078,7 +2078,7 @@ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv)
ie_len = ie_buf[1] + sizeof(struct ieee_types_header);
band = mwifiex_band_to_radio_type(priv->curr_bss_params.band);
- chan = __ieee80211_get_channel(priv->wdev.wiphy,
+ chan = ieee80211_get_channel(priv->wdev.wiphy,
ieee80211_channel_to_frequency(bss_info.bss_chan,
band));
diff --git a/drivers/net/wireless/realtek/rtlwifi/usb.c b/drivers/net/wireless/realtek/rtlwifi/usb.c
index 44bdb2b..4d989b8 100644
--- a/drivers/net/wireless/realtek/rtlwifi/usb.c
+++ b/drivers/net/wireless/realtek/rtlwifi/usb.c
@@ -1065,6 +1065,7 @@ int rtl_usb_probe(struct usb_interface *intf,
return -ENOMEM;
}
rtlpriv = hw->priv;
+ rtlpriv->hw = hw;
rtlpriv->usb_data = kzalloc(RTL_USB_MAX_RX_COUNT * sizeof(u32),
GFP_KERNEL);
if (!rtlpriv->usb_data)
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index e30ffd2..1073b27 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -104,7 +104,7 @@ static int xenvif_poll(struct napi_struct *napi, int budget)
work_done = xenvif_tx_action(queue, budget);
if (work_done < budget) {
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
xenvif_napi_schedule_or_enable_events(queue);
}
@@ -221,18 +221,18 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
{
struct xenvif *vif = netdev_priv(dev);
struct xenvif_queue *queue = NULL;
- unsigned int num_queues = vif->num_queues;
unsigned long rx_bytes = 0;
unsigned long rx_packets = 0;
unsigned long tx_bytes = 0;
unsigned long tx_packets = 0;
unsigned int index;
+ spin_lock(&vif->lock);
if (vif->queues == NULL)
goto out;
/* Aggregate tx and rx stats from each queue */
- for (index = 0; index < num_queues; ++index) {
+ for (index = 0; index < vif->num_queues; ++index) {
queue = &vif->queues[index];
rx_bytes += queue->stats.rx_bytes;
rx_packets += queue->stats.rx_packets;
@@ -241,6 +241,8 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
}
out:
+ spin_unlock(&vif->lock);
+
vif->dev->stats.rx_bytes = rx_bytes;
vif->dev->stats.rx_packets = rx_packets;
vif->dev->stats.tx_bytes = tx_bytes;
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 3124eae..85b742e 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -493,11 +493,22 @@ static int backend_create_xenvif(struct backend_info *be)
static void backend_disconnect(struct backend_info *be)
{
if (be->vif) {
+ unsigned int queue_index;
+
xen_unregister_watchers(be->vif);
#ifdef CONFIG_DEBUG_FS
xenvif_debugfs_delif(be->vif);
#endif /* CONFIG_DEBUG_FS */
xenvif_disconnect_data(be->vif);
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
+
+ spin_lock(&be->vif->lock);
+ vfree(be->vif->queues);
+ be->vif->num_queues = 0;
+ be->vif->queues = NULL;
+ spin_unlock(&be->vif->lock);
+
xenvif_disconnect_ctrl(be->vif);
}
}
@@ -1034,6 +1045,8 @@ static void connect(struct backend_info *be)
err:
if (be->vif->num_queues > 0)
xenvif_disconnect_data(be->vif); /* Clean up existing queues */
+ for (queue_index = 0; queue_index < be->vif->num_queues; ++queue_index)
+ xenvif_deinit_queue(&be->vif->queues[queue_index]);
vfree(be->vif->queues);
be->vif->queues = NULL;
be->vif->num_queues = 0;
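Both xenbus hunks move queue teardown behind vif->lock, and the get_stats hunk earlier takes the same lock, so a stats read can no longer race with vfree(vif->queues). A userspace analog with a pthread mutex standing in for the spinlock (names illustrative):

    /* Hedged analog of the fix: reader and teardown agree on one lock,
     * so num_queues and the queues array are always seen as a pair. */
    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int num_queues;
    static long *queues;

    static long read_stats(void)
    {
        long sum = 0;
        unsigned int i;

        pthread_mutex_lock(&lock);
        for (i = 0; queues && i < num_queues; i++)
            sum += queues[i];
        pthread_mutex_unlock(&lock);
        return sum;
    }

    static void disconnect(void)
    {
        pthread_mutex_lock(&lock);
        free(queues);       /* safe: no reader can be mid-walk */
        queues = NULL;
        num_queues = 0;
        pthread_mutex_unlock(&lock);
    }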
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 40f26b6..cf82b5b 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -321,7 +321,7 @@ static void xennet_alloc_rx_buffers(struct netfront_queue *queue)
queue->rx.req_prod_pvt = req_prod;
/* Not enough requests? Try again later. */
- if (req_prod - queue->rx.rsp_cons < NET_RX_SLOTS_MIN) {
+ if (req_prod - queue->rx.sring->req_prod < NET_RX_SLOTS_MIN) {
mod_timer(&queue->rx_refill_timer, jiffies + (HZ/10));
return;
}
@@ -1051,7 +1051,7 @@ err:
if (work_done < budget) {
int more_to_do = 0;
- napi_complete(napi);
+ napi_complete_done(napi, work_done);
RING_FINAL_CHECK_FOR_RESPONSES(&queue->rx, more_to_do);
if (more_to_do)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 6307088..a518cb1 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -957,6 +957,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
{
resource_size_t allocated = 0, available = 0;
struct nd_region *nd_region = to_nd_region(dev->parent);
+ struct nd_namespace_common *ndns = to_ndns(dev);
struct nd_mapping *nd_mapping;
struct nvdimm_drvdata *ndd;
struct nd_label_id label_id;
@@ -964,7 +965,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
u8 *uuid = NULL;
int rc, i;
- if (dev->driver || to_ndns(dev)->claim)
+ if (dev->driver || ndns->claim)
return -EBUSY;
if (is_namespace_pmem(dev)) {
@@ -1034,20 +1035,16 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
nd_namespace_pmem_set_resource(nd_region, nspm,
val * nd_region->ndr_mappings);
- } else if (is_namespace_blk(dev)) {
- struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
-
- /*
- * Try to delete the namespace if we deleted all of its
- * allocation, this is not the seed device for the
- * region, and it is not actively claimed by a btt
- * instance.
- */
- if (val == 0 && nd_region->ns_seed != dev
- && !nsblk->common.claim)
- nd_device_unregister(dev, ND_ASYNC);
}
+ /*
+ * Try to delete the namespace if we deleted all of its
+ * allocation, this is not the seed device for the region, and
+ * it is not actively claimed by a btt instance.
+ */
+ if (val == 0 && nd_region->ns_seed != dev && !ndns->claim)
+ nd_device_unregister(dev, ND_ASYNC);
+
return rc;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7282d74..5b536be 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -90,7 +90,9 @@ static int read_pmem(struct page *page, unsigned int off,
rc = memcpy_from_pmem(mem + off, pmem_addr, len);
kunmap_atomic(mem);
- return rc;
+ if (rc)
+ return -EIO;
+ return 0;
}
static int pmem_do_bvec(struct pmem_device *pmem, struct page *page,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 2fc86dc..8a3c3e3 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1106,12 +1106,7 @@ int nvme_disable_ctrl(struct nvme_ctrl *ctrl, u64 cap)
if (ret)
return ret;
- /* Checking for ctrl->tagset is a trick to avoid sleeping on module
- * load, since we only need the quirk on reset_controller. Notice
- * that the HGST device needs this delay only in firmware activation
- * procedure; unfortunately we have no (easy) way to verify this.
- */
- if ((ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY) && ctrl->tagset)
+ if (ctrl->quirks & NVME_QUIRK_DELAY_BEFORE_CHK_RDY)
msleep(NVME_QUIRK_DELAY_AMOUNT);
return nvme_wait_ready(ctrl, cap, false);
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index aa0bc60..e65041c 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -1654,23 +1654,22 @@ nvme_fc_map_data(struct nvme_fc_ctrl *ctrl, struct request *rq,
struct nvme_fc_fcp_op *op)
{
struct nvmefc_fcp_req *freq = &op->fcp_req;
- u32 map_len = nvme_map_len(rq);
enum dma_data_direction dir;
int ret;
freq->sg_cnt = 0;
- if (!map_len)
+ if (!blk_rq_payload_bytes(rq))
return 0;
freq->sg_table.sgl = freq->first_sgl;
- ret = sg_alloc_table_chained(&freq->sg_table, rq->nr_phys_segments,
- freq->sg_table.sgl);
+ ret = sg_alloc_table_chained(&freq->sg_table,
+ blk_rq_nr_phys_segments(rq), freq->sg_table.sgl);
if (ret)
return -ENOMEM;
op->nents = blk_rq_map_sg(rq->q, rq, freq->sg_table.sgl);
- WARN_ON(op->nents > rq->nr_phys_segments);
+ WARN_ON(op->nents > blk_rq_nr_phys_segments(rq));
dir = (rq_data_dir(rq) == WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
freq->sg_cnt = fc_dma_map_sg(ctrl->lport->dev, freq->sg_table.sgl,
op->nents, dir);
@@ -1854,7 +1853,7 @@ nvme_fc_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret)
return ret;
- data_len = nvme_map_len(rq);
+ data_len = blk_rq_payload_bytes(rq);
if (data_len)
io_dir = ((rq_data_dir(rq) == WRITE) ?
NVMEFC_FCP_WRITE : NVMEFC_FCP_READ);
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 6377e14..aead6d0 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -225,14 +225,6 @@ static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
return (sector >> (ns->lba_shift - 9));
}
-static inline unsigned nvme_map_len(struct request *rq)
-{
- if (req_op(rq) == REQ_OP_DISCARD)
- return sizeof(struct nvme_dsm_range);
- else
- return blk_rq_bytes(rq);
-}
-
static inline void nvme_cleanup_cmd(struct request *req)
{
if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
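Dropping nvme_map_len() in favor of blk_rq_payload_bytes() gives all three transports (FC, PCIe, RDMA) one source of truth that already understands special payloads: a DISCARD carries a 16-byte struct nvme_dsm_range rather than blk_rq_bytes() worth of data, exactly what the removed helper open-coded. A toy stand-in for that distinction (types illustrative):

    /* Hedged stand-in for what the block-layer helper buys: one place
     * that knows special payloads differ from plain data length. */
    #include <stddef.h>

    enum req_op { OP_READ, OP_WRITE, OP_DISCARD };

    struct request {
        enum req_op op;
        size_t data_bytes;      /* what blk_rq_bytes() would report */
    };

    static size_t payload_bytes(const struct request *rq)
    {
        if (rq->op == OP_DISCARD)
            return 16;          /* sizeof(struct nvme_dsm_range) */
        return rq->data_bytes;
    }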
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 19beeb7..3faefab 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -306,11 +306,11 @@ static __le64 **iod_list(struct request *req)
return (__le64 **)(iod->sg + blk_rq_nr_phys_segments(req));
}
-static int nvme_init_iod(struct request *rq, unsigned size,
- struct nvme_dev *dev)
+static int nvme_init_iod(struct request *rq, struct nvme_dev *dev)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(rq);
int nseg = blk_rq_nr_phys_segments(rq);
+ unsigned int size = blk_rq_payload_bytes(rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
iod->sg = kmalloc(nvme_iod_alloc_size(dev, size, nseg), GFP_ATOMIC);
@@ -420,12 +420,11 @@ static void nvme_dif_complete(u32 p, u32 v, struct t10_pi_tuple *pi)
}
#endif
-static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
- int total_len)
+static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct dma_pool *pool;
- int length = total_len;
+ int length = blk_rq_payload_bytes(req);
struct scatterlist *sg = iod->sg;
int dma_len = sg_dma_len(sg);
u64 dma_addr = sg_dma_address(sg);
@@ -501,7 +500,7 @@ static bool nvme_setup_prps(struct nvme_dev *dev, struct request *req,
}
static int nvme_map_data(struct nvme_dev *dev, struct request *req,
- unsigned size, struct nvme_command *cmnd)
+ struct nvme_command *cmnd)
{
struct nvme_iod *iod = blk_mq_rq_to_pdu(req);
struct request_queue *q = req->q;
@@ -519,7 +518,7 @@ static int nvme_map_data(struct nvme_dev *dev, struct request *req,
DMA_ATTR_NO_WARN))
goto out;
- if (!nvme_setup_prps(dev, req, size))
+ if (!nvme_setup_prps(dev, req))
goto out_unmap;
ret = BLK_MQ_RQ_QUEUE_ERROR;
@@ -580,7 +579,6 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_dev *dev = nvmeq->dev;
struct request *req = bd->rq;
struct nvme_command cmnd;
- unsigned map_len;
int ret = BLK_MQ_RQ_QUEUE_OK;
/*
@@ -600,13 +598,12 @@ static int nvme_queue_rq(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_MQ_RQ_QUEUE_OK)
return ret;
- map_len = nvme_map_len(req);
- ret = nvme_init_iod(req, map_len, dev);
+ ret = nvme_init_iod(req, dev);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_free_cmd;
if (blk_rq_nr_phys_segments(req))
- ret = nvme_map_data(dev, req, map_len, &cmnd);
+ ret = nvme_map_data(dev, req, &cmnd);
if (ret != BLK_MQ_RQ_QUEUE_OK)
goto out_cleanup_iod;
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index f587af3..557f29b 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -981,8 +981,7 @@ static int nvme_rdma_map_sg_fr(struct nvme_rdma_queue *queue,
}
static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
- struct request *rq, unsigned int map_len,
- struct nvme_command *c)
+ struct request *rq, struct nvme_command *c)
{
struct nvme_rdma_request *req = blk_mq_rq_to_pdu(rq);
struct nvme_rdma_device *dev = queue->device;
@@ -1014,9 +1013,9 @@ static int nvme_rdma_map_data(struct nvme_rdma_queue *queue,
}
if (count == 1) {
- if (rq_data_dir(rq) == WRITE &&
- map_len <= nvme_rdma_inline_data_size(queue) &&
- nvme_rdma_queue_idx(queue))
+ if (rq_data_dir(rq) == WRITE && nvme_rdma_queue_idx(queue) &&
+ blk_rq_payload_bytes(rq) <=
+ nvme_rdma_inline_data_size(queue))
return nvme_rdma_map_sg_inline(queue, req, c);
if (dev->pd->flags & IB_PD_UNSAFE_GLOBAL_RKEY)
@@ -1422,7 +1421,7 @@ static inline bool nvme_rdma_queue_is_ready(struct nvme_rdma_queue *queue,
struct request *rq)
{
if (unlikely(!test_bit(NVME_RDMA_Q_LIVE, &queue->flags))) {
- struct nvme_command *cmd = (struct nvme_command *)rq->cmd;
+ struct nvme_command *cmd = nvme_req(rq)->cmd;
if (rq->cmd_type != REQ_TYPE_DRV_PRIV ||
cmd->common.opcode != nvme_fabrics_command ||
@@ -1444,7 +1443,6 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
struct nvme_command *c = sqe->data;
bool flush = false;
struct ib_device *dev;
- unsigned int map_len;
int ret;
WARN_ON_ONCE(rq->tag < 0);
@@ -1462,8 +1460,7 @@ static int nvme_rdma_queue_rq(struct blk_mq_hw_ctx *hctx,
blk_mq_start_request(rq);
- map_len = nvme_map_len(rq);
- ret = nvme_rdma_map_data(queue, rq, map_len, c);
+ ret = nvme_rdma_map_data(queue, rq, c);
if (ret < 0) {
dev_err(queue->ctrl->ctrl.device,
"Failed to map data (%d)\n", ret);
diff --git a/drivers/nvme/target/configfs.c b/drivers/nvme/target/configfs.c
index 6f50741..be8c800 100644
--- a/drivers/nvme/target/configfs.c
+++ b/drivers/nvme/target/configfs.c
@@ -631,6 +631,7 @@ static void nvmet_subsys_release(struct config_item *item)
{
struct nvmet_subsys *subsys = to_subsys(item);
+ nvmet_subsys_del_ctrls(subsys);
nvmet_subsys_put(subsys);
}
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index b1d66ed..fc5ba2f 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -200,7 +200,7 @@ static void nvmet_keep_alive_timer(struct work_struct *work)
pr_err("ctrl %d keep-alive timer (%d seconds) expired!\n",
ctrl->cntlid, ctrl->kato);
- ctrl->ops->delete_ctrl(ctrl);
+ nvmet_ctrl_fatal_error(ctrl);
}
static void nvmet_start_keep_alive_timer(struct nvmet_ctrl *ctrl)
@@ -816,6 +816,9 @@ static void nvmet_ctrl_free(struct kref *ref)
list_del(&ctrl->subsys_entry);
mutex_unlock(&subsys->lock);
+ flush_work(&ctrl->async_event_work);
+ cancel_work_sync(&ctrl->fatal_err_work);
+
ida_simple_remove(&subsys->cntlid_ida, ctrl->cntlid);
nvmet_subsys_put(subsys);
@@ -935,6 +938,16 @@ static void nvmet_subsys_free(struct kref *ref)
kfree(subsys);
}
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys)
+{
+ struct nvmet_ctrl *ctrl;
+
+ mutex_lock(&subsys->lock);
+ list_for_each_entry(ctrl, &subsys->ctrls, subsys_entry)
+ ctrl->ops->delete_ctrl(ctrl);
+ mutex_unlock(&subsys->lock);
+}
+
void nvmet_subsys_put(struct nvmet_subsys *subsys)
{
kref_put(&subsys->ref, nvmet_subsys_free);
diff --git a/drivers/nvme/target/fc.c b/drivers/nvme/target/fc.c
index 173e842..ba57f98 100644
--- a/drivers/nvme/target/fc.c
+++ b/drivers/nvme/target/fc.c
@@ -1314,7 +1314,7 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
(struct fcnvme_ls_disconnect_rqst *)iod->rqstbuf;
struct fcnvme_ls_disconnect_acc *acc =
(struct fcnvme_ls_disconnect_acc *)iod->rspbuf;
- struct nvmet_fc_tgt_queue *queue;
+ struct nvmet_fc_tgt_queue *queue = NULL;
struct nvmet_fc_tgt_assoc *assoc;
int ret = 0;
bool del_assoc = false;
@@ -1348,7 +1348,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
assoc = nvmet_fc_find_target_assoc(tgtport,
be64_to_cpu(rqst->associd.association_id));
iod->assoc = assoc;
- if (!assoc)
+ if (assoc) {
+ if (rqst->discon_cmd.scope ==
+ FCNVME_DISCONN_CONNECTION) {
+ queue = nvmet_fc_find_target_queue(tgtport,
+ be64_to_cpu(
+ rqst->discon_cmd.id));
+ if (!queue) {
+ nvmet_fc_tgt_a_put(assoc);
+ ret = VERR_NO_CONN;
+ }
+ }
+ } else
ret = VERR_NO_ASSOC;
}
@@ -1373,21 +1384,18 @@ nvmet_fc_ls_disconnect(struct nvmet_fc_tgtport *tgtport,
FCNVME_LS_DISCONNECT);
- if (rqst->discon_cmd.scope == FCNVME_DISCONN_CONNECTION) {
- queue = nvmet_fc_find_target_queue(tgtport,
- be64_to_cpu(rqst->discon_cmd.id));
- if (queue) {
- int qid = queue->qid;
+ /* are we to delete a Connection ID (queue)? */
+ if (queue) {
+ int qid = queue->qid;
- nvmet_fc_delete_target_queue(queue);
+ nvmet_fc_delete_target_queue(queue);
- /* release the get taken by find_target_queue */
- nvmet_fc_tgt_q_put(queue);
+ /* release the get taken by find_target_queue */
+ nvmet_fc_tgt_q_put(queue);
- /* tear association down if io queue terminated */
- if (!qid)
- del_assoc = true;
- }
+ /* tear association down if io queue terminated */
+ if (!qid)
+ del_assoc = true;
}
/* release get taken in nvmet_fc_find_target_assoc */
diff --git a/drivers/nvme/target/nvmet.h b/drivers/nvme/target/nvmet.h
index 23d5eb1..cc7ad06 100644
--- a/drivers/nvme/target/nvmet.h
+++ b/drivers/nvme/target/nvmet.h
@@ -282,6 +282,7 @@ void nvmet_ctrl_put(struct nvmet_ctrl *ctrl);
struct nvmet_subsys *nvmet_subsys_alloc(const char *subsysnqn,
enum nvme_subsys_type type);
void nvmet_subsys_put(struct nvmet_subsys *subsys);
+void nvmet_subsys_del_ctrls(struct nvmet_subsys *subsys);
struct nvmet_ns *nvmet_find_namespace(struct nvmet_ctrl *ctrl, __le32 nsid);
void nvmet_put_namespace(struct nvmet_ns *ns);
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 8c3760a..6099022 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -438,6 +438,10 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
{
struct ib_recv_wr *bad_wr;
+ ib_dma_sync_single_for_device(ndev->device,
+ cmd->sge[0].addr, cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+
if (ndev->srq)
return ib_post_srq_recv(ndev->srq, &cmd->wr, &bad_wr);
return ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, &bad_wr);
@@ -538,6 +542,11 @@ static void nvmet_rdma_queue_response(struct nvmet_req *req)
first_wr = &rsp->send_wr;
nvmet_rdma_post_recv(rsp->queue->dev, rsp->cmd);
+
+ ib_dma_sync_single_for_device(rsp->queue->dev->device,
+ rsp->send_sge.addr, rsp->send_sge.length,
+ DMA_TO_DEVICE);
+
if (ib_post_send(cm_id->qp, first_wr, &bad_wr)) {
pr_err("sending cmd response failed\n");
nvmet_rdma_release_rsp(rsp);
@@ -698,6 +707,14 @@ static void nvmet_rdma_handle_command(struct nvmet_rdma_queue *queue,
cmd->n_rdma = 0;
cmd->req.port = queue->port;
+
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->cmd->sge[0].addr, cmd->cmd->sge[0].length,
+ DMA_FROM_DEVICE);
+ ib_dma_sync_single_for_cpu(queue->dev->device,
+ cmd->send_sge.addr, cmd->send_sge.length,
+ DMA_TO_DEVICE);
+
if (!nvmet_req_init(&cmd->req, &queue->nvme_cq,
&queue->nvme_sq, &nvmet_rdma_ops))
return;
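The three ib_dma_sync_single_* calls added here bracket CPU access to DMA-mapped command and response buffers: sync toward the CPU before parsing a received command, back toward the device before reposting the recv and before posting the send. The ownership-handoff shape, with stubs so the sketch is self-contained:

    /* Hedged shape of the DMA ownership handoff: CPU may only touch the
     * buffer between the two syncs.  Stubs replace the IB verbs API. */
    struct device;                          /* stub */
    typedef unsigned long long dma_addr_t;  /* stub */
    enum dma_dir { TO_DEVICE, FROM_DEVICE };

    void dma_sync_for_cpu(struct device *d, dma_addr_t a,
                          unsigned int len, enum dma_dir dir);
    void dma_sync_for_device(struct device *d, dma_addr_t a,
                             unsigned int len, enum dma_dir dir);
    void parse_command(void *buf);          /* stub: CPU reads the buffer */

    static void handle_recv(struct device *dev, dma_addr_t addr,
                            void *buf, unsigned int len)
    {
        dma_sync_for_cpu(dev, addr, len, FROM_DEVICE);    /* CPU owns it */
        parse_command(buf);
        dma_sync_for_device(dev, addr, len, FROM_DEVICE); /* device again */
    }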
diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c
index 1f38d08..f1b633b 100644
--- a/drivers/pci/host/pci-xgene-msi.c
+++ b/drivers/pci/host/pci-xgene-msi.c
@@ -517,7 +517,7 @@ static int xgene_msi_probe(struct platform_device *pdev)
rc = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "pci/xgene:online",
xgene_msi_hwirq_alloc, NULL);
- if (rc)
+ if (rc < 0)
goto err_cpuhp;
pci_xgene_online = rc;
rc = cpuhp_setup_state(CPUHP_PCI_XGENE_DEAD, "pci/xgene:dead", NULL,
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index bed1999..af8f6e9 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -807,11 +807,6 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
{
u32 val;
- /* get iATU unroll support */
- pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
- dev_dbg(pp->dev, "iATU unroll: %s\n",
- pp->iatu_unroll_enabled ? "enabled" : "disabled");
-
/* set the number of lanes */
val = dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL);
val &= ~PORT_LINK_MODE_MASK;
@@ -882,6 +877,11 @@ void dw_pcie_setup_rc(struct pcie_port *pp)
* we should not program the ATU here.
*/
if (!pp->ops->rd_other_conf) {
+ /* get iATU unroll support */
+ pp->iatu_unroll_enabled = dw_pcie_iatu_unroll_enabled(pp);
+ dev_dbg(pp->dev, "iATU unroll: %s\n",
+ pp->iatu_unroll_enabled ? "enabled" : "disabled");
+
dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0,
PCIE_ATU_TYPE_MEM, pp->mem_base,
pp->mem_bus_addr, pp->mem_size);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1ccce1c..63d8e18 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -1432,6 +1432,11 @@ static int pci_uevent(struct device *dev, struct kobj_uevent_env *env)
return 0;
}
+static int pci_bus_num_vf(struct device *dev)
+{
+ return pci_num_vf(to_pci_dev(dev));
+}
+
struct bus_type pci_bus_type = {
.name = "pci",
.match = pci_bus_match,
@@ -1443,6 +1448,7 @@ struct bus_type pci_bus_type = {
.bus_groups = pci_bus_groups,
.drv_groups = pci_drv_groups,
.pm = PCI_PM_OPS_PTR,
+ .num_vf = pci_bus_num_vf,
};
EXPORT_SYMBOL(pci_bus_type);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e164b5c..204960e 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1169,6 +1169,7 @@ void set_pcie_port_type(struct pci_dev *pdev)
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
if (!pos)
return;
+
pdev->pcie_cap = pos;
pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
pdev->pcie_flags_reg = reg16;
@@ -1176,13 +1177,14 @@ void set_pcie_port_type(struct pci_dev *pdev)
pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
/*
- * A Root Port is always the upstream end of a Link. No PCIe
- * component has two Links. Two Links are connected by a Switch
- * that has a Port on each Link and internal logic to connect the
- * two Ports.
+ * A Root Port or a PCI-to-PCIe bridge is always the upstream end
+ * of a Link. No PCIe component has two Links. Two Links are
+ * connected by a Switch that has a Port on each Link and internal
+ * logic to connect the two Ports.
*/
type = pci_pcie_type(pdev);
- if (type == PCI_EXP_TYPE_ROOT_PORT)
+ if (type == PCI_EXP_TYPE_ROOT_PORT ||
+ type == PCI_EXP_TYPE_PCIE_BRIDGE)
pdev->has_secondary_link = 1;
else if (type == PCI_EXP_TYPE_UPSTREAM ||
type == PCI_EXP_TYPE_DOWNSTREAM) {
diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
index 3730063..c123488 100644
--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
+++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
@@ -1092,6 +1092,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
enum pin_config_param param = pinconf_to_config_param(*config);
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, pull, val, debounce;
u16 arg = 0;
@@ -1128,7 +1129,7 @@ static int byt_pin_config_get(struct pinctrl_dev *pctl_dev, unsigned int offset,
return -EINVAL;
raw_spin_lock_irqsave(&vg->lock, flags);
- debounce = readl(byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG));
+ debounce = readl(db_reg);
raw_spin_unlock_irqrestore(&vg->lock, flags);
switch (debounce & BYT_DEBOUNCE_PULSE_MASK) {
@@ -1176,6 +1177,7 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
unsigned int param, arg;
void __iomem *conf_reg = byt_gpio_reg(vg, offset, BYT_CONF0_REG);
void __iomem *val_reg = byt_gpio_reg(vg, offset, BYT_VAL_REG);
+ void __iomem *db_reg = byt_gpio_reg(vg, offset, BYT_DEBOUNCE_REG);
unsigned long flags;
u32 conf, val, debounce;
int i, ret = 0;
@@ -1238,36 +1240,40 @@ static int byt_pin_config_set(struct pinctrl_dev *pctl_dev,
break;
case PIN_CONFIG_INPUT_DEBOUNCE:
- debounce = readl(byt_gpio_reg(vg, offset,
- BYT_DEBOUNCE_REG));
- conf &= ~BYT_DEBOUNCE_PULSE_MASK;
+ debounce = readl(db_reg);
+ debounce &= ~BYT_DEBOUNCE_PULSE_MASK;
switch (arg) {
+ case 0:
+ conf &= BYT_DEBOUNCE_EN;
+ break;
case 375:
- conf |= BYT_DEBOUNCE_PULSE_375US;
+ debounce |= BYT_DEBOUNCE_PULSE_375US;
break;
case 750:
- conf |= BYT_DEBOUNCE_PULSE_750US;
+ debounce |= BYT_DEBOUNCE_PULSE_750US;
break;
case 1500:
- conf |= BYT_DEBOUNCE_PULSE_1500US;
+ debounce |= BYT_DEBOUNCE_PULSE_1500US;
break;
case 3000:
- conf |= BYT_DEBOUNCE_PULSE_3MS;
+ debounce |= BYT_DEBOUNCE_PULSE_3MS;
break;
case 6000:
- conf |= BYT_DEBOUNCE_PULSE_6MS;
+ debounce |= BYT_DEBOUNCE_PULSE_6MS;
break;
case 12000:
- conf |= BYT_DEBOUNCE_PULSE_12MS;
+ debounce |= BYT_DEBOUNCE_PULSE_12MS;
break;
case 24000:
- conf |= BYT_DEBOUNCE_PULSE_24MS;
+ debounce |= BYT_DEBOUNCE_PULSE_24MS;
break;
default:
ret = -EINVAL;
}
+ if (!ret)
+ writel(debounce, db_reg);
break;
default:
ret = -ENOTSUPP;
@@ -1617,6 +1623,8 @@ static void byt_gpio_irq_handler(struct irq_desc *desc)
static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
{
+ struct gpio_chip *gc = &vg->chip;
+ struct device *dev = &vg->pdev->dev;
void __iomem *reg;
u32 base, value;
int i;
@@ -1638,10 +1646,12 @@ static void byt_gpio_irq_init_hw(struct byt_gpio *vg)
}
value = readl(reg);
- if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i) &&
- !(value & BYT_DIRECT_IRQ_EN)) {
+ if (value & BYT_DIRECT_IRQ_EN) {
+ clear_bit(i, gc->irq_valid_mask);
+ dev_dbg(dev, "excluding GPIO %d from IRQ domain\n", i);
+ } else if ((value & BYT_PIN_MUX) == byt_get_gpio_mux(vg, i)) {
byt_gpio_clear_triggering(vg, i);
- dev_dbg(&vg->pdev->dev, "disabling GPIO %d\n", i);
+ dev_dbg(dev, "disabling GPIO %d\n", i);
}
}
@@ -1680,6 +1690,7 @@ static int byt_gpio_probe(struct byt_gpio *vg)
gc->can_sleep = false;
gc->parent = &vg->pdev->dev;
gc->ngpio = vg->soc_data->npins;
+ gc->irq_need_valid_mask = true;
#ifdef CONFIG_PM_SLEEP
vg->saved_context = devm_kcalloc(&vg->pdev->dev, gc->ngpio,
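The debounce hunk now programs the pulse width into BYT_DEBOUNCE_REG (written only when the argument validated) instead of corrupting conf0. One line above looks suspect, though: "conf &= BYT_DEBOUNCE_EN" in the arg == 0 case masks everything *but* the enable bit; disabling debounce would need the complement. A sketch of the presumably intended logic (bit positions are assumptions for illustration):

    /* Hedged sketch of the intended masking; DEBOUNCE_EN's position and
     * the pulse field width are assumed, not the hardware's. */
    #include <stdint.h>

    #define DEBOUNCE_EN         (1u << 0)   /* assumed bit position */
    #define DEBOUNCE_PULSE_MASK 0x7u        /* assumed field width */

    static void set_debounce(uint32_t *conf, uint32_t *db, uint32_t field)
    {
        if (!field) {
            *conf &= ~DEBOUNCE_EN;  /* disable: clear, don't mask-in */
            return;
        }
        *conf |= DEBOUNCE_EN;
        *db = (*db & ~DEBOUNCE_PULSE_MASK) | field;
    }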
diff --git a/drivers/pinctrl/intel/pinctrl-broxton.c b/drivers/pinctrl/intel/pinctrl-broxton.c
index 59cb7a6..901b356 100644
--- a/drivers/pinctrl/intel/pinctrl-broxton.c
+++ b/drivers/pinctrl/intel/pinctrl-broxton.c
@@ -19,7 +19,7 @@
#define BXT_PAD_OWN 0x020
#define BXT_HOSTSW_OWN 0x080
-#define BXT_PADCFGLOCK 0x090
+#define BXT_PADCFGLOCK 0x060
#define BXT_GPI_IE 0x110
#define BXT_COMMUNITY(s, e) \
diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c
index 1e13967..6df35dc 100644
--- a/drivers/pinctrl/intel/pinctrl-intel.c
+++ b/drivers/pinctrl/intel/pinctrl-intel.c
@@ -353,6 +353,21 @@ static int intel_pinmux_set_mux(struct pinctrl_dev *pctldev, unsigned function,
return 0;
}
+static void __intel_gpio_set_direction(void __iomem *padcfg0, bool input)
+{
+ u32 value;
+
+ value = readl(padcfg0);
+ if (input) {
+ value &= ~PADCFG0_GPIORXDIS;
+ value |= PADCFG0_GPIOTXDIS;
+ } else {
+ value &= ~PADCFG0_GPIOTXDIS;
+ value |= PADCFG0_GPIORXDIS;
+ }
+ writel(value, padcfg0);
+}
+
static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
struct pinctrl_gpio_range *range,
unsigned pin)
@@ -375,11 +390,11 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev,
/* Disable SCI/SMI/NMI generation */
value &= ~(PADCFG0_GPIROUTIOXAPIC | PADCFG0_GPIROUTSCI);
value &= ~(PADCFG0_GPIROUTSMI | PADCFG0_GPIROUTNMI);
- /* Disable TX buffer and enable RX (this will be input) */
- value &= ~PADCFG0_GPIORXDIS;
- value |= PADCFG0_GPIOTXDIS;
writel(value, padcfg0);
+ /* Disable TX buffer and enable RX (this will be input) */
+ __intel_gpio_set_direction(padcfg0, true);
+
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
return 0;
@@ -392,18 +407,11 @@ static int intel_gpio_set_direction(struct pinctrl_dev *pctldev,
struct intel_pinctrl *pctrl = pinctrl_dev_get_drvdata(pctldev);
void __iomem *padcfg0;
unsigned long flags;
- u32 value;
raw_spin_lock_irqsave(&pctrl->lock, flags);
padcfg0 = intel_get_padcfg(pctrl, pin, PADCFG0);
-
- value = readl(padcfg0);
- if (input)
- value |= PADCFG0_GPIOTXDIS;
- else
- value &= ~PADCFG0_GPIOTXDIS;
- writel(value, padcfg0);
+ __intel_gpio_set_direction(padcfg0, input);
raw_spin_unlock_irqrestore(&pctrl->lock, flags);
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
index c3928aa..e0bca4d 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxbb.c
@@ -253,9 +253,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
@@ -498,7 +497,7 @@ static struct meson_pmx_group meson_gxbb_aobus_groups[] = {
GPIO_GROUP(GPIOAO_13, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/meson/pinctrl-meson-gxl.c b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
index 25694f7..b69743b 100644
--- a/drivers/pinctrl/meson/pinctrl-meson-gxl.c
+++ b/drivers/pinctrl/meson/pinctrl-meson-gxl.c
@@ -214,9 +214,8 @@ static const unsigned int uart_tx_ao_a_pins[] = { PIN(GPIOAO_0, 0) };
static const unsigned int uart_rx_ao_a_pins[] = { PIN(GPIOAO_1, 0) };
static const unsigned int uart_cts_ao_a_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_a_pins[] = { PIN(GPIOAO_3, 0) };
-static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_0, 0) };
-static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_1, 0),
- PIN(GPIOAO_5, 0) };
+static const unsigned int uart_tx_ao_b_pins[] = { PIN(GPIOAO_4, 0) };
+static const unsigned int uart_rx_ao_b_pins[] = { PIN(GPIOAO_5, 0) };
static const unsigned int uart_cts_ao_b_pins[] = { PIN(GPIOAO_2, 0) };
static const unsigned int uart_rts_ao_b_pins[] = { PIN(GPIOAO_3, 0) };
@@ -409,7 +408,7 @@ static struct meson_pmx_group meson_gxl_aobus_groups[] = {
GPIO_GROUP(GPIOAO_9, 0),
/* bank AO */
- GROUP(uart_tx_ao_b, 0, 26),
+ GROUP(uart_tx_ao_b, 0, 24),
GROUP(uart_rx_ao_b, 0, 25),
GROUP(uart_tx_ao_a, 0, 12),
GROUP(uart_rx_ao_a, 0, 11),
diff --git a/drivers/pinctrl/pinctrl-amd.c b/drivers/pinctrl/pinctrl-amd.c
index c9a1469..537b520 100644
--- a/drivers/pinctrl/pinctrl-amd.c
+++ b/drivers/pinctrl/pinctrl-amd.c
@@ -202,6 +202,8 @@ static void amd_gpio_dbg_show(struct seq_file *s, struct gpio_chip *gc)
i = 128;
pin_num = AMD_GPIO_PINS_BANK2 + i;
break;
+ default:
+ return;
}
for (; i < pin_num; i++) {
diff --git a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
index aa8bd97..9668633 100644
--- a/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
+++ b/drivers/pinctrl/uniphier/pinctrl-uniphier-ld20.c
@@ -561,7 +561,7 @@ static const int ether_rgmii_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0};
static const unsigned ether_rmii_pins[] = {30, 31, 32, 33, 34, 35, 36, 37, 39,
41, 42, 45};
-static const int ether_rmii_muxvals[] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
+static const int ether_rmii_muxvals[] = {0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1};
static const unsigned i2c0_pins[] = {63, 64};
static const int i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {65, 66};
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 410741a..f46ece2 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -813,6 +813,7 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
case 8:
case 7:
case 6:
+ case 1:
ideapad_input_report(priv, vpc_bit);
break;
case 5:
diff --git a/drivers/platform/x86/intel_mid_powerbtn.c b/drivers/platform/x86/intel_mid_powerbtn.c
index 1fc0de8..3617705 100644
--- a/drivers/platform/x86/intel_mid_powerbtn.c
+++ b/drivers/platform/x86/intel_mid_powerbtn.c
@@ -77,7 +77,7 @@ static int mfld_pb_probe(struct platform_device *pdev)
input_set_capability(input, EV_KEY, KEY_POWER);
- error = request_threaded_irq(irq, NULL, mfld_pb_isr, 0,
+ error = request_threaded_irq(irq, NULL, mfld_pb_isr, IRQF_ONESHOT,
DRIVER_NAME, input);
if (error) {
dev_err(&pdev->dev, "Unable to request irq %d for mfld power"
diff --git a/drivers/platform/x86/mlx-platform.c b/drivers/platform/x86/mlx-platform.c
index 97b4c3a..25f15df 100644
--- a/drivers/platform/x86/mlx-platform.c
+++ b/drivers/platform/x86/mlx-platform.c
@@ -326,7 +326,7 @@ static int __init mlxplat_init(void)
return 0;
fail_platform_mux_register:
- for (i--; i > 0 ; i--)
+ while (--i >= 0)
platform_device_unregister(priv->pdev_mux[i]);
platform_device_unregister(priv->pdev_i2c);
fail_alloc:
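The unwind fix above is a classic cleanup off-by-one: when registration fails at index i, "for (i--; i > 0; i--)" unregisters i-1 down to 1 and leaks index 0, while "while (--i >= 0)" covers every successfully registered device. A runnable demonstration, pretending the failure happened at i == 3:

    /* Hedged demo of the off-by-one: indices 0..2 were registered, so
     * the unwind must visit 2, 1 and 0. */
    #include <stdio.h>

    int main(void)
    {
        int i;

        i = 3;                      /* failure struck at index 3 */
        printf("buggy unwind: ");
        for (i--; i > 0; i--)       /* skips index 0 */
            printf("%d ", i);

        i = 3;
        printf("\nfixed unwind: ");
        while (--i >= 0)            /* covers 2, 1, 0 */
            printf("%d ", i);
        printf("\n");
        return 0;
    }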
diff --git a/drivers/platform/x86/surface3-wmi.c b/drivers/platform/x86/surface3-wmi.c
index cbf4d83..25b1769 100644
--- a/drivers/platform/x86/surface3-wmi.c
+++ b/drivers/platform/x86/surface3-wmi.c
@@ -139,7 +139,7 @@ static acpi_status s3_wmi_attach_spi_device(acpi_handle handle,
static int s3_wmi_check_platform_device(struct device *dev, void *data)
{
- struct acpi_device *adev, *ts_adev;
+ struct acpi_device *adev, *ts_adev = NULL;
acpi_handle handle;
acpi_status status;
@@ -244,13 +244,11 @@ static int s3_wmi_remove(struct platform_device *device)
return 0;
}
-#ifdef CONFIG_PM
-static int s3_wmi_resume(struct device *dev)
+static int __maybe_unused s3_wmi_resume(struct device *dev)
{
s3_wmi_send_lid_state();
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(s3_wmi_pm, NULL, s3_wmi_resume);
static struct platform_driver s3_wmi_driver = {
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 9a507e7..90b05c7 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -396,9 +396,6 @@ static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc,
goto unwind_vring_allocations;
}
- /* track the rvdevs list reference */
- kref_get(&rvdev->refcount);
-
list_add_tail(&rvdev->node, &rproc->rvdevs);
rproc_add_subdev(rproc, &rvdev->subdev,
@@ -889,13 +886,15 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
/*
* Create a copy of the resource table. When a virtio device starts
* and calls vring_new_virtqueue() the address of the allocated vring
- * will be stored in the table_ptr. Before the device is started,
- * table_ptr will be copied into device memory.
+ * will be stored in the cached_table. Before the device is started,
+ * cached_table will be copied into device memory.
*/
- rproc->table_ptr = kmemdup(table, tablesz, GFP_KERNEL);
- if (!rproc->table_ptr)
+ rproc->cached_table = kmemdup(table, tablesz, GFP_KERNEL);
+ if (!rproc->cached_table)
goto clean_up;
+ rproc->table_ptr = rproc->cached_table;
+
/* reset max_notifyid */
rproc->max_notifyid = -1;
@@ -914,16 +913,18 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
}
/*
- * The starting device has been given the rproc->table_ptr as the
+ * The starting device has been given the rproc->cached_table as the
* resource table. The address of the vring along with the other
- * allocated resources (carveouts etc) is stored in table_ptr.
+ * allocated resources (carveouts etc) is stored in cached_table.
* In order to pass this information to the remote device we must copy
* this information to device memory. We also update the table_ptr so
* that any subsequent changes will be applied to the loaded version.
*/
loaded_table = rproc_find_loaded_rsc_table(rproc, fw);
- if (loaded_table)
- memcpy(loaded_table, rproc->table_ptr, tablesz);
+ if (loaded_table) {
+ memcpy(loaded_table, rproc->cached_table, tablesz);
+ rproc->table_ptr = loaded_table;
+ }
/* power up the remote processor */
ret = rproc->ops->start(rproc);
@@ -951,7 +952,8 @@ stop_rproc:
clean_up_resources:
rproc_resource_cleanup(rproc);
clean_up:
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
rproc_disable_iommu(rproc);
@@ -1185,7 +1187,8 @@ void rproc_shutdown(struct rproc *rproc)
rproc_disable_iommu(rproc);
/* Free the copy of the resource table */
- kfree(rproc->table_ptr);
+ kfree(rproc->cached_table);
+ rproc->cached_table = NULL;
rproc->table_ptr = NULL;
/* if in crash state, unlock crash handler */
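The remoteproc hunks split one pointer into two roles: cached_table always points at the kfree-able kernel copy, while table_ptr tracks wherever edits should currently land, flipping to the loaded table in device memory once the firmware is mapped. A condensed sketch of that lifecycle (userspace allocators stand in for kmemdup):

    /* Hedged sketch: edits go through table_ptr, which aliases the
     * kernel copy until the loaded table exists, then device memory. */
    #include <stdlib.h>
    #include <string.h>

    struct rproc_like {
        void *cached_table;     /* kernel copy, always freeable */
        void *table_ptr;        /* where current edits should go */
    };

    static int boot(struct rproc_like *r, const void *fw_table, size_t sz,
                    void *loaded /* device memory, may be NULL */)
    {
        r->cached_table = malloc(sz);
        if (!r->cached_table)
            return -1;
        memcpy(r->cached_table, fw_table, sz);
        r->table_ptr = r->cached_table;

        /* ... vrings allocated, resources filled in via table_ptr ... */

        if (loaded) {
            memcpy(loaded, r->cached_table, sz);
            r->table_ptr = loaded;  /* later edits hit device memory */
        }
        return 0;
    }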
diff --git a/drivers/rpmsg/rpmsg_core.c b/drivers/rpmsg/rpmsg_core.c
index a79cb5a..1cfb775 100644
--- a/drivers/rpmsg/rpmsg_core.c
+++ b/drivers/rpmsg/rpmsg_core.c
@@ -453,8 +453,8 @@ int rpmsg_register_device(struct rpmsg_device *rpdev)
struct device *dev = &rpdev->dev;
int ret;
- dev_set_name(&rpdev->dev, "%s:%s",
- dev_name(dev->parent), rpdev->id.name);
+ dev_set_name(&rpdev->dev, "%s.%s.%d.%d", dev_name(dev->parent),
+ rpdev->id.name, rpdev->src, rpdev->dst);
rpdev->dev.bus = &rpmsg_bus;
rpdev->dev.release = rpmsg_release_device;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 6d4b68c4..e7addea 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -281,8 +281,6 @@ static inline int qeth_is_ipa_enabled(struct qeth_ipa_info *ipa,
#define QETH_HIGH_WATERMARK_PACK 5
#define QETH_WATERMARK_PACK_FUZZ 1
-#define QETH_IP_HEADER_SIZE 40
-
/* large receive scatter gather copy break */
#define QETH_RX_SG_CB (PAGE_SIZE >> 1)
#define QETH_RX_PULL_LEN 256
@@ -674,8 +672,6 @@ struct qeth_card_info {
int broadcast_capable;
int unique_id;
struct qeth_card_blkt blkt;
- __u32 csum_mask;
- __u32 tx_csum_mask;
enum qeth_ipa_promisc_modes promisc_mode;
__u32 diagass_support;
__u32 hwtrap;
@@ -917,7 +913,6 @@ void qeth_clear_thread_running_bit(struct qeth_card *, unsigned long);
int qeth_core_hardsetup_card(struct qeth_card *);
void qeth_print_status_message(struct qeth_card *);
int qeth_init_qdio_queues(struct qeth_card *);
-int qeth_send_startlan(struct qeth_card *);
int qeth_send_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *,
int (*reply_cb)
(struct qeth_card *, struct qeth_reply *, unsigned long),
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index e335583..315d8a2 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -2944,7 +2944,7 @@ int qeth_send_ipa_cmd(struct qeth_card *card, struct qeth_cmd_buffer *iob,
}
EXPORT_SYMBOL_GPL(qeth_send_ipa_cmd);
-int qeth_send_startlan(struct qeth_card *card)
+static int qeth_send_startlan(struct qeth_card *card)
{
int rc;
struct qeth_cmd_buffer *iob;
@@ -2957,7 +2957,6 @@ int qeth_send_startlan(struct qeth_card *card)
rc = qeth_send_ipa_cmd(card, iob, NULL, NULL);
return rc;
}
-EXPORT_SYMBOL_GPL(qeth_send_startlan);
static int qeth_default_setadapterparms_cb(struct qeth_card *card,
struct qeth_reply *reply, unsigned long data)
@@ -5087,6 +5086,20 @@ retriable:
goto out;
}
+ rc = qeth_send_startlan(card);
+ if (rc) {
+ QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ if (rc == IPA_RC_LAN_OFFLINE) {
+ dev_warn(&card->gdev->dev,
+ "The LAN is offline\n");
+ card->lan_online = 0;
+ } else {
+ rc = -ENODEV;
+ goto out;
+ }
+ } else
+ card->lan_online = 1;
+
card->options.ipa4.supported_funcs = 0;
card->options.ipa6.supported_funcs = 0;
card->options.adp.supported_funcs = 0;
@@ -5098,14 +5111,14 @@ retriable:
if (qeth_is_supported(card, IPA_SETADAPTERPARMS)) {
rc = qeth_query_setadapterparms(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
goto out;
}
}
if (qeth_adp_supported(card, IPA_SETADP_SET_DIAG_ASSIST)) {
rc = qeth_query_setdiagass(card);
if (rc < 0) {
- QETH_DBF_TEXT_(SETUP, 2, "7err%d", rc);
+ QETH_DBF_TEXT_(SETUP, 2, "8err%d", rc);
goto out;
}
}
@@ -5289,18 +5302,6 @@ int qeth_setassparms_cb(struct qeth_card *card,
if (cmd->hdr.prot_version == QETH_PROT_IPV6)
card->options.ipa6.enabled_funcs = cmd->hdr.ipa_enabled;
}
- if (cmd->data.setassparms.hdr.assist_no == IPA_INBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.csum_mask = cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "csum:%d", card->info.csum_mask);
- }
- if (cmd->data.setassparms.hdr.assist_no == IPA_OUTBOUND_CHECKSUM &&
- cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
- card->info.tx_csum_mask =
- cmd->data.setassparms.data.flags_32bit;
- QETH_CARD_TEXT_(card, 3, "tcsu:%d", card->info.tx_csum_mask);
- }
-
return 0;
}
EXPORT_SYMBOL_GPL(qeth_setassparms_cb);
@@ -6060,23 +6061,96 @@ int qeth_core_ethtool_get_settings(struct net_device *netdev,
}
EXPORT_SYMBOL_GPL(qeth_core_ethtool_get_settings);
+/* Callback to handle checksum offload command reply from OSA card.
+ * Verify that the required features have been enabled on the card.
+ * Return the error in hdr->return_code, as this value is checked by the caller.
+ *
+ * Always returns zero to indicate no further messages from the OSA card.
+ */
+static int qeth_ipa_checksum_run_cmd_cb(struct qeth_card *card,
+ struct qeth_reply *reply,
+ unsigned long data)
+{
+ struct qeth_ipa_cmd *cmd = (struct qeth_ipa_cmd *) data;
+ struct qeth_checksum_cmd *chksum_cb =
+ (struct qeth_checksum_cmd *)reply->param;
+
+ QETH_CARD_TEXT(card, 4, "chkdoccb");
+ if (cmd->hdr.return_code)
+ return 0;
+
+ memset(chksum_cb, 0, sizeof(*chksum_cb));
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_START) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ QETH_CARD_TEXT_(card, 3, "strt:%x", chksum_cb->supported);
+ }
+ if (cmd->data.setassparms.hdr.command_code == IPA_CMD_ASS_ENABLE) {
+ chksum_cb->supported =
+ cmd->data.setassparms.data.chksum.supported;
+ chksum_cb->enabled =
+ cmd->data.setassparms.data.chksum.enabled;
+ QETH_CARD_TEXT_(card, 3, "supp:%x", chksum_cb->supported);
+ QETH_CARD_TEXT_(card, 3, "enab:%x", chksum_cb->enabled);
+ }
+ return 0;
+}
+
+/* Send command to OSA card and check results. */
+static int qeth_ipa_checksum_run_cmd(struct qeth_card *card,
+ enum qeth_ipa_funcs ipa_func,
+ __u16 cmd_code, long data,
+ struct qeth_checksum_cmd *chksum_cb)
+{
+ struct qeth_cmd_buffer *iob;
+ int rc = -ENOMEM;
+
+ QETH_CARD_TEXT(card, 4, "chkdocmd");
+ iob = qeth_get_setassparms_cmd(card, ipa_func, cmd_code,
+ sizeof(__u32), QETH_PROT_IPV4);
+ if (iob)
+ rc = qeth_send_setassparms(card, iob, sizeof(__u32), data,
+ qeth_ipa_checksum_run_cmd_cb,
+ chksum_cb);
+ return rc;
+}
+
static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
{
- long rxtx_arg;
+ const __u32 required_features = QETH_IPA_CHECKSUM_IP_HDR |
+ QETH_IPA_CHECKSUM_UDP |
+ QETH_IPA_CHECKSUM_TCP;
+ struct qeth_checksum_cmd chksum_cb;
int rc;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_START, 0);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_START, 0,
+ &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.supported) !=
+ required_features)
+ rc = -EIO;
+ else if (!(QETH_IPA_CHECKSUM_LP2LP & chksum_cb.supported) &&
+ cstype == IPA_INBOUND_CHECKSUM)
+ dev_warn(&card->gdev->dev,
+ "Hardware checksumming is performed only if %s and its peer use different OSA Express 3 ports\n",
+ QETH_CARD_IFNAME(card));
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Starting HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
return rc;
}
- rxtx_arg = (cstype == IPA_OUTBOUND_CHECKSUM) ? card->info.tx_csum_mask
- : card->info.csum_mask;
- rc = qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_ENABLE,
- rxtx_arg);
+ rc = qeth_ipa_checksum_run_cmd(card, cstype, IPA_CMD_ASS_ENABLE,
+ chksum_cb.supported, &chksum_cb);
+ if (!rc) {
+ if ((required_features & chksum_cb.enabled) !=
+ required_features)
+ rc = -EIO;
+ }
if (rc) {
+ qeth_send_simple_setassparms(card, cstype, IPA_CMD_ASS_STOP, 0);
dev_warn(&card->gdev->dev,
"Enabling HW checksumming for %s failed, using SW checksumming\n",
QETH_CARD_IFNAME(card));
@@ -6090,19 +6164,10 @@ static int qeth_send_checksum_on(struct qeth_card *card, int cstype)
static int qeth_set_ipa_csum(struct qeth_card *card, int on, int cstype)
{
- int rc;
-
- if (on) {
- rc = qeth_send_checksum_on(card, cstype);
- if (rc)
- return -EIO;
- } else {
- rc = qeth_send_simple_setassparms(card, cstype,
- IPA_CMD_ASS_STOP, 0);
- if (rc)
- return -EIO;
- }
- return 0;
+ int rc = (on) ? qeth_send_checksum_on(card, cstype)
+ : qeth_send_simple_setassparms(card, cstype,
+ IPA_CMD_ASS_STOP, 0);
+ return rc ? -EIO : 0;
}
static int qeth_set_ipa_tso(struct qeth_card *card, int on)
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index 6cccc9a..bc69d0a 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -352,11 +352,28 @@ struct qeth_arp_query_info {
char *udata;
};
+/* IPA set assist segmentation bit definitions for receive and
+ * transmit checksum offloading.
+ */
+enum qeth_ipa_checksum_bits {
+ QETH_IPA_CHECKSUM_IP_HDR = 0x0002,
+ QETH_IPA_CHECKSUM_UDP = 0x0008,
+ QETH_IPA_CHECKSUM_TCP = 0x0010,
+ QETH_IPA_CHECKSUM_LP2LP = 0x0020
+};
+
+/* IPA Assist checksum offload reply layout. */
+struct qeth_checksum_cmd {
+ __u32 supported;
+ __u32 enabled;
+} __packed;
+
/* SETASSPARMS IPA Command: */
struct qeth_ipacmd_setassparms {
struct qeth_ipacmd_setassparms_hdr hdr;
union {
__u32 flags_32bit;
+ struct qeth_checksum_cmd chksum;
struct qeth_arp_cache_entry add_arp_entry;
struct qeth_arp_query_data query_arp;
__u8 ip[16];
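The feature negotiation built on these bits is a plain mask comparison: every required bit must appear in the card's reply, and extra bits are ignored. A minimal standalone sketch of the check performed by qeth_send_checksum_on() (not driver code; "supported" stands for the value returned in struct qeth_checksum_cmd):

    static int qeth_csum_features_ok(u32 supported)
    {
            const u32 required = QETH_IPA_CHECKSUM_IP_HDR |
                                 QETH_IPA_CHECKSUM_UDP |
                                 QETH_IPA_CHECKSUM_TCP;

            /* all required bits present? extra bits are fine */
            return (supported & required) == required ? 0 : -EIO;
    }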
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 9c921c28..bea4833 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -27,9 +27,6 @@
static int qeth_l2_set_offline(struct ccwgroup_device *);
static int qeth_l2_stop(struct net_device *);
-static int qeth_l2_send_delmac(struct qeth_card *, __u8 *);
-static int qeth_l2_send_setdelmac(struct qeth_card *, __u8 *,
- enum qeth_ipa_cmds);
static void qeth_l2_set_rx_mode(struct net_device *);
static int qeth_l2_recover(void *);
static void qeth_bridgeport_query_support(struct qeth_card *card);
@@ -165,13 +162,70 @@ static int qeth_setdel_makerc(struct qeth_card *card, int retcode)
return rc;
}
+static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
+ enum qeth_ipa_cmds ipacmd)
+{
+ struct qeth_ipa_cmd *cmd;
+ struct qeth_cmd_buffer *iob;
+
+ QETH_CARD_TEXT(card, 2, "L2sdmac");
+ iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
+ if (!iob)
+ return -ENOMEM;
+ cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
+ cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
+ memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
+ return qeth_setdel_makerc(card, qeth_send_ipa_cmd(card, iob,
+ NULL, NULL));
+}
+
+static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Setmac");
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETVMAC);
+ if (rc == 0) {
+ card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
+ memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
+ dev_info(&card->gdev->dev,
+ "MAC address %pM successfully registered on device %s\n",
+ card->dev->dev_addr, card->dev->name);
+ } else {
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ switch (rc) {
+ case -EEXIST:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM already exists\n", mac);
+ break;
+ case -EPERM:
+ dev_warn(&card->gdev->dev,
+ "MAC address %pM is not authorized\n", mac);
+ break;
+ }
+ }
+ return rc;
+}
+
+static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
+{
+ int rc;
+
+ QETH_CARD_TEXT(card, 2, "L2Delmac");
+ if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
+ return 0;
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELVMAC);
+ if (rc == 0)
+ card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
+ return rc;
+}
+
static int qeth_l2_send_setgroupmac(struct qeth_card *card, __u8 *mac)
{
int rc;
QETH_CARD_TEXT(card, 2, "L2Sgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_SETGMAC);
if (rc == -EEXIST)
QETH_DBF_MESSAGE(2, "Group MAC %pM already existing on %s\n",
mac, QETH_CARD_IFNAME(card));
@@ -186,8 +240,7 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
int rc;
QETH_CARD_TEXT(card, 2, "L2Dgmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELGMAC));
+ rc = qeth_l2_send_setdelmac(card, mac, IPA_CMD_DELGMAC);
if (rc)
QETH_DBF_MESSAGE(2,
"Could not delete group MAC %pM on %s: %d\n",
@@ -195,28 +248,27 @@ static int qeth_l2_send_delgroupmac(struct qeth_card *card, __u8 *mac)
return rc;
}
-static inline u32 qeth_l2_mac_hash(const u8 *addr)
+static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
{
- return get_unaligned((u32 *)(&addr[2]));
+ if (mac->is_uc) {
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_SETVMAC);
+ } else {
+ return qeth_l2_send_setgroupmac(card, mac->mac_addr);
+ }
}
-static int qeth_l2_write_mac(struct qeth_card *card, struct qeth_mac *mac)
+static int qeth_l2_remove_mac(struct qeth_card *card, struct qeth_mac *mac)
{
-
- int rc;
-
if (mac->is_uc) {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_SETVMAC));
+ return qeth_l2_send_setdelmac(card, mac->mac_addr,
+ IPA_CMD_DELVMAC);
} else {
- rc = qeth_setdel_makerc(card,
- qeth_l2_send_setgroupmac(card, mac->mac_addr));
+ return qeth_l2_send_delgroupmac(card, mac->mac_addr);
}
- return rc;
}
-static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
+static void qeth_l2_del_all_macs(struct qeth_card *card)
{
struct qeth_mac *mac;
struct hlist_node *tmp;
@@ -224,19 +276,17 @@ static void qeth_l2_del_all_macs(struct qeth_card *card, int del)
spin_lock_bh(&card->mclock);
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
- if (del) {
- if (mac->is_uc)
- qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- else
- qeth_l2_send_delgroupmac(card, mac->mac_addr);
- }
hash_del(&mac->hnode);
kfree(mac);
}
spin_unlock_bh(&card->mclock);
}
+static inline u32 qeth_l2_mac_hash(const u8 *addr)
+{
+ return get_unaligned((u32 *)(&addr[2]));
+}
+
static inline int qeth_l2_get_cast_type(struct qeth_card *card,
struct sk_buff *skb)
{
@@ -425,7 +475,7 @@ static void qeth_l2_stop_card(struct qeth_card *card, int recovery_mode)
card->state = CARD_STATE_SOFTSETUP;
}
if (card->state == CARD_STATE_SOFTSETUP) {
- qeth_l2_del_all_macs(card, 0);
+ qeth_l2_del_all_macs(card);
qeth_clear_ipacmd_list(card);
card->state = CARD_STATE_HARDSETUP;
}
@@ -577,65 +627,6 @@ out:
return work_done;
}
-static int qeth_l2_send_setdelmac(struct qeth_card *card, __u8 *mac,
- enum qeth_ipa_cmds ipacmd)
-{
- struct qeth_ipa_cmd *cmd;
- struct qeth_cmd_buffer *iob;
-
- QETH_CARD_TEXT(card, 2, "L2sdmac");
- iob = qeth_get_ipacmd_buffer(card, ipacmd, QETH_PROT_IPV4);
- if (!iob)
- return -ENOMEM;
- cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
- cmd->data.setdelmac.mac_length = OSA_ADDR_LEN;
- memcpy(&cmd->data.setdelmac.mac, mac, OSA_ADDR_LEN);
- return qeth_send_ipa_cmd(card, iob, NULL, NULL);
-}
-
-static int qeth_l2_send_setmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Setmac");
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_SETVMAC));
- if (rc == 0) {
- card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED;
- memcpy(card->dev->dev_addr, mac, OSA_ADDR_LEN);
- dev_info(&card->gdev->dev,
- "MAC address %pM successfully registered on device %s\n",
- card->dev->dev_addr, card->dev->name);
- } else {
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- switch (rc) {
- case -EEXIST:
- dev_warn(&card->gdev->dev,
- "MAC address %pM already exists\n", mac);
- break;
- case -EPERM:
- dev_warn(&card->gdev->dev,
- "MAC address %pM is not authorized\n", mac);
- break;
- }
- }
- return rc;
-}
-
-static int qeth_l2_send_delmac(struct qeth_card *card, __u8 *mac)
-{
- int rc;
-
- QETH_CARD_TEXT(card, 2, "L2Delmac");
- if (!(card->info.mac_bits & QETH_LAYER2_MAC_REGISTERED))
- return 0;
- rc = qeth_setdel_makerc(card, qeth_l2_send_setdelmac(card, mac,
- IPA_CMD_DELVMAC));
- if (rc == 0)
- card->info.mac_bits &= ~QETH_LAYER2_MAC_REGISTERED;
- return rc;
-}
-
static int qeth_l2_request_initial_mac(struct qeth_card *card)
{
int rc = 0;
@@ -794,14 +785,7 @@ static void qeth_l2_set_rx_mode(struct net_device *dev)
hash_for_each_safe(card->mac_htable, i, tmp, mac, hnode) {
if (mac->disp_flag == QETH_DISP_ADDR_DELETE) {
- if (!mac->is_uc)
- rc = qeth_l2_send_delgroupmac(card,
- mac->mac_addr);
- else {
- rc = qeth_l2_send_setdelmac(card, mac->mac_addr,
- IPA_CMD_DELVMAC);
- }
-
+ qeth_l2_remove_mac(card, mac);
hash_del(&mac->hnode);
kfree(mac);
@@ -1193,21 +1177,6 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
if ((card->info.type == QETH_CARD_TYPE_OSD) ||
(card->info.type == QETH_CARD_TYPE_OSX)) {
rc = qeth_l2_start_ipassists(card);
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index ac37d05..06d0add 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -3227,21 +3227,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
/* softsetup */
QETH_DBF_TEXT(SETUP, 2, "softsetp");
- rc = qeth_send_startlan(card);
- if (rc) {
- QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
- if (rc == 0xe080) {
- dev_warn(&card->gdev->dev,
- "The LAN is offline\n");
- card->lan_online = 0;
- goto contin;
- }
- rc = -ENODEV;
- goto out_remove;
- } else
- card->lan_online = 1;
-
-contin:
rc = qeth_l3_setadapter_parms(card);
if (rc)
QETH_DBF_TEXT_(SETUP, 2, "2err%04x", rc);
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index 0e00a5c..05e9471 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -250,9 +250,6 @@ static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
if (card->info.type != QETH_CARD_TYPE_IQD)
return -EPERM;
- if (card->state == CARD_STATE_DOWN)
- return -EPERM;
-
memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
EBCASC(tmp_hsuid, 8);
return sprintf(buf, "%s\n", tmp_hsuid);
@@ -692,15 +689,15 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_VIPA)
@@ -708,16 +705,17 @@ static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can safely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
@@ -854,15 +852,15 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
enum qeth_prot_versions proto)
{
struct qeth_ipaddr *ipaddr;
- struct hlist_node *tmp;
char addr_str[40];
+ int str_len = 0;
int entry_len; /* length of 1 entry string, differs between v4 and v6 */
- int i = 0;
+ int i;
entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
entry_len += 2; /* \n + terminator */
spin_lock_bh(&card->ip_lock);
- hash_for_each_safe(card->ip_htable, i, tmp, ipaddr, hnode) {
+ hash_for_each(card->ip_htable, i, ipaddr, hnode) {
if (ipaddr->proto != proto)
continue;
if (ipaddr->type != QETH_IP_TYPE_RXIP)
@@ -870,16 +868,17 @@ static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
/* String must not be longer than PAGE_SIZE. So we check if
* string length gets near PAGE_SIZE. Then we can safely display
* the next IPv6 address (worst case, compared to IPv4) */
- if ((PAGE_SIZE - i) <= entry_len)
+ if ((PAGE_SIZE - str_len) <= entry_len)
break;
qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
addr_str);
- i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "%s\n",
+ addr_str);
}
spin_unlock_bh(&card->ip_lock);
- i += snprintf(buf + i, PAGE_SIZE - i, "\n");
+ str_len += snprintf(buf + str_len, PAGE_SIZE - str_len, "\n");
- return i;
+ return str_len;
}
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
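The underlying bug in both show routines: hash_for_each() uses its second argument as the bucket counter and overwrites it on every iteration, so reusing "i" as the snprintf offset corrupted the reported length. The corrected pattern, sketched with a hypothetical entry_to_string() helper:

    int bkt;         /* bucket index, clobbered by hash_for_each() */
    int str_len = 0; /* bytes written into buf so far */

    hash_for_each(htable, bkt, entry, hnode)
            str_len += snprintf(buf + str_len, PAGE_SIZE - str_len,
                                "%s\n", entry_to_string(entry));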
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index 639ed4e..070c4da 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -145,6 +145,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define CCW_CMD_WRITE_CONF 0x21
#define CCW_CMD_WRITE_STATUS 0x31
#define CCW_CMD_READ_VQ_CONF 0x32
+#define CCW_CMD_READ_STATUS 0x72
#define CCW_CMD_SET_IND_ADAPTER 0x73
#define CCW_CMD_SET_VIRTIO_REV 0x83
@@ -160,6 +161,7 @@ static struct airq_info *airq_areas[MAX_AIRQ_AREAS];
#define VIRTIO_CCW_DOING_SET_CONF_IND 0x04000000
#define VIRTIO_CCW_DOING_SET_IND_ADAPTER 0x08000000
#define VIRTIO_CCW_DOING_SET_VIRTIO_REV 0x10000000
+#define VIRTIO_CCW_DOING_READ_STATUS 0x20000000
#define VIRTIO_CCW_INTPARM_MASK 0xffff0000
static struct virtio_ccw_device *to_vc_device(struct virtio_device *vdev)
@@ -452,7 +454,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
* This may happen on device detach.
*/
if (ret && (ret != -ENODEV))
- dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d",
+ dev_warn(&vq->vdev->dev, "Error %d while deleting queue %d\n",
ret, index);
vring_del_virtqueue(vq);
@@ -892,6 +894,28 @@ out_free:
static u8 virtio_ccw_get_status(struct virtio_device *vdev)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ u8 old_status = *vcdev->status;
+ struct ccw1 *ccw;
+
+ if (vcdev->revision < 1)
+ return *vcdev->status;
+
+ ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ if (!ccw)
+ return old_status;
+
+ ccw->cmd_code = CCW_CMD_READ_STATUS;
+ ccw->flags = 0;
+ ccw->count = sizeof(*vcdev->status);
+ ccw->cda = (__u32)(unsigned long)vcdev->status;
+ ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
+/*
+ * If the channel program failed (should only happen if the device
+ * was hotunplugged, and then we clean up via the machine check
+ * handler anyway), vcdev->status was not overwritten and we just
+ * return the old status, which is fine.
+ */
+ kfree(ccw);
return *vcdev->status;
}
@@ -920,7 +944,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
kfree(ccw);
}
-static struct virtio_config_ops virtio_ccw_config_ops = {
+static const struct virtio_config_ops virtio_ccw_config_ops = {
.get_features = virtio_ccw_get_features,
.finalize_features = virtio_ccw_finalize_features,
.get = virtio_ccw_get_config,
@@ -987,6 +1011,7 @@ static void virtio_ccw_check_activity(struct virtio_ccw_device *vcdev,
case VIRTIO_CCW_DOING_READ_CONFIG:
case VIRTIO_CCW_DOING_WRITE_CONFIG:
case VIRTIO_CCW_DOING_WRITE_STATUS:
+ case VIRTIO_CCW_DOING_READ_STATUS:
case VIRTIO_CCW_DOING_SET_VQ:
case VIRTIO_CCW_DOING_SET_IND:
case VIRTIO_CCW_DOING_SET_CONF_IND:
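The new get_status path only issues CCW_CMD_READ_STATUS for devices that negotiated revision 1 or later; older devices, an allocation failure, or a failed channel program all fall back to the cached value. A simplified sketch of the shape (issue_read_status_ccw() is an illustrative helper, not the driver's API; on failure the buffer is left untouched, so rereading it naturally yields the cached status):

    u8 old_status = *vcdev->status;

    if (vcdev->revision < 1)
            return old_status;              /* command not in revision 0 */
    if (issue_read_status_ccw(vcdev))       /* hypothetical helper */
            return old_status;
    return *vcdev->status;                  /* refreshed by the device */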
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index d9e1521..5caf5f3 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -64,9 +64,9 @@ int max_rport_logins = BFA_FCS_MAX_RPORT_LOGINS;
u32 bfi_image_cb_size, bfi_image_ct_size, bfi_image_ct2_size;
u32 *bfi_image_cb, *bfi_image_ct, *bfi_image_ct2;
-#define BFAD_FW_FILE_CB "cbfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT "ctfw-3.2.3.0.bin"
-#define BFAD_FW_FILE_CT2 "ct2fw-3.2.3.0.bin"
+#define BFAD_FW_FILE_CB "cbfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT "ctfw-3.2.5.1.bin"
+#define BFAD_FW_FILE_CT2 "ct2fw-3.2.5.1.bin"
static u32 *bfad_load_fwimg(struct pci_dev *pdev);
static void bfad_free_fwimg(void);
diff --git a/drivers/scsi/bfa/bfad_bsg.c b/drivers/scsi/bfa/bfad_bsg.c
index a9a0016..b2e8c0d 100644
--- a/drivers/scsi/bfa/bfad_bsg.c
+++ b/drivers/scsi/bfa/bfad_bsg.c
@@ -3363,7 +3363,7 @@ bfad_im_bsg_els_ct_request(struct bsg_job *job)
struct bfad_fcxp *drv_fcxp;
struct bfa_fcs_lport_s *fcs_port;
struct bfa_fcs_rport_s *fcs_rport;
- struct fc_bsg_request *bsg_request = bsg_request;
+ struct fc_bsg_request *bsg_request = job->request;
struct fc_bsg_reply *bsg_reply = job->reply;
uint32_t command_type = bsg_request->msgcode;
unsigned long flags;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index f9e8620..cfcfff4 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -58,7 +58,7 @@
#ifdef BFA_DRIVER_VERSION
#define BFAD_DRIVER_VERSION BFA_DRIVER_VERSION
#else
-#define BFAD_DRIVER_VERSION "3.2.25.0"
+#define BFAD_DRIVER_VERSION "3.2.25.1"
#endif
#define BFAD_PROTO_NAME FCPI_NAME
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 9ddc920..9e4b770 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -248,6 +248,7 @@ struct fnic {
struct completion *remove_wait; /* device remove thread blocks */
atomic_t in_flight; /* io counter */
+ bool internal_reset_inprogress;
u32 _reserved; /* fill hole */
unsigned long state_flags; /* protected by host lock */
enum fnic_state state;
diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
index 2544a37..adb3d58 100644
--- a/drivers/scsi/fnic/fnic_scsi.c
+++ b/drivers/scsi/fnic/fnic_scsi.c
@@ -2581,6 +2581,19 @@ int fnic_host_reset(struct scsi_cmnd *sc)
unsigned long wait_host_tmo;
struct Scsi_Host *shost = sc->device->host;
struct fc_lport *lp = shost_priv(shost);
+ struct fnic *fnic = lport_priv(lp);
+ unsigned long flags;
+
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ if (fnic->internal_reset_inprogress == 0) {
+ fnic->internal_reset_inprogress = 1;
+ } else {
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
+ FNIC_SCSI_DBG(KERN_DEBUG, fnic->lport->host,
+ "host reset in progress skipping another host reset\n");
+ return SUCCESS;
+ }
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
/*
* If fnic_reset is successful, wait for fabric login to complete
@@ -2601,6 +2614,9 @@ int fnic_host_reset(struct scsi_cmnd *sc)
}
}
+ spin_lock_irqsave(&fnic->fnic_lock, flags);
+ fnic->internal_reset_inprogress = 0;
+ spin_unlock_irqrestore(&fnic->fnic_lock, flags);
return ret;
}
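The guard above follows the usual flag-under-spinlock shape: test and set in one critical section, clear in another once the reset completes, so a second fnic_host_reset() caller returns immediately instead of stacking resets. Generic sketch:

    spin_lock_irqsave(&lock, flags);
    if (in_progress) {
            spin_unlock_irqrestore(&lock, flags);
            return SUCCESS;                 /* a reset is already running */
    }
    in_progress = true;
    spin_unlock_irqrestore(&lock, flags);

    do_reset();                             /* the actual reset work */

    spin_lock_irqsave(&lock, flags);
    in_progress = false;
    spin_unlock_irqrestore(&lock, flags);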
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 3d3768a..99b747c 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -46,6 +46,7 @@
#define INITIAL_SRP_LIMIT 800
#define DEFAULT_MAX_SECTORS 256
+#define MAX_TXU (1024 * 1024)
static uint max_vdma_size = MAX_H_COPY_RDMA;
@@ -1391,7 +1392,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
}
info = dma_alloc_coherent(&vscsi->dma_dev->dev, sizeof(*info), &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!info) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -1443,7 +1444,7 @@ static long ibmvscsis_adapter_info(struct scsi_info *vscsi,
info->mad_version = cpu_to_be32(MAD_VERSION_1);
info->os_type = cpu_to_be32(LINUX);
memset(&info->port_max_txu[0], 0, sizeof(info->port_max_txu));
- info->port_max_txu[0] = cpu_to_be32(128 * PAGE_SIZE);
+ info->port_max_txu[0] = cpu_to_be32(MAX_TXU);
dma_wmb();
rc = h_copy_rdma(sizeof(*info), vscsi->dds.window[LOCAL].liobn,
@@ -1509,7 +1510,7 @@ static int ibmvscsis_cap_mad(struct scsi_info *vscsi, struct iu_entry *iue)
}
cap = dma_alloc_coherent(&vscsi->dma_dev->dev, olen, &token,
- GFP_KERNEL);
+ GFP_ATOMIC);
if (!cap) {
dev_err(&vscsi->dev, "bad dma_alloc_coherent %p\n",
iue->target);
@@ -3585,7 +3586,7 @@ static int ibmvscsis_write_pending(struct se_cmd *se_cmd)
1, 1);
if (rc) {
pr_err("srp_transfer_data() failed: %d\n", rc);
- return -EAGAIN;
+ return -EIO;
}
/*
* We now tell TCM to add this WRITE CDB directly into the TCM storage
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 236e4e5..7b6bd8e 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -3590,12 +3590,14 @@ lpfc_els_free_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *elsiocb)
} else {
buf_ptr1 = (struct lpfc_dmabuf *) elsiocb->context2;
lpfc_els_free_data(phba, buf_ptr1);
+ elsiocb->context2 = NULL;
}
}
if (elsiocb->context3) {
buf_ptr = (struct lpfc_dmabuf *) elsiocb->context3;
lpfc_els_free_bpl(phba, buf_ptr);
+ elsiocb->context3 = NULL;
}
lpfc_sli_release_iocbq(phba, elsiocb);
return 0;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 4faa767..a78a3df 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -5954,18 +5954,25 @@ lpfc_sli4_alloc_resource_identifiers(struct lpfc_hba *phba)
free_vfi_bmask:
kfree(phba->sli4_hba.vfi_bmask);
+ phba->sli4_hba.vfi_bmask = NULL;
free_xri_ids:
kfree(phba->sli4_hba.xri_ids);
+ phba->sli4_hba.xri_ids = NULL;
free_xri_bmask:
kfree(phba->sli4_hba.xri_bmask);
+ phba->sli4_hba.xri_bmask = NULL;
free_vpi_ids:
kfree(phba->vpi_ids);
+ phba->vpi_ids = NULL;
free_vpi_bmask:
kfree(phba->vpi_bmask);
+ phba->vpi_bmask = NULL;
free_rpi_ids:
kfree(phba->sli4_hba.rpi_ids);
+ phba->sli4_hba.rpi_ids = NULL;
free_rpi_bmask:
kfree(phba->sli4_hba.rpi_bmask);
+ phba->sli4_hba.rpi_bmask = NULL;
err_exit:
return rc;
}
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 394fe13..dcb33f4 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -393,6 +393,7 @@ struct MPT3SAS_TARGET {
* @eedp_enable: eedp support enable bit
* @eedp_type: 0(type_1), 1(type_2), 2(type_3)
* @eedp_block_length: block size
+ * @ata_command_pending: SATL passthrough outstanding for device
*/
struct MPT3SAS_DEVICE {
struct MPT3SAS_TARGET *sas_target;
@@ -404,6 +405,17 @@ struct MPT3SAS_DEVICE {
u8 ignore_delay_remove;
/* Iopriority Command Handling */
u8 ncq_prio_enable;
+ /*
+ * Bug workaround for SATL handling: while a SATL pass through is
+ * in operation, the mpt2/3sas firmware doesn't return BUSY or
+ * TASK_SET_FULL for subsequent commands as the spec requires; it
+ * simply does nothing with them until the pass through completes,
+ * possibly causing them to time out if the passthrough is a
+ * long-running command (like format or secure erase). This
+ * variable allows us to do the right thing while a SATL command
+ * is pending.
+ */
+ unsigned long ata_command_pending;
};
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index b5c966e..75f3fce 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -3899,9 +3899,18 @@ _scsih_temp_threshold_events(struct MPT3SAS_ADAPTER *ioc,
}
}
-static inline bool ata_12_16_cmd(struct scsi_cmnd *scmd)
+static int _scsih_set_satl_pending(struct scsi_cmnd *scmd, bool pending)
{
- return (scmd->cmnd[0] == ATA_12 || scmd->cmnd[0] == ATA_16);
+ struct MPT3SAS_DEVICE *priv = scmd->device->hostdata;
+
+ if (scmd->cmnd[0] != ATA_12 && scmd->cmnd[0] != ATA_16)
+ return 0;
+
+ if (pending)
+ return test_and_set_bit(0, &priv->ata_command_pending);
+
+ clear_bit(0, &priv->ata_command_pending);
+ return 0;
}
/**
@@ -3925,9 +3934,7 @@ _scsih_flush_running_cmds(struct MPT3SAS_ADAPTER *ioc)
if (!scmd)
continue;
count++;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device,
- SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpt3sas_base_free_smid(ioc, smid);
scsi_dma_unmap(scmd);
if (ioc->pci_error_recovery)
@@ -4063,13 +4070,6 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
if (ioc->logging_level & MPT_DEBUG_SCSI)
scsi_print_command(scmd);
- /*
- * Lock the device for any subsequent command until command is
- * done.
- */
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_block(scmd->device);
-
sas_device_priv_data = scmd->device->hostdata;
if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
scmd->result = DID_NO_CONNECT << 16;
@@ -4083,6 +4083,19 @@ scsih_qcmd(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
return 0;
}
+ /*
+ * Bug workaround for firmware SATL handling: the loop
+ * is based on atomic operations and ensures consistency,
+ * since we're lockless at this point.
+ */
+ do {
+ if (test_bit(0, &sas_device_priv_data->ata_command_pending)) {
+ scmd->result = SAM_STAT_BUSY;
+ scmd->scsi_done(scmd);
+ return 0;
+ }
+ } while (_scsih_set_satl_pending(scmd, true));
+
sas_target_priv_data = sas_device_priv_data->sas_target;
/* invalid device handle */
@@ -4650,8 +4663,7 @@ _scsih_io_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index, u32 reply)
if (scmd == NULL)
return 1;
- if (ata_12_16_cmd(scmd))
- scsi_internal_device_unblock(scmd->device, SDEV_RUNNING);
+ _scsih_set_satl_pending(scmd, false);
mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
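Taken together, the qcmd loop and the clear in the completion and flush paths form a lockless gate: test_and_set_bit() atomically returns the previous value, so exactly one ATA_12/ATA_16 command owns the bit at a time, and any later ATA command is completed with SAM_STAT_BUSY instead of being queued behind the passthrough. Reduced to its core (ATA commands only; the real loop also passes non-ATA commands straight through):

    /* returns nonzero if another ATA command already holds the slot */
    if (test_and_set_bit(0, &priv->ata_command_pending)) {
            scmd->result = SAM_STAT_BUSY;   /* bounce, don't queue */
            scmd->scsi_done(scmd);
            return 0;
    }
    /* ... issue the command; the completion path later does:
     * clear_bit(0, &priv->ata_command_pending);
     */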
diff --git a/drivers/scsi/qedi/Kconfig b/drivers/scsi/qedi/Kconfig
index 23ca8a2..2133145 100644
--- a/drivers/scsi/qedi/Kconfig
+++ b/drivers/scsi/qedi/Kconfig
@@ -1,6 +1,6 @@
config QEDI
tristate "QLogic QEDI 25/40/100Gb iSCSI Initiator Driver Support"
- depends on PCI && SCSI
+ depends on PCI && SCSI && UIO
depends on QED
select SCSI_ISCSI_ATTRS
select QED_LL2
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 47eb4d5..f201f40 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -243,12 +243,15 @@ qla2x00_sysfs_read_optrom(struct file *filp, struct kobject *kobj,
struct qla_hw_data *ha = vha->hw;
ssize_t rval = 0;
+ mutex_lock(&ha->optrom_mutex);
+
if (ha->optrom_state != QLA_SREADING)
- return 0;
+ goto out;
- mutex_lock(&ha->optrom_mutex);
rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
ha->optrom_region_size);
+
+out:
mutex_unlock(&ha->optrom_mutex);
return rval;
@@ -263,14 +266,19 @@ qla2x00_sysfs_write_optrom(struct file *filp, struct kobject *kobj,
struct device, kobj)));
struct qla_hw_data *ha = vha->hw;
- if (ha->optrom_state != QLA_SWRITING)
+ mutex_lock(&ha->optrom_mutex);
+
+ if (ha->optrom_state != QLA_SWRITING) {
+ mutex_unlock(&ha->optrom_mutex);
return -EINVAL;
- if (off > ha->optrom_region_size)
+ }
+ if (off > ha->optrom_region_size) {
+ mutex_unlock(&ha->optrom_mutex);
return -ERANGE;
+ }
if (off + count > ha->optrom_region_size)
count = ha->optrom_region_size - off;
- mutex_lock(&ha->optrom_mutex);
memcpy(&ha->optrom_buffer[off], buf, count);
mutex_unlock(&ha->optrom_mutex);
@@ -753,7 +761,6 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
struct device, kobj)));
int type;
- int rval = 0;
port_id_t did;
type = simple_strtol(buf, NULL, 10);
@@ -767,7 +774,7 @@ qla2x00_issue_logo(struct file *filp, struct kobject *kobj,
ql_log(ql_log_info, vha, 0x70e4, "%s: %d\n", __func__, type);
- rval = qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
+ qla24xx_els_dcmd_iocb(vha, ELS_DCMD_LOGO, did);
return count;
}
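Both sysfs handlers now take optrom_mutex before inspecting optrom_state, closing the window in which the state could change between the check and the buffer access. The resulting pattern, as used in the read path above:

    mutex_lock(&ha->optrom_mutex);
    if (ha->optrom_state != QLA_SREADING) { /* check under the lock */
            mutex_unlock(&ha->optrom_mutex);
            return 0;
    }
    rval = memory_read_from_buffer(buf, count, &off, ha->optrom_buffer,
                                   ha->optrom_region_size);
    mutex_unlock(&ha->optrom_mutex);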
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index f7df01b..5b1287a 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1556,7 +1556,8 @@ typedef struct {
struct atio {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
};
@@ -2732,7 +2733,7 @@ struct isp_operations {
#define QLA_MSIX_FW_MODE(m) (((m) & (BIT_7|BIT_8|BIT_9)) >> 7)
#define QLA_MSIX_FW_MODE_1(m) (QLA_MSIX_FW_MODE(m) == 1)
-#define QLA_MSIX_DEFAULT 0x00
+#define QLA_BASE_VECTORS 2 /* default + RSP */
#define QLA_MSIX_RSP_Q 0x01
#define QLA_ATIO_VECTOR 0x02
#define QLA_MSIX_QPAIR_MULTIQ_RSP_Q 0x03
@@ -2754,7 +2755,6 @@ struct qla_msix_entry {
uint16_t entry;
char name[30];
void *handle;
- struct irq_affinity_notify irq_notify;
int cpuid;
};
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 632d5f3..7b6317c 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1191,7 +1191,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
/* Wait for soft-reset to complete. */
RD_REG_DWORD(&reg->ctrl_status);
- for (cnt = 0; cnt < 6000000; cnt++) {
+ for (cnt = 0; cnt < 60; cnt++) {
barrier();
if ((RD_REG_DWORD(&reg->ctrl_status) &
CSRX_ISP_SOFT_RESET) == 0)
@@ -1234,7 +1234,7 @@ qla24xx_reset_risc(scsi_qla_host_t *vha)
RD_REG_DWORD(&reg->hccr);
RD_REG_WORD(&reg->mailbox0);
- for (cnt = 6000000; RD_REG_WORD(&reg->mailbox0) != 0 &&
+ for (cnt = 60; RD_REG_WORD(&reg->mailbox0) != 0 &&
rval == QLA_SUCCESS; cnt--) {
barrier();
if (cnt)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 5093ca9..dc88a09 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -19,10 +19,6 @@ static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
sts_entry_t *);
-static void qla_irq_affinity_notify(struct irq_affinity_notify *,
- const cpumask_t *);
-static void qla_irq_affinity_release(struct kref *);
-
/**
* qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -2496,6 +2492,10 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
if (pkt->entry_status & RF_BUSY)
res = DID_BUS_BUSY << 16;
+ if (pkt->entry_type == NOTIFY_ACK_TYPE &&
+ pkt->handle == QLA_TGT_SKIP_HANDLE)
+ return;
+
sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
if (sp) {
sp->done(ha, sp, res);
@@ -2572,14 +2572,6 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
if (!vha->flags.online)
return;
- if (rsp->msix && rsp->msix->cpuid != smp_processor_id()) {
- /* if kernel does not notify qla of IRQ's CPU change,
- * then set it here.
- */
- rsp->msix->cpuid = smp_processor_id();
- ha->tgt.rspq_vector_cpuid = rsp->msix->cpuid;
- }
-
while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
pkt = (struct sts_entry_24xx *)rsp->ring_ptr;
@@ -3018,13 +3010,20 @@ static struct qla_init_msix_entry qla82xx_msix_entries[] = {
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
-#define MIN_MSIX_COUNT 2
int i, ret;
struct qla_msix_entry *qentry;
scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
+ struct irq_affinity desc = {
+ .pre_vectors = QLA_BASE_VECTORS,
+ };
+
+ if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha))
+ desc.pre_vectors++;
+
+ ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
+ ha->msix_count, PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
+ &desc);
- ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
- PCI_IRQ_MSIX | PCI_IRQ_AFFINITY);
if (ret < 0) {
ql_log(ql_log_fatal, vha, 0x00c7,
"MSI-X: Failed to enable support, "
@@ -3069,13 +3068,10 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
qentry->have_irq = 0;
qentry->in_use = 0;
qentry->handle = NULL;
- qentry->irq_notify.notify = qla_irq_affinity_notify;
- qentry->irq_notify.release = qla_irq_affinity_release;
- qentry->cpuid = -1;
}
/* Enable MSI-X vectors for the base queue */
- for (i = 0; i < (QLA_MSIX_RSP_Q + 1); i++) {
+ for (i = 0; i < QLA_BASE_VECTORS; i++) {
qentry = &ha->msix_entries[i];
qentry->handle = rsp;
rsp->msix = qentry;
@@ -3093,18 +3089,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
goto msix_register_fail;
qentry->have_irq = 1;
qentry->in_use = 1;
-
- /* Register for CPU affinity notification. */
- irq_set_affinity_notifier(qentry->vector, &qentry->irq_notify);
-
- /* Schedule work (ie. trigger a notification) to read cpu
- * mask for this specific irq.
- * kref_get is required because
- * irq_affinity_notify() will do
- * kref_put().
- */
- kref_get(&qentry->irq_notify.kref);
- schedule_work(&qentry->irq_notify.work);
}
/*
@@ -3301,49 +3285,3 @@ int qla25xx_request_irq(struct qla_hw_data *ha, struct qla_qpair *qpair,
msix->handle = qpair;
return ret;
}
-
-
-/* irq_set_affinity/irqbalance will trigger notification of cpu mask update */
-static void qla_irq_affinity_notify(struct irq_affinity_notify *notify,
- const cpumask_t *mask)
-{
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct qla_hw_data *ha;
- struct scsi_qla_host *base_vha;
- struct rsp_que *rsp = e->handle;
-
- /* user is recommended to set mask to just 1 cpu */
- e->cpuid = cpumask_first(mask);
-
- ha = rsp->hw;
- base_vha = pci_get_drvdata(ha->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host %ld : vector %d cpu %d \n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-
- if (e->have_irq) {
- if ((IS_QLA83XX(ha) || IS_QLA27XX(ha)) &&
- (e->entry == QLA83XX_RSPQ_MSIX_ENTRY_NUMBER)) {
- ha->tgt.rspq_vector_cpuid = e->cpuid;
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: rspq vector %d cpu %d runtime change\n",
- __func__, base_vha->host_no, e->vector, e->cpuid);
- }
- }
-}
-
-static void qla_irq_affinity_release(struct kref *ref)
-{
- struct irq_affinity_notify *notify =
- container_of(ref, struct irq_affinity_notify, kref);
- struct qla_msix_entry *e =
- container_of(notify, struct qla_msix_entry, irq_notify);
- struct rsp_que *rsp = e->handle;
- struct scsi_qla_host *base_vha = pci_get_drvdata(rsp->hw->pdev);
-
- ql_dbg(ql_dbg_init, base_vha, 0xffff,
- "%s: host%ld: vector %d cpu %d\n", __func__,
- base_vha->host_no, e->vector, e->cpuid);
-}
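The hand-rolled affinity notifier can go away because pci_alloc_irq_vectors_affinity() has the core spread the remaining vectors across CPUs and keep that assignment fixed; pre_vectors excludes the base (non-per-CPU) interrupts from the spreading. The request reduces to:

    struct irq_affinity desc = {
            .pre_vectors = QLA_BASE_VECTORS,  /* default + RSP queue */
    };

    /* desc.pre_vectors++ for the ATIO vector when target mode is on */
    ret = pci_alloc_irq_vectors_affinity(ha->pdev, QLA_BASE_VECTORS,
                                         ha->msix_count,
                                         PCI_IRQ_MSIX | PCI_IRQ_AFFINITY,
                                         &desc);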
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 2819ceb..67f64db 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -10,7 +10,7 @@
#include <linux/delay.h>
#include <linux/gfp.h>
-struct rom_cmd {
+static struct rom_cmd {
uint16_t cmd;
} rom_cmds[] = {
{ MBC_LOAD_RAM },
@@ -101,12 +101,12 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
return QLA_FUNCTION_TIMEOUT;
}
- /* if PCI error, then avoid mbx processing.*/
- if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
+ /* if PCI error, then avoid mbx processing.*/
+ if (test_bit(PCI_ERR, &base_vha->dpc_flags)) {
ql_log(ql_log_warn, vha, 0x1191,
"PCI error, exiting.\n");
return QLA_FUNCTION_TIMEOUT;
- }
+ }
reg = ha->iobase;
io_lock_on = base_vha->flags.init_done;
@@ -323,20 +323,33 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
}
} else {
- uint16_t mb0;
- uint32_t ictrl;
+ uint16_t mb[8];
+ uint32_t ictrl, host_status, hccr;
uint16_t w;
if (IS_FWI2_CAPABLE(ha)) {
- mb0 = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[0] = RD_REG_WORD(&reg->isp24.mailbox0);
+ mb[1] = RD_REG_WORD(&reg->isp24.mailbox1);
+ mb[2] = RD_REG_WORD(&reg->isp24.mailbox2);
+ mb[3] = RD_REG_WORD(&reg->isp24.mailbox3);
+ mb[7] = RD_REG_WORD(&reg->isp24.mailbox7);
ictrl = RD_REG_DWORD(&reg->isp24.ictrl);
+ host_status = RD_REG_DWORD(&reg->isp24.host_status);
+ hccr = RD_REG_DWORD(&reg->isp24.hccr);
+
+ ql_log(ql_log_warn, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0-3]=[0x%x 0x%x 0x%x 0x%x] mb7 0x%x host_status 0x%x hccr 0x%x\n",
+ command, ictrl, jiffies, mb[0], mb[1], mb[2], mb[3],
+ mb[7], host_status, hccr);
+
} else {
- mb0 = RD_MAILBOX_REG(ha, &reg->isp, 0);
+ mb[0] = RD_MAILBOX_REG(ha, &reg->isp, 0);
ictrl = RD_REG_WORD(&reg->isp.ictrl);
+ ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
+ "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
+ "mb[0]=0x%x\n", command, ictrl, jiffies, mb[0]);
}
- ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
- "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
- "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
/* Capture FW dump only, if PCI device active */
@@ -684,7 +697,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
mbx_cmd_t mc;
mbx_cmd_t *mcp = &mc;
struct qla_hw_data *ha = vha->hw;
- int configured_count;
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111a,
"Entered %s.\n", __func__);
@@ -707,7 +719,6 @@ qla_set_exlogin_mem_cfg(scsi_qla_host_t *vha, dma_addr_t phys_addr)
/*EMPTY*/
ql_dbg(ql_dbg_mbx, vha, 0x111b, "Failed=%x.\n", rval);
} else {
- configured_count = mcp->mb[11];
ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x118c,
"Done %s.\n", __func__);
}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 54380b4..0a1723c 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -42,6 +42,11 @@ static int qla82xx_crb_table_initialized;
(crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \
QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20)
+const int MD_MIU_TEST_AGT_RDDATA[] = {
+ 0x410000A8, 0x410000AC,
+ 0x410000B8, 0x410000BC
+};
+
static void qla82xx_crb_addr_transform_setup(void)
{
qla82xx_crb_addr_transform(XDMA);
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 6201dce..77624ea 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -1176,8 +1176,7 @@ struct qla82xx_md_entry_queue {
#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
-static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
- 0x410000B8, 0x410000BC };
+extern const int MD_MIU_TEST_AGT_RDDATA[4];
#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
diff --git a/drivers/scsi/qla2xxx/qla_nx2.c b/drivers/scsi/qla2xxx/qla_nx2.c
index 007192d..dc1ec9b 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.c
+++ b/drivers/scsi/qla2xxx/qla_nx2.c
@@ -15,6 +15,23 @@
#define TIMEOUT_100_MS 100
+static const uint32_t qla8044_reg_tbl[] = {
+ QLA8044_PEG_HALT_STATUS1,
+ QLA8044_PEG_HALT_STATUS2,
+ QLA8044_PEG_ALIVE_COUNTER,
+ QLA8044_CRB_DRV_ACTIVE,
+ QLA8044_CRB_DEV_STATE,
+ QLA8044_CRB_DRV_STATE,
+ QLA8044_CRB_DRV_SCRATCH,
+ QLA8044_CRB_DEV_PART_INFO1,
+ QLA8044_CRB_IDC_VER_MAJOR,
+ QLA8044_FW_VER_MAJOR,
+ QLA8044_FW_VER_MINOR,
+ QLA8044_FW_VER_SUB,
+ QLA8044_CMDPEG_STATE,
+ QLA8044_ASIC_TEMP,
+};
+
/* 8044 Flash Read/Write functions */
uint32_t
qla8044_rd_reg(struct qla_hw_data *ha, ulong addr)
diff --git a/drivers/scsi/qla2xxx/qla_nx2.h b/drivers/scsi/qla2xxx/qla_nx2.h
index 02fe3c4..83c1b7e 100644
--- a/drivers/scsi/qla2xxx/qla_nx2.h
+++ b/drivers/scsi/qla2xxx/qla_nx2.h
@@ -535,23 +535,6 @@ enum qla_regs {
#define CRB_CMDPEG_CHECK_RETRY_COUNT 60
#define CRB_CMDPEG_CHECK_DELAY 500
-static const uint32_t qla8044_reg_tbl[] = {
- QLA8044_PEG_HALT_STATUS1,
- QLA8044_PEG_HALT_STATUS2,
- QLA8044_PEG_ALIVE_COUNTER,
- QLA8044_CRB_DRV_ACTIVE,
- QLA8044_CRB_DEV_STATE,
- QLA8044_CRB_DRV_STATE,
- QLA8044_CRB_DRV_SCRATCH,
- QLA8044_CRB_DEV_PART_INFO1,
- QLA8044_CRB_IDC_VER_MAJOR,
- QLA8044_FW_VER_MAJOR,
- QLA8044_FW_VER_MINOR,
- QLA8044_FW_VER_SUB,
- QLA8044_CMDPEG_STATE,
- QLA8044_ASIC_TEMP,
-};
-
/* MiniDump Structures */
/* Driver_code is for driver to write some info about the entry
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 8521cfe..0a000ec 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -466,7 +466,7 @@ static void qla2x00_free_queues(struct qla_hw_data *ha)
continue;
rsp = ha->rsp_q_map[cnt];
- clear_bit(cnt, ha->req_qid_map);
+ clear_bit(cnt, ha->rsp_qid_map);
ha->rsp_q_map[cnt] = NULL;
spin_unlock_irqrestore(&ha->hardware_lock, flags);
qla2x00_free_rsp_que(ha, rsp);
@@ -3662,7 +3662,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
sizeof(struct ct6_dsd), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!ctx_cachep)
- goto fail_free_gid_list;
+ goto fail_free_srb_mempool;
}
ha->ctx_mempool = mempool_create_slab_pool(SRB_MIN_REQ,
ctx_cachep);
@@ -3815,7 +3815,7 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
ha->loop_id_map = kzalloc(BITS_TO_LONGS(LOOPID_MAP_SIZE) * sizeof(long),
GFP_KERNEL);
if (!ha->loop_id_map)
- goto fail_async_pd;
+ goto fail_loop_id_map;
else {
qla2x00_set_reserved_loop_ids(ha);
ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0123,
@@ -3824,6 +3824,8 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
return 0;
+fail_loop_id_map:
+ dma_pool_free(ha->s_dma_pool, ha->async_pd, ha->async_pd_dma);
fail_async_pd:
dma_pool_free(ha->s_dma_pool, ha->ex_init_cb, ha->ex_init_cb_dma);
fail_ex_init_cb:
@@ -3851,6 +3853,10 @@ fail_free_ms_iocb:
dma_pool_free(ha->s_dma_pool, ha->ms_iocb, ha->ms_iocb_dma);
ha->ms_iocb = NULL;
ha->ms_iocb_dma = 0;
+
+ if (ha->sns_cmd)
+ dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
+ ha->sns_cmd, ha->sns_cmd_dma);
fail_dma_pool:
if (IS_QLA82XX(ha) || ql2xenabledif) {
dma_pool_destroy(ha->fcp_cmnd_dma_pool);
@@ -3868,10 +3874,12 @@ fail_free_nvram:
kfree(ha->nvram);
ha->nvram = NULL;
fail_free_ctx_mempool:
- mempool_destroy(ha->ctx_mempool);
+ if (ha->ctx_mempool)
+ mempool_destroy(ha->ctx_mempool);
ha->ctx_mempool = NULL;
fail_free_srb_mempool:
- mempool_destroy(ha->srb_mempool);
+ if (ha->srb_mempool)
+ mempool_destroy(ha->srb_mempool);
ha->srb_mempool = NULL;
fail_free_gid_list:
dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index bff9689..e4fda84 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -668,11 +668,9 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
{
struct qla_hw_data *ha = vha->hw;
struct qla_tgt_sess *sess = NULL;
- uint32_t unpacked_lun, lun = 0;
uint16_t loop_id;
int res = 0;
struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
- struct atio_from_isp *a = (struct atio_from_isp *)iocb;
unsigned long flags;
loop_id = le16_to_cpu(n->u.isp24.nport_handle);
@@ -725,11 +723,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
"loop_id %d)\n", vha->host_no, sess, sess->port_name,
mcmd, loop_id);
- lun = a->u.isp24.fcp_cmnd.lun;
- unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
-
- return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
- iocb, QLA24XX_MGMT_SEND_NACK);
+ return qlt_issue_task_mgmt(sess, 0, mcmd, iocb, QLA24XX_MGMT_SEND_NACK);
}
/* ha->tgt.sess_lock supposed to be held on entry */
@@ -3067,7 +3061,7 @@ static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
pkt->entry_type = NOTIFY_ACK_TYPE;
pkt->entry_count = 1;
- pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
+ pkt->handle = QLA_TGT_SKIP_HANDLE;
nack = (struct nack_to_isp *)pkt;
nack->ox_id = ntfy->ox_id;
@@ -3110,6 +3104,9 @@ static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
#if 0 /* Todo */
if (rc == -ENOMEM)
qlt_alloc_qfull_cmd(vha, imm, 0, 0);
+#else
+ if (rc) {
+ }
#endif
goto done;
}
@@ -6457,12 +6454,29 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha, uint8_t ha_locked)
if (!vha->flags.online)
return;
- while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
+ while ((ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) ||
+ fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr)) {
pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
cnt = pkt->u.raw.entry_count;
- qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt,
- ha_locked);
+ if (unlikely(fcpcmd_is_corrupted(ha->tgt.atio_ring_ptr))) {
+ /*
+ * This packet is corrupted. The header + payload
+ * cannot be trusted. There is no point in passing
+ * it further up.
+ */
+ ql_log(ql_log_warn, vha, 0xffff,
+ "corrupted fcp frame SID[%3phN] OXID[%04x] EXCG[%x] %64phN\n",
+ pkt->u.isp24.fcp_hdr.s_id,
+ be16_to_cpu(pkt->u.isp24.fcp_hdr.ox_id),
+ le32_to_cpu(pkt->u.isp24.exchange_addr), pkt);
+
+ adjust_corrupted_atio(pkt);
+ qlt_send_term_exchange(vha, NULL, pkt, ha_locked, 0);
+ } else {
+ qlt_24xx_atio_pkt_all_vps(vha,
+ (struct atio_from_isp *)pkt, ha_locked);
+ }
for (i = 0; i < cnt; i++) {
ha->tgt.atio_ring_index++;
@@ -6545,6 +6559,13 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
/* Disable Full Login after LIP */
nv->host_p &= cpu_to_le32(~BIT_10);
+
+ /*
+ * Clear BIT 15 explicitly: we have seen at least a
+ * couple of instances where it was set and prevented
+ * the firmware from initializing.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
/* Enable target PRLI control */
nv->firmware_options_2 |= cpu_to_le32(BIT_14);
} else {
@@ -6560,9 +6581,6 @@ qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
@@ -6629,11 +6647,17 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
/* Disable ini mode, if requested */
if (!qla_ini_mode_enabled(vha))
nv->firmware_options_1 |= cpu_to_le32(BIT_5);
-
/* Disable Full Login after LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_13);
/* Enable initial LIP */
nv->firmware_options_1 &= cpu_to_le32(~BIT_9);
+ /*
+ * Clear BIT 15 explicitly: we have seen at least
+ * a couple of instances where it was set and
+ * prevented the firmware from initializing.
+ */
+ nv->firmware_options_1 &= cpu_to_le32(~BIT_15);
if (ql2xtgt_tape_enable)
/* Enable FC tape support */
nv->firmware_options_2 |= cpu_to_le32(BIT_12);
@@ -6658,9 +6682,6 @@ qlt_81xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_81xx *nv)
return;
}
- /* out-of-order frames reassembly */
- nv->firmware_options_3 |= BIT_6|BIT_9;
-
if (ha->tgt.enable_class_2) {
if (vha->flags.init_done)
fc_host_supported_classes(vha->host) =
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index f26c5f6..0824a81 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -427,13 +427,33 @@ struct atio_from_isp {
struct {
uint8_t entry_type; /* Entry type. */
uint8_t entry_count; /* Entry count. */
- uint8_t data[58];
+ __le16 attr_n_length;
+#define FCP_CMD_LENGTH_MASK 0x0fff
+#define FCP_CMD_LENGTH_MIN 0x38
+ uint8_t data[56];
uint32_t signature;
#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
} raw;
} u;
} __packed;
+static inline int fcpcmd_is_corrupted(struct atio *atio)
+{
+ if (atio->entry_type == ATIO_TYPE7 &&
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
+ return 1;
+ else
+ return 0;
+}
+
+/* adjust corrupted atio so we won't trip over the same entry again. */
+static inline void adjust_corrupted_atio(struct atio_from_isp *atio)
+{
+ atio->u.raw.attr_n_length = cpu_to_le16(FCP_CMD_LENGTH_MIN);
+ atio->u.isp24.fcp_cmnd.add_cdb_len = 0;
+}
+
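Note the operator ordering in the length test: attr_n_length is a little-endian field, so it must be converted to CPU order before the 12 length bits are masked out (masking the raw __le16 first would misbehave on big-endian hosts). Extracted:

    u16 len = le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK;

    if (len < FCP_CMD_LENGTH_MIN)
            /* too short to hold a valid FCP_CMND: terminate the exchange */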
#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
/*
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index 36935c9..8a58ef3 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -433,6 +433,18 @@ qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring;
+
+ if (atr || !buf) {
+ length = ha->tgt.atio_q_length;
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(length, buf, len);
+ qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd026,
"%s: unknown queue %x\n", __func__, ent->t263.queue_type);
@@ -676,6 +688,18 @@ qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
count++;
}
}
+ } else if (QLA_TGT_MODE_ENABLED() &&
+ ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
+ struct qla_hw_data *ha = vha->hw;
+ struct atio *atr = ha->tgt.atio_ring_ptr;
+
+ if (atr || !buf) {
+ qla27xx_insert16(0, buf, len);
+ qla27xx_insert16(1, buf, len);
+ qla27xx_insert32(ha->tgt.atio_q_in ?
+ readl(ha->tgt.atio_q_in) : 0, buf, len);
+ count++;
+ }
} else {
ql_dbg(ql_dbg_misc, vha, 0xd02f,
"%s: unknown queue %x\n", __func__, ent->t274.queue_type);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 6643f6f..d925910 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -1800,7 +1800,7 @@ static ssize_t tcm_qla2xxx_wwn_version_show(struct config_item *item,
{
return sprintf(page,
"TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
}
@@ -1906,7 +1906,7 @@ static int tcm_qla2xxx_register_configfs(void)
int ret;
pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
- UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
+ UTS_RELEASE"\n", QLA2XXX_VERSION, utsname()->sysname,
utsname()->machine);
ret = target_register_template(&tcm_qla2xxx_ops);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
index 37e026a..cf8430b 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.h
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -1,7 +1,6 @@
#include <target/target_core_base.h>
#include <linux/btree.h>
-#define TCM_QLA2XXX_VERSION "v0.1"
/* length of ASCII WWPNs including pad */
#define TCM_QLA2XXX_NAMELEN 32
/*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index c35b6de..e9e1e14 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1018,7 +1018,7 @@ static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb)
count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
BUG_ON(count > sdb->table.nents);
sdb->table.nents = count;
- sdb->length = blk_rq_bytes(req);
+ sdb->length = blk_rq_payload_bytes(req);
return BLKPREP_OK;
}
@@ -2893,7 +2893,7 @@ scsi_internal_device_block(struct scsi_device *sdev)
* request queue.
*/
if (q->mq_ops) {
- blk_mq_stop_hw_queues(q);
+ blk_mq_quiesce_queue(q);
} else {
spin_lock_irqsave(q->queue_lock, flags);
blk_stop_queue(q);
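blk_rq_bytes() reports the logical size of the request, while blk_rq_payload_bytes() reports what actually crosses the wire; the two differ for requests carrying a special payload such as WRITE SAME or discard. Worked example (WRITE SAME of 4096 logical blocks of 512 bytes; numbers illustrative):

    /* blk_rq_bytes(req)          -> 2097152  logical bytes written
     * blk_rq_payload_bytes(req)  ->     512  one sector transferred
     *
     * The S/G table handed to the HBA must describe the payload,
     * hence the switch above.
     */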
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b193304..1f5d92a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -871,11 +871,11 @@ static int sd_setup_write_same_cmnd(struct scsi_cmnd *cmd)
cmd->allowed = SD_MAX_RETRIES;
/*
- * For WRITE_SAME the data transferred in the DATA IN buffer is
+ * For WRITE SAME the data transferred via the DATA OUT buffer is
* different from the amount of data actually written to the target.
*
- * We set up __data_len to the amount of data transferred from the
- * DATA IN buffer so that blk_rq_map_sg set up the proper S/G list
+ * We set up __data_len to the amount of data transferred via the
+ * DATA OUT buffer so that blk_rq_map_sg sets up the proper S/G list
* to transfer a single sector of data first, but then reset it to
* the amount of data to be written right after so that the I/O path
* knows how much to actually write.
@@ -2600,7 +2600,8 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
if (sdp->broken_fua) {
sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
sdkp->DPOFUA = 0;
- } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+ } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw &&
+ !sdkp->device->use_16_for_rw) {
sd_first_printk(KERN_NOTICE, sdkp,
"Uses READ/WRITE(6), disabling FUA\n");
sdkp->DPOFUA = 0;
@@ -2783,13 +2784,21 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp)
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q);
}
- sdkp->zoned = (buffer[8] >> 4) & 3;
- if (sdkp->zoned == 1)
- q->limits.zoned = BLK_ZONED_HA;
- else if (sdkp->device->type == TYPE_ZBC)
+ if (sdkp->device->type == TYPE_ZBC) {
+ /* Host-managed */
q->limits.zoned = BLK_ZONED_HM;
- else
- q->limits.zoned = BLK_ZONED_NONE;
+ } else {
+ sdkp->zoned = (buffer[8] >> 4) & 3;
+ if (sdkp->zoned == 1)
+ /* Host-aware */
+ q->limits.zoned = BLK_ZONED_HA;
+ else
+ /*
+ * Treat drive-managed devices as
+ * regular block devices.
+ */
+ q->limits.zoned = BLK_ZONED_NONE;
+ }
if (blk_queue_is_zoned(q) && sdkp->first_scan)
sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n",
q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware");
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 8c9a35c..50adabb 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -587,7 +587,7 @@ static void ses_match_to_enclosure(struct enclosure_device *edev,
ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
- if (scsi_is_sas_rphy(&sdev->sdev_gendev))
+ if (scsi_is_sas_rphy(sdev->sdev_target->dev.parent))
efd.addr = sas_get_address(sdev);
if (efd.addr) {
diff --git a/drivers/scsi/snic/snic_main.c b/drivers/scsi/snic/snic_main.c
index 396b32d..7cf70aa 100644
--- a/drivers/scsi/snic/snic_main.c
+++ b/drivers/scsi/snic/snic_main.c
@@ -591,6 +591,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "dflt sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_res;
}
@@ -601,6 +602,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "max sgl pool creation failed\n");
+ ret = -ENOMEM;
goto err_free_dflt_sgl_pool;
}
@@ -611,6 +613,7 @@ snic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (!pool) {
SNIC_HOST_ERR(shost, "snic tmreq info pool creation failed.\n");
+ ret = -ENOMEM;
goto err_free_max_sgl_pool;
}
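
The three snic hunks fix the same bug class: an error path that jumps to cleanup without assigning ret, silently returning the previous (success) value. A self-contained userspace sketch of the pattern, with hypothetical names:

	#include <errno.h>
	#include <stdlib.h>

	static int example_probe(void)
	{
		void *pool;
		int ret = 0;

		pool = malloc(64);
		if (!pool) {
			ret = -ENOMEM;	/* without this, 0 is returned */
			goto err_out;
		}
		free(pool);
		return 0;
	err_out:
		return ret;
	}
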
diff --git a/drivers/soc/ti/wkup_m3_ipc.c b/drivers/soc/ti/wkup_m3_ipc.c
index 8823cc8..5bb3760 100644
--- a/drivers/soc/ti/wkup_m3_ipc.c
+++ b/drivers/soc/ti/wkup_m3_ipc.c
@@ -459,6 +459,7 @@ static int wkup_m3_ipc_probe(struct platform_device *pdev)
if (IS_ERR(task)) {
dev_err(dev, "can't create rproc_boot thread\n");
+ ret = PTR_ERR(task);
goto err_put_rproc;
}
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index ec4aa25..2922a99 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -378,6 +378,7 @@ config SPI_FSL_SPI
config SPI_FSL_DSPI
tristate "Freescale DSPI controller"
select REGMAP_MMIO
+ depends on HAS_DMA
depends on SOC_VF610 || SOC_LS1021A || ARCH_LAYERSCAPE || COMPILE_TEST
help
This enables support for the Freescale DSPI controller in master
diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c
index e89da0a..0314c6b9 100644
--- a/drivers/spi/spi-armada-3700.c
+++ b/drivers/spi/spi-armada-3700.c
@@ -800,7 +800,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
struct spi_master *master;
struct a3700_spi *spi;
u32 num_cs = 0;
- int ret = 0;
+ int irq, ret = 0;
master = spi_alloc_master(dev, sizeof(*spi));
if (!master) {
@@ -825,7 +825,7 @@ static int a3700_spi_probe(struct platform_device *pdev)
master->unprepare_message = a3700_spi_unprepare_message;
master->set_cs = a3700_spi_set_cs;
master->flags = SPI_MASTER_HALF_DUPLEX;
- master->mode_bits |= (SPI_RX_DUAL | SPI_RX_DUAL |
+ master->mode_bits |= (SPI_RX_DUAL | SPI_TX_DUAL |
SPI_RX_QUAD | SPI_TX_QUAD);
platform_set_drvdata(pdev, master);
@@ -846,12 +846,13 @@ static int a3700_spi_probe(struct platform_device *pdev)
goto error;
}
- spi->irq = platform_get_irq(pdev, 0);
- if (spi->irq < 0) {
- dev_err(dev, "could not get irq: %d\n", spi->irq);
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "could not get irq: %d\n", irq);
ret = -ENXIO;
goto error;
}
+ spi->irq = irq;
init_completion(&spi->done);
diff --git a/drivers/spi/spi-axi-spi-engine.c b/drivers/spi/spi-axi-spi-engine.c
index 319225d..6ab4c77 100644
--- a/drivers/spi/spi-axi-spi-engine.c
+++ b/drivers/spi/spi-axi-spi-engine.c
@@ -494,7 +494,8 @@ static int spi_engine_probe(struct platform_device *pdev)
SPI_ENGINE_VERSION_MAJOR(version),
SPI_ENGINE_VERSION_MINOR(version),
SPI_ENGINE_VERSION_PATCH(version));
- return -ENODEV;
+ ret = -ENODEV;
+ goto err_put_master;
}
spi_engine->clk = devm_clk_get(&pdev->dev, "s_axi_aclk");
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index d36c11b..02fb967 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -646,7 +646,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = t->rx_buf;
t->rx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_FROM_DEVICE);
- if (!t->rx_dma) {
+ if (dma_mapping_error(&spi->dev, t->rx_dma)) {
ret = -EFAULT;
goto err_rx_map;
}
@@ -660,7 +660,7 @@ static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
buf = (void *)t->tx_buf;
t->tx_dma = dma_map_single(&spi->dev, buf,
t->len, DMA_TO_DEVICE);
- if (!t->tx_dma) {
+ if (dma_mapping_error(&spi->dev, t->tx_dma)) {
ret = -EFAULT;
goto err_tx_map;
}
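
dma_map_single() does not return NULL on failure; on some platforms a bus address of 0 is perfectly valid, so dma_mapping_error() is the only portable check. A hedged sketch of the pattern, assuming a generic struct device *dev:

	dma_addr_t handle;

	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;		/* 0 is not a failure indicator */
	/* ... run the transfer ... */
	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
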
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
index e31971f9..837cb8d 100644
--- a/drivers/spi/spi-dw-mid.c
+++ b/drivers/spi/spi-dw-mid.c
@@ -274,11 +274,11 @@ static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
static void mid_spi_dma_stop(struct dw_spi *dws)
{
if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->txchan);
+ dmaengine_terminate_sync(dws->txchan);
clear_bit(TX_BUSY, &dws->dma_chan_busy);
}
if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
- dmaengine_terminate_all(dws->rxchan);
+ dmaengine_terminate_sync(dws->rxchan);
clear_bit(RX_BUSY, &dws->dma_chan_busy);
}
}
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index b715a26..054012f 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -107,7 +107,10 @@ static const struct file_operations dw_spi_regs_ops = {
static int dw_spi_debugfs_init(struct dw_spi *dws)
{
- dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ char name[128];
+
+ snprintf(name, sizeof(name), "dw_spi-%s", dev_name(&dws->master->dev));
+ dws->debugfs = debugfs_create_dir(name, NULL);
if (!dws->debugfs)
return -ENOMEM;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index dd7b5b4..d6239fa 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1690,6 +1690,7 @@ static int pxa2xx_spi_probe(struct platform_device *pdev)
pxa2xx_spi_write(drv_data, SSCR1, tmp);
tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
default:
tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
SSCR1_TxTresh(TX_THRESH_DFLT);
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index 0012ad0..1f00eeb 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -973,14 +973,16 @@ static const struct sh_msiof_chipdata r8a779x_data = {
};
static const struct of_device_id sh_msiof_match[] = {
- { .compatible = "renesas,sh-msiof", .data = &sh_data },
{ .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
{ .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen2-msiof", .data = &r8a779x_data },
{ .compatible = "renesas,msiof-r8a7796", .data = &r8a779x_data },
+ { .compatible = "renesas,rcar-gen3-msiof", .data = &r8a779x_data },
+ { .compatible = "renesas,sh-msiof", .data = &sh_data }, /* Deprecated */
{},
};
MODULE_DEVICE_TABLE(of, sh_msiof_match);
diff --git a/drivers/staging/ks7010/ks_hostif.c b/drivers/staging/ks7010/ks_hostif.c
index 1fbd495..c7652c3 100644
--- a/drivers/staging/ks7010/ks_hostif.c
+++ b/drivers/staging/ks7010/ks_hostif.c
@@ -461,7 +461,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->protocol = eth_type_trans(skb, skb->dev);
priv->nstats.rx_packets++;
priv->nstats.rx_bytes += rx_ind_size;
- skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
priv->nstats.rx_dropped++;
@@ -494,7 +493,6 @@ void hostif_data_indication(struct ks_wlan_private *priv)
skb->protocol = eth_type_trans(skb, skb->dev);
priv->nstats.rx_packets++;
priv->nstats.rx_bytes += rx_ind_size;
- skb->dev->last_rx = jiffies;
netif_rx(skb);
} else {
priv->nstats.rx_dropped++;
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index f84069f..781ef62 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -155,7 +155,6 @@ static void xlr_net_fmn_handler(int bkt, int src_stnid, int size, int code,
skb_reserve(skb, BYTE_OFFSET);
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, skb->dev);
- skb->dev->last_rx = jiffies;
netif_rx(skb);
/* Fill rx ring */
skb_data = xlr_alloc_skb();
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index f0900d1..fc849d4 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -429,7 +429,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget) {
/* No more work */
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
enable_irq(rx_group->irq);
}
return rx_count;
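
napi_complete_done() reports how much work the poll actually did, which the core can feed into adaptive interrupt moderation; napi_complete() is equivalent to reporting 0. A minimal sketch of a conforming poll callback, with hypothetical driver helpers:

	static int example_poll(struct napi_struct *napi, int budget)
	{
		int work_done = example_rx(napi, budget);	/* hypothetical */

		if (work_done < budget) {
			napi_complete_done(napi, work_done);
			example_enable_irqs(napi);		/* hypothetical */
		}
		return work_done;
	}
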
diff --git a/drivers/staging/rtl8192e/rtllib_rx.c b/drivers/staging/rtl8192e/rtllib_rx.c
index e5ba7d1..43a7774 100644
--- a/drivers/staging/rtl8192e/rtllib_rx.c
+++ b/drivers/staging/rtl8192e/rtllib_rx.c
@@ -1375,7 +1375,6 @@ static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
ieee->LinkDetectInfo.NumRecvDataInPeriod++;
ieee->LinkDetectInfo.NumRxOkInPeriod++;
}
- dev->last_rx = jiffies;
/* Data frame - extract src/dst addresses */
rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index 82f6543..b1f2fdf 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -1103,11 +1103,7 @@ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
stats = hostap_get_stats(dev);
from_assoc_ap = 1;
}
-#endif
-
- dev->last_rx = jiffies;
-#ifdef NOT_YET
if ((ieee->iw_mode == IW_MODE_MASTER ||
ieee->iw_mode == IW_MODE_REPEAT) &&
!from_assoc_ap) {
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index c1f674f..ca3743d 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -1657,7 +1657,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
/* If there aren't any more packets to receive stop the poll */
if (rx_count < budget)
- napi_complete(napi);
+ napi_complete_done(napi, rx_count);
return rx_count;
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 4fe037a..6134eba 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -3409,7 +3409,6 @@ static void hfa384x_usbin_rx(struct wlandevice *wlandev, struct sk_buff *skb)
&usbin->rxfrm.desc.frame_control, hdrlen);
skb->dev = wlandev->netdev;
- skb->dev->last_rx = jiffies;
/* And set the frame length properly */
skb_trim(skb, data_len + hdrlen);
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 73fcf07..53dbbd6 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -252,7 +252,6 @@ static int p80211_convert_to_ether(struct wlandevice *wlandev,
}
if (skb_p80211_to_ether(wlandev, wlandev->ethconv, skb) == 0) {
- skb->dev->last_rx = jiffies;
wlandev->netdev->stats.rx_packets++;
wlandev->netdev->stats.rx_bytes += skb->len;
netif_rx_ni(skb);
@@ -287,7 +286,6 @@ static void p80211netdev_rx_bh(unsigned long arg)
skb->ip_summed = CHECKSUM_NONE;
skb->pkt_type = PACKET_OTHERHOST;
skb->protocol = htons(ETH_P_80211_RAW);
- dev->last_rx = jiffies;
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7dfefd6..1cadc9e 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -1693,6 +1693,10 @@ void transport_generic_request_failure(struct se_cmd *cmd,
case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
+ case TCM_TOO_MANY_TARGET_DESCS:
+ case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
+ case TCM_TOO_MANY_SEGMENT_DESCS:
+ case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
break;
case TCM_OUT_OF_RESOURCES:
sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2808,6 +2812,26 @@ static const struct sense_info sense_info_table[] = {
.key = ILLEGAL_REQUEST,
.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
},
+ [TCM_TOO_MANY_TARGET_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
+ },
+ [TCM_TOO_MANY_SEGMENT_DESCS] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
+ },
+ [TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
+ .key = ILLEGAL_REQUEST,
+ .asc = 0x26,
+ .ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
+ },
[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
.key = ILLEGAL_REQUEST,
.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index 37d5cae..d828b3b 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -53,18 +53,13 @@ static int target_xcopy_gen_naa_ieee(struct se_device *dev, unsigned char *buf)
return 0;
}
-static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- bool src)
+static int target_xcopy_locate_se_dev_e4(const unsigned char *dev_wwn,
+ struct se_device **found_dev)
{
struct se_device *se_dev;
- unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
+ unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN];
int rc;
- if (src)
- dev_wwn = &xop->dst_tid_wwn[0];
- else
- dev_wwn = &xop->src_tid_wwn[0];
-
mutex_lock(&g_device_mutex);
list_for_each_entry(se_dev, &g_device_list, g_dev_node) {
@@ -78,15 +73,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
if (rc != 0)
continue;
- if (src) {
- xop->dst_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->dst_dev: %p from located"
- " se_dev\n", xop->dst_dev);
- } else {
- xop->src_dev = se_dev;
- pr_debug("XCOPY 0xe4: Setting xop->src_dev: %p from located"
- " se_dev\n", xop->src_dev);
- }
+ *found_dev = se_dev;
+ pr_debug("XCOPY 0xe4: located se_dev: %p\n", se_dev);
rc = target_depend_item(&se_dev->dev_group.cg_item);
if (rc != 0) {
@@ -110,7 +98,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
}
static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op *xop,
- unsigned char *p, bool src)
+ unsigned char *p, unsigned short cscd_index)
{
unsigned char *desc = p;
unsigned short ript;
@@ -155,7 +143,13 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
return -EINVAL;
}
- if (src) {
+ if (cscd_index != xop->stdi && cscd_index != xop->dtdi) {
+ pr_debug("XCOPY 0xe4: ignoring CSCD entry %d - neither src nor "
+ "dest\n", cscd_index);
+ return 0;
+ }
+
+ if (cscd_index == xop->stdi) {
memcpy(&xop->src_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
* Determine if the source designator matches the local device
@@ -167,10 +161,15 @@ static int target_xcopy_parse_tiddesc_e4(struct se_cmd *se_cmd, struct xcopy_op
pr_debug("XCOPY 0xe4: Set xop->src_dev %p from source"
" received xop\n", xop->src_dev);
}
- } else {
+ }
+
+ if (cscd_index == xop->dtdi) {
memcpy(&xop->dst_tid_wwn[0], &desc[8], XCOPY_NAA_IEEE_REGEX_LEN);
/*
- * Determine if the destination designator matches the local device
+ * Determine if the destination designator matches the local
+ * device. If @cscd_index corresponds to both source (stdi) and
+ * destination (dtdi), or dtdi comes after stdi, then
+ * XCOL_DEST_RECV_OP wins.
*/
if (!memcmp(&xop->local_dev_wwn[0], &xop->dst_tid_wwn[0],
XCOPY_NAA_IEEE_REGEX_LEN)) {
@@ -190,20 +189,23 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
{
struct se_device *local_dev = se_cmd->se_dev;
unsigned char *desc = p;
- int offset = tdll % XCOPY_TARGET_DESC_LEN, rc, ret = 0;
+ int offset = tdll % XCOPY_TARGET_DESC_LEN, rc;
+ unsigned short cscd_index = 0;
unsigned short start = 0;
- bool src = true;
*sense_ret = TCM_INVALID_PARAMETER_LIST;
if (offset != 0) {
pr_err("XCOPY target descriptor list length is not"
" multiple of %d\n", XCOPY_TARGET_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
return -EINVAL;
}
- if (tdll > 64) {
+ if (tdll > RCR_OP_MAX_TARGET_DESC_COUNT * XCOPY_TARGET_DESC_LEN) {
pr_err("XCOPY target descriptor supports a maximum"
" two src/dest descriptors, tdll: %hu too large..\n", tdll);
+ /* spc4r37 6.4.3.4 CSCD DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_TARGET_DESCS;
return -EINVAL;
}
/*
@@ -215,37 +217,43 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
while (start < tdll) {
/*
- * Check target descriptor identification with 0xE4 type with
- * use VPD 0x83 WWPN matching ..
+ * Check target descriptor identification with 0xE4 type, and
+ * compare the current index with the CSCD descriptor IDs in
+ * the segment descriptor. Use VPD 0x83 WWPN matching ..
*/
switch (desc[0]) {
case 0xe4:
rc = target_xcopy_parse_tiddesc_e4(se_cmd, xop,
- &desc[0], src);
+ &desc[0], cscd_index);
if (rc != 0)
goto out;
- /*
- * Assume target descriptors are in source -> destination order..
- */
- if (src)
- src = false;
- else
- src = true;
start += XCOPY_TARGET_DESC_LEN;
desc += XCOPY_TARGET_DESC_LEN;
- ret++;
+ cscd_index++;
break;
default:
pr_err("XCOPY unsupported descriptor type code:"
" 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE;
goto out;
}
}
- if (xop->op_origin == XCOL_SOURCE_RECV_OP)
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, true);
- else
- rc = target_xcopy_locate_se_dev_e4(se_cmd, xop, false);
+ switch (xop->op_origin) {
+ case XCOL_SOURCE_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->dst_tid_wwn,
+ &xop->dst_dev);
+ break;
+ case XCOL_DEST_RECV_OP:
+ rc = target_xcopy_locate_se_dev_e4(xop->src_tid_wwn,
+ &xop->src_dev);
+ break;
+ default:
+ pr_err("XCOPY CSCD descriptor IDs not found in CSCD list - "
+ "stdi: %hu dtdi: %hu\n", xop->stdi, xop->dtdi);
+ rc = -EINVAL;
+ break;
+ }
/*
* If a matching IEEE NAA 0x83 descriptor for the requested device
* is not located on this node, return COPY_ABORTED with ASQ/ASQC
@@ -262,7 +270,7 @@ static int target_xcopy_parse_target_descriptors(struct se_cmd *se_cmd,
pr_debug("XCOPY TGT desc: Dest dev: %p NAA IEEE WWN: 0x%16phN\n",
xop->dst_dev, &xop->dst_tid_wwn[0]);
- return ret;
+ return cscd_index;
out:
return -EINVAL;
@@ -284,6 +292,14 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
xop->stdi = get_unaligned_be16(&desc[4]);
xop->dtdi = get_unaligned_be16(&desc[6]);
+
+ if (xop->stdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX ||
+ xop->dtdi > XCOPY_CSCD_DESC_ID_LIST_OFF_MAX) {
+ pr_err("XCOPY segment desc 0x02: unsupported CSCD ID > 0x%x; stdi: %hu dtdi: %hu\n",
+ XCOPY_CSCD_DESC_ID_LIST_OFF_MAX, xop->stdi, xop->dtdi);
+ return -EINVAL;
+ }
+
pr_debug("XCOPY seg desc 0x02: desc_len: %hu stdi: %hu dtdi: %hu, DC: %d\n",
desc_len, xop->stdi, xop->dtdi, dc);
@@ -306,15 +322,25 @@ static int target_xcopy_parse_segdesc_02(struct se_cmd *se_cmd, struct xcopy_op
static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
struct xcopy_op *xop, unsigned char *p,
- unsigned int sdll)
+ unsigned int sdll, sense_reason_t *sense_ret)
{
unsigned char *desc = p;
unsigned int start = 0;
int offset = sdll % XCOPY_SEGMENT_DESC_LEN, rc, ret = 0;
+ *sense_ret = TCM_INVALID_PARAMETER_LIST;
+
if (offset != 0) {
pr_err("XCOPY segment descriptor list length is not"
" multiple of %d\n", XCOPY_SEGMENT_DESC_LEN);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
+ return -EINVAL;
+ }
+ if (sdll > RCR_OP_MAX_SG_DESC_COUNT * XCOPY_SEGMENT_DESC_LEN) {
+ pr_err("XCOPY supports %u segment descriptor(s), sdll: %u too"
+ " large..\n", RCR_OP_MAX_SG_DESC_COUNT, sdll);
+ /* spc4r37 6.4.3.5 SEGMENT DESCRIPTOR LIST LENGTH field */
+ *sense_ret = TCM_TOO_MANY_SEGMENT_DESCS;
return -EINVAL;
}
@@ -335,6 +361,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
default:
pr_err("XCOPY unsupported segment descriptor"
"type: 0x%02x\n", desc[0]);
+ *sense_ret = TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE;
goto out;
}
}
@@ -861,6 +888,16 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
return TCM_UNSUPPORTED_SCSI_OPCODE;
}
+ if (se_cmd->data_length == 0) {
+ target_complete_cmd(se_cmd, SAM_STAT_GOOD);
+ return TCM_NO_SENSE;
+ }
+ if (se_cmd->data_length < XCOPY_HDR_LEN) {
+ pr_err("XCOPY parameter truncation: length %u < hdr_len %u\n",
+ se_cmd->data_length, XCOPY_HDR_LEN);
+ return TCM_PARAMETER_LIST_LENGTH_ERROR;
+ }
+
xop = kzalloc(sizeof(struct xcopy_op), GFP_KERNEL);
if (!xop) {
pr_err("Unable to allocate xcopy_op\n");
@@ -883,6 +920,12 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
*/
tdll = get_unaligned_be16(&p[2]);
sdll = get_unaligned_be32(&p[8]);
+ if (tdll + sdll > RCR_OP_MAX_DESC_LIST_LEN) {
+ pr_err("XCOPY descriptor list length %u exceeds maximum %u\n",
+ tdll + sdll, RCR_OP_MAX_DESC_LIST_LEN);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
inline_dl = get_unaligned_be32(&p[12]);
if (inline_dl != 0) {
@@ -890,10 +933,32 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
goto out;
}
+ if (se_cmd->data_length < (XCOPY_HDR_LEN + tdll + sdll + inline_dl)) {
+ pr_err("XCOPY parameter truncation: data length %u too small "
+ "for tdll: %hu sdll: %u inline_dl: %u\n",
+ se_cmd->data_length, tdll, sdll, inline_dl);
+ ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
+ goto out;
+ }
+
pr_debug("Processing XCOPY with list_id: 0x%02x list_id_usage: 0x%02x"
" tdll: %hu sdll: %u inline_dl: %u\n", list_id, list_id_usage,
tdll, sdll, inline_dl);
+ /*
+ * Parse the segment descriptors (located past the target descriptors)
+ * first: their CSCD IDs are needed to tell src and dest entries apart.
+ */
+ seg_desc = &p[16] + tdll;
+
+ rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc,
+ sdll, &ret);
+ if (rc <= 0)
+ goto out;
+
+ pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
+ rc * XCOPY_SEGMENT_DESC_LEN);
+
rc = target_xcopy_parse_target_descriptors(se_cmd, xop, &p[16], tdll, &ret);
if (rc <= 0)
goto out;
@@ -911,18 +976,8 @@ sense_reason_t target_do_xcopy(struct se_cmd *se_cmd)
pr_debug("XCOPY: Processed %d target descriptors, length: %u\n", rc,
rc * XCOPY_TARGET_DESC_LEN);
- seg_desc = &p[16];
- seg_desc += (rc * XCOPY_TARGET_DESC_LEN);
-
- rc = target_xcopy_parse_segment_descriptors(se_cmd, xop, seg_desc, sdll);
- if (rc <= 0) {
- xcopy_pt_undepend_remotedev(xop);
- goto out;
- }
transport_kunmap_data_sg(se_cmd);
- pr_debug("XCOPY: Processed %d segment descriptors, length: %u\n", rc,
- rc * XCOPY_SEGMENT_DESC_LEN);
INIT_WORK(&xop->xop_work, target_xcopy_do_work);
queue_work(xcopy_wq, &xop->xop_work);
return TCM_NO_SENSE;
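
The reordered parse works because only the list lengths are needed to locate the segment descriptors, while identifying src/dst among the CSCD entries requires stdi/dtdi from those segment descriptors. A hedged sketch of the header fields involved (offsets per SPC-4, helper name hypothetical):

	#include <asm/unaligned.h>

	static void example_parse_hdr(const unsigned char *p,
				      u16 *tdll, u32 *sdll)
	{
		*tdll = get_unaligned_be16(&p[2]);	/* CSCD desc list len */
		*sdll = get_unaligned_be32(&p[8]);	/* seg desc list len */
		/* Segment descriptors start at &p[16] + *tdll, so they can
		 * be parsed before the target descriptors at &p[16]. */
	}
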
diff --git a/drivers/target/target_core_xcopy.h b/drivers/target/target_core_xcopy.h
index 4d3d4dd..7c0b105 100644
--- a/drivers/target/target_core_xcopy.h
+++ b/drivers/target/target_core_xcopy.h
@@ -1,10 +1,17 @@
#include <target/target_core_base.h>
+#define XCOPY_HDR_LEN 16
#define XCOPY_TARGET_DESC_LEN 32
#define XCOPY_SEGMENT_DESC_LEN 28
#define XCOPY_NAA_IEEE_REGEX_LEN 16
#define XCOPY_MAX_SECTORS 1024
+/*
+ * SPC4r37 6.4.6.1
+ * Table 150 — CSCD descriptor ID values
+ */
+#define XCOPY_CSCD_DESC_ID_LIST_OFF_MAX 0x07FF
+
enum xcopy_origin_list {
XCOL_SOURCE_RECV_OP = 0x01,
XCOL_DEST_RECV_OP = 0x02,
diff --git a/drivers/thermal/rockchip_thermal.c b/drivers/thermal/rockchip_thermal.c
index b811b0f..4c77965 100644
--- a/drivers/thermal/rockchip_thermal.c
+++ b/drivers/thermal/rockchip_thermal.c
@@ -118,12 +118,12 @@ struct rockchip_tsadc_chip {
void (*control)(void __iomem *reg, bool on);
/* Per-sensor methods */
- int (*get_temp)(struct chip_tsadc_table table,
+ int (*get_temp)(const struct chip_tsadc_table *table,
int chn, void __iomem *reg, int *temp);
- void (*set_alarm_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
- void (*set_tshut_temp)(struct chip_tsadc_table table,
- int chn, void __iomem *reg, int temp);
+ int (*set_alarm_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
+ int (*set_tshut_temp)(const struct chip_tsadc_table *table,
+ int chn, void __iomem *reg, int temp);
void (*set_tshut_mode)(int chn, void __iomem *reg, enum tshut_mode m);
/* Per-table methods */
@@ -317,6 +317,7 @@ static const struct tsadc_table rk3288_code_table[] = {
{3452, 115000},
{3437, 120000},
{3421, 125000},
+ {0, 125000},
};
static const struct tsadc_table rk3368_code_table[] = {
@@ -397,59 +398,80 @@ static const struct tsadc_table rk3399_code_table[] = {
{TSADCV3_DATA_MASK, 125000},
};
-static u32 rk_tsadcv2_temp_to_code(struct chip_tsadc_table table,
+static u32 rk_tsadcv2_temp_to_code(const struct chip_tsadc_table *table,
int temp)
{
int high, low, mid;
- u32 error = 0;
+ unsigned long num;
+ unsigned int denom;
+ u32 error = table->data_mask;
low = 0;
- high = table.length - 1;
+ high = (table->length - 1) - 1; /* ignore the last check for table */
mid = (high + low) / 2;
/* Return mask code data when the temp is over table range */
- if (temp < table.id[low].temp || temp > table.id[high].temp) {
- error = table.data_mask;
+ if (temp < table->id[low].temp || temp > table->id[high].temp)
goto exit;
- }
while (low <= high) {
- if (temp == table.id[mid].temp)
- return table.id[mid].code;
- else if (temp < table.id[mid].temp)
+ if (temp == table->id[mid].temp)
+ return table->id[mid].code;
+ else if (temp < table->id[mid].temp)
high = mid - 1;
else
low = mid + 1;
mid = (low + high) / 2;
}
+ /*
+ * The table only provides the conversion at its entry points.
+ * Assume the relationship between temperature and analog value
+ * is linear between two adjacent entries and interpolate to
+ * produce a finer-grained result.
+ */
+ num = abs(table->id[mid + 1].code - table->id[mid].code);
+ num *= temp - table->id[mid].temp;
+ denom = table->id[mid + 1].temp - table->id[mid].temp;
+
+ switch (table->mode) {
+ case ADC_DECREMENT:
+ return table->id[mid].code - (num / denom);
+ case ADC_INCREMENT:
+ return table->id[mid].code + (num / denom);
+ default:
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return error;
+ }
+
exit:
- pr_err("Invalid the conversion, error=%d\n", error);
+ pr_err("%s: invalid temperature, temp=%d error=%d\n",
+ __func__, temp, error);
return error;
}
-static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
- int *temp)
+static int rk_tsadcv2_code_to_temp(const struct chip_tsadc_table *table,
+ u32 code, int *temp)
{
unsigned int low = 1;
- unsigned int high = table.length - 1;
+ unsigned int high = table->length - 1;
unsigned int mid = (low + high) / 2;
unsigned int num;
unsigned long denom;
- WARN_ON(table.length < 2);
+ WARN_ON(table->length < 2);
- switch (table.mode) {
+ switch (table->mode) {
case ADC_DECREMENT:
- code &= table.data_mask;
- if (code < table.id[high].code)
+ code &= table->data_mask;
+ if (code <= table->id[high].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code >= table.id[mid].code &&
- code < table.id[mid - 1].code)
+ if (code >= table->id[mid].code &&
+ code < table->id[mid - 1].code)
break;
- else if (code < table.id[mid].code)
+ else if (code < table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -458,15 +480,15 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
case ADC_INCREMENT:
- code &= table.data_mask;
- if (code < table.id[low].code)
+ code &= table->data_mask;
+ if (code < table->id[low].code)
return -EAGAIN; /* Incorrect reading */
while (low <= high) {
- if (code <= table.id[mid].code &&
- code > table.id[mid - 1].code)
+ if (code <= table->id[mid].code &&
+ code > table->id[mid - 1].code)
break;
- else if (code > table.id[mid].code)
+ else if (code > table->id[mid].code)
low = mid + 1;
else
high = mid - 1;
@@ -475,7 +497,8 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
}
break;
default:
- pr_err("Invalid the conversion table\n");
+ pr_err("%s: unknown table mode: %d\n", __func__, table->mode);
+ return -EINVAL;
}
/*
@@ -484,10 +507,10 @@ static int rk_tsadcv2_code_to_temp(struct chip_tsadc_table table, u32 code,
* temperature between 2 table entries is linear and interpolate
* to produce less granular result.
*/
- num = table.id[mid].temp - table.id[mid - 1].temp;
- num *= abs(table.id[mid - 1].code - code);
- denom = abs(table.id[mid - 1].code - table.id[mid].code);
- *temp = table.id[mid - 1].temp + (num / denom);
+ num = table->id[mid].temp - table->id[mid - 1].temp;
+ num *= abs(table->id[mid - 1].code - code);
+ denom = abs(table->id[mid - 1].code - table->id[mid].code);
+ *temp = table->id[mid - 1].temp + (num / denom);
return 0;
}
@@ -638,7 +661,7 @@ static void rk_tsadcv3_control(void __iomem *regs, bool enable)
writel_relaxed(val, regs + TSADCV2_AUTO_CON);
}
-static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
+static int rk_tsadcv2_get_temp(const struct chip_tsadc_table *table,
int chn, void __iomem *regs, int *temp)
{
u32 val;
@@ -648,39 +671,57 @@ static int rk_tsadcv2_get_temp(struct chip_tsadc_table table,
return rk_tsadcv2_code_to_temp(table, val, temp);
}
-static void rk_tsadcv2_alarm_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_alarm_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
- u32 alarm_value, int_en;
+ u32 alarm_value;
+ u32 int_en, int_clr;
+
+ /*
+ * Some sensors do not need trip points; for those, set_trips
+ * passes {-INT_MAX, INT_MAX}. Treat INT_MAX as "no alarm" and
+ * disable the high temperature interrupt instead of programming
+ * an out-of-range threshold.
+ */
+ if (temp == INT_MAX) {
+ int_clr = readl_relaxed(regs + TSADCV2_INT_EN);
+ int_clr &= ~TSADCV2_INT_SRC_EN(chn);
+ writel_relaxed(int_clr, regs + TSADCV2_INT_EN);
+ return 0;
+ }
/* Make sure the value is valid */
alarm_value = rk_tsadcv2_temp_to_code(table, temp);
- if (alarm_value == table.data_mask)
- return;
+ if (alarm_value == table->data_mask)
+ return -ERANGE;
- writel_relaxed(alarm_value & table.data_mask,
+ writel_relaxed(alarm_value & table->data_mask,
regs + TSADCV2_COMP_INT(chn));
int_en = readl_relaxed(regs + TSADCV2_INT_EN);
int_en |= TSADCV2_INT_SRC_EN(chn);
writel_relaxed(int_en, regs + TSADCV2_INT_EN);
+
+ return 0;
}
-static void rk_tsadcv2_tshut_temp(struct chip_tsadc_table table,
- int chn, void __iomem *regs, int temp)
+static int rk_tsadcv2_tshut_temp(const struct chip_tsadc_table *table,
+ int chn, void __iomem *regs, int temp)
{
u32 tshut_value, val;
/* Make sure the value is valid */
tshut_value = rk_tsadcv2_temp_to_code(table, temp);
- if (tshut_value == table.data_mask)
- return;
+ if (tshut_value == table->data_mask)
+ return -ERANGE;
writel_relaxed(tshut_value, regs + TSADCV2_COMP_SHUT(chn));
/* TSHUT will be valid */
val = readl_relaxed(regs + TSADCV2_AUTO_CON);
writel_relaxed(val | TSADCV2_AUTO_SRC_EN(chn), regs + TSADCV2_AUTO_CON);
+
+ return 0;
}
static void rk_tsadcv2_tshut_mode(int chn, void __iomem *regs,
@@ -883,10 +924,8 @@ static int rockchip_thermal_set_trips(void *_sensor, int low, int high)
dev_dbg(&thermal->pdev->dev, "%s: sensor %d: low: %d, high %d\n",
__func__, sensor->id, low, high);
- tsadc->set_alarm_temp(tsadc->table,
- sensor->id, thermal->regs, high);
-
- return 0;
+ return tsadc->set_alarm_temp(&tsadc->table,
+ sensor->id, thermal->regs, high);
}
static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
@@ -896,7 +935,7 @@ static int rockchip_thermal_get_temp(void *_sensor, int *out_temp)
const struct rockchip_tsadc_chip *tsadc = sensor->thermal->chip;
int retval;
- retval = tsadc->get_temp(tsadc->table,
+ retval = tsadc->get_temp(&tsadc->table,
sensor->id, thermal->regs, out_temp);
dev_dbg(&thermal->pdev->dev, "sensor %d - temp: %d, retval: %d\n",
sensor->id, *out_temp, retval);
@@ -982,8 +1021,12 @@ rockchip_thermal_register_sensor(struct platform_device *pdev,
int error;
tsadc->set_tshut_mode(id, thermal->regs, thermal->tshut_mode);
- tsadc->set_tshut_temp(tsadc->table, id, thermal->regs,
+
+ error = tsadc->set_tshut_temp(&tsadc->table, id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
sensor->thermal = thermal;
sensor->id = id;
@@ -1196,9 +1239,13 @@ static int __maybe_unused rockchip_thermal_resume(struct device *dev)
thermal->chip->set_tshut_mode(id, thermal->regs,
thermal->tshut_mode);
- thermal->chip->set_tshut_temp(thermal->chip->table,
+
+ error = thermal->chip->set_tshut_temp(&thermal->chip->table,
id, thermal->regs,
thermal->tshut_temp);
+ if (error)
+ dev_err(&pdev->dev, "%s: invalid tshut=%d, error=%d\n",
+ __func__, thermal->tshut_temp, error);
}
thermal->chip->control(thermal->regs, true);
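
rk_tsadcv2_temp_to_code() now interpolates linearly between the two table entries bracketing the requested temperature instead of only matching exact grid points. A self-contained userspace sketch of the arithmetic (values hypothetical, ADC_INCREMENT direction):

	#include <stdio.h>

	struct entry { unsigned int code; int temp; };

	static unsigned int temp_to_code(const struct entry *lo,
					 const struct entry *hi, int temp)
	{
		unsigned long num = (unsigned long)(hi->code - lo->code) *
				    (unsigned long)(temp - lo->temp);
		unsigned int denom = hi->temp - lo->temp;

		return lo->code + num / denom;
	}

	int main(void)
	{
		struct entry lo = { .code = 100, .temp = 25000 };
		struct entry hi = { .code = 120, .temp = 30000 };

		/* halfway between the entries -> code 110 */
		printf("%u\n", temp_to_code(&lo, &hi, 27500));
		return 0;
	}
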
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 641faab..6555913 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -799,6 +799,11 @@ static void thermal_release(struct device *dev)
if (!strncmp(dev_name(dev), "thermal_zone",
sizeof("thermal_zone") - 1)) {
tz = to_thermal_zone(dev);
+ kfree(tz->trip_type_attrs);
+ kfree(tz->trip_temp_attrs);
+ kfree(tz->trip_hyst_attrs);
+ kfree(tz->trips_attribute_group.attrs);
+ kfree(tz->device.groups);
kfree(tz);
} else if (!strncmp(dev_name(dev), "cooling_device",
sizeof("cooling_device") - 1)) {
@@ -1305,10 +1310,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_zone_device_set_polling(tz, 0);
- kfree(tz->trip_type_attrs);
- kfree(tz->trip_temp_attrs);
- kfree(tz->trip_hyst_attrs);
- kfree(tz->trips_attribute_group.attrs);
thermal_set_governor(tz, NULL);
thermal_remove_hwmon_sysfs(tz);
@@ -1316,7 +1317,6 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
idr_destroy(&tz->idr);
mutex_destroy(&tz->lock);
device_unregister(&tz->device);
- kfree(tz->device.groups);
}
EXPORT_SYMBOL_GPL(thermal_zone_device_unregister);
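
Moving the kfree() calls into thermal_release() ties the lifetime of the sysfs attribute arrays to the device refcount: they must stay valid until the last reference is dropped, not merely until unregister returns. A minimal sketch of the release-callback pattern, with hypothetical names:

	struct example_zone {
		struct device dev;
		struct attribute **attrs;
	};

	static void example_release(struct device *dev)
	{
		struct example_zone *z =
			container_of(dev, struct example_zone, dev);

		kfree(z->attrs);	/* safe: last reference is gone */
		kfree(z);
	}
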
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 61569a7..76e03a7 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -675,7 +675,7 @@ static struct console univ8250_console = {
.device = uart_console_device,
.setup = univ8250_console_setup,
.match = univ8250_console_match,
- .flags = CON_PRINTBUFFER | CON_ANYTIME | CON_CONSDEV,
+ .flags = CON_PRINTBUFFER | CON_ANYTIME,
.index = -1,
.data = &serial8250_reg,
};
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index aa0166b..116436b 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -5642,17 +5642,15 @@ static pci_ers_result_t serial8250_io_slot_reset(struct pci_dev *dev)
static void serial8250_io_resume(struct pci_dev *dev)
{
struct serial_private *priv = pci_get_drvdata(dev);
- const struct pciserial_board *board;
+ struct serial_private *new;
if (!priv)
return;
- board = priv->board;
- kfree(priv);
- priv = pciserial_init_ports(dev, board);
-
- if (!IS_ERR(priv)) {
- pci_set_drvdata(dev, priv);
+ new = pciserial_init_ports(dev, priv->board);
+ if (!IS_ERR(new)) {
+ pci_set_drvdata(dev, new);
+ kfree(priv);
}
}
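
serial8250_io_resume() now builds the replacement state before discarding the old one, so a failed pciserial_init_ports() leaves valid drvdata behind instead of a freed pointer. A minimal sketch of the allocate-then-swap pattern, with hypothetical names:

	struct example_state;
	struct example_state *example_reinit(struct pci_dev *dev);

	static void example_resume(struct pci_dev *dev)
	{
		struct example_state *old = pci_get_drvdata(dev);
		struct example_state *new = example_reinit(dev);

		if (!IS_ERR(new)) {
			pci_set_drvdata(dev, new);
			kfree(old);	/* old freed only on success */
		}
		/* on failure the previous state remains installed */
	}
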
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index fe4399b..c13fec4 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -1413,7 +1413,7 @@ static void __do_stop_tx_rs485(struct uart_8250_port *p)
* Enable previously disabled RX interrupts.
*/
if (!(p->port.rs485.flags & SER_RS485_RX_DURING_TX)) {
- serial8250_clear_fifos(p);
+ serial8250_clear_and_reinit_fifos(p);
p->ier |= UART_IER_RLSI | UART_IER_RDI;
serial_port_out(&p->port, UART_IER, p->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index 168b10c..fabbe76 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -481,6 +481,14 @@ static void atmel_stop_tx(struct uart_port *port)
/* disable PDC transmit */
atmel_uart_writel(port, ATMEL_PDC_PTCR, ATMEL_PDC_TXTDIS);
}
+
+ /*
+ * Disable the transmitter.
+ * This is mandatory when DMA is used: without it the queued DMA
+ * buffer is still transmitted in full instead of stopping.
+ */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXDIS);
+
/* Disable interrupts */
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
@@ -513,6 +521,9 @@ static void atmel_start_tx(struct uart_port *port)
/* Enable interrupts */
atmel_uart_writel(port, ATMEL_US_IER, atmel_port->tx_done_mask);
+
+ /* re-enable the transmitter */
+ atmel_uart_writel(port, ATMEL_US_CR, ATMEL_US_TXEN);
}
/*
@@ -798,6 +809,11 @@ static void atmel_complete_tx_dma(void *arg)
*/
if (!uart_circ_empty(xmit))
atmel_tasklet_schedule(atmel_port, &atmel_port->tasklet_tx);
+ else if ((port->rs485.flags & SER_RS485_ENABLED) &&
+ !(port->rs485.flags & SER_RS485_RX_DURING_TX)) {
+ /* DMA done, stop TX, start RX for RS485 */
+ atmel_start_rx(port);
+ }
spin_unlock_irqrestore(&port->lock, flags);
}
@@ -900,12 +916,6 @@ static void atmel_tx_dma(struct uart_port *port)
desc->callback = atmel_complete_tx_dma;
desc->callback_param = atmel_port;
atmel_port->cookie_tx = dmaengine_submit(desc);
-
- } else {
- if (port->rs485.flags & SER_RS485_ENABLED) {
- /* DMA done, stop TX, start RX for RS485 */
- atmel_start_rx(port);
- }
}
if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
index 52bbd27..701c085 100644
--- a/drivers/tty/sysrq.c
+++ b/drivers/tty/sysrq.c
@@ -946,8 +946,8 @@ static const struct input_device_id sysrq_ids[] = {
{
.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
INPUT_DEVICE_ID_MATCH_KEYBIT,
- .evbit = { BIT_MASK(EV_KEY) },
- .keybit = { BIT_MASK(KEY_LEFTALT) },
+ .evbit = { [BIT_WORD(EV_KEY)] = BIT_MASK(EV_KEY) },
+ .keybit = { [BIT_WORD(KEY_LEFTALT)] = BIT_MASK(KEY_LEFTALT) },
},
{ },
};
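
Without the designated index, the initializer writes the mask into word 0 of the bitmap, but KEY_LEFTALT is 56 and lands in word 1 on 32-bit longs, so the match table silently tested the wrong bit there. A self-contained sketch of the index math (macros mirror the kernel's definitions):

	#include <stdio.h>

	#define BITS_PER_LONG	(8 * sizeof(unsigned long))
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))
	#define KEY_LEFTALT	56

	int main(void)
	{
		printf("KEY_LEFTALT -> word %zu, mask 0x%lx\n",
		       BIT_WORD(KEY_LEFTALT), BIT_MASK(KEY_LEFTALT));
		return 0;
	}
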
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index 9548d3e..302b8f5 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -513,8 +513,8 @@ struct dwc2_core_params {
/* Gadget parameters */
bool g_dma;
bool g_dma_desc;
- u16 g_rx_fifo_size;
- u16 g_np_tx_fifo_size;
+ u32 g_rx_fifo_size;
+ u32 g_np_tx_fifo_size;
u32 g_tx_fifo_size[MAX_EPS_CHANNELS];
};
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index c55db4a..77c5fcf 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -3169,7 +3169,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg,
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
if (hsotg->params.phy_type == DWC2_PHY_TYPE_PARAM_FS &&
(hsotg->params.speed == DWC2_SPEED_PARAM_FULL ||
@@ -3749,8 +3749,8 @@ static int dwc2_hsotg_ep_enable(struct usb_ep *ep,
__func__, epctrl, epctrl_reg);
/* Allocate DMA descriptor chain for non-ctrl endpoints */
- if (using_desc_dma(hsotg)) {
- hs_ep->desc_list = dma_alloc_coherent(hsotg->dev,
+ if (using_desc_dma(hsotg) && !hs_ep->desc_list) {
+ hs_ep->desc_list = dmam_alloc_coherent(hsotg->dev,
MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
&hs_ep->desc_list_dma, GFP_ATOMIC);
@@ -3872,7 +3872,7 @@ error1:
error2:
if (ret && using_desc_dma(hsotg) && hs_ep->desc_list) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
+ dmam_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
sizeof(struct dwc2_dma_desc),
hs_ep->desc_list, hs_ep->desc_list_dma);
hs_ep->desc_list = NULL;
@@ -3902,14 +3902,6 @@ static int dwc2_hsotg_ep_disable(struct usb_ep *ep)
return -EINVAL;
}
- /* Remove DMA memory allocated for non-control Endpoints */
- if (using_desc_dma(hsotg)) {
- dma_free_coherent(hsotg->dev, MAX_DMA_DESC_NUM_GENERIC *
- sizeof(struct dwc2_dma_desc),
- hs_ep->desc_list, hs_ep->desc_list_dma);
- hs_ep->desc_list = NULL;
- }
-
epctrl_reg = dir_in ? DIEPCTL(index) : DOEPCTL(index);
spin_lock_irqsave(&hsotg->lock, flags);
@@ -4131,7 +4123,7 @@ static void dwc2_hsotg_init(struct dwc2_hsotg *hsotg)
/* keep other bits untouched (so e.g. forced modes are not lost) */
usbcfg = dwc2_readl(hsotg->regs + GUSBCFG);
usbcfg &= ~(GUSBCFG_TOUTCAL_MASK | GUSBCFG_PHYIF16 | GUSBCFG_SRPCAP |
- GUSBCFG_HNPCAP);
+ GUSBCFG_HNPCAP | GUSBCFG_USBTRDTIM_MASK);
/* set the PLL on, remove the HNP/SRP and set the PHY */
trdtim = (hsotg->phyif == GUSBCFG_PHYIF8) ? 9 : 5;
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index 911c3b3..46d0ad5 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -4367,6 +4367,9 @@ static int _dwc2_hcd_suspend(struct usb_hcd *hcd)
if (!HCD_HW_ACCESSIBLE(hcd))
goto unlock;
+ if (hsotg->op_state == OTG_STATE_B_PERIPHERAL)
+ goto unlock;
+
if (!hsotg->params.hibernation)
goto skip_power_saving;
@@ -4489,8 +4492,8 @@ static void dwc2_dump_urb_info(struct usb_hcd *hcd, struct urb *urb,
{
#ifdef VERBOSE_DEBUG
struct dwc2_hsotg *hsotg = dwc2_hcd_to_hsotg(hcd);
- char *pipetype;
- char *speed;
+ char *pipetype = NULL;
+ char *speed = NULL;
dev_vdbg(hsotg->dev, "%s, urb %p\n", fn_name, urb);
dev_vdbg(hsotg->dev, " Device address: %d\n",
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 11fe68a..bcd1e19 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -385,16 +385,16 @@ static void dwc2_set_param(struct dwc2_hsotg *hsotg, void *param,
}
/**
- * dwc2_set_param_u16() - Set a u16 parameter
+ * dwc2_set_param_u32() - Set a u32 parameter
*
* See dwc2_set_param().
*/
-static void dwc2_set_param_u16(struct dwc2_hsotg *hsotg, u16 *param,
+static void dwc2_set_param_u32(struct dwc2_hsotg *hsotg, u32 *param,
bool lookup, char *property, u16 legacy,
u16 def, u16 min, u16 max)
{
dwc2_set_param(hsotg, param, lookup, property,
- legacy, def, min, max, 2);
+ legacy, def, min, max, 4);
}
/**
@@ -1178,12 +1178,12 @@ static void dwc2_set_parameters(struct dwc2_hsotg *hsotg,
* auto-detect if the hardware does not support the
* default.
*/
- dwc2_set_param_u16(hsotg, &p->g_rx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_rx_fifo_size,
true, "g-rx-fifo-size", 2048,
hw->rx_fifo_size,
16, hw->rx_fifo_size);
- dwc2_set_param_u16(hsotg, &p->g_np_tx_fifo_size,
+ dwc2_set_param_u32(hsotg, &p->g_np_tx_fifo_size,
true, "g-np-tx-fifo-size", 1024,
hw->dev_nperio_tx_fifo_size,
16, hw->dev_nperio_tx_fifo_size);
diff --git a/drivers/usb/dwc3/dwc3-exynos.c b/drivers/usb/dwc3/dwc3-exynos.c
index e27899b..e956306 100644
--- a/drivers/usb/dwc3/dwc3-exynos.c
+++ b/drivers/usb/dwc3/dwc3-exynos.c
@@ -138,7 +138,8 @@ static int dwc3_exynos_probe(struct platform_device *pdev)
exynos->axius_clk = devm_clk_get(dev, "usbdrd30_axius_clk");
if (IS_ERR(exynos->axius_clk)) {
dev_err(dev, "no AXI UpScaler clk specified\n");
- return -ENODEV;
+ ret = -ENODEV;
+ goto axius_clk_err;
}
clk_prepare_enable(exynos->axius_clk);
} else {
@@ -196,6 +197,7 @@ err3:
regulator_disable(exynos->vdd33);
err2:
clk_disable_unprepare(exynos->axius_clk);
+axius_clk_err:
clk_disable_unprepare(exynos->susp_clk);
clk_disable_unprepare(exynos->clk);
return ret;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 002822d..49d685a 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -2147,7 +2147,7 @@ int composite_os_desc_req_prepare(struct usb_composite_dev *cdev,
cdev->os_desc_req->buf = kmalloc(4096, GFP_KERNEL);
if (!cdev->os_desc_req->buf) {
ret = -ENOMEM;
- kfree(cdev->os_desc_req);
+ usb_ep_free_request(ep0, cdev->os_desc_req);
goto end;
}
cdev->os_desc_req->context = cdev;
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 5e746ad..5490fc5 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1806,7 +1806,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
unsigned long flags;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
/* pending requests get nuked */
if (likely(ep->ep))
usb_ep_disable(ep->ep);
@@ -1817,7 +1817,7 @@ static void ffs_func_eps_disable(struct ffs_function *func)
__ffs_epfile_read_buffer_free(epfile);
++epfile;
}
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
}
@@ -1831,7 +1831,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
int ret = 0;
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
struct usb_endpoint_descriptor *ds;
int desc_idx;
@@ -1867,7 +1867,7 @@ static int ffs_func_eps_enable(struct ffs_function *func)
++ep;
++epfile;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
return ret;
@@ -3448,12 +3448,12 @@ static void ffs_func_unbind(struct usb_configuration *c,
/* cleanup after autoconfig */
spin_lock_irqsave(&func->ffs->eps_lock, flags);
- do {
+ while (count--) {
if (ep->ep && ep->req)
usb_ep_free_request(ep->ep, ep->req);
ep->req = NULL;
++ep;
- } while (--count);
+ }
spin_unlock_irqrestore(&func->ffs->eps_lock, flags);
kfree(func->eps);
func->eps = NULL;
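
The loop rewrites matter when count can be zero: do { } while (--count) executes the body once and then decrements an unsigned counter past zero, whereas while (count--) runs the body zero times. A self-contained sketch:

	#include <stdio.h>

	int main(void)
	{
		unsigned int count = 0, runs = 0;

		while (count--)		/* false immediately for count == 0 */
			runs++;
		printf("runs=%u\n", runs);	/* prints runs=0 */
		return 0;
	}
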
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c
index f3212db..12c7687 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.c
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.c
@@ -1978,7 +1978,8 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev,
dev_err(&pdev->dev, "of_probe: name error(%d)\n", ret);
goto err;
}
- ep->ep.name = kasprintf(GFP_KERNEL, "ep%d", ep->index);
+ sprintf(ep->name, "ep%d", ep->index);
+ ep->ep.name = ep->name;
ep->ep_regs = udc->regs + USBA_EPT_BASE(i);
ep->dma_regs = udc->regs + USBA_DMA_BASE(i);
diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h
index 3e1c9d5..b03b2eb 100644
--- a/drivers/usb/gadget/udc/atmel_usba_udc.h
+++ b/drivers/usb/gadget/udc/atmel_usba_udc.h
@@ -280,6 +280,7 @@ struct usba_ep {
void __iomem *ep_regs;
void __iomem *dma_regs;
void __iomem *fifo;
+ char name[8];
struct usb_ep ep;
struct usba_udc *udc;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index ddfab30..e5834dd 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -165,7 +165,7 @@ static int xhci_plat_probe(struct platform_device *pdev)
return -ENODEV;
/* Try to set 64-bit DMA first */
- if (WARN_ON(!pdev->dev.dma_mask))
+ if (!pdev->dev.dma_mask)
/* Platform did not initialize dma_mask */
ret = dma_coerce_mask_and_coherent(&pdev->dev,
DMA_BIT_MASK(64));
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 25f522b..e32029a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -913,17 +913,6 @@ void xhci_stop_endpoint_command_watchdog(unsigned long arg)
spin_lock_irqsave(&xhci->lock, flags);
ep->stop_cmds_pending--;
- if (xhci->xhc_state & XHCI_STATE_REMOVING) {
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
- if (xhci->xhc_state & XHCI_STATE_DYING) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Stop EP timer ran, but another timer marked "
- "xHCI as DYING, exiting.");
- spin_unlock_irqrestore(&xhci->lock, flags);
- return;
- }
if (!(ep->stop_cmds_pending == 0 && (ep->ep_state & EP_HALT_PENDING))) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Stop EP timer ran, but no command pending, "
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 0c8deb9..9a0ec11 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1534,19 +1534,6 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
xhci_urb_free_priv(urb_priv);
return ret;
}
- if ((xhci->xhc_state & XHCI_STATE_DYING) ||
- (xhci->xhc_state & XHCI_STATE_HALTED)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Ep 0x%x: URB %p to be canceled on "
- "non-responsive xHCI host.",
- urb->ep->desc.bEndpointAddress, urb);
- /* Let the stop endpoint command watchdog timer (which set this
- * state) finish cleaning up the endpoint TD lists. We must
- * have caught it in the middle of dropping a lock and giving
- * back an URB.
- */
- goto done;
- }
ep_index = xhci_get_endpoint_index(&urb->ep->desc);
ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
diff --git a/drivers/usb/musb/musb_debugfs.c b/drivers/usb/musb/musb_debugfs.c
index 4fef50e..dd70c88 100644
--- a/drivers/usb/musb/musb_debugfs.c
+++ b/drivers/usb/musb/musb_debugfs.c
@@ -114,6 +114,7 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
unsigned i;
seq_printf(s, "MUSB (M)HDRC Register Dump\n");
+ pm_runtime_get_sync(musb->controller);
for (i = 0; i < ARRAY_SIZE(musb_regmap); i++) {
switch (musb_regmap[i].size) {
@@ -132,6 +133,8 @@ static int musb_regdump_show(struct seq_file *s, void *unused)
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return 0;
}
@@ -145,7 +148,10 @@ static int musb_test_mode_show(struct seq_file *s, void *unused)
struct musb *musb = s->private;
unsigned test;
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
if (test & MUSB_TEST_FORCE_HOST)
seq_printf(s, "force host\n");
@@ -194,11 +200,12 @@ static ssize_t musb_test_mode_write(struct file *file,
u8 test;
char buf[18];
+ pm_runtime_get_sync(musb->controller);
test = musb_readb(musb->mregs, MUSB_TESTMODE);
if (test) {
dev_err(musb->controller, "Error: test mode is already set. "
"Please do USB Bus Reset to start a new test.\n");
- return count;
+ goto ret;
}
memset(buf, 0x00, sizeof(buf));
@@ -234,6 +241,9 @@ static ssize_t musb_test_mode_write(struct file *file,
musb_writeb(musb->mregs, MUSB_TESTMODE, test);
+ret:
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
@@ -254,8 +264,13 @@ static int musb_softconnect_show(struct seq_file *s, void *unused)
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
case OTG_STATE_A_WAIT_BCON:
+ pm_runtime_get_sync(musb->controller);
+
reg = musb_readb(musb->mregs, MUSB_DEVCTL);
connect = reg & MUSB_DEVCTL_SESSION ? 1 : 0;
+
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
break;
default:
connect = -1;
@@ -284,6 +299,7 @@ static ssize_t musb_softconnect_write(struct file *file,
if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
return -EFAULT;
+ pm_runtime_get_sync(musb->controller);
if (!strncmp(buf, "0", 1)) {
switch (musb->xceiv->otg->state) {
case OTG_STATE_A_HOST:
@@ -314,6 +330,8 @@ static ssize_t musb_softconnect_write(struct file *file,
}
}
+ pm_runtime_mark_last_busy(musb->controller);
+ pm_runtime_put_autosuspend(musb->controller);
return count;
}
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index 2597b83..95aa523 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -95,6 +95,7 @@ struct ch341_private {
unsigned baud_rate; /* set baud rate */
u8 line_control; /* set line control value RTS/DTR */
u8 line_status; /* active status of modem control inputs */
+ u8 lcr;
};
static void ch341_set_termios(struct tty_struct *tty,
@@ -112,6 +113,8 @@ static int ch341_control_out(struct usb_device *dev, u8 request,
r = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
value, index, NULL, 0, DEFAULT_TIMEOUT);
+ if (r < 0)
+ dev_err(&dev->dev, "failed to send control message: %d\n", r);
return r;
}
@@ -129,11 +132,24 @@ static int ch341_control_in(struct usb_device *dev,
r = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, buf, bufsize, DEFAULT_TIMEOUT);
- return r;
+ if (r < bufsize) {
+ if (r >= 0) {
+ dev_err(&dev->dev,
+ "short control message received (%d < %u)\n",
+ r, bufsize);
+ r = -EIO;
+ }
+
+ dev_err(&dev->dev, "failed to receive control message: %d\n",
+ r);
+ return r;
+ }
+
+ return 0;
}
-static int ch341_init_set_baudrate(struct usb_device *dev,
- struct ch341_private *priv, unsigned ctrl)
+static int ch341_set_baudrate_lcr(struct usb_device *dev,
+ struct ch341_private *priv, u8 lcr)
{
short a;
int r;
@@ -156,9 +172,19 @@ static int ch341_init_set_baudrate(struct usb_device *dev,
factor = 0x10000 - factor;
a = (factor & 0xff00) | divisor;
- /* 0x9c is "enable SFR_UART Control register and timer" */
- r = ch341_control_out(dev, CH341_REQ_SERIAL_INIT,
- 0x9c | (ctrl << 8), a | 0x80);
+ /*
+ * CH341A buffers data until a full endpoint-size packet (32 bytes)
+ * has been received unless bit 7 is set.
+ */
+ a |= BIT(7);
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x1312, a);
+ if (r)
+ return r;
+
+ r = ch341_control_out(dev, CH341_REQ_WRITE_REG, 0x2518, lcr);
+ if (r)
+ return r;
return r;
}
@@ -170,9 +196,9 @@ static int ch341_set_handshake(struct usb_device *dev, u8 control)
static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
unsigned long flags;
buffer = kmalloc(size, GFP_KERNEL);
@@ -183,14 +209,9 @@ static int ch341_get_status(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- /* setup the private status if available */
- if (r == 2) {
- r = 0;
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
- spin_unlock_irqrestore(&priv->lock, flags);
- } else
- r = -EPROTO;
+ spin_lock_irqsave(&priv->lock, flags);
+ priv->line_status = (~(*buffer)) & CH341_BITS_MODEM_STAT;
+ spin_unlock_irqrestore(&priv->lock, flags);
out: kfree(buffer);
return r;
@@ -200,9 +221,9 @@ out: kfree(buffer);
static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
{
+ const unsigned int size = 2;
char *buffer;
int r;
- const unsigned size = 8;
buffer = kmalloc(size, GFP_KERNEL);
if (!buffer)
@@ -232,7 +253,7 @@ static int ch341_configure(struct usb_device *dev, struct ch341_private *priv)
if (r < 0)
goto out;
- r = ch341_init_set_baudrate(dev, priv, 0);
+ r = ch341_set_baudrate_lcr(dev, priv, priv->lcr);
if (r < 0)
goto out;
@@ -258,7 +279,6 @@ static int ch341_port_probe(struct usb_serial_port *port)
spin_lock_init(&priv->lock);
priv->baud_rate = DEFAULT_BAUD_RATE;
- priv->line_control = CH341_BIT_RTS | CH341_BIT_DTR;
r = ch341_configure(port->serial->dev, priv);
if (r < 0)
@@ -320,7 +340,7 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
r = ch341_configure(serial->dev, priv);
if (r)
- goto out;
+ return r;
if (tty)
ch341_set_termios(tty, port, NULL);
@@ -330,12 +350,19 @@ static int ch341_open(struct tty_struct *tty, struct usb_serial_port *port)
if (r) {
dev_err(&port->dev, "%s - failed to submit interrupt urb: %d\n",
__func__, r);
- goto out;
+ return r;
}
r = usb_serial_generic_open(tty, port);
+ if (r)
+ goto err_kill_interrupt_urb;
+
+ return 0;
+
+err_kill_interrupt_urb:
+ usb_kill_urb(port->interrupt_in_urb);
-out: return r;
+ return r;
}
/* Old_termios contains the original termios settings and
@@ -356,7 +383,6 @@ static void ch341_set_termios(struct tty_struct *tty,
baud_rate = tty_get_baud_rate(tty);
- priv->baud_rate = baud_rate;
ctrl = CH341_LCR_ENABLE_RX | CH341_LCR_ENABLE_TX;
switch (C_CSIZE(tty)) {
@@ -386,22 +412,25 @@ static void ch341_set_termios(struct tty_struct *tty,
ctrl |= CH341_LCR_STOP_BITS_2;
if (baud_rate) {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
- r = ch341_init_set_baudrate(port->serial->dev, priv, ctrl);
+ priv->baud_rate = baud_rate;
+
+ r = ch341_set_baudrate_lcr(port->serial->dev, priv, ctrl);
if (r < 0 && old_termios) {
priv->baud_rate = tty_termios_baud_rate(old_termios);
tty_termios_copy_hw(&tty->termios, old_termios);
+ } else if (r == 0) {
+ priv->lcr = ctrl;
}
- } else {
- spin_lock_irqsave(&priv->lock, flags);
- priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
- spin_unlock_irqrestore(&priv->lock, flags);
}
- ch341_set_handshake(port->serial->dev, priv->line_control);
+ spin_lock_irqsave(&priv->lock, flags);
+ if (C_BAUD(tty) == B0)
+ priv->line_control &= ~(CH341_BIT_DTR | CH341_BIT_RTS);
+ else if (old_termios && (old_termios->c_cflag & CBAUD) == B0)
+ priv->line_control |= (CH341_BIT_DTR | CH341_BIT_RTS);
+ spin_unlock_irqrestore(&priv->lock, flags);
+ ch341_set_handshake(port->serial->dev, priv->line_control);
}
static void ch341_break_ctl(struct tty_struct *tty, int break_state)
@@ -576,14 +605,23 @@ static int ch341_tiocmget(struct tty_struct *tty)
static int ch341_reset_resume(struct usb_serial *serial)
{
- struct ch341_private *priv;
-
- priv = usb_get_serial_port_data(serial->port[0]);
+ struct usb_serial_port *port = serial->port[0];
+ struct ch341_private *priv = usb_get_serial_port_data(port);
+ int ret;
/* reconfigure ch341 serial port after bus-reset */
ch341_configure(serial->dev, priv);
- return 0;
+ if (tty_port_initialized(&port->port)) {
+ ret = usb_submit_urb(port->interrupt_in_urb, GFP_NOIO);
+ if (ret) {
+ dev_err(&port->dev, "failed to submit interrupt urb: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ return usb_serial_generic_resume(serial);
}
static struct usb_serial_driver ch341_device = {
diff --git a/drivers/usb/serial/kl5kusb105.c b/drivers/usb/serial/kl5kusb105.c
index 0ee190f..6cb4575 100644
--- a/drivers/usb/serial/kl5kusb105.c
+++ b/drivers/usb/serial/kl5kusb105.c
@@ -192,10 +192,11 @@ static int klsi_105_get_line_state(struct usb_serial_port *port,
status_buf, KLSI_STATUSBUF_LEN,
10000
);
- if (rc < 0)
- dev_err(&port->dev, "Reading line status failed (error = %d)\n",
- rc);
- else {
+ if (rc != KLSI_STATUSBUF_LEN) {
+ dev_err(&port->dev, "reading line status failed: %d\n", rc);
+ if (rc >= 0)
+ rc = -EIO;
+ } else {
status = get_unaligned_le16(status_buf);
dev_info(&port->serial->dev->dev, "read status %x %x\n",
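The kl5kusb105 hunk above tightens a common USB pitfall: usb_control_msg() may return a non-negative byte count shorter than requested, which the old code treated as success. A minimal sketch of the repaired pattern, with hypothetical names (pipe, expected_len) standing in for the driver's specifics:

	rc = usb_control_msg(dev, pipe, request, requesttype, value, index,
			     buf, expected_len, timeout);
	if (rc != expected_len) {
		dev_err(&port->dev, "short read: %d\n", rc);
		if (rc >= 0)		/* short transfer, not a USB error */
			rc = -EIO;	/* normalize to a real error code */
	}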
diff --git a/drivers/usb/wusbcore/crypto.c b/drivers/usb/wusbcore/crypto.c
index 79451f7..062c205 100644
--- a/drivers/usb/wusbcore/crypto.c
+++ b/drivers/usb/wusbcore/crypto.c
@@ -216,7 +216,6 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
struct scatterlist sg[4], sg_dst;
void *dst_buf;
size_t dst_size;
- const u8 bzero[16] = { 0 };
u8 iv[crypto_skcipher_ivsize(tfm_cbc)];
size_t zero_padding;
@@ -261,7 +260,7 @@ static int wusb_ccm_mac(struct crypto_skcipher *tfm_cbc,
sg_set_buf(&sg[1], &scratch->b1, sizeof(scratch->b1));
sg_set_buf(&sg[2], b, blen);
/* 0 if well behaved :) */
- sg_set_buf(&sg[3], bzero, zero_padding);
+ sg_set_page(&sg[3], ZERO_PAGE(0), zero_padding, 0);
sg_init_one(&sg_dst, dst_buf, dst_size);
skcipher_request_set_tfm(req, tfm_cbc);
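The wusbcore change swaps an on-stack zero buffer for the kernel's shared zero page. Scatterlist entries are page-based, and with CONFIG_VMAP_STACK the stack may live in vmalloc space where virt_to_page() is invalid, so sg_set_buf() on a stack array is unsafe. A hedged sketch of the safe form:

	struct scatterlist sg;

	/* Reference the shared all-zeroes page instead of stack memory;
	 * this stays valid even with CONFIG_VMAP_STACK enabled. */
	sg_init_table(&sg, 1);
	sg_set_page(&sg, ZERO_PAGE(0), zero_padding, 0);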
diff --git a/drivers/vfio/vfio_iommu_spapr_tce.c b/drivers/vfio/vfio_iommu_spapr_tce.c
index c882357..128d102 100644
--- a/drivers/vfio/vfio_iommu_spapr_tce.c
+++ b/drivers/vfio/vfio_iommu_spapr_tce.c
@@ -1270,6 +1270,10 @@ static int tce_iommu_attach_group(void *iommu_data,
/* pr_debug("tce_vfio: Attaching group #%u to iommu %p\n",
iommu_group_id(iommu_group), iommu_group); */
table_group = iommu_group_get_iommudata(iommu_group);
+ if (!table_group) {
+ ret = -ENODEV;
+ goto unlock_exit;
+ }
if (tce_groups_attached(container) && (!table_group->ops ||
!table_group->ops->take_ownership ||
diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c
index 9266271..b3cc33f 100644
--- a/drivers/vfio/vfio_iommu_type1.c
+++ b/drivers/vfio/vfio_iommu_type1.c
@@ -36,7 +36,6 @@
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>
-#include <linux/pid_namespace.h>
#include <linux/mdev.h>
#include <linux/notifier.h>
@@ -495,8 +494,7 @@ static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
unsigned long *pfn_base, bool do_accounting)
{
unsigned long limit;
- bool lock_cap = ns_capable(task_active_pid_ns(dma->task)->user_ns,
- CAP_IPC_LOCK);
+ bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
struct mm_struct *mm;
int ret;
bool rsvd;
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 5dc3465..c42e9c3 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -351,6 +351,15 @@ static int vhost_net_tx_get_vq_desc(struct vhost_net *net,
return r;
}
+static bool vhost_exceeds_maxpend(struct vhost_net *net)
+{
+ struct vhost_net_virtqueue *nvq = &net->vqs[VHOST_NET_VQ_TX];
+ struct vhost_virtqueue *vq = &nvq->vq;
+
+ return (nvq->upend_idx + vq->num - VHOST_MAX_PEND) % UIO_MAXIOV
+ == nvq->done_idx;
+}
+
/* Expects to be always run from workqueue - which acts as
* read-size critical section for our kind of RCU. */
static void handle_tx(struct vhost_net *net)
@@ -394,8 +403,7 @@ static void handle_tx(struct vhost_net *net)
/* If more outstanding DMAs, queue the work.
* Handle upend_idx wrap around
*/
- if (unlikely((nvq->upend_idx + vq->num - VHOST_MAX_PEND)
- % UIO_MAXIOV == nvq->done_idx))
+ if (unlikely(vhost_exceeds_maxpend(net)))
break;
head = vhost_net_tx_get_vq_desc(net, vq, vq->iov,
@@ -454,6 +462,16 @@ static void handle_tx(struct vhost_net *net)
msg.msg_control = NULL;
ubufs = NULL;
}
+
+ total_len += len;
+ if (total_len < VHOST_NET_WEIGHT &&
+ !vhost_vq_avail_empty(&net->dev, vq) &&
+ likely(!vhost_exceeds_maxpend(net))) {
+ msg.msg_flags |= MSG_MORE;
+ } else {
+ msg.msg_flags &= ~MSG_MORE;
+ }
+
/* TODO: Check specific error and bomb out unless ENOBUFS? */
err = sock->ops->sendmsg(sock, &msg, len);
if (unlikely(err < 0)) {
@@ -472,7 +490,6 @@ static void handle_tx(struct vhost_net *net)
vhost_add_used_and_signal(&net->dev, vq, head, 0);
else
vhost_zerocopy_signal_used(net, vq);
- total_len += len;
vhost_net_tx_packet(net);
if (unlikely(total_len >= VHOST_NET_WEIGHT)) {
vhost_poll_queue(&vq->poll);
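The MSG_MORE logic added to handle_tx() hints to the socket layer that another packet follows immediately, letting the transport coalesce, and clears the hint on the last packet of a burst or when the zerocopy pend limit is near. The flag itself is generic socket API; a condensed illustration, where more_to_send is a placeholder predicate:

	struct msghdr msg = { };

	if (more_to_send)
		msg.msg_flags |= MSG_MORE;	/* batch: more data follows */
	else
		msg.msg_flags &= ~MSG_MORE;	/* flush on the final packet */
	err = sock->ops->sendmsg(sock, &msg, len);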
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 253310c..fd6c8b6 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -843,7 +843,7 @@ vhost_scsi_handle_vq(struct vhost_scsi *vs, struct vhost_virtqueue *vq)
struct iov_iter out_iter, in_iter, prot_iter, data_iter;
u64 tag;
u32 exp_data_len, data_direction;
- unsigned out, in;
+ unsigned int out = 0, in = 0;
int head, ret, prot_bytes;
size_t req_size, rsp_size = sizeof(struct virtio_scsi_cmd_resp);
size_t out_size, in_size;
@@ -2087,7 +2087,7 @@ static struct configfs_attribute *vhost_scsi_wwn_attrs[] = {
NULL,
};
-static struct target_core_fabric_ops vhost_scsi_ops = {
+static const struct target_core_fabric_ops vhost_scsi_ops = {
.module = THIS_MODULE,
.name = "vhost",
.get_fabric_name = vhost_scsi_get_fabric_name,
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index d643260..9f11838 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -2241,11 +2241,15 @@ bool vhost_vq_avail_empty(struct vhost_dev *dev, struct vhost_virtqueue *vq)
__virtio16 avail_idx;
int r;
+ if (vq->avail_idx != vq->last_avail_idx)
+ return false;
+
r = vhost_get_user(vq, avail_idx, &vq->avail->idx);
- if (r)
+ if (unlikely(r))
return false;
+ vq->avail_idx = vhost16_to_cpu(vq, avail_idx);
- return vhost16_to_cpu(vq, avail_idx) == vq->avail_idx;
+ return vq->avail_idx == vq->last_avail_idx;
}
EXPORT_SYMBOL_GPL(vhost_vq_avail_empty);
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index bbbf5885..ce5e63d 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -373,6 +373,7 @@ static void vhost_vsock_handle_rx_kick(struct vhost_work *work)
static int vhost_vsock_start(struct vhost_vsock *vsock)
{
+ struct vhost_virtqueue *vq;
size_t i;
int ret;
@@ -383,19 +384,20 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
goto err;
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
if (!vhost_vq_access_ok(vq)) {
ret = -EFAULT;
- mutex_unlock(&vq->mutex);
goto err_vq;
}
if (!vq->private_data) {
vq->private_data = vsock;
- vhost_vq_init_access(vq);
+ ret = vhost_vq_init_access(vq);
+ if (ret)
+ goto err_vq;
}
mutex_unlock(&vq->mutex);
@@ -405,8 +407,11 @@ static int vhost_vsock_start(struct vhost_vsock *vsock)
return 0;
err_vq:
+ vq->private_data = NULL;
+ mutex_unlock(&vq->mutex);
+
for (i = 0; i < ARRAY_SIZE(vsock->vqs); i++) {
- struct vhost_virtqueue *vq = &vsock->vqs[i];
+ vq = &vsock->vqs[i];
mutex_lock(&vq->mutex);
vq->private_data = NULL;
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index f89245b..68a1135 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -163,17 +163,18 @@ void fb_dealloc_cmap(struct fb_cmap *cmap)
int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
@@ -187,17 +188,18 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ unsigned int tooff = 0, fromoff = 0;
+ size_t size;
if (to->start > from->start)
fromoff = to->start - from->start;
else
tooff = from->start - to->start;
- size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
- size = from->len - fromoff;
- if (size <= 0)
+ if (fromoff >= from->len || tooff >= to->len)
+ return -EINVAL;
+
+ size = min_t(size_t, to->len - tooff, from->len - fromoff);
+ if (size == 0)
return -EINVAL;
size *= sizeof(u16);
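Both fbcmap helpers move from signed int arithmetic to unsigned offsets with up-front range checks. The cmap len fields are unsigned, so from->len - fromoff silently wraps when fromoff exceeds from->len, and the old signed casts could let an out-of-bounds size through. An illustrative note on the wraparound (the numbers are made up):

	/* With u32 math and from->len = 16, fromoff = 32:
	 *   from->len - fromoff == 0xfffffff0, not -16,
	 * so "size > (int)(from->len - fromoff)" is unreliable.
	 * Checking "fromoff >= from->len" first makes the subtraction,
	 * and the min_t() clamp, safe. */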
diff --git a/drivers/virtio/virtio_mmio.c b/drivers/virtio/virtio_mmio.c
index d47a2fc..c71fde5 100644
--- a/drivers/virtio/virtio_mmio.c
+++ b/drivers/virtio/virtio_mmio.c
@@ -59,6 +59,7 @@
#define pr_fmt(fmt) "virtio-mmio: " fmt
#include <linux/acpi.h>
+#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -498,6 +499,7 @@ static int virtio_mmio_probe(struct platform_device *pdev)
struct virtio_mmio_device *vm_dev;
struct resource *mem;
unsigned long magic;
+ int rc;
mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!mem)
@@ -547,9 +549,25 @@ static int virtio_mmio_probe(struct platform_device *pdev)
}
vm_dev->vdev.id.vendor = readl(vm_dev->base + VIRTIO_MMIO_VENDOR_ID);
- if (vm_dev->version == 1)
+ if (vm_dev->version == 1) {
writel(PAGE_SIZE, vm_dev->base + VIRTIO_MMIO_GUEST_PAGE_SIZE);
+ rc = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+ /*
+ * In the legacy case, ensure our coherently-allocated virtio
+ * ring will be at an address expressible as a 32-bit PFN.
+ */
+ if (!rc)
+ dma_set_coherent_mask(&pdev->dev,
+ DMA_BIT_MASK(32 + PAGE_SHIFT));
+ } else {
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+ }
+ if (rc)
+ rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+ if (rc)
+ dev_warn(&pdev->dev, "Failed to enable 64-bit or 32-bit DMA. Trying to continue, but this might not work.\n");
+
platform_set_drvdata(pdev, vm_dev);
return register_virtio_device(&vm_dev->vdev);
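virtio_mmio_probe() now negotiates DMA masks with a fallback chain: try 64-bit, constrain the coherent mask for legacy (version 1) devices whose guest-page register limits ring placement, and fall back to 32-bit before continuing with a warning. The chain itself is a stock DMA API idiom; a condensed sketch for a generic struct device *dev:

	rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
	if (rc)
		rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (rc)
		dev_warn(dev, "no usable DMA mask, continuing anyway\n");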
diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
index 409aeaa..7e38ed7 100644
--- a/drivers/virtio/virtio_ring.c
+++ b/drivers/virtio/virtio_ring.c
@@ -159,6 +159,13 @@ static bool vring_use_dma_api(struct virtio_device *vdev)
if (xen_domain())
return true;
+ /*
+ * On ARM-based machines, the DMA ops will do the right thing,
+ * so always use them with legacy devices.
+ */
+ if (IS_ENABLED(CONFIG_ARM) || IS_ENABLED(CONFIG_ARM64))
+ return !virtio_has_feature(vdev, VIRTIO_F_VERSION_1);
+
return false;
}
diff --git a/drivers/vme/bridges/vme_ca91cx42.c b/drivers/vme/bridges/vme_ca91cx42.c
index 6b5ee89..7cc5122 100644
--- a/drivers/vme/bridges/vme_ca91cx42.c
+++ b/drivers/vme/bridges/vme_ca91cx42.c
@@ -464,7 +464,7 @@ static int ca91cx42_slave_get(struct vme_slave_resource *image, int *enabled,
vme_bound = ioread32(bridge->base + CA91CX42_VSI_BD[i]);
pci_offset = ioread32(bridge->base + CA91CX42_VSI_TO[i]);
- *pci_base = (dma_addr_t)vme_base + pci_offset;
+ *pci_base = (dma_addr_t)*vme_base + pci_offset;
*size = (unsigned long long)((vme_bound - *vme_base) + granularity);
*enabled = 0;
diff --git a/drivers/xen/platform-pci.c b/drivers/xen/platform-pci.c
index 112ce42..2a165cc 100644
--- a/drivers/xen/platform-pci.c
+++ b/drivers/xen/platform-pci.c
@@ -42,6 +42,7 @@
static unsigned long platform_mmio;
static unsigned long platform_mmio_alloc;
static unsigned long platform_mmiolen;
+static uint64_t callback_via;
static unsigned long alloc_xen_mmio(unsigned long len)
{
@@ -54,6 +55,51 @@ static unsigned long alloc_xen_mmio(unsigned long len)
return addr;
}
+static uint64_t get_callback_via(struct pci_dev *pdev)
+{
+ u8 pin;
+ int irq;
+
+ irq = pdev->irq;
+ if (irq < 16)
+ return irq; /* ISA IRQ */
+
+ pin = pdev->pin;
+
+ /* We don't know the GSI. Specify the PCI INTx line instead. */
+ return ((uint64_t)0x01 << HVM_CALLBACK_VIA_TYPE_SHIFT) | /* PCI INTx identifier */
+ ((uint64_t)pci_domain_nr(pdev->bus) << 32) |
+ ((uint64_t)pdev->bus->number << 16) |
+ ((uint64_t)(pdev->devfn & 0xff) << 8) |
+ ((uint64_t)(pin - 1) & 3);
+}
+
+static irqreturn_t do_hvm_evtchn_intr(int irq, void *dev_id)
+{
+ xen_hvm_evtchn_do_upcall();
+ return IRQ_HANDLED;
+}
+
+static int xen_allocate_irq(struct pci_dev *pdev)
+{
+ return request_irq(pdev->irq, do_hvm_evtchn_intr,
+ IRQF_NOBALANCING | IRQF_TRIGGER_RISING,
+ "xen-platform-pci", pdev);
+}
+
+static int platform_pci_resume(struct pci_dev *pdev)
+{
+ int err;
+ if (!xen_pv_domain())
+ return 0;
+ err = xen_set_callback_via(callback_via);
+ if (err) {
+ dev_err(&pdev->dev, "platform_pci_resume failure!\n");
+ return err;
+ }
+ return 0;
+}
+
static int platform_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -92,6 +138,28 @@ static int platform_pci_probe(struct pci_dev *pdev,
platform_mmio = mmio_addr;
platform_mmiolen = mmio_len;
+ /*
+ * Xen HVM guests always use the vector callback mechanism.
+ * L1 Dom0 in a nested Xen environment is a PV guest running inside an
+ * HVM environment. It needs the platform-pci driver to get
+ * notifications from L0 Xen, but it cannot use the vector callback
+ * as it is not exported by L1 Xen.
+ */
+ if (xen_pv_domain()) {
+ ret = xen_allocate_irq(pdev);
+ if (ret) {
+ dev_warn(&pdev->dev, "request_irq failed err=%d\n", ret);
+ goto out;
+ }
+ callback_via = get_callback_via(pdev);
+ ret = xen_set_callback_via(callback_via);
+ if (ret) {
+ dev_warn(&pdev->dev, "Unable to set the evtchn callback "
+ "err=%d\n", ret);
+ goto out;
+ }
+ }
+
max_nr_gframes = gnttab_max_grant_frames();
grant_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
ret = gnttab_setup_auto_xlat_frames(grant_frames);
@@ -123,6 +191,9 @@ static struct pci_driver platform_driver = {
.name = DRV_NAME,
.probe = platform_pci_probe,
.id_table = platform_pci_tbl,
+#ifdef CONFIG_PM
+ .resume_early = platform_pci_resume,
+#endif
};
builtin_pci_driver(platform_driver);
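get_callback_via() packs the interrupt routing into Xen's 64-bit "callback via" value: the type in the top byte (HVM_CALLBACK_VIA_TYPE_SHIFT is 56 in the Xen interface headers), then PCI domain, bus, devfn, and the INTx pin. A worked example, assuming a device at 0000:00:03.0 using INTA:

	/* domain 0, bus 0, devfn 0x18 (slot 3, function 0), pin 1 (INTA):
	 *   via = (1ULL << 56) | (0ULL << 32) | (0ULL << 16)
	 *       | (0x18ULL << 8) | (1 - 1)
	 *       = 0x0100000000001800
	 */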
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index f905d6e..f8afc6d 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -414,9 +414,9 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
if (map == SWIOTLB_MAP_ERROR)
return DMA_ERROR_CODE;
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr, map & ~PAGE_MASK, size, dir, attrs);
- dev_addr = xen_phys_to_bus(map);
/*
* Ensure that the address returned is DMA'ble
@@ -575,13 +575,14 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
sg_dma_len(sgl) = 0;
return 0;
}
+ dev_addr = xen_phys_to_bus(map);
xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
dev_addr,
map & ~PAGE_MASK,
sg->length,
dir,
attrs);
- sg->dma_address = xen_phys_to_bus(map);
+ sg->dma_address = dev_addr;
} else {
/* we are not interested in the dma_addr returned by
* xen_dma_map_page, only in the potential cache flushes executed
diff --git a/fs/Kconfig b/fs/Kconfig
index c2a377c..83eab52 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -38,6 +38,7 @@ config FS_DAX
bool "Direct Access (DAX) support"
depends on MMU
depends on !(ARM || MIPS || SPARC)
+ select FS_IOMAP
help
Direct Access (DAX) can be used on memory-backed block devices.
If the block device supports DAX and the filesystem supports DAX,
diff --git a/fs/afs/file.c b/fs/afs/file.c
index 82897a7..ba7b71f 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -267,7 +267,9 @@ static int afs_readpage(struct file *file, struct page *page)
*/
static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
{
+#ifdef CONFIG_AFS_FSCACHE
struct afs_vnode *vnode = call->reply;
+#endif
struct page *page = req->pages[req->index];
req->pages[req->index] = NULL;
diff --git a/fs/aio.c b/fs/aio.c
index 4ab67e8..873b4ca 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1085,7 +1085,8 @@ static void aio_complete(struct kiocb *kiocb, long res, long res2)
* Tell lockdep we inherited freeze protection from submission
* thread.
*/
- __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_acquired(file_inode(file)->i_sb, SB_FREEZE_WRITE);
file_end_write(file);
}
@@ -1525,7 +1526,8 @@ static ssize_t aio_write(struct kiocb *req, struct iocb *iocb, bool vectored,
* by telling it the lock got released so that it doesn't
* complain about held lock when we return to userspace.
*/
- __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
+ if (S_ISREG(file_inode(file)->i_mode))
+ __sb_writers_release(file_inode(file)->i_sb, SB_FREEZE_WRITE);
}
kfree(iovec);
return ret;
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 29a02da..4223702 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -2298,6 +2298,7 @@ static int elf_core_dump(struct coredump_params *cprm)
goto end_coredump;
}
}
+ dump_truncate(cprm);
if (!elf_core_write_extra_data(cprm))
goto end_coredump;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 5db5d13..3c47614 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -331,7 +331,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
struct blk_plug plug;
struct blkdev_dio *dio;
struct bio *bio;
- bool is_read = (iov_iter_rw(iter) == READ);
+ bool is_read = (iov_iter_rw(iter) == READ), is_sync;
loff_t pos = iocb->ki_pos;
blk_qc_t qc = BLK_QC_T_NONE;
int ret;
@@ -344,7 +344,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
bio_get(bio); /* extra ref for the completion handler */
dio = container_of(bio, struct blkdev_dio, bio);
- dio->is_sync = is_sync_kiocb(iocb);
+ dio->is_sync = is_sync = is_sync_kiocb(iocb);
if (dio->is_sync)
dio->waiter = current;
else
@@ -398,7 +398,7 @@ __blkdev_direct_IO(struct kiocb *iocb, struct iov_iter *iter, int nr_pages)
}
blk_finish_plug(&plug);
- if (!dio->is_sync)
+ if (!is_sync)
return -EIOCBQUEUED;
for (;;) {
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 63d1977..ff0b0be 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -273,6 +273,8 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
unsigned long flags;
while (1) {
+ void *wtag;
+
spin_lock_irqsave(lock, flags);
if (list_empty(list))
break;
@@ -299,11 +301,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
spin_unlock_irqrestore(lock, flags);
/*
- * we don't want to call the ordered free functions
- * with the lock held though
+ * We don't want to call the ordered free functions with the
+ * lock held, though. Save the work pointer as a tag for the trace
+ * event, because the callback could free the structure.
*/
+ wtag = work;
work->ordered_free(work);
- trace_btrfs_all_work_done(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
}
spin_unlock_irqrestore(lock, flags);
}
@@ -311,6 +315,7 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
static void normal_work_helper(struct btrfs_work *work)
{
struct __btrfs_workqueue *wq;
+ void *wtag;
int need_order = 0;
/*
@@ -324,6 +329,8 @@ static void normal_work_helper(struct btrfs_work *work)
if (work->ordered_func)
need_order = 1;
wq = work->wq;
+ /* Safe for tracepoints in case work gets freed by the callback */
+ wtag = work;
trace_btrfs_work_sched(work);
thresh_exec_hook(wq);
@@ -333,7 +340,7 @@ static void normal_work_helper(struct btrfs_work *work)
run_ordered_work(wq);
}
if (!need_order)
- trace_btrfs_all_work_done(work);
+ trace_btrfs_all_work_done(wq->fs_info, wtag);
}
void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index e97302f..dcd2e79 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -2522,11 +2522,11 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
if (ref && ref->seq &&
btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
spin_unlock(&locked_ref->lock);
- btrfs_delayed_ref_unlock(locked_ref);
spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
delayed_refs->num_heads_ready++;
spin_unlock(&delayed_refs->lock);
+ btrfs_delayed_ref_unlock(locked_ref);
locked_ref = NULL;
cond_resched();
count++;
@@ -2572,7 +2572,10 @@ static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
*/
if (must_insert_reserved)
locked_ref->must_insert_reserved = 1;
+ spin_lock(&delayed_refs->lock);
locked_ref->processing = 0;
+ delayed_refs->num_heads_ready++;
+ spin_unlock(&delayed_refs->lock);
btrfs_debug(fs_info,
"run_delayed_extent_op returned %d",
ret);
@@ -7384,7 +7387,8 @@ btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
spin_unlock(&cluster->refill_lock);
- down_read(&used_bg->data_rwsem);
+ /* We should only have one level of nesting. */
+ down_read_nested(&used_bg->data_rwsem, SINGLE_DEPTH_NESTING);
spin_lock(&cluster->refill_lock);
if (used_bg == cluster->block_group)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f2b281a..1e861a0 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3835,10 +3835,7 @@ cache_acl:
break;
case S_IFDIR:
inode->i_fop = &btrfs_dir_file_operations;
- if (root == fs_info->tree_root)
- inode->i_op = &btrfs_dir_ro_inode_operations;
- else
- inode->i_op = &btrfs_dir_inode_operations;
+ inode->i_op = &btrfs_dir_inode_operations;
break;
case S_IFLNK:
inode->i_op = &btrfs_symlink_inode_operations;
@@ -4505,8 +4502,19 @@ search_again:
if (found_type > min_type) {
del_item = 1;
} else {
- if (item_end < new_size)
+ if (item_end < new_size) {
+ /*
+ * With NO_HOLES mode, for the following mapping
+ *
+ * [0-4k][hole][8k-12k]
+ *
+ * if truncating isize down to 6k, it ends up
+ * isize being 8k.
+ */
+ if (btrfs_fs_incompat(root->fs_info, NO_HOLES))
+ last_size = new_size;
break;
+ }
if (found_key.offset >= new_size)
del_item = 1;
else
@@ -5710,6 +5718,7 @@ static struct inode *new_simple_dir(struct super_block *s,
inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
inode->i_op = &btrfs_dir_ro_inode_operations;
+ inode->i_opflags &= ~IOP_XATTR;
inode->i_fop = &simple_dir_operations;
inode->i_mode = S_IFDIR | S_IRUGO | S_IWUSR | S_IXUGO;
inode->i_mtime = current_time(inode);
@@ -7059,7 +7068,7 @@ insert:
write_unlock(&em_tree->lock);
out:
- trace_btrfs_get_extent(root, em);
+ trace_btrfs_get_extent(root, inode, em);
btrfs_free_path(path);
if (trans) {
@@ -7215,7 +7224,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
struct extent_map *em = NULL;
int ret;
- down_read(&BTRFS_I(inode)->dio_sem);
if (type != BTRFS_ORDERED_NOCOW) {
em = create_pinned_em(inode, start, len, orig_start,
block_start, block_len, orig_block_len,
@@ -7234,7 +7242,6 @@ static struct extent_map *btrfs_create_dio_extent(struct inode *inode,
em = ERR_PTR(ret);
}
out:
- up_read(&BTRFS_I(inode)->dio_sem);
return em;
}
@@ -7623,11 +7630,18 @@ static void adjust_dio_outstanding_extents(struct inode *inode,
* within our reservation, otherwise we need to adjust our inode
* counter appropriately.
*/
- if (dio_data->outstanding_extents) {
+ if (dio_data->outstanding_extents >= num_extents) {
dio_data->outstanding_extents -= num_extents;
} else {
+ /*
+ * If the dio write has been split because there was not
+ * enough contiguous space, we need to compensate our inode
+ * counter appropriately.
+ */
+ u64 num_needed = num_extents - dio_data->outstanding_extents;
+
spin_lock(&BTRFS_I(inode)->lock);
- BTRFS_I(inode)->outstanding_extents += num_extents;
+ BTRFS_I(inode)->outstanding_extents += num_needed;
spin_unlock(&BTRFS_I(inode)->lock);
}
}
@@ -8685,6 +8699,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
dio_data.unsubmitted_oe_range_start = (u64)offset;
dio_data.unsubmitted_oe_range_end = (u64)offset;
current->journal_info = &dio_data;
+ down_read(&BTRFS_I(inode)->dio_sem);
} else if (test_bit(BTRFS_INODE_READDIO_NEED_LOCK,
&BTRFS_I(inode)->runtime_flags)) {
inode_dio_end(inode);
@@ -8697,6 +8712,7 @@ static ssize_t btrfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
iter, btrfs_get_blocks_direct, NULL,
btrfs_submit_direct, flags);
if (iov_iter_rw(iter) == WRITE) {
+ up_read(&BTRFS_I(inode)->dio_sem);
current->journal_info = NULL;
if (ret < 0 && ret != -EIOCBQUEUED) {
if (dio_data.reserve)
@@ -9205,6 +9221,7 @@ static int btrfs_truncate(struct inode *inode)
break;
}
+ btrfs_block_rsv_release(fs_info, rsv, -1);
ret = btrfs_block_rsv_migrate(&fs_info->trans_block_rsv,
rsv, min_size, 0);
BUG_ON(ret); /* shouldn't happen */
@@ -10572,8 +10589,6 @@ static const struct inode_operations btrfs_dir_inode_operations = {
static const struct inode_operations btrfs_dir_ro_inode_operations = {
.lookup = btrfs_lookup,
.permission = btrfs_permission,
- .get_acl = btrfs_get_acl,
- .set_acl = btrfs_set_acl,
.update_time = btrfs_update_time,
};
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index f10bf52..eeffff8 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -37,6 +37,7 @@
*/
#define LOG_INODE_ALL 0
#define LOG_INODE_EXISTS 1
+#define LOG_OTHER_INODE 2
/*
* directory trouble cases
@@ -4641,7 +4642,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
if (S_ISDIR(inode->i_mode) ||
(!test_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags) &&
- inode_only == LOG_INODE_EXISTS))
+ inode_only >= LOG_INODE_EXISTS))
max_key.type = BTRFS_XATTR_ITEM_KEY;
else
max_key.type = (u8)-1;
@@ -4665,7 +4666,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
return ret;
}
- mutex_lock(&BTRFS_I(inode)->log_mutex);
+ if (inode_only == LOG_OTHER_INODE) {
+ inode_only = LOG_INODE_EXISTS;
+ mutex_lock_nested(&BTRFS_I(inode)->log_mutex,
+ SINGLE_DEPTH_NESTING);
+ } else {
+ mutex_lock(&BTRFS_I(inode)->log_mutex);
+ }
/*
* a brute force approach to making sure we get the most uptodate
@@ -4817,7 +4824,7 @@ again:
* unpin it.
*/
err = btrfs_log_inode(trans, root, other_inode,
- LOG_INODE_EXISTS,
+ LOG_OTHER_INODE,
0, LLONG_MAX, ctx);
iput(other_inode);
if (err)
diff --git a/fs/btrfs/uuid-tree.c b/fs/btrfs/uuid-tree.c
index 161342b..726f928 100644
--- a/fs/btrfs/uuid-tree.c
+++ b/fs/btrfs/uuid-tree.c
@@ -352,7 +352,5 @@ skip:
out:
btrfs_free_path(path);
- if (ret)
- btrfs_warn(fs_info, "btrfs_uuid_tree_iterate failed %d", ret);
- return 0;
+ return ret;
}
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 9cd0c0e..e4b066c 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -502,9 +502,9 @@ static struct ceph_snap_context *get_oldest_context(struct inode *inode,
dout(" head snapc %p has %d dirty pages\n",
snapc, ci->i_wrbuffer_ref_head);
if (truncate_size)
- *truncate_size = capsnap->truncate_size;
+ *truncate_size = ci->i_truncate_size;
if (truncate_seq)
- *truncate_seq = capsnap->truncate_seq;
+ *truncate_seq = ci->i_truncate_seq;
}
spin_unlock(&ci->i_ceph_lock);
return snapc;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index baea866..94fd76d 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2591,8 +2591,13 @@ int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
add_wait_queue(&ci->i_cap_wq, &wait);
while (!try_get_cap_refs(ci, need, want, endoff,
- true, &_got, &err))
+ true, &_got, &err)) {
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
+ }
remove_wait_queue(&ci->i_cap_wq, &wait);
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index d7a9369..8ab1fdf 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -1230,7 +1230,8 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
struct ceph_mds_client *mdsc =
ceph_sb_to_client(dir->i_sb)->mdsc;
struct ceph_mds_request *req;
- int op, mask, err;
+ int op, err;
+ u32 mask;
if (flags & LOOKUP_RCU)
return -ECHILD;
@@ -1245,7 +1246,7 @@ static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
mask = CEPH_STAT_CAP_INODE | CEPH_CAP_AUTH_SHARED;
if (ceph_security_xattr_wanted(dir))
mask |= CEPH_CAP_XATTR_SHARED;
- req->r_args.getattr.mask = mask;
+ req->r_args.getattr.mask = cpu_to_le32(mask);
err = ceph_mdsc_do_request(mdsc, NULL, req);
switch (err) {
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 398e532..5e659d0 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -305,7 +305,8 @@ static int frag_tree_split_cmp(const void *l, const void *r)
{
struct ceph_frag_tree_split *ls = (struct ceph_frag_tree_split*)l;
struct ceph_frag_tree_split *rs = (struct ceph_frag_tree_split*)r;
- return ceph_frag_compare(ls->frag, rs->frag);
+ return ceph_frag_compare(le32_to_cpu(ls->frag),
+ le32_to_cpu(rs->frag));
}
static bool is_frag_child(u32 f, struct ceph_inode_frag *frag)
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index 4f49253..c9d2e55 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -288,12 +288,13 @@ static int parse_reply_info_extra(void **p, void *end,
struct ceph_mds_reply_info_parsed *info,
u64 features)
{
- if (info->head->op == CEPH_MDS_OP_GETFILELOCK)
+ u32 op = le32_to_cpu(info->head->op);
+
+ if (op == CEPH_MDS_OP_GETFILELOCK)
return parse_reply_info_filelock(p, end, info, features);
- else if (info->head->op == CEPH_MDS_OP_READDIR ||
- info->head->op == CEPH_MDS_OP_LSSNAP)
+ else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
return parse_reply_info_dir(p, end, info, features);
- else if (info->head->op == CEPH_MDS_OP_CREATE)
+ else if (op == CEPH_MDS_OP_CREATE)
return parse_reply_info_create(p, end, info, features);
else
return -EIO;
@@ -2106,6 +2107,11 @@ static int __do_request(struct ceph_mds_client *mdsc,
dout("do_request mdsmap err %d\n", err);
goto finish;
}
+ if (mdsc->mdsmap->m_epoch == 0) {
+ dout("do_request no mdsmap, waiting for map\n");
+ list_add(&req->r_wait, &mdsc->waiting_for_map);
+ goto finish;
+ }
if (!(mdsc->fsc->mount_options->flags &
CEPH_MOUNT_OPT_MOUNTWAIT) &&
!ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
diff --git a/fs/coredump.c b/fs/coredump.c
index e525b60..ae6b056 100644
--- a/fs/coredump.c
+++ b/fs/coredump.c
@@ -833,3 +833,21 @@ int dump_align(struct coredump_params *cprm, int align)
return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);
+
+/*
+ * Ensures that the file size is big enough to contain the current file
+ * position. This prevents gdb from complaining about a truncated file
+ * if the last "write" to the file was dump_skip.
+ */
+void dump_truncate(struct coredump_params *cprm)
+{
+ struct file *file = cprm->file;
+ loff_t offset;
+
+ if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
+ offset = file->f_op->llseek(file, 0, SEEK_CUR);
+ if (i_size_read(file->f_mapping->host) < offset)
+ do_truncate(file->f_path.dentry, offset, 0, file);
+ }
+}
+EXPORT_SYMBOL(dump_truncate);
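dump_truncate() complements dump_skip(), which advances the core file with llseek instead of writing zeroes; if the dump ends on a skip, the on-disk size lags the logical position and gdb sees a truncated core. A sketch of the state it repairs (the offsets are illustrative):

	/* After a final dump_skip(cprm, 4096):
	 *   f_pos  == old_size + 4096   (logical position)
	 *   i_size == old_size          (nothing was written)
	 * dump_truncate() extends i_size to f_pos, so readers see a
	 * full, zero-filled core image. */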
diff --git a/fs/dax.c b/fs/dax.c
index 5c74f60..3af2da5 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -691,8 +691,8 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
pgoff_t index, unsigned long pfn)
{
struct vm_area_struct *vma;
- pte_t *ptep;
- pte_t pte;
+ pte_t pte, *ptep = NULL;
+ pmd_t *pmdp = NULL;
spinlock_t *ptl;
bool changed;
@@ -707,21 +707,42 @@ static void dax_mapping_entry_mkclean(struct address_space *mapping,
address = pgoff_address(index, vma);
changed = false;
- if (follow_pte(vma->vm_mm, address, &ptep, &ptl))
+ if (follow_pte_pmd(vma->vm_mm, address, &ptep, &pmdp, &ptl))
continue;
- if (pfn != pte_pfn(*ptep))
- goto unlock;
- if (!pte_dirty(*ptep) && !pte_write(*ptep))
- goto unlock;
- flush_cache_page(vma, address, pfn);
- pte = ptep_clear_flush(vma, address, ptep);
- pte = pte_wrprotect(pte);
- pte = pte_mkclean(pte);
- set_pte_at(vma->vm_mm, address, ptep, pte);
- changed = true;
-unlock:
- pte_unmap_unlock(ptep, ptl);
+ if (pmdp) {
+#ifdef CONFIG_FS_DAX_PMD
+ pmd_t pmd;
+
+ if (pfn != pmd_pfn(*pmdp))
+ goto unlock_pmd;
+ if (!pmd_dirty(*pmdp) && !pmd_write(*pmdp))
+ goto unlock_pmd;
+
+ flush_cache_page(vma, address, pfn);
+ pmd = pmdp_huge_clear_flush(vma, address, pmdp);
+ pmd = pmd_wrprotect(pmd);
+ pmd = pmd_mkclean(pmd);
+ set_pmd_at(vma->vm_mm, address, pmdp, pmd);
+ changed = true;
+unlock_pmd:
+ spin_unlock(ptl);
+#endif
+ } else {
+ if (pfn != pte_pfn(*ptep))
+ goto unlock_pte;
+ if (!pte_dirty(*ptep) && !pte_write(*ptep))
+ goto unlock_pte;
+
+ flush_cache_page(vma, address, pfn);
+ pte = ptep_clear_flush(vma, address, ptep);
+ pte = pte_wrprotect(pte);
+ pte = pte_mkclean(pte);
+ set_pte_at(vma->vm_mm, address, ptep, pte);
+ changed = true;
+unlock_pte:
+ pte_unmap_unlock(ptep, ptl);
+ }
if (changed)
mmu_notifier_invalidate_page(vma->vm_mm, address);
@@ -969,7 +990,6 @@ int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
}
EXPORT_SYMBOL_GPL(__dax_zero_page_range);
-#ifdef CONFIG_FS_IOMAP
static sector_t dax_iomap_sector(struct iomap *iomap, loff_t pos)
{
return iomap->blkno + (((pos & PAGE_MASK) - iomap->offset) >> 9);
@@ -1407,4 +1427,3 @@ int dax_iomap_pmd_fault(struct vm_area_struct *vma, unsigned long address,
}
EXPORT_SYMBOL_GPL(dax_iomap_pmd_fault);
#endif /* CONFIG_FS_DAX_PMD */
-#endif /* CONFIG_FS_IOMAP */
diff --git a/fs/dcache.c b/fs/dcache.c
index 769903d..95d71ed 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1336,8 +1336,11 @@ int d_set_mounted(struct dentry *dentry)
}
spin_lock(&dentry->d_lock);
if (!d_unlinked(dentry)) {
- dentry->d_flags |= DCACHE_MOUNTED;
- ret = 0;
+ ret = -EBUSY;
+ if (!d_mountpoint(dentry)) {
+ dentry->d_flags |= DCACHE_MOUNTED;
+ ret = 0;
+ }
}
spin_unlock(&dentry->d_lock);
out:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index aeae8c0..c87bae4 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -906,6 +906,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
struct buffer_head *map_bh)
{
const unsigned blkbits = sdio->blkbits;
+ const unsigned i_blkbits = blkbits + sdio->blkfactor;
int ret = 0;
while (sdio->block_in_file < sdio->final_block_in_request) {
@@ -949,7 +950,7 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
clean_bdev_aliases(
map_bh->b_bdev,
map_bh->b_blocknr,
- map_bh->b_size >> blkbits);
+ map_bh->b_size >> i_blkbits);
}
if (!sdio->blkfactor)
diff --git a/fs/ext2/Kconfig b/fs/ext2/Kconfig
index 36bea5a..c634874e 100644
--- a/fs/ext2/Kconfig
+++ b/fs/ext2/Kconfig
@@ -1,6 +1,5 @@
config EXT2_FS
tristate "Second extended fs support"
- select FS_IOMAP if FS_DAX
help
Ext2 is a standard Linux file system for hard disks.
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 7b90691..e38039f 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -37,7 +37,6 @@ config EXT4_FS
select CRC16
select CRYPTO
select CRYPTO_CRC32C
- select FS_IOMAP if FS_DAX
help
This is the next generation of the ext3 filesystem.
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 0738f48..0d88024 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -713,8 +713,8 @@ static int __f2fs_issue_discard_zone(struct f2fs_sb_info *sbi,
}
sector = SECTOR_FROM_BLOCK(blkstart);
- if (sector & (bdev_zone_size(bdev) - 1) ||
- nr_sects != bdev_zone_size(bdev)) {
+ if (sector & (bdev_zone_sectors(bdev) - 1) ||
+ nr_sects != bdev_zone_sectors(bdev)) {
f2fs_msg(sbi->sb, KERN_INFO,
"(%d) %s: Unaligned discard attempted (block %x + %x)",
devi, sbi->s_ndevs ? FDEV(devi).path: "",
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 702638e..46fd30d 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1553,16 +1553,16 @@ static int init_blkz_info(struct f2fs_sb_info *sbi, int devi)
return 0;
if (sbi->blocks_per_blkz && sbi->blocks_per_blkz !=
- SECTOR_TO_BLOCK(bdev_zone_size(bdev)))
+ SECTOR_TO_BLOCK(bdev_zone_sectors(bdev)))
return -EINVAL;
- sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_size(bdev));
+ sbi->blocks_per_blkz = SECTOR_TO_BLOCK(bdev_zone_sectors(bdev));
if (sbi->log_blocks_per_blkz && sbi->log_blocks_per_blkz !=
__ilog2_u32(sbi->blocks_per_blkz))
return -EINVAL;
sbi->log_blocks_per_blkz = __ilog2_u32(sbi->blocks_per_blkz);
FDEV(devi).nr_blkz = SECTOR_TO_BLOCK(nr_sectors) >>
sbi->log_blocks_per_blkz;
- if (nr_sectors & (bdev_zone_size(bdev) - 1))
+ if (nr_sectors & (bdev_zone_sectors(bdev) - 1))
FDEV(devi).nr_blkz++;
FDEV(devi).blkz_type = kmalloc(FDEV(devi).nr_blkz, GFP_KERNEL);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 70ea57c..4e06a27 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2025,7 +2025,6 @@ static void end_requests(struct fuse_conn *fc, struct list_head *head)
struct fuse_req *req;
req = list_entry(head->next, struct fuse_req, list);
req->out.h.error = -ECONNABORTED;
- clear_bit(FR_PENDING, &req->flags);
clear_bit(FR_SENT, &req->flags);
list_del_init(&req->list);
request_end(fc, req);
@@ -2103,6 +2102,8 @@ void fuse_abort_conn(struct fuse_conn *fc)
spin_lock(&fiq->waitq.lock);
fiq->connected = 0;
list_splice_init(&fiq->pending, &to_end2);
+ list_for_each_entry(req, &to_end2, list)
+ clear_bit(FR_PENDING, &req->flags);
while (forget_pending(fiq))
kfree(dequeue_forget(fiq, 1, NULL));
wake_up_all_locked(&fiq->waitq);
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 1f7c732..811fd89 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -68,7 +68,7 @@ static u64 time_to_jiffies(u64 sec, u32 nsec)
if (sec || nsec) {
struct timespec64 ts = {
sec,
- max_t(u32, nsec, NSEC_PER_SEC - 1)
+ min_t(u32, nsec, NSEC_PER_SEC - 1)
};
return get_jiffies_64() + timespec64_to_jiffies(&ts);
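The one-character fuse fix above is an inverted clamp: max_t(u32, nsec, NSEC_PER_SEC - 1) forces nsec up to at least 999999999, inflating every entry timeout, while the intended min_t caps it there. Concretely:

	/* nsec = 500:
	 *   max_t(u32, 500, NSEC_PER_SEC - 1) == 999999999  (wrong)
	 *   min_t(u32, 500, NSEC_PER_SEC - 1) == 500        (intended)
	 */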
diff --git a/fs/libfs.c b/fs/libfs.c
index e973cd5..28d6f35 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -245,7 +245,8 @@ struct dentry *mount_pseudo_xattr(struct file_system_type *fs_type, char *name,
struct inode *root;
struct qstr d_name = QSTR_INIT(name, strlen(name));
- s = sget(fs_type, NULL, set_anon_super, MS_NOUSER, NULL);
+ s = sget_userns(fs_type, NULL, set_anon_super, MS_KERNMOUNT|MS_NOUSER,
+ &init_user_ns, NULL);
if (IS_ERR(s))
return ERR_CAST(s);
diff --git a/fs/namespace.c b/fs/namespace.c
index b5b1259..487ba30 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -742,26 +742,50 @@ static struct mountpoint *lookup_mountpoint(struct dentry *dentry)
return NULL;
}
-static struct mountpoint *new_mountpoint(struct dentry *dentry)
+static struct mountpoint *get_mountpoint(struct dentry *dentry)
{
- struct hlist_head *chain = mp_hash(dentry);
- struct mountpoint *mp;
+ struct mountpoint *mp, *new = NULL;
int ret;
- mp = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
- if (!mp)
+ if (d_mountpoint(dentry)) {
+mountpoint:
+ read_seqlock_excl(&mount_lock);
+ mp = lookup_mountpoint(dentry);
+ read_sequnlock_excl(&mount_lock);
+ if (mp)
+ goto done;
+ }
+
+ if (!new)
+ new = kmalloc(sizeof(struct mountpoint), GFP_KERNEL);
+ if (!new)
return ERR_PTR(-ENOMEM);
+
+ /* Exactly one processes may set d_mounted */
ret = d_set_mounted(dentry);
- if (ret) {
- kfree(mp);
- return ERR_PTR(ret);
- }
- mp->m_dentry = dentry;
- mp->m_count = 1;
- hlist_add_head(&mp->m_hash, chain);
- INIT_HLIST_HEAD(&mp->m_list);
+ /* Someone else set d_mounted? */
+ if (ret == -EBUSY)
+ goto mountpoint;
+
+ /* The dentry is not available as a mountpoint? */
+ mp = ERR_PTR(ret);
+ if (ret)
+ goto done;
+
+ /* Add the new mountpoint to the hash table */
+ read_seqlock_excl(&mount_lock);
+ new->m_dentry = dentry;
+ new->m_count = 1;
+ hlist_add_head(&new->m_hash, mp_hash(dentry));
+ INIT_HLIST_HEAD(&new->m_list);
+ read_sequnlock_excl(&mount_lock);
+
+ mp = new;
+ new = NULL;
+done:
+ kfree(new);
return mp;
}
@@ -1595,11 +1619,11 @@ void __detach_mounts(struct dentry *dentry)
struct mount *mnt;
namespace_lock();
+ lock_mount_hash();
mp = lookup_mountpoint(dentry);
if (IS_ERR_OR_NULL(mp))
goto out_unlock;
- lock_mount_hash();
event++;
while (!hlist_empty(&mp->m_list)) {
mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
@@ -1609,9 +1633,9 @@ void __detach_mounts(struct dentry *dentry)
}
else umount_tree(mnt, UMOUNT_CONNECTED);
}
- unlock_mount_hash();
put_mountpoint(mp);
out_unlock:
+ unlock_mount_hash();
namespace_unlock();
}
@@ -2038,9 +2062,7 @@ retry:
namespace_lock();
mnt = lookup_mnt(path);
if (likely(!mnt)) {
- struct mountpoint *mp = lookup_mountpoint(dentry);
- if (!mp)
- mp = new_mountpoint(dentry);
+ struct mountpoint *mp = get_mountpoint(dentry);
if (IS_ERR(mp)) {
namespace_unlock();
inode_unlock(dentry->d_inode);
@@ -2059,7 +2081,11 @@ retry:
static void unlock_mount(struct mountpoint *where)
{
struct dentry *dentry = where->m_dentry;
+
+ read_seqlock_excl(&mount_lock);
put_mountpoint(where);
+ read_sequnlock_excl(&mount_lock);
+
namespace_unlock();
inode_unlock(dentry->d_inode);
}
@@ -3135,9 +3161,9 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
touch_mnt_namespace(current->nsproxy->mnt_ns);
/* A moved mount should not expire automatically */
list_del_init(&new_mnt->mnt_expire);
+ put_mountpoint(root_mp);
unlock_mount_hash();
chroot_fs_refs(&root, &new);
- put_mountpoint(root_mp);
error = 0;
out4:
unlock_mount(old_mp);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6dcbc5d..ecc1516 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -38,7 +38,6 @@
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/errno.h>
-#include <linux/file.h>
#include <linux/string.h>
#include <linux/ratelimit.h>
#include <linux/printk.h>
@@ -1083,7 +1082,8 @@ int nfs4_call_sync(struct rpc_clnt *clnt,
return nfs4_call_sync_sequence(clnt, server, msg, args, res);
}
-static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
+static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo,
+ unsigned long timestamp)
{
struct nfs_inode *nfsi = NFS_I(dir);
@@ -1099,6 +1099,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
NFS_INO_INVALID_ACL;
}
dir->i_version = cinfo->after;
+ nfsi->read_cache_jiffies = timestamp;
nfsi->attr_gencount = nfs_inc_attr_generation_counter();
nfs_fscache_invalidate(dir);
spin_unlock(&dir->i_lock);
@@ -2391,11 +2392,13 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
nfs_fattr_map_and_free_names(server, &data->f_attr);
if (o_arg->open_flags & O_CREAT) {
- update_changeattr(dir, &o_res->cinfo);
if (o_arg->open_flags & O_EXCL)
data->file_created = 1;
else if (o_res->cinfo.before != o_res->cinfo.after)
data->file_created = 1;
+ if (data->file_created || dir->i_version != o_res->cinfo.after)
+ update_changeattr(dir, &o_res->cinfo,
+ o_res->f_attr->time_start);
}
if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
server->caps &= ~NFS_CAP_POSIX_LOCK;
@@ -4073,11 +4076,12 @@ static int _nfs4_proc_remove(struct inode *dir, const struct qstr *name)
.rpc_argp = &args,
.rpc_resp = &res,
};
+ unsigned long timestamp = jiffies;
int status;
status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
if (status == 0)
- update_changeattr(dir, &res.cinfo);
+ update_changeattr(dir, &res.cinfo, timestamp);
return status;
}
@@ -4125,7 +4129,8 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
if (nfs4_async_handle_error(task, res->server, NULL,
&data->timeout) == -EAGAIN)
return 0;
- update_changeattr(dir, &res->cinfo);
+ if (task->tk_status == 0)
+ update_changeattr(dir, &res->cinfo, res->dir_attr->time_start);
return 1;
}
@@ -4159,8 +4164,11 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
if (nfs4_async_handle_error(task, res->server, NULL, &data->timeout) == -EAGAIN)
return 0;
- update_changeattr(old_dir, &res->old_cinfo);
- update_changeattr(new_dir, &res->new_cinfo);
+ if (task->tk_status == 0) {
+ update_changeattr(old_dir, &res->old_cinfo, res->old_fattr->time_start);
+ if (new_dir != old_dir)
+ update_changeattr(new_dir, &res->new_cinfo, res->new_fattr->time_start);
+ }
return 1;
}
@@ -4197,7 +4205,7 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, const struct
status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
if (!status) {
- update_changeattr(dir, &res.cinfo);
+ update_changeattr(dir, &res.cinfo, res.fattr->time_start);
status = nfs_post_op_update_inode(inode, res.fattr);
if (!status)
nfs_setsecurity(inode, res.fattr, res.label);
@@ -4272,7 +4280,8 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
int status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &data->msg,
&data->arg.seq_args, &data->res.seq_res, 1);
if (status == 0) {
- update_changeattr(dir, &data->res.dir_cinfo);
+ update_changeattr(dir, &data->res.dir_cinfo,
+ data->res.fattr->time_start);
status = nfs_instantiate(dentry, data->res.fh, data->res.fattr, data->res.label);
}
return status;
@@ -6127,7 +6136,6 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->server = server;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
- get_file(fl->fl_file);
memcpy(&p->fl, fl, sizeof(p->fl));
return p;
out_free_seqid:
@@ -6240,7 +6248,6 @@ static void nfs4_lock_release(void *calldata)
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
- fput(data->fl.fl_file);
kfree(data);
dprintk("%s: done!\n", __func__);
}
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 1d152f4..90e6193 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -1729,7 +1729,6 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
break;
case -NFS4ERR_STALE_CLIENTID:
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
- nfs4_state_clear_reclaim_reboot(clp);
nfs4_state_start_reclaim_reboot(clp);
break;
case -NFS4ERR_EXPIRED:
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 7ecf16b..8fae53c 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2440,7 +2440,9 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
p++; /* to be backfilled later */
if (bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
- u32 *supp = nfsd_suppattrs[minorversion];
+ u32 supp[3];
+
+ memcpy(supp, nfsd_suppattrs[minorversion], sizeof(supp));
if (!IS_POSIXACL(dentry->d_inode))
supp[0] &= ~FATTR4_WORD0_ACL;
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 83d576f..77d1632 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -3303,6 +3303,16 @@ static int ocfs2_downconvert_lock(struct ocfs2_super *osb,
mlog(ML_BASTS, "lockres %s, level %d => %d\n", lockres->l_name,
lockres->l_level, new_level);
+ /*
+ * With DLM_LKF_VALBLK, fsdlm behaves differently from o2cb: it always
+ * expects DLM_LKF_VALBLK to be set if the LKB has an LVB, so that
+ * we can recover correctly from node failure. Otherwise, we may get
+ * an invalid LVB in the LKB without DLM_SBF_VALNOTVALID being set.
+ */
+ if (!ocfs2_is_o2cb_active() &&
+ lockres->l_ops->flags & LOCK_TYPE_USES_LVB)
+ lvb = 1;
+
if (lvb)
dlm_flags |= DLM_LKF_VALBLK;
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 52c07346b..8203590 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -48,6 +48,12 @@ static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
*/
static struct ocfs2_stack_plugin *active_stack;
+inline int ocfs2_is_o2cb_active(void)
+{
+ return !strcmp(active_stack->sp_name, OCFS2_STACK_PLUGIN_O2CB);
+}
+EXPORT_SYMBOL_GPL(ocfs2_is_o2cb_active);
+
static struct ocfs2_stack_plugin *ocfs2_stack_lookup(const char *name)
{
struct ocfs2_stack_plugin *p;
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index f2dce10..e3036e1 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -298,6 +298,9 @@ void ocfs2_stack_glue_set_max_proto_version(struct ocfs2_protocol_version *max_p
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+/* In ocfs2_downconvert_lock(), we need to know which stack we are using */
+int ocfs2_is_o2cb_active(void);
+
extern struct kset *ocfs2_kset;
#endif /* STACKGLUE_H */
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index 9ad48d9..023bb0b 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -154,29 +154,38 @@ out_err:
static int ovl_lookup_layer(struct dentry *base, struct ovl_lookup_data *d,
struct dentry **ret)
{
- const char *s = d->name.name;
+ /* Counting down from the end, since the prefix can change */
+ size_t rem = d->name.len - 1;
struct dentry *dentry = NULL;
int err;
- if (*s != '/')
+ if (d->name.name[0] != '/')
return ovl_lookup_single(base, d, d->name.name, d->name.len,
0, "", ret);
- while (*s++ == '/' && !IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+ while (!IS_ERR_OR_NULL(base) && d_can_lookup(base)) {
+ const char *s = d->name.name + d->name.len - rem;
const char *next = strchrnul(s, '/');
- size_t slen = strlen(s);
+ size_t thislen = next - s;
+ bool end = !next[0];
- if (WARN_ON(slen > d->name.len) ||
- WARN_ON(strcmp(d->name.name + d->name.len - slen, s)))
+ /* Verify we did not go off the rails */
+ if (WARN_ON(s[-1] != '/'))
return -EIO;
- err = ovl_lookup_single(base, d, s, next - s,
- d->name.len - slen, next, &base);
+ err = ovl_lookup_single(base, d, s, thislen,
+ d->name.len - rem, next, &base);
dput(dentry);
if (err)
return err;
dentry = base;
- s = next;
+ if (end)
+ break;
+
+ rem -= thislen + 1;
+
+ if (WARN_ON(rem >= d->name.len))
+ return -EIO;
}
*ret = dentry;
return 0;
diff --git a/fs/posix_acl.c b/fs/posix_acl.c
index 5955220..c9d48dc 100644
--- a/fs/posix_acl.c
+++ b/fs/posix_acl.c
@@ -922,11 +922,10 @@ int simple_set_acl(struct inode *inode, struct posix_acl *acl, int type)
int error;
if (type == ACL_TYPE_ACCESS) {
- error = posix_acl_equiv_mode(acl, &inode->i_mode);
- if (error < 0)
- return 0;
- if (error == 0)
- acl = NULL;
+ error = posix_acl_update_mode(inode,
+ &inode->i_mode, &acl);
+ if (error)
+ return error;
}
inode->i_ctime = current_time(inode);
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 8e7e61b..87c9a9a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3179,6 +3179,8 @@ int proc_pid_readdir(struct file *file, struct dir_context *ctx)
iter.tgid += 1, iter = next_tgid(ns, iter)) {
char name[PROC_NUMBUF];
int len;
+
+ cond_resched();
if (!has_pid_permissions(ns, iter.task, 2))
continue;
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 55313d9..d4e37ac 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -709,7 +709,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
ctl_dir = container_of(head, struct ctl_dir, header);
if (!dir_emit_dots(file, ctx))
- return 0;
+ goto out;
pos = 2;
@@ -719,6 +719,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
break;
}
}
+out:
sysctl_head_finish(head);
return 0;
}
diff --git a/fs/romfs/super.c b/fs/romfs/super.c
index d0f8a38..0186fe6 100644
--- a/fs/romfs/super.c
+++ b/fs/romfs/super.c
@@ -74,6 +74,7 @@
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>
+#include <linux/major.h>
#include "internal.h"
static struct kmem_cache *romfs_inode_cachep;
@@ -416,7 +417,22 @@ static void romfs_destroy_inode(struct inode *inode)
static int romfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
- u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
+ u64 id = 0;
+
+ /*
+ * Choose the argument passed to huge_encode_dev():
+ * - sb->s_bdev->bd_dev if CONFIG_ROMFS_ON_BLOCK is defined
+ * - sb->s_dev if only CONFIG_ROMFS_ON_MTD is defined
+ * - leave id as 0 if neither is defined
+ */
+ if (sb->s_bdev)
+ id = huge_encode_dev(sb->s_bdev->bd_dev);
+ else if (sb->s_dev)
+ id = huge_encode_dev(sb->s_dev);
buf->f_type = ROMFS_MAGIC;
buf->f_namelen = ROMFS_MAXFN;
@@ -489,6 +505,11 @@ static int romfs_fill_super(struct super_block *sb, void *data, int silent)
sb->s_flags |= MS_RDONLY | MS_NOATIME;
sb->s_op = &romfs_super_ops;
+#ifdef CONFIG_ROMFS_ON_MTD
+ /* Use the same dev ID as the underlying mtdblock device */
+ if (sb->s_mtd)
+ sb->s_dev = MKDEV(MTD_BLOCK_MAJOR, sb->s_mtd->index);
+#endif
/* read the image superblock and check it */
rsb = kmalloc(512, GFP_KERNEL);
if (!rsb)
diff --git a/fs/ubifs/Kconfig b/fs/ubifs/Kconfig
index 0a908ae..b0d0623 100644
--- a/fs/ubifs/Kconfig
+++ b/fs/ubifs/Kconfig
@@ -53,7 +53,7 @@ config UBIFS_ATIME_SUPPORT
config UBIFS_FS_ENCRYPTION
bool "UBIFS Encryption"
- depends on UBIFS_FS
+ depends on UBIFS_FS && BLOCK
select FS_ENCRYPTION
default n
help
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 1c5331a..528369f 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -390,16 +390,6 @@ static int do_tmpfile(struct inode *dir, struct dentry *dentry,
dbg_gen("dent '%pd', mode %#hx in dir ino %lu",
dentry, mode, dir->i_ino);
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- return err;
-
- if (!fscrypt_has_encryption_key(dir)) {
- return -EPERM;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
return err;
@@ -741,17 +731,9 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
ubifs_assert(inode_is_locked(dir));
ubifs_assert(inode_is_locked(inode));
- if (ubifs_crypt_is_encrypted(dir)) {
- if (!fscrypt_has_permitted_context(dir, inode))
- return -EPERM;
-
- err = fscrypt_get_encryption_info(inode);
- if (err)
- return err;
-
- if (!fscrypt_has_encryption_key(inode))
- return -EPERM;
- }
+ if (ubifs_crypt_is_encrypted(dir) &&
+ !fscrypt_has_permitted_context(dir, inode))
+ return -EPERM;
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
@@ -1000,17 +982,6 @@ static int ubifs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
if (err)
return err;
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- goto out_budg;
-
- if (!fscrypt_has_encryption_key(dir)) {
- err = -EPERM;
- goto out_budg;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
goto out_budg;
@@ -1096,17 +1067,6 @@ static int ubifs_mknod(struct inode *dir, struct dentry *dentry,
return err;
}
- if (ubifs_crypt_is_encrypted(dir)) {
- err = fscrypt_get_encryption_info(dir);
- if (err)
- goto out_budg;
-
- if (!fscrypt_has_encryption_key(dir)) {
- err = -EPERM;
- goto out_budg;
- }
- }
-
err = fscrypt_setup_filename(dir, &dentry->d_name, 0, &nm);
if (err)
goto out_budg;
@@ -1231,18 +1191,6 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
goto out_inode;
}
- err = fscrypt_get_encryption_info(inode);
- if (err) {
- kfree(sd);
- goto out_inode;
- }
-
- if (!fscrypt_has_encryption_key(inode)) {
- kfree(sd);
- err = -EPERM;
- goto out_inode;
- }
-
ostr.name = sd->encrypted_path;
ostr.len = disk_link.len;
diff --git a/fs/ubifs/ioctl.c b/fs/ubifs/ioctl.c
index 78d7136..da519ba 100644
--- a/fs/ubifs/ioctl.c
+++ b/fs/ubifs/ioctl.c
@@ -217,6 +217,9 @@ long ubifs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case FS_IOC32_SETFLAGS:
cmd = FS_IOC_SETFLAGS;
break;
+ case FS_IOC_SET_ENCRYPTION_POLICY:
+ case FS_IOC_GET_ENCRYPTION_POLICY:
+ break;
default:
return -ENOIOCTLCMD;
}
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index a459211..294519b 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -744,6 +744,7 @@ int ubifs_jnl_write_data(struct ubifs_info *c, const struct inode *inode,
} else {
data->compr_size = 0;
+ out_len = compr_len;
}
dlen = UBIFS_DATA_NODE_SZ + out_len;
@@ -1319,6 +1320,7 @@ static int truncate_data_node(const struct ubifs_info *c, const struct inode *in
dn->compr_type = cpu_to_le16(compr_type);
dn->size = cpu_to_le32(*new_len);
*new_len = UBIFS_DATA_NODE_SZ + out_len;
+ err = 0;
out:
kfree(buf);
return err;
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index 74ae2de..709aa09 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -34,6 +34,11 @@
#include <linux/slab.h>
#include "ubifs.h"
+static int try_read_node(const struct ubifs_info *c, void *buf, int type,
+ int len, int lnum, int offs);
+static int fallible_read_node(struct ubifs_info *c, const union ubifs_key *key,
+ struct ubifs_zbranch *zbr, void *node);
+
/*
* Returned codes of 'matches_name()' and 'fallible_matches_name()' functions.
* @NAME_LESS: name corresponding to the first argument is less than second
@@ -402,7 +407,19 @@ static int tnc_read_hashed_node(struct ubifs_info *c, struct ubifs_zbranch *zbr,
return 0;
}
- err = ubifs_tnc_read_node(c, zbr, node);
+ if (c->replaying) {
+ err = fallible_read_node(c, &zbr->key, zbr, node);
+ /*
+ * fallible_read_node() returns 0 (not found) or 1 (found);
+ * map these to -ENOENT and 0. Negative errors stay as-is.
+ */
+ if (err == 0)
+ err = -ENOENT;
+ else if (err == 1)
+ err = 0;
+ } else {
+ err = ubifs_tnc_read_node(c, zbr, node);
+ }
if (err)
return err;
@@ -2857,7 +2874,11 @@ struct ubifs_dent_node *ubifs_tnc_next_ent(struct ubifs_info *c,
if (fname_len(nm) > 0) {
if (err) {
/* Handle collisions */
- err = resolve_collision(c, key, &znode, &n, nm);
+ if (c->replaying)
+ err = fallible_resolve_collision(c, key, &znode, &n,
+ nm, 0);
+ else
+ err = resolve_collision(c, key, &znode, &n, nm);
dbg_tnc("rc returned %d, znode %p, n %d",
err, znode, n);
if (unlikely(err < 0))
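
During journal replay the TNC can legitimately point at nodes that no longer exist on flash, so lookups must treat a missing node as -ENOENT rather than as corruption. The fallible read returns a tri-state result that the hunk above folds into the usual kernel convention; a condensed sketch of that mapping (normalize_found() is an illustrative name, not from the patch):

static int normalize_found(int err)
{
	if (err == 0)
		return -ENOENT;	/* node not found during replay */
	if (err == 1)
		return 0;	/* node found and read successfully */
	return err;		/* genuine I/O or parsing error */
}
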
diff --git a/fs/userfaultfd.c b/fs/userfaultfd.c
index d96e2f3..43953e0 100644
--- a/fs/userfaultfd.c
+++ b/fs/userfaultfd.c
@@ -63,6 +63,7 @@ struct userfaultfd_wait_queue {
struct uffd_msg msg;
wait_queue_t wq;
struct userfaultfd_ctx *ctx;
+ bool waken;
};
struct userfaultfd_wake_range {
@@ -86,6 +87,12 @@ static int userfaultfd_wake_function(wait_queue_t *wq, unsigned mode,
if (len && (start > uwq->msg.arg.pagefault.address ||
start + len <= uwq->msg.arg.pagefault.address))
goto out;
+ WRITE_ONCE(uwq->waken, true);
+ /*
+ * The implicit smp_mb__before_spinlock in try_to_wake_up()
+ * renders uwq->waken visible to other CPUs before the task is
+ * woken.
+ */
ret = wake_up_state(wq->private, mode);
if (ret)
/*
@@ -264,6 +271,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
struct userfaultfd_wait_queue uwq;
int ret;
bool must_wait, return_to_userland;
+ long blocking_state;
BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
@@ -334,10 +342,13 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
uwq.wq.private = current;
uwq.msg = userfault_msg(vmf->address, vmf->flags, reason);
uwq.ctx = ctx;
+ uwq.waken = false;
return_to_userland =
(vmf->flags & (FAULT_FLAG_USER|FAULT_FLAG_KILLABLE)) ==
(FAULT_FLAG_USER|FAULT_FLAG_KILLABLE);
+ blocking_state = return_to_userland ? TASK_INTERRUPTIBLE :
+ TASK_KILLABLE;
spin_lock(&ctx->fault_pending_wqh.lock);
/*
@@ -350,8 +361,7 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
* following the spin_unlock to happen before the list_add in
* __add_wait_queue.
*/
- set_current_state(return_to_userland ? TASK_INTERRUPTIBLE :
- TASK_KILLABLE);
+ set_current_state(blocking_state);
spin_unlock(&ctx->fault_pending_wqh.lock);
must_wait = userfaultfd_must_wait(ctx, vmf->address, vmf->flags,
@@ -364,6 +374,29 @@ int handle_userfault(struct vm_fault *vmf, unsigned long reason)
wake_up_poll(&ctx->fd_wqh, POLLIN);
schedule();
ret |= VM_FAULT_MAJOR;
+
+ /*
+ * False wakeups can originate even from rwsem before
+ * up_read(); however, userfaults will wait either for a
+ * targeted wakeup on the specific uwq waitqueue from
+ * wake_userfault() or for signals or for uffd
+ * release.
+ */
+ while (!READ_ONCE(uwq.waken)) {
+ /*
+ * This needs the full smp_store_mb()
+ * guarantee as the state write must be
+ * visible to other CPUs before reading
+ * uwq.waken from other CPUs.
+ */
+ set_current_state(blocking_state);
+ if (READ_ONCE(uwq.waken) ||
+ READ_ONCE(ctx->released) ||
+ (return_to_userland ? signal_pending(current) :
+ fatal_signal_pending(current)))
+ break;
+ schedule();
+ }
}
__set_current_state(TASK_RUNNING);
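
The loop added above is the standard kernel idiom for waiting on a flag while tolerating spurious wakeups (here they can leak out of the mmap_sem rwsem): set the task state, re-check every exit condition, and only then schedule. A condensed sketch of the bare idiom, assuming a single flag and signal check where the patch also tests ctx->released (wait_on_flag() is an illustrative name):

static void wait_on_flag(bool *flag, long state)
{
	while (!READ_ONCE(*flag)) {
		/*
		 * smp_store_mb() semantics: the state write becomes
		 * visible to other CPUs before *flag is re-read.
		 */
		set_current_state(state);
		if (READ_ONCE(*flag) || signal_pending(current))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}
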
diff --git a/fs/xfs/libxfs/xfs_ag_resv.c b/fs/xfs/libxfs/xfs_ag_resv.c
index d346d42..33db69b 100644
--- a/fs/xfs/libxfs/xfs_ag_resv.c
+++ b/fs/xfs/libxfs/xfs_ag_resv.c
@@ -39,6 +39,7 @@
#include "xfs_rmap_btree.h"
#include "xfs_btree.h"
#include "xfs_refcount_btree.h"
+#include "xfs_ialloc_btree.h"
/*
* Per-AG Block Reservations
@@ -200,22 +201,30 @@ __xfs_ag_resv_init(
struct xfs_mount *mp = pag->pag_mount;
struct xfs_ag_resv *resv;
int error;
+ xfs_extlen_t reserved;
- resv = xfs_perag_resv(pag, type);
if (used > ask)
ask = used;
- resv->ar_asked = ask;
- resv->ar_reserved = resv->ar_orig_reserved = ask - used;
- mp->m_ag_max_usable -= ask;
+ reserved = ask - used;
- trace_xfs_ag_resv_init(pag, type, ask);
-
- error = xfs_mod_fdblocks(mp, -(int64_t)resv->ar_reserved, true);
- if (error)
+ error = xfs_mod_fdblocks(mp, -(int64_t)reserved, true);
+ if (error) {
trace_xfs_ag_resv_init_error(pag->pag_mount, pag->pag_agno,
error, _RET_IP_);
+ xfs_warn(mp,
+"Per-AG reservation for AG %u failed. Filesystem may run out of space.",
+ pag->pag_agno);
+ return error;
+ }
- return error;
+ mp->m_ag_max_usable -= ask;
+
+ resv = xfs_perag_resv(pag, type);
+ resv->ar_asked = ask;
+ resv->ar_reserved = resv->ar_orig_reserved = reserved;
+
+ trace_xfs_ag_resv_init(pag, type, ask);
+ return 0;
}
/* Create a per-AG block reservation. */
@@ -223,6 +232,8 @@ int
xfs_ag_resv_init(
struct xfs_perag *pag)
{
+ struct xfs_mount *mp = pag->pag_mount;
+ xfs_agnumber_t agno = pag->pag_agno;
xfs_extlen_t ask;
xfs_extlen_t used;
int error = 0;
@@ -231,23 +242,45 @@ xfs_ag_resv_init(
if (pag->pag_meta_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_refcountbt_calc_reserves(pag->pag_mount,
- pag->pag_agno, &ask, &used);
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
- error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
- ask, used);
+ error = xfs_finobt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error) {
+ /*
+ * Because we didn't have per-AG reservations when the
+ * finobt feature was added, we might not be able to
+ * reserve all needed blocks. Warn and fall back to the
+ * old and potentially buggy code in that case, but
+ * ensure we do have the reservation for the refcountbt.
+ */
+ ask = used = 0;
+
+ mp->m_inotbt_nores = true;
+
+ error = xfs_refcountbt_calc_reserves(mp, agno, &ask,
+ &used);
+ if (error)
+ goto out;
+
+ error = __xfs_ag_resv_init(pag, XFS_AG_RESV_METADATA,
+ ask, used);
+ if (error)
+ goto out;
+ }
}
/* Create the AGFL metadata reservation */
if (pag->pag_agfl_resv.ar_asked == 0) {
ask = used = 0;
- error = xfs_rmapbt_calc_reserves(pag->pag_mount, pag->pag_agno,
- &ask, &used);
+ error = xfs_rmapbt_calc_reserves(mp, agno, &ask, &used);
if (error)
goto out;
@@ -256,9 +289,16 @@ xfs_ag_resv_init(
goto out;
}
+#ifdef DEBUG
+ /* need to read in the AGF for the ASSERT below to work */
+ error = xfs_alloc_pagf_init(pag->pag_mount, NULL, pag->pag_agno, 0);
+ if (error)
+ return error;
+
ASSERT(xfs_perag_resv(pag, XFS_AG_RESV_METADATA)->ar_reserved +
xfs_perag_resv(pag, XFS_AG_RESV_AGFL)->ar_reserved <=
pag->pagf_freeblks + pag->pagf_flcount);
+#endif
out:
return error;
}
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index 5050056..9f06a21 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -95,10 +95,7 @@ unsigned int
xfs_alloc_set_aside(
struct xfs_mount *mp)
{
- unsigned int blocks;
-
- blocks = 4 + (mp->m_sb.sb_agcount * XFS_ALLOC_AGFL_RESERVE);
- return blocks;
+ return mp->m_sb.sb_agcount * (XFS_ALLOC_AGFL_RESERVE + 4);
}
/*
@@ -365,36 +362,12 @@ xfs_alloc_fix_len(
return;
ASSERT(rlen >= args->minlen && rlen <= args->maxlen);
ASSERT(rlen % args->prod == args->mod);
+ ASSERT(args->pag->pagf_freeblks + args->pag->pagf_flcount >=
+ rlen + args->minleft);
args->len = rlen;
}
/*
- * Fix up length if there is too little space left in the a.g.
- * Return 1 if ok, 0 if too little, should give up.
- */
-STATIC int
-xfs_alloc_fix_minleft(
- xfs_alloc_arg_t *args) /* allocation argument structure */
-{
- xfs_agf_t *agf; /* a.g. freelist header */
- int diff; /* free space difference */
-
- if (args->minleft == 0)
- return 1;
- agf = XFS_BUF_TO_AGF(args->agbp);
- diff = be32_to_cpu(agf->agf_freeblks)
- - args->len - args->minleft;
- if (diff >= 0)
- return 1;
- args->len += diff; /* shrink the allocated space */
- /* casts to (int) catch length underflows */
- if ((int)args->len >= (int)args->minlen)
- return 1;
- args->agbno = NULLAGBLOCK;
- return 0;
-}
-
-/*
* Update the two btrees, logically removing from freespace the extent
* starting at rbno, rlen blocks. The extent is contained within the
* actual (current) free extent fbno for flen blocks.
@@ -689,8 +662,6 @@ xfs_alloc_ag_vextent(
xfs_alloc_arg_t *args) /* argument structure for allocation */
{
int error=0;
- xfs_extlen_t reservation;
- xfs_extlen_t oldmax;
ASSERT(args->minlen > 0);
ASSERT(args->maxlen > 0);
@@ -699,20 +670,6 @@ xfs_alloc_ag_vextent(
ASSERT(args->alignment > 0);
/*
- * Clamp maxlen to the amount of free space minus any reservations
- * that have been made.
- */
- oldmax = args->maxlen;
- reservation = xfs_ag_resv_needed(args->pag, args->resv);
- if (args->maxlen > args->pag->pagf_freeblks - reservation)
- args->maxlen = args->pag->pagf_freeblks - reservation;
- if (args->maxlen == 0) {
- args->agbno = NULLAGBLOCK;
- args->maxlen = oldmax;
- return 0;
- }
-
- /*
* Branch to correct routine based on the type.
*/
args->wasfromfl = 0;
@@ -731,8 +688,6 @@ xfs_alloc_ag_vextent(
/* NOTREACHED */
}
- args->maxlen = oldmax;
-
if (error || args->agbno == NULLAGBLOCK)
return error;
@@ -841,9 +796,6 @@ xfs_alloc_ag_vextent_exact(
args->len = XFS_AGBLOCK_MIN(tend, args->agbno + args->maxlen)
- args->agbno;
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args))
- goto not_found;
-
ASSERT(args->agbno + args->len <= tend);
/*
@@ -1149,12 +1101,7 @@ restart:
XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
ASSERT(ltbno + ltlen <= be32_to_cpu(XFS_BUF_TO_AGF(args->agbp)->agf_length));
args->len = blen;
- if (!xfs_alloc_fix_minleft(args)) {
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- trace_xfs_alloc_near_nominleft(args);
- return 0;
- }
- blen = args->len;
+
/*
* We are allocating starting at bnew for blen blocks.
*/
@@ -1346,12 +1293,6 @@ restart:
*/
args->len = XFS_EXTLEN_MIN(ltlena, args->maxlen);
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args)) {
- trace_xfs_alloc_near_nominleft(args);
- xfs_btree_del_cursor(bno_cur_lt, XFS_BTREE_NOERROR);
- xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
- return 0;
- }
rlen = args->len;
(void)xfs_alloc_compute_diff(args->agbno, rlen, args->alignment,
args->datatype, ltbnoa, ltlena, &ltnew);
@@ -1553,8 +1494,6 @@ restart:
}
xfs_alloc_fix_len(args);
- if (!xfs_alloc_fix_minleft(args))
- goto out_nominleft;
rlen = args->len;
XFS_WANT_CORRUPTED_GOTO(args->mp, rlen <= flen, error0);
/*
@@ -2056,7 +1995,7 @@ xfs_alloc_space_available(
int flags)
{
struct xfs_perag *pag = args->pag;
- xfs_extlen_t longest;
+ xfs_extlen_t alloc_len, longest;
xfs_extlen_t reservation; /* blocks that are still reserved */
int available;
@@ -2066,17 +2005,28 @@ xfs_alloc_space_available(
reservation = xfs_ag_resv_needed(pag, args->resv);
/* do we have enough contiguous free space for the allocation? */
+ alloc_len = args->minlen + (args->alignment - 1) + args->minalignslop;
longest = xfs_alloc_longest_free_extent(args->mp, pag, min_free,
reservation);
- if ((args->minlen + args->alignment + args->minalignslop - 1) > longest)
+ if (longest < alloc_len)
return false;
/* do we have enough free space remaining for the allocation? */
available = (int)(pag->pagf_freeblks + pag->pagf_flcount -
- reservation - min_free - args->total);
- if (available < (int)args->minleft || available <= 0)
+ reservation - min_free - args->minleft);
+ if (available < (int)max(args->total, alloc_len))
return false;
+ /*
+ * Clamp maxlen to the amount of free space available for the actual
+ * extent allocation.
+ */
+ if (available < (int)args->maxlen && !(flags & XFS_ALLOC_FLAG_CHECK)) {
+ args->maxlen = available;
+ ASSERT(args->maxlen > 0);
+ ASSERT(args->maxlen >= args->minlen);
+ }
+
return true;
}
@@ -2122,7 +2072,8 @@ xfs_alloc_fix_freelist(
}
need = xfs_alloc_min_freelist(mp, pag);
- if (!xfs_alloc_space_available(args, need, flags))
+ if (!xfs_alloc_space_available(args, need, flags |
+ XFS_ALLOC_FLAG_CHECK))
goto out_agbp_relse;
/*
@@ -2638,12 +2589,10 @@ xfs_alloc_vextent(
xfs_agblock_t agsize; /* allocation group size */
int error;
int flags; /* XFS_ALLOC_FLAG_... locking flags */
- xfs_extlen_t minleft;/* minimum left value, temp copy */
xfs_mount_t *mp; /* mount structure pointer */
xfs_agnumber_t sagno; /* starting allocation group number */
xfs_alloctype_t type; /* input allocation type */
int bump_rotor = 0;
- int no_min = 0;
xfs_agnumber_t rotorstep = xfs_rotorstep; /* inode32 agf stepper */
mp = args->mp;
@@ -2672,7 +2621,6 @@ xfs_alloc_vextent(
trace_xfs_alloc_vextent_badargs(args);
return 0;
}
- minleft = args->minleft;
switch (type) {
case XFS_ALLOCTYPE_THIS_AG:
@@ -2683,9 +2631,7 @@ xfs_alloc_vextent(
*/
args->agno = XFS_FSB_TO_AGNO(mp, args->fsbno);
args->pag = xfs_perag_get(mp, args->agno);
- args->minleft = 0;
error = xfs_alloc_fix_freelist(args, 0);
- args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
@@ -2750,9 +2696,7 @@ xfs_alloc_vextent(
*/
for (;;) {
args->pag = xfs_perag_get(mp, args->agno);
- if (no_min) args->minleft = 0;
error = xfs_alloc_fix_freelist(args, flags);
- args->minleft = minleft;
if (error) {
trace_xfs_alloc_vextent_nofix(args);
goto error0;
@@ -2792,20 +2736,17 @@ xfs_alloc_vextent(
* or switch to non-trylock mode.
*/
if (args->agno == sagno) {
- if (no_min == 1) {
+ if (flags == 0) {
args->agbno = NULLAGBLOCK;
trace_xfs_alloc_vextent_allfailed(args);
break;
}
- if (flags == 0) {
- no_min = 1;
- } else {
- flags = 0;
- if (type == XFS_ALLOCTYPE_START_BNO) {
- args->agbno = XFS_FSB_TO_AGBNO(mp,
- args->fsbno);
- args->type = XFS_ALLOCTYPE_NEAR_BNO;
- }
+
+ flags = 0;
+ if (type == XFS_ALLOCTYPE_START_BNO) {
+ args->agbno = XFS_FSB_TO_AGBNO(mp,
+ args->fsbno);
+ args->type = XFS_ALLOCTYPE_NEAR_BNO;
}
}
xfs_perag_put(args->pag);
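
A worked example of the tightened check in xfs_alloc_space_available() (numbers illustrative): with minlen = 8, alignment = 4 and minalignslop = 3, alloc_len = 8 + (4 - 1) + 3 = 14, so the AG must hold a contiguous free extent of at least 14 blocks. Independently, pagf_freeblks + pagf_flcount minus reservation, min_free and minleft must be at least max(total, alloc_len); any surplus smaller than maxlen clamps maxlen down, except for XFS_ALLOC_FLAG_CHECK callers, which must not modify the args.
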
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index 7c404a6..1d0f48a 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -56,7 +56,7 @@ typedef unsigned int xfs_alloctype_t;
#define XFS_ALLOC_FLAG_FREEING 0x00000002 /* indicate caller is freeing extents*/
#define XFS_ALLOC_FLAG_NORMAP 0x00000004 /* don't modify the rmapbt */
#define XFS_ALLOC_FLAG_NOSHRINK 0x00000008 /* don't shrink the freelist */
-
+#define XFS_ALLOC_FLAG_CHECK 0x00000010 /* test only, don't modify args */
/*
* Argument structure for xfs_alloc routines.
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index af1ecb1..6622d46 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -131,9 +131,6 @@ xfs_attr_get(
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return -EIO;
- if (!xfs_inode_hasattr(ip))
- return -ENOATTR;
-
error = xfs_attr_args_init(&args, ip, name, flags);
if (error)
return error;
@@ -392,9 +389,6 @@ xfs_attr_remove(
if (XFS_FORCED_SHUTDOWN(dp->i_mount))
return -EIO;
- if (!xfs_inode_hasattr(dp))
- return -ENOATTR;
-
error = xfs_attr_args_init(&args, dp, name, flags);
if (error)
return error;
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 2760bc3..bfc00de 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -3629,7 +3629,7 @@ xfs_bmap_btalloc(
align = xfs_get_cowextsz_hint(ap->ip);
else if (xfs_alloc_is_userdata(ap->datatype))
align = xfs_get_extsz_hint(ap->ip);
- if (unlikely(align)) {
+ if (align) {
error = xfs_bmap_extsize_align(mp, &ap->got, &ap->prev,
align, 0, ap->eof, 0, ap->conv,
&ap->offset, &ap->length);
@@ -3701,7 +3701,7 @@ xfs_bmap_btalloc(
args.minlen = ap->minlen;
}
/* apply extent size hints if obtained earlier */
- if (unlikely(align)) {
+ if (align) {
args.prod = align;
if ((args.mod = (xfs_extlen_t)do_mod(ap->offset, args.prod)))
args.mod = (xfs_extlen_t)(args.prod - args.mod);
@@ -3812,7 +3812,6 @@ xfs_bmap_btalloc(
args.fsbno = 0;
args.type = XFS_ALLOCTYPE_FIRST_AG;
args.total = ap->minlen;
- args.minleft = 0;
if ((error = xfs_alloc_vextent(&args)))
return error;
ap->dfops->dop_low = true;
@@ -4344,8 +4343,6 @@ xfs_bmapi_allocate(
if (error)
return error;
- if (bma->dfops->dop_low)
- bma->minleft = 0;
if (bma->cur)
bma->cur->bc_private.b.firstblock = *bma->firstblock;
if (bma->blkno == NULLFSBLOCK)
@@ -4517,8 +4514,6 @@ xfs_bmapi_write(
int n; /* current extent index */
xfs_fileoff_t obno; /* old block number (offset) */
int whichfork; /* data or attr fork */
- char inhole; /* current location is hole in file */
- char wasdelay; /* old extent was delayed */
#ifdef DEBUG
xfs_fileoff_t orig_bno; /* original block number value */
@@ -4606,22 +4601,44 @@ xfs_bmapi_write(
bma.firstblock = firstblock;
while (bno < end && n < *nmap) {
- inhole = eof || bma.got.br_startoff > bno;
- wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
+ bool need_alloc = false, wasdelay = false;
- /*
- * Make sure we only reflink into a hole.
- */
- if (flags & XFS_BMAPI_REMAP)
- ASSERT(inhole);
- if (flags & XFS_BMAPI_COWFORK)
- ASSERT(!inhole);
+ /* in hole or beyond EOF? */
+ if (eof || bma.got.br_startoff > bno) {
+ if (flags & XFS_BMAPI_DELALLOC) {
+ /*
+ * For the COW fork we can reasonably get a
+ * request for converting an extent that races
+ * with other threads already having converted
+ * part of it, because converting COW to
+ * regular blocks is not protected by the
+ * IOLOCK.
+ */
+ ASSERT(flags & XFS_BMAPI_COWFORK);
+ if (!(flags & XFS_BMAPI_COWFORK)) {
+ error = -EIO;
+ goto error0;
+ }
+
+ if (eof || bno >= end)
+ break;
+ } else {
+ need_alloc = true;
+ }
+ } else {
+ /*
+ * Make sure we only reflink into a hole.
+ */
+ ASSERT(!(flags & XFS_BMAPI_REMAP));
+ if (isnullstartblock(bma.got.br_startblock))
+ wasdelay = true;
+ }
/*
* First, deal with the hole before the allocated space
* that we found, if any.
*/
- if (inhole || wasdelay) {
+ if (need_alloc || wasdelay) {
bma.eof = eof;
bma.conv = !!(flags & XFS_BMAPI_CONVERT);
bma.wasdel = wasdelay;
diff --git a/fs/xfs/libxfs/xfs_bmap.h b/fs/xfs/libxfs/xfs_bmap.h
index cecd094..cdef87d 100644
--- a/fs/xfs/libxfs/xfs_bmap.h
+++ b/fs/xfs/libxfs/xfs_bmap.h
@@ -110,6 +110,9 @@ struct xfs_extent_free_item
/* Map something in the CoW fork. */
#define XFS_BMAPI_COWFORK 0x200
+/* Only convert delalloc space, don't allocate entirely new extents */
+#define XFS_BMAPI_DELALLOC 0x400
+
#define XFS_BMAPI_FLAGS \
{ XFS_BMAPI_ENTIRE, "ENTIRE" }, \
{ XFS_BMAPI_METADATA, "METADATA" }, \
@@ -120,7 +123,8 @@ struct xfs_extent_free_item
{ XFS_BMAPI_CONVERT, "CONVERT" }, \
{ XFS_BMAPI_ZERO, "ZERO" }, \
{ XFS_BMAPI_REMAP, "REMAP" }, \
- { XFS_BMAPI_COWFORK, "COWFORK" }
+ { XFS_BMAPI_COWFORK, "COWFORK" }, \
+ { XFS_BMAPI_DELALLOC, "DELALLOC" }
static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index d6330c2..d9be241 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -502,12 +502,11 @@ try_another_ag:
if (args.fsbno == NULLFSBLOCK && args.minleft) {
/*
* Could not find an AG with enough free space to satisfy
- * a full btree split. Try again without minleft and if
+ * a full btree split. Try again and if
* successful activate the lowspace algorithm.
*/
args.fsbno = 0;
args.type = XFS_ALLOCTYPE_FIRST_AG;
- args.minleft = 0;
error = xfs_alloc_vextent(&args);
if (error)
goto error0;
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index c58d72c..2f389d36 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -36,21 +36,29 @@
struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
/*
- * @mode, if set, indicates that the type field needs to be set up.
- * This uses the transformation from file mode to DT_* as defined in linux/fs.h
- * for file type specification. This will be propagated into the directory
- * structure if appropriate for the given operation and filesystem config.
+ * Convert inode mode to directory entry filetype
*/
-const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
- [0] = XFS_DIR3_FT_UNKNOWN,
- [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
- [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
- [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
- [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
- [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
- [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
- [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
-};
+unsigned char xfs_mode_to_ftype(int mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ return XFS_DIR3_FT_REG_FILE;
+ case S_IFDIR:
+ return XFS_DIR3_FT_DIR;
+ case S_IFCHR:
+ return XFS_DIR3_FT_CHRDEV;
+ case S_IFBLK:
+ return XFS_DIR3_FT_BLKDEV;
+ case S_IFIFO:
+ return XFS_DIR3_FT_FIFO;
+ case S_IFSOCK:
+ return XFS_DIR3_FT_SOCK;
+ case S_IFLNK:
+ return XFS_DIR3_FT_SYMLINK;
+ default:
+ return XFS_DIR3_FT_UNKNOWN;
+ }
+}
/*
* ASCII case-insensitive (ie. A-Z) support for directories that was
@@ -631,7 +639,8 @@ xfs_dir2_isblock(
if ((rval = xfs_bmap_last_offset(args->dp, &last, XFS_DATA_FORK)))
return rval;
rval = XFS_FSB_TO_B(args->dp->i_mount, last) == args->geo->blksize;
- ASSERT(rval == 0 || args->dp->i_d.di_size == args->geo->blksize);
+ if (rval != 0 && args->dp->i_d.di_size != args->geo->blksize)
+ return -EFSCORRUPTED;
*vp = rval;
return 0;
}
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 0197590..d6e6d9d 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -18,6 +18,9 @@
#ifndef __XFS_DIR2_H__
#define __XFS_DIR2_H__
+#include "xfs_da_format.h"
+#include "xfs_da_btree.h"
+
struct xfs_defer_ops;
struct xfs_da_args;
struct xfs_inode;
@@ -32,10 +35,9 @@ struct xfs_dir2_data_unused;
extern struct xfs_name xfs_name_dotdot;
/*
- * directory filetype conversion tables.
+ * Convert inode mode to directory entry filetype
*/
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
+extern unsigned char xfs_mode_to_ftype(int mode);
/*
* directory operations vector for encode/decode routines
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index 0fd086d..7c47188 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -82,11 +82,12 @@ xfs_finobt_set_root(
}
STATIC int
-xfs_inobt_alloc_block(
+__xfs_inobt_alloc_block(
struct xfs_btree_cur *cur,
union xfs_btree_ptr *start,
union xfs_btree_ptr *new,
- int *stat)
+ int *stat,
+ enum xfs_ag_resv_type resv)
{
xfs_alloc_arg_t args; /* block allocation args */
int error; /* error return value */
@@ -103,6 +104,7 @@ xfs_inobt_alloc_block(
args.maxlen = 1;
args.prod = 1;
args.type = XFS_ALLOCTYPE_NEAR_BNO;
+ args.resv = resv;
error = xfs_alloc_vextent(&args);
if (error) {
@@ -123,6 +125,27 @@ xfs_inobt_alloc_block(
}
STATIC int
+xfs_inobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat, XFS_AG_RESV_NONE);
+}
+
+STATIC int
+xfs_finobt_alloc_block(
+ struct xfs_btree_cur *cur,
+ union xfs_btree_ptr *start,
+ union xfs_btree_ptr *new,
+ int *stat)
+{
+ return __xfs_inobt_alloc_block(cur, start, new, stat,
+ XFS_AG_RESV_METADATA);
+}
+
+STATIC int
xfs_inobt_free_block(
struct xfs_btree_cur *cur,
struct xfs_buf *bp)
@@ -328,7 +351,7 @@ static const struct xfs_btree_ops xfs_finobt_ops = {
.dup_cursor = xfs_inobt_dup_cursor,
.set_root = xfs_finobt_set_root,
- .alloc_block = xfs_inobt_alloc_block,
+ .alloc_block = xfs_finobt_alloc_block,
.free_block = xfs_inobt_free_block,
.get_minrecs = xfs_inobt_get_minrecs,
.get_maxrecs = xfs_inobt_get_maxrecs,
@@ -480,3 +503,64 @@ xfs_inobt_rec_check_count(
return 0;
}
#endif /* DEBUG */
+
+static xfs_extlen_t
+xfs_inobt_max_size(
+ struct xfs_mount *mp)
+{
+ /* Bail out if we're uninitialized, which can happen in mkfs. */
+ if (mp->m_inobt_mxr[0] == 0)
+ return 0;
+
+ return xfs_btree_calc_size(mp, mp->m_inobt_mnr,
+ (uint64_t)mp->m_sb.sb_agblocks * mp->m_sb.sb_inopblock /
+ XFS_INODES_PER_CHUNK);
+}
+
+static int
+xfs_inobt_count_blocks(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_btnum_t btnum,
+ xfs_extlen_t *tree_blocks)
+{
+ struct xfs_buf *agbp;
+ struct xfs_btree_cur *cur;
+ int error;
+
+ error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
+ if (error)
+ return error;
+
+ cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, btnum);
+ error = xfs_btree_count_blocks(cur, tree_blocks);
+ xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
+ xfs_buf_relse(agbp);
+
+ return error;
+}
+
+/*
+ * Figure out how many blocks to reserve and how many are used by this btree.
+ */
+int
+xfs_finobt_calc_reserves(
+ struct xfs_mount *mp,
+ xfs_agnumber_t agno,
+ xfs_extlen_t *ask,
+ xfs_extlen_t *used)
+{
+ xfs_extlen_t tree_len = 0;
+ int error;
+
+ if (!xfs_sb_version_hasfinobt(&mp->m_sb))
+ return 0;
+
+ error = xfs_inobt_count_blocks(mp, agno, XFS_BTNUM_FINO, &tree_len);
+ if (error)
+ return error;
+
+ *ask += xfs_inobt_max_size(mp);
+ *used += tree_len;
+ return 0;
+}
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.h b/fs/xfs/libxfs/xfs_ialloc_btree.h
index bd88453..aa81e2e 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.h
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.h
@@ -72,4 +72,7 @@ int xfs_inobt_rec_check_count(struct xfs_mount *,
#define xfs_inobt_rec_check_count(mp, rec) 0
#endif /* DEBUG */
+int xfs_finobt_calc_reserves(struct xfs_mount *mp, xfs_agnumber_t agno,
+ xfs_extlen_t *ask, xfs_extlen_t *used);
+
#endif /* __XFS_IALLOC_BTREE_H__ */
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index dd483e2..d93f9d9 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -29,6 +29,7 @@
#include "xfs_icache.h"
#include "xfs_trans.h"
#include "xfs_ialloc.h"
+#include "xfs_dir2.h"
/*
* Check that none of the inode's in the buffer have a next
@@ -386,6 +387,7 @@ xfs_dinode_verify(
xfs_ino_t ino,
struct xfs_dinode *dip)
{
+ uint16_t mode;
uint16_t flags;
uint64_t flags2;
@@ -396,8 +398,12 @@ xfs_dinode_verify(
if (be64_to_cpu(dip->di_size) & (1ULL << 63))
return false;
- /* No zero-length symlinks. */
- if (S_ISLNK(be16_to_cpu(dip->di_mode)) && dip->di_size == 0)
+ mode = be16_to_cpu(dip->di_mode);
+ if (mode && xfs_mode_to_ftype(mode) == XFS_DIR3_FT_UNKNOWN)
+ return false;
+
+ /* No zero-length symlinks/dirs. */
+ if ((S_ISLNK(mode) || S_ISDIR(mode)) && dip->di_size == 0)
return false;
/* only version 3 or greater inodes are extensively verified here */
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 2580262..584ec89 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -242,7 +242,7 @@ xfs_mount_validate_sb(
sbp->sb_blocklog < XFS_MIN_BLOCKSIZE_LOG ||
sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_blocksize != (1 << sbp->sb_blocklog) ||
- sbp->sb_dirblklog > XFS_MAX_BLOCKSIZE_LOG ||
+ sbp->sb_dirblklog + sbp->sb_blocklog > XFS_MAX_BLOCKSIZE_LOG ||
sbp->sb_inodesize < XFS_DINODE_MIN_SIZE ||
sbp->sb_inodesize > XFS_DINODE_MAX_SIZE ||
sbp->sb_inodelog < XFS_DINODE_MIN_LOG ||
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 0f56fcd..631e7c0 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -1152,19 +1152,22 @@ xfs_vm_releasepage(
* block_invalidatepage() can send pages that are still marked dirty
* but otherwise have invalidated buffers.
*
- * We've historically freed buffers on the latter. Instead, quietly
- * filter out all dirty pages to avoid spurious buffer state warnings.
- * This can likely be removed once shrink_active_list() is fixed.
+ * We want to release the latter to avoid unnecessary buildup of the
+ * LRU, skip the former, and warn if we've left any lingering
+ * delalloc/unwritten buffers on clean pages. That is: skip pages with
+ * delalloc or unwritten buffers, warning if such a page is not dirty,
+ * and otherwise try to release the buffers.
*/
- if (PageDirty(page))
- return 0;
-
xfs_count_page_state(page, &delalloc, &unwritten);
- if (WARN_ON_ONCE(delalloc))
+ if (delalloc) {
+ WARN_ON_ONCE(!PageDirty(page));
return 0;
- if (WARN_ON_ONCE(unwritten))
+ }
+ if (unwritten) {
+ WARN_ON_ONCE(!PageDirty(page));
return 0;
+ }
return try_to_free_buffers(page);
}
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index b9abce5..c141791 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -528,7 +528,6 @@ xfs_getbmap(
xfs_bmbt_irec_t *map; /* buffer for user's data */
xfs_mount_t *mp; /* file system mount point */
int nex; /* # of user extents can do */
- int nexleft; /* # of user extents left */
int subnex; /* # of bmapi's can do */
int nmap; /* number of map entries */
struct getbmapx *out; /* output structure */
@@ -686,10 +685,8 @@ xfs_getbmap(
goto out_free_map;
}
- nexleft = nex;
-
do {
- nmap = (nexleft > subnex) ? subnex : nexleft;
+ nmap = (nex > subnex) ? subnex : nex;
error = xfs_bmapi_read(ip, XFS_BB_TO_FSBT(mp, bmv->bmv_offset),
XFS_BB_TO_FSB(mp, bmv->bmv_length),
map, &nmap, bmapi_flags);
@@ -697,8 +694,8 @@ xfs_getbmap(
goto out_free_map;
ASSERT(nmap <= subnex);
- for (i = 0; i < nmap && nexleft && bmv->bmv_length &&
- cur_ext < bmv->bmv_count; i++) {
+ for (i = 0; i < nmap && bmv->bmv_length &&
+ cur_ext < bmv->bmv_count - 1; i++) {
out[cur_ext].bmv_oflags = 0;
if (map[i].br_state == XFS_EXT_UNWRITTEN)
out[cur_ext].bmv_oflags |= BMV_OF_PREALLOC;
@@ -760,16 +757,27 @@ xfs_getbmap(
continue;
}
+ /*
+ * In order to report shared extents accurately,
+ * we report each distinct shared/unshared part
+ * of a single bmbt record using multiple bmap
+ * extents. To make that happen, we iterate the
+ * same map array item multiple times, each
+ * time trimming out the subextent that we just
+ * reported.
+ *
+ * Because of this, we must check the out array
+ * index (cur_ext) directly against bmv_count-1
+ * to avoid overflows.
+ */
if (inject_map.br_startblock != NULLFSBLOCK) {
map[i] = inject_map;
i--;
- } else
- nexleft--;
+ }
bmv->bmv_entries++;
cur_ext++;
}
- } while (nmap && nexleft && bmv->bmv_length &&
- cur_ext < bmv->bmv_count);
+ } while (nmap && bmv->bmv_length && cur_ext < bmv->bmv_count - 1);
out_free_map:
kmem_free(map);
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 7f0a01f..ac3b4db 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -422,6 +422,7 @@ retry:
out_free_pages:
for (i = 0; i < bp->b_page_count; i++)
__free_page(bp->b_pages[i]);
+ bp->b_flags &= ~_XBF_PAGES;
return error;
}
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 7a30b8f..9d06cc3 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -710,6 +710,10 @@ xfs_dq_get_next_id(
/* Simple advance */
next_id = *id + 1;
+ /* If we'd wrap past the max ID, stop */
+ if (next_id < *id)
+ return -ENOENT;
+
/* If new ID is within the current chunk, advancing it sufficed */
if (next_id % mp->m_quotainfo->qi_dqperchunk) {
*id = next_id;
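
The new wrap check exploits defined unsigned overflow: incrementing past the maximum value always yields something smaller than the original, so next_id < *id is a complete overflow test. A standalone userspace illustration of the idiom (not from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t id = UINT32_MAX;	/* highest possible ID */
	uint32_t next = id + 1;		/* well-defined: wraps to 0 */

	if (next < id)			/* detects the wraparound */
		printf("wrapped past the max ID, stop\n");
	return 0;
}
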
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index b955779..de32f0f 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1792,22 +1792,23 @@ xfs_inactive_ifree(
int error;
/*
- * The ifree transaction might need to allocate blocks for record
- * insertion to the finobt. We don't want to fail here at ENOSPC, so
- * allow ifree to dip into the reserved block pool if necessary.
- *
- * Freeing large sets of inodes generally means freeing inode chunks,
- * directory and file data blocks, so this should be relatively safe.
- * Only under severe circumstances should it be possible to free enough
- * inodes to exhaust the reserve block pool via finobt expansion while
- * at the same time not creating free space in the filesystem.
+ * We try to use a per-AG reservation for any block needed by the finobt
+ * tree, but as the finobt feature predates the per-AG reservation
+ * support, a degraded file system might not have enough space for the
+ * reservation at mount time. In that case try to dip into the reserved
+ * pool and pray.
*
* Send a warning if the reservation does happen to fail, as the inode
* now remains allocated and sits on the unlinked list until the fs is
* repaired.
*/
- error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
- XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp);
+ if (unlikely(mp->m_inotbt_nores)) {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree,
+ XFS_IFREE_SPACE_RES(mp), 0, XFS_TRANS_RESERVE,
+ &tp);
+ } else {
+ error = xfs_trans_alloc(mp, &M_RES(mp)->tr_ifree, 0, 0, 0, &tp);
+ }
if (error) {
if (error == -ENOSPC) {
xfs_warn_ratelimited(mp,
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 0d14742..1aa3abd 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -681,7 +681,7 @@ xfs_iomap_write_allocate(
xfs_trans_t *tp;
int nimaps;
int error = 0;
- int flags = 0;
+ int flags = XFS_BMAPI_DELALLOC;
int nres;
if (whichfork == XFS_COW_FORK)
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index 308bebb..22c1615 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -98,12 +98,27 @@ xfs_init_security(
static void
xfs_dentry_to_name(
struct xfs_name *namep,
+ struct dentry *dentry)
+{
+ namep->name = dentry->d_name.name;
+ namep->len = dentry->d_name.len;
+ namep->type = XFS_DIR3_FT_UNKNOWN;
+}
+
+static int
+xfs_dentry_mode_to_name(
+ struct xfs_name *namep,
struct dentry *dentry,
int mode)
{
namep->name = dentry->d_name.name;
namep->len = dentry->d_name.len;
- namep->type = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
+ namep->type = xfs_mode_to_ftype(mode);
+
+ if (unlikely(namep->type == XFS_DIR3_FT_UNKNOWN))
+ return -EFSCORRUPTED;
+
+ return 0;
}
STATIC void
@@ -119,7 +134,7 @@ xfs_cleanup_inode(
* xfs_init_security we must back out.
* ENOSPC can hit here, among other things.
*/
- xfs_dentry_to_name(&teardown, dentry, 0);
+ xfs_dentry_to_name(&teardown, dentry);
xfs_remove(XFS_I(dir), &teardown, XFS_I(inode));
}
@@ -154,8 +169,12 @@ xfs_generic_create(
if (error)
return error;
+ /* Verify mode is valid also for tmpfile case */
+ error = xfs_dentry_mode_to_name(&name, dentry, mode);
+ if (unlikely(error))
+ goto out_free_acl;
+
if (!tmpfile) {
- xfs_dentry_to_name(&name, dentry, mode);
error = xfs_create(XFS_I(dir), &name, mode, rdev, &ip);
} else {
error = xfs_create_tmpfile(XFS_I(dir), dentry, mode, &ip);
@@ -248,7 +267,7 @@ xfs_vn_lookup(
if (dentry->d_name.len >= MAXNAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- xfs_dentry_to_name(&name, dentry, 0);
+ xfs_dentry_to_name(&name, dentry);
error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
if (unlikely(error)) {
if (unlikely(error != -ENOENT))
@@ -275,7 +294,7 @@ xfs_vn_ci_lookup(
if (dentry->d_name.len >= MAXNAMELEN)
return ERR_PTR(-ENAMETOOLONG);
- xfs_dentry_to_name(&xname, dentry, 0);
+ xfs_dentry_to_name(&xname, dentry);
error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
if (unlikely(error)) {
if (unlikely(error != -ENOENT))
@@ -310,7 +329,9 @@ xfs_vn_link(
struct xfs_name name;
int error;
- xfs_dentry_to_name(&name, dentry, inode->i_mode);
+ error = xfs_dentry_mode_to_name(&name, dentry, inode->i_mode);
+ if (unlikely(error))
+ return error;
error = xfs_link(XFS_I(dir), XFS_I(inode), &name);
if (unlikely(error))
@@ -329,7 +350,7 @@ xfs_vn_unlink(
struct xfs_name name;
int error;
- xfs_dentry_to_name(&name, dentry, 0);
+ xfs_dentry_to_name(&name, dentry);
error = xfs_remove(XFS_I(dir), &name, XFS_I(d_inode(dentry)));
if (error)
@@ -359,7 +380,9 @@ xfs_vn_symlink(
mode = S_IFLNK |
(irix_symlink_mode ? 0777 & ~current_umask() : S_IRWXUGO);
- xfs_dentry_to_name(&name, dentry, mode);
+ error = xfs_dentry_mode_to_name(&name, dentry, mode);
+ if (unlikely(error))
+ goto out;
error = xfs_symlink(XFS_I(dir), &name, symname, mode, &cip);
if (unlikely(error))
@@ -395,6 +418,7 @@ xfs_vn_rename(
{
struct inode *new_inode = d_inode(ndentry);
int omode = 0;
+ int error;
struct xfs_name oname;
struct xfs_name nname;
@@ -405,8 +429,14 @@ xfs_vn_rename(
if (flags & RENAME_EXCHANGE)
omode = d_inode(ndentry)->i_mode;
- xfs_dentry_to_name(&oname, odentry, omode);
- xfs_dentry_to_name(&nname, ndentry, d_inode(odentry)->i_mode);
+ error = xfs_dentry_mode_to_name(&oname, odentry, omode);
+ if (omode && unlikely(error))
+ return error;
+
+ error = xfs_dentry_mode_to_name(&nname, ndentry,
+ d_inode(odentry)->i_mode);
+ if (unlikely(error))
+ return error;
return xfs_rename(XFS_I(odir), &oname, XFS_I(d_inode(odentry)),
XFS_I(ndir), &nname,
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index e467218..7a989de 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -331,11 +331,11 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
}
#define ASSERT_ALWAYS(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifdef DEBUG
#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : assfail(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC noinline
@@ -346,7 +346,7 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
#ifdef XFS_WARN
#define ASSERT(expr) \
- (unlikely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
+ (likely(expr) ? (void)0 : asswarn(#expr, __FILE__, __LINE__))
#ifndef STATIC
# define STATIC static noinline
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index c39ac14..b1469f0 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -3317,12 +3317,8 @@ xfs_log_force(
xfs_mount_t *mp,
uint flags)
{
- int error;
-
trace_xfs_log_force(mp, 0, _RET_IP_);
- error = _xfs_log_force(mp, flags, NULL);
- if (error)
- xfs_warn(mp, "%s: error %d returned.", __func__, error);
+ _xfs_log_force(mp, flags, NULL);
}
/*
@@ -3466,12 +3462,8 @@ xfs_log_force_lsn(
xfs_lsn_t lsn,
uint flags)
{
- int error;
-
trace_xfs_log_force(mp, lsn, _RET_IP_);
- error = _xfs_log_force_lsn(mp, lsn, flags, NULL);
- if (error)
- xfs_warn(mp, "%s: error %d returned.", __func__, error);
+ _xfs_log_force_lsn(mp, lsn, flags, NULL);
}
/*
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 84f7852..7f351f7 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -140,6 +140,7 @@ typedef struct xfs_mount {
int m_fixedfsid[2]; /* unchanged for life of FS */
uint m_dmevmask; /* DMI events for this FS */
__uint64_t m_flags; /* global mount flags */
+ bool m_inotbt_nores; /* no per-AG finobt resv. */
int m_ialloc_inos; /* inodes in inode allocation */
int m_ialloc_blks; /* blocks in inode allocation */
int m_ialloc_min_blks;/* min blocks in sparse inode
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index 45e50ea..b669b12 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -1177,7 +1177,8 @@ xfs_qm_dqusage_adjust(
* the case in all other instances. It's OK that we do this because
* quotacheck is done only at mount time.
*/
- error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
+ error = xfs_iget(mp, NULL, ino, XFS_IGET_DONTCACHE, XFS_ILOCK_EXCL,
+ &ip);
if (error) {
*res = BULKSTAT_RV_NOTHING;
return error;
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index d6d241f..56814e8 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
struct drm_crtc *ptr;
struct drm_crtc_state *state;
struct drm_crtc_commit *commit;
- s64 __user *out_fence_ptr;
+ s32 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index bf9991b..1374323 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -488,7 +488,7 @@ struct drm_mode_config {
/**
* @prop_out_fence_ptr: Sync File fd pointer representing the
* outgoing fences for a CRTC. Userspace should provide a pointer to a
- * value of type s64, and then cast that pointer to u64.
+ * value of type s32, and then cast that pointer to u64.
*/
struct drm_property *prop_out_fence_ptr;
/**
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index b717ed9..5c970ce 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -76,4 +76,5 @@ void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
+void kvm_timer_init_vhe(void);
#endif
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8369564..1ca8e8f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -739,7 +739,7 @@ static inline bool blk_queue_is_zoned(struct request_queue *q)
}
}
-static inline unsigned int blk_queue_zone_size(struct request_queue *q)
+static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
{
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
@@ -1000,6 +1000,19 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> 9;
}
+/*
+ * Some commands like WRITE SAME have a payload or data transfer size which
+ * is different from the size of the request. Any driver that supports such
+ * commands using the RQF_SPECIAL_PAYLOAD flag needs to use this helper to
+ * calculate the data transfer size.
+ */
+static inline unsigned int blk_rq_payload_bytes(struct request *rq)
+{
+ if (rq->rq_flags & RQF_SPECIAL_PAYLOAD)
+ return rq->special_vec.bv_len;
+ return blk_rq_bytes(rq);
+}
+
static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
int op)
{
@@ -1536,12 +1549,12 @@ static inline bool bdev_is_zoned(struct block_device *bdev)
return false;
}
-static inline unsigned int bdev_zone_size(struct block_device *bdev)
+static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
if (q)
- return blk_queue_zone_size(q);
+ return blk_queue_zone_sectors(q);
return 0;
}
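
The point of blk_rq_payload_bytes(): for commands like WRITE SAME the data transferred differs from the request size. For example (numbers illustrative), a WRITE SAME covering eight 512-byte sectors with RQF_SPECIAL_PAYLOAD set has blk_rq_bytes(rq) == 4096 but carries a single 512-byte payload bvec, so blk_rq_payload_bytes(rq) == 512 is the length a driver must map for the transfer. A hedged sketch (my_transfer_len() is an illustrative name):

static unsigned int my_transfer_len(struct request *rq)
{
	/* map the payload length, not the nominal request length */
	return blk_rq_payload_bytes(rq);
}
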
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 94ea8d2..57d60dc 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -161,9 +161,10 @@ struct bpf_verifier_ops {
enum bpf_reg_type *reg_type);
int (*gen_prologue)(struct bpf_insn *insn, bool direct_write,
const struct bpf_prog *prog);
- u32 (*convert_ctx_access)(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn, struct bpf_prog *prog);
+ u32 (*convert_ctx_access)(enum bpf_access_type type,
+ const struct bpf_insn *src,
+ struct bpf_insn *dst,
+ struct bpf_prog *prog);
};
struct bpf_prog_type_list {
@@ -216,7 +217,7 @@ u64 bpf_tail_call(u64 ctx, u64 r2, u64 index, u64 r4, u64 r5);
u64 bpf_get_stackid(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
-int bpf_prog_calc_digest(struct bpf_prog *fp);
+int bpf_prog_calc_tag(struct bpf_prog *fp);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -247,6 +248,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+void *bpf_map_area_alloc(size_t size);
+void bpf_map_area_free(void *base);
extern int sysctl_unprivileged_bpf_disabled;
diff --git a/include/linux/bpf_trace.h b/include/linux/bpf_trace.h
new file mode 100644
index 0000000..b22efbd
--- /dev/null
+++ b/include/linux/bpf_trace.h
@@ -0,0 +1,7 @@
+#ifndef __LINUX_BPF_TRACE_H__
+#define __LINUX_BPF_TRACE_H__
+
+#include <trace/events/bpf.h>
+#include <trace/events/xdp.h>
+
+#endif /* __LINUX_BPF_TRACE_H__ */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 4f7d8be..5881d1f 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -17,6 +17,7 @@
#define PHY_ID_BCM5482 0x0143bcb0
#define PHY_ID_BCM5411 0x00206070
#define PHY_ID_BCM5421 0x002060e0
+#define PHY_ID_BCM54210E 0x600d84a0
#define PHY_ID_BCM5464 0x002060b0
#define PHY_ID_BCM5461 0x002060c0
#define PHY_ID_BCM54612E 0x03625e60
@@ -24,6 +25,7 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
#define PHY_ID_BCM7346 0x600d8650
@@ -103,19 +105,17 @@
/*
* AUXILIARY CONTROL SHADOW ACCESS REGISTERS. (PHY REG 0x18)
*/
-#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x0000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL 0x00
#define MII_BCM54XX_AUXCTL_ACTL_TX_6DB 0x0400
#define MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA 0x0800
-#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
-#define MII_BCM54XX_AUXCTL_MISC_RXD_RXC_SKEW 0x0100
-#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
-#define MII_BCM54XX_AUXCTL_MISC_RDSEL_MISC 0x7000
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x0007
-#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN (1 << 8)
-#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN (1 << 4)
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC 0x07
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_WIRESPEED_EN 0x0010
+#define MII_BCM54XX_AUXCTL_SHDWSEL_MISC_RGMII_SKEW_EN 0x0100
+#define MII_BCM54XX_AUXCTL_MISC_FORCE_AMDIX 0x0200
+#define MII_BCM54XX_AUXCTL_MISC_WREN 0x8000
+#define MII_BCM54XX_AUXCTL_SHDWSEL_READ_SHIFT 12
#define MII_BCM54XX_AUXCTL_SHDWSEL_MASK 0x0007
/*
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 5f52709..141b05a 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -38,6 +38,13 @@ struct can_priv {
struct can_bittiming bittiming, data_bittiming;
const struct can_bittiming_const *bittiming_const,
*data_bittiming_const;
+ const u16 *termination_const;
+ unsigned int termination_const_cnt;
+ u16 termination;
+ const u32 *bitrate_const;
+ unsigned int bitrate_const_cnt;
+ const u32 *data_bitrate_const;
+ unsigned int data_bitrate_const_cnt;
struct can_clock clock;
enum can_state state;
@@ -53,6 +60,7 @@ struct can_priv {
int (*do_set_bittiming)(struct net_device *dev);
int (*do_set_data_bittiming)(struct net_device *dev);
int (*do_set_mode)(struct net_device *dev, enum can_mode mode);
+ int (*do_set_termination)(struct net_device *dev, u16 term);
int (*do_get_state)(const struct net_device *dev,
enum can_state *state);
int (*do_get_berr_counter)(const struct net_device *dev,
diff --git a/include/linux/coredump.h b/include/linux/coredump.h
index d016a12..28ffa94 100644
--- a/include/linux/coredump.h
+++ b/include/linux/coredump.h
@@ -14,6 +14,7 @@ struct coredump_params;
extern int dump_skip(struct coredump_params *cprm, size_t nr);
extern int dump_emit(struct coredump_params *cprm, const void *addr, int nr);
extern int dump_align(struct coredump_params *cprm, int align);
+extern void dump_truncate(struct coredump_params *cprm);
#ifdef CONFIG_COREDUMP
extern void do_coredump(const siginfo_t *siginfo);
#else
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 20bfefb..d936a00 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -74,6 +74,8 @@ enum cpuhp_state {
CPUHP_ZCOMP_PREPARE,
CPUHP_TIMERS_DEAD,
CPUHP_MIPS_SOC_PREPARE,
+ CPUHP_BP_PREPARE_DYN,
+ CPUHP_BP_PREPARE_DYN_END = CPUHP_BP_PREPARE_DYN + 20,
CPUHP_BRINGUP_CPU,
CPUHP_AP_IDLE_DEAD,
CPUHP_AP_OFFLINE,
diff --git a/include/linux/device.h b/include/linux/device.h
index 491b4c0c..bd684fc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -88,6 +88,8 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
*
* @suspend: Called when a device on this bus wants to go to sleep mode.
* @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -127,6 +129,8 @@ struct bus_type {
int (*suspend)(struct device *dev, pm_message_t state);
int (*resume)(struct device *dev);
+ int (*num_vf)(struct device *dev);
+
const struct dev_pm_ops *pm;
const struct iommu_ops *iommu_ops;
@@ -1140,6 +1144,13 @@ extern int device_online(struct device *dev);
extern void set_primary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
extern void set_secondary_fwnode(struct device *dev, struct fwnode_handle *fwnode);
+static inline int dev_num_vf(struct device *dev)
+{
+ if (dev->bus && dev->bus->num_vf)
+ return dev->bus->num_vf(dev);
+ return 0;
+}
+
/*
* Root device objects for grouping under /sys/devices
*/
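
dev_num_vf() degrades gracefully, returning 0 whenever the bus does not implement the hook. A bus that knows about virtual functions wires it up roughly as in this hedged sketch (my_bus and my_bus_num_vf() are illustrative; a real implementation would query bus-specific state instead of returning a constant):

static int my_bus_num_vf(struct device *dev)
{
	return 8;	/* stand-in for a real per-device VF count lookup */
}

static struct bus_type my_bus = {
	.name   = "my_bus",
	.num_vf = my_bus_num_vf,
};
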
diff --git a/include/linux/efi.h b/include/linux/efi.h
index a07a476..5b1af30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -103,6 +103,7 @@ typedef struct {
#define EFI_PAGE_SHIFT 12
#define EFI_PAGE_SIZE (1UL << EFI_PAGE_SHIFT)
+#define EFI_PAGES_MAX (U64_MAX >> EFI_PAGE_SHIFT)
typedef struct {
u32 type;
@@ -950,6 +951,7 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
+extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 42add77..c62b709 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -54,6 +54,11 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
#define alloc_etherdev(sizeof_priv) alloc_etherdev_mq(sizeof_priv, 1)
#define alloc_etherdev_mq(sizeof_priv, count) alloc_etherdev_mqs(sizeof_priv, count, count)
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+ unsigned int txqs,
+ unsigned int rxqs);
+#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
+
struct sk_buff **eth_gro_receive(struct sk_buff **head,
struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index a0934e6..e4eb254 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -57,6 +57,8 @@ struct bpf_prog_aux;
/* BPF program can access up to 512 bytes of stack space. */
#define MAX_BPF_STACK 512
+#define BPF_TAG_SIZE 8
+
/* Helper macros for filter block array initializers. */
/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
@@ -408,7 +410,7 @@ struct bpf_prog {
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
- u32 digest[SHA_DIGEST_WORDS]; /* Program digest */
+ u8 tag[BPF_TAG_SIZE];
struct bpf_prog_aux *aux; /* Auxiliary fields */
struct sock_fprog_kern *orig_prog; /* Original BPF program */
unsigned int (*bpf_func)(const void *ctx,
@@ -519,7 +521,7 @@ static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
return prog->len * sizeof(struct bpf_insn);
}
-static inline u32 bpf_prog_digest_scratch_size(const struct bpf_prog *prog)
+static inline u32 bpf_prog_tag_scratch_size(const struct bpf_prog *prog)
{
return round_up(bpf_prog_insn_size(prog) +
sizeof(__be64) + 1, SHA_MESSAGE_BYTES);
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 4175dca..0fe0b62 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -38,9 +38,8 @@ struct vm_area_struct;
#define ___GFP_ACCOUNT 0x100000u
#define ___GFP_NOTRACK 0x200000u
#define ___GFP_DIRECT_RECLAIM 0x400000u
-#define ___GFP_OTHER_NODE 0x800000u
-#define ___GFP_WRITE 0x1000000u
-#define ___GFP_KSWAPD_RECLAIM 0x2000000u
+#define ___GFP_WRITE 0x800000u
+#define ___GFP_KSWAPD_RECLAIM 0x1000000u
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -172,11 +171,6 @@ struct vm_area_struct;
* __GFP_NOTRACK_FALSE_POSITIVE is an alias of __GFP_NOTRACK. It's a means of
* distinguishing in the source between false positives and allocations that
* cannot be supported (e.g. page tables).
- *
- * __GFP_OTHER_NODE is for allocations that are on a remote node but that
- * should not be accounted for as a remote allocation in vmstat. A
- * typical user would be khugepaged collapsing a huge page on a remote
- * node.
*/
#define __GFP_COLD ((__force gfp_t)___GFP_COLD)
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
@@ -184,10 +178,9 @@ struct vm_area_struct;
#define __GFP_ZERO ((__force gfp_t)___GFP_ZERO)
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
-#define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE)
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 26
+#define __GFP_BITS_SHIFT 25
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -506,11 +499,10 @@ extern void free_hot_cold_page(struct page *page, bool cold);
extern void free_hot_cold_page_list(struct list_head *list, bool cold);
struct page_frag_cache;
-extern void __page_frag_drain(struct page *page, unsigned int order,
- unsigned int count);
-extern void *__alloc_page_frag(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask);
-extern void __free_page_frag(void *addr);
+extern void __page_frag_cache_drain(struct page *page, unsigned int count);
+extern void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask);
+extern void page_frag_free(void *addr);
#define __free_page(page) __free_pages((page), 0)
#define free_page(addr) free_pages((addr), 0)
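
With the renames the page-fragment allocator reads as a public pairing: page_frag_alloc() hands out a sub-page fragment from a caller-owned cache and page_frag_free() releases it. A hedged usage sketch (my_cache and the helpers are illustrative; GFP_ATOMIC matches the typical networking caller):

static struct page_frag_cache my_cache;

static void *my_get_frag(unsigned int size)
{
	return page_frag_alloc(&my_cache, size, GFP_ATOMIC);
}

static void my_put_frag(void *buf)
{
	page_frag_free(buf);
}
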
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index c2748ac..e973fab 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -274,37 +274,67 @@ void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
int parent_irq);
-int _gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type,
+ bool nested,
+ struct lock_class_key *lock_key);
+
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Lockdep requires that each irqchip instance be created with a
+ * unique key so as to avoid unnecessary warnings. These upfront
+ * boilerplate static inlines provide such a key for each
+ * unique instance.
+ */
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, &key);
+}
+
+static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
- unsigned int type,
- bool nested,
- struct lock_class_key *lock_key);
+ unsigned int type)
+{
+
+ static struct lock_class_key key;
+
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, &key);
+}
+#else
+static inline int gpiochip_irqchip_add(struct gpio_chip *gpiochip,
+ struct irq_chip *irqchip,
+ unsigned int first_irq,
+ irq_flow_handler_t handler,
+ unsigned int type)
+{
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, false, NULL);
+}
-/* FIXME: I assume threaded IRQchips do not have the lockdep problem */
static inline int gpiochip_irqchip_add_nested(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int first_irq,
irq_flow_handler_t handler,
unsigned int type)
{
- return _gpiochip_irqchip_add(gpiochip, irqchip, first_irq,
- handler, type, true, NULL);
+ return gpiochip_irqchip_add_key(gpiochip, irqchip, first_irq,
+ handler, type, true, NULL);
}
-
-#ifdef CONFIG_LOCKDEP
-#define gpiochip_irqchip_add(...) \
-( \
- ({ \
- static struct lock_class_key _key; \
- _gpiochip_irqchip_add(__VA_ARGS__, false, &_key); \
- }) \
-)
-#else
-#define gpiochip_irqchip_add(...) \
- _gpiochip_irqchip_add(__VA_ARGS__, false, NULL)
-#endif
+#endif /* CONFIG_LOCKDEP */
#endif /* CONFIG_GPIOLIB_IRQCHIP */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b2109c5..4b45ec4 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -665,6 +665,7 @@ i2c_unlock_adapter(struct i2c_adapter *adapter)
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
/* Must equal I2C_M_TEN below */
#define I2C_CLIENT_SLAVE 0x20 /* we are the slave */
+#define I2C_CLIENT_HOST_NOTIFY 0x40 /* We want to use I2C host notify */
#define I2C_CLIENT_WAKE 0x80 /* for board_info; true iff can wake */
#define I2C_CLIENT_SCCB 0x9000 /* Use Omnivision SCCB protocol */
/* Must match I2C_M_STOP|IGNORE_NAK */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index fe84932..87d1937 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -185,6 +185,8 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
/* number of user priorities 802.11 uses */
#define IEEE80211_NUM_UPS 8
+/* number of ACs */
+#define IEEE80211_NUM_ACS 4
#define IEEE80211_QOS_CTL_LEN 2
/* 1d tag mask */
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c6587c0..debc9d5 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -46,6 +46,7 @@ struct br_ip_list {
#define BR_LEARNING_SYNC BIT(9)
#define BR_PROXYARP_WIFI BIT(10)
#define BR_MCAST_FLOOD BIT(11)
+#define BR_MULTICAST_TO_UNICAST BIT(12)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/if_frad.h b/include/linux/if_frad.h
index 4316aa1..46df7e5 100644
--- a/include/linux/if_frad.h
+++ b/include/linux/if_frad.h
@@ -66,8 +66,6 @@ struct dlci_local
struct frad_local
{
- struct net_device_stats stats;
-
/* devices which this FRAD is slaved to */
struct net_device *master[CONFIG_DLCI_MAX];
short dlci[CONFIG_DLCI_MAX];
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 671d014..71be5b3 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -69,6 +69,7 @@ struct ipv6_devconf {
__s32 seg6_require_hmac;
#endif
__u32 enhanced_dad;
+ __u32 addr_gen_mode;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/jump_label_ratelimit.h b/include/linux/jump_label_ratelimit.h
index 089f70f..23da3af 100644
--- a/include/linux/jump_label_ratelimit.h
+++ b/include/linux/jump_label_ratelimit.h
@@ -14,6 +14,7 @@ struct static_key_deferred {
#ifdef HAVE_JUMP_LABEL
extern void static_key_slow_dec_deferred(struct static_key_deferred *key);
+extern void static_key_deferred_flush(struct static_key_deferred *key);
extern void
jump_label_rate_limit(struct static_key_deferred *key, unsigned long rl);
@@ -26,6 +27,10 @@ static inline void static_key_slow_dec_deferred(struct static_key_deferred *key)
STATIC_KEY_CHECK_USE();
static_key_slow_dec(&key->key);
}
+static inline void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+}
static inline void
jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 56aec84..cb09238 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -514,8 +514,8 @@ extern enum system_states {
#define TAINT_FLAGS_COUNT 16
struct taint_flag {
- char true; /* character printed when tainted */
- char false; /* character printed when not tainted */
+ char c_true; /* character printed when tainted */
+ char c_false; /* character printed when not tainted */
bool module; /* also show as a per-module taint flag */
};
diff --git a/include/linux/mdio.h b/include/linux/mdio.h
index b6587a4..55a80d7 100644
--- a/include/linux/mdio.h
+++ b/include/linux/mdio.h
@@ -265,7 +265,7 @@ bool mdiobus_is_registered_device(struct mii_bus *bus, int addr);
struct phy_device *mdiobus_get_phy(struct mii_bus *bus, int addr);
/**
- * module_mdio_driver() - Helper macro for registering mdio drivers
+ * mdio_module_driver() - Helper macro for registering mdio drivers
*
* Helper macro for MDIO drivers which do not do anything special in module
* init/exit. Each module may only use this macro once, and calling it
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 61d20c1..2546988 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -120,7 +120,7 @@ struct mem_cgroup_reclaim_iter {
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
- unsigned long lru_size[NR_LRU_LISTS];
+ unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
@@ -432,7 +432,7 @@ static inline bool mem_cgroup_online(struct mem_cgroup *memcg)
int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages);
+ int zid, int nr_pages);
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask);
@@ -441,9 +441,23 @@ static inline
unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
struct mem_cgroup_per_node *mz;
+ unsigned long nr_pages = 0;
+ int zid;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- return mz->lru_size[lru];
+ for (zid = 0; zid < MAX_NR_ZONES; zid++)
+ nr_pages += mz->lru_zone_size[zid][lru];
+ return nr_pages;
+}
+
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ struct mem_cgroup_per_node *mz;
+
+ mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
+ return mz->lru_zone_size[zone_idx][lru];
}
void mem_cgroup_handle_over_high(void);
@@ -671,6 +685,12 @@ mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
{
return 0;
}
+static inline
+unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
+ enum lru_list lru, int zone_idx)
+{
+ return 0;
+}
static inline unsigned long
mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 01033fa..c1784c0 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -284,7 +284,7 @@ extern void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
unsigned long map_offset);
extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
unsigned long pnum);
-extern int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target);
+extern bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target, int *zone_shift);
#endif /* __LINUX_MEMORY_HOTPLUG_H */
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 257173e..f541da6 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -35,6 +35,8 @@
#define PHY_ID_KSZ886X 0x00221430
#define PHY_ID_KSZ8863 0x00221435
+#define PHY_ID_KSZ8795 0x00221550
+
/* struct phy_device dev_flags definitions */
#define MICREL_PHY_50MHZ_CLK 0x00000001
#define MICREL_PHY_FXEN 0x00000002
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6533c16..7e66e4f 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -1374,6 +1374,7 @@ int mlx4_get_base_qpn(struct mlx4_dev *dev, u8 port);
int __mlx4_replace_mac(struct mlx4_dev *dev, u8 port, int qpn, u64 new_mac);
int mlx4_SET_PORT_general(struct mlx4_dev *dev, u8 port, int mtu,
u8 pptx, u8 pfctx, u8 pprx, u8 pfcrx);
+int mlx4_SET_PORT_user_mtu(struct mlx4_dev *dev, u8 port, u16 user_mtu);
int mlx4_SET_PORT_qpn_calc(struct mlx4_dev *dev, u8 port, u32 base_qpn,
u8 promisc);
int mlx4_SET_PORT_BEACON(struct mlx4_dev *dev, u8 port, u16 time);
@@ -1539,8 +1540,13 @@ enum mlx4_ptys_proto {
MLX4_PTYS_EN = 1<<2,
};
+enum mlx4_ptys_flags {
+ MLX4_PTYS_AN_DISABLE_CAP = 1 << 5,
+ MLX4_PTYS_AN_DISABLE_ADMIN = 1 << 6,
+};
+
struct mlx4_ptys_reg {
- u8 resrvd1;
+ u8 flags;
u8 local_port;
u8 resrvd2;
u8 proto_mask;
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 7c5265d..7b6cd67 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -289,6 +289,7 @@ enum mlx5_event {
MLX5_EVENT_TYPE_GPIO_EVENT = 0x15,
MLX5_EVENT_TYPE_PORT_MODULE_EVENT = 0x16,
MLX5_EVENT_TYPE_REMOTE_CONFIG = 0x19,
+ MLX5_EVENT_TYPE_PPS_EVENT = 0x25,
MLX5_EVENT_TYPE_DB_BF_CONGESTION = 0x1a,
MLX5_EVENT_TYPE_STALL_EVENT = 0x1b,
@@ -569,6 +570,22 @@ struct mlx5_eqe_port_module {
u8 error_type;
} __packed;
+struct mlx5_eqe_pps {
+ u8 rsvd0[3];
+ u8 pin;
+ u8 rsvd1[4];
+ union {
+ struct {
+ __be32 time_sec;
+ __be32 time_nsec;
+ };
+ struct {
+ __be64 time_stamp;
+ };
+ };
+ u8 rsvd2[12];
+} __packed;
+
union ev_data {
__be32 raw[7];
struct mlx5_eqe_cmd cmd;
@@ -583,6 +600,7 @@ union ev_data {
struct mlx5_eqe_page_fault page_fault;
struct mlx5_eqe_vport_change vport_change;
struct mlx5_eqe_port_module port_module;
+ struct mlx5_eqe_pps pps;
} __packed;
struct mlx5_eqe {
@@ -952,38 +970,54 @@ enum mlx5_cap_type {
MLX5_CAP_NUM
};
+enum mlx5_pcam_reg_groups {
+ MLX5_PCAM_REGS_5000_TO_507F = 0x0,
+};
+
+enum mlx5_pcam_feature_groups {
+ MLX5_PCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
+enum mlx5_mcam_reg_groups {
+ MLX5_MCAM_REGS_FIRST_128 = 0x0,
+};
+
+enum mlx5_mcam_feature_groups {
+ MLX5_MCAM_FEATURE_ENHANCED_FEATURES = 0x0,
+};
+
/* GET Dev Caps macros */
#define MLX5_CAP_GEN(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_cur[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_GEN_MAX(mdev, cap) \
- MLX5_GET(cmd_hca_cap, mdev->hca_caps_max[MLX5_CAP_GENERAL], cap)
+ MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
#define MLX5_CAP_ETH(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ETH_MAX(mdev, cap) \
MLX5_GET(per_protocol_networking_offload_caps,\
- mdev->hca_caps_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
+ mdev->caps.hca_max[MLX5_CAP_ETHERNET_OFFLOADS], cap)
#define MLX5_CAP_ROCE(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_cur[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_cur[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ROCE_MAX(mdev, cap) \
- MLX5_GET(roce_cap, mdev->hca_caps_max[MLX5_CAP_ROCE], cap)
+ MLX5_GET(roce_cap, mdev->caps.hca_max[MLX5_CAP_ROCE], cap)
#define MLX5_CAP_ATOMIC(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_cur[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_cur[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_ATOMIC_MAX(mdev, cap) \
- MLX5_GET(atomic_caps, mdev->hca_caps_max[MLX5_CAP_ATOMIC], cap)
+ MLX5_GET(atomic_caps, mdev->caps.hca_max[MLX5_CAP_ATOMIC], cap)
#define MLX5_CAP_FLOWTABLE(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_cur[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_cur[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_MAX(mdev, cap) \
- MLX5_GET(flow_table_nic_cap, mdev->hca_caps_max[MLX5_CAP_FLOW_TABLE], cap)
+ MLX5_GET(flow_table_nic_cap, mdev->caps.hca_max[MLX5_CAP_FLOW_TABLE], cap)
#define MLX5_CAP_FLOWTABLE_NIC_RX(mdev, cap) \
MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.cap)
@@ -1005,11 +1039,11 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW_FLOWTABLE(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_MAX(mdev, cap) \
MLX5_GET(flow_table_eswitch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH_FLOW_TABLE], cap)
#define MLX5_CAP_ESW_FLOWTABLE_FDB(mdev, cap) \
MLX5_CAP_ESW_FLOWTABLE(mdev, flow_table_properties_nic_esw_fdb.cap)
@@ -1031,21 +1065,27 @@ enum mlx5_cap_type {
#define MLX5_CAP_ESW(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_cur[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_cur[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ESW_MAX(mdev, cap) \
MLX5_GET(e_switch_cap, \
- mdev->hca_caps_max[MLX5_CAP_ESWITCH], cap)
+ mdev->caps.hca_max[MLX5_CAP_ESWITCH], cap)
#define MLX5_CAP_ODP(mdev, cap)\
- MLX5_GET(odp_cap, mdev->hca_caps_cur[MLX5_CAP_ODP], cap)
+ MLX5_GET(odp_cap, mdev->caps.hca_cur[MLX5_CAP_ODP], cap)
#define MLX5_CAP_VECTOR_CALC(mdev, cap) \
MLX5_GET(vector_calc_cap, \
- mdev->hca_caps_cur[MLX5_CAP_VECTOR_CALC], cap)
+ mdev->caps.hca_cur[MLX5_CAP_VECTOR_CALC], cap)
#define MLX5_CAP_QOS(mdev, cap)\
- MLX5_GET(qos_cap, mdev->hca_caps_cur[MLX5_CAP_QOS], cap)
+ MLX5_GET(qos_cap, mdev->caps.hca_cur[MLX5_CAP_QOS], cap)
+
+#define MLX5_CAP_PCAM_FEATURE(mdev, fld) \
+ MLX5_GET(pcam_reg, (mdev)->caps.pcam, feature_cap_mask.enhanced_features.fld)
+
+#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
enum {
MLX5_CMD_STAT_OK = 0x0,
@@ -1075,9 +1115,14 @@ enum {
MLX5_PER_PRIORITY_COUNTERS_GROUP = 0x10,
MLX5_PER_TRAFFIC_CLASS_COUNTERS_GROUP = 0x11,
MLX5_PHYSICAL_LAYER_COUNTERS_GROUP = 0x12,
+ MLX5_PHYSICAL_LAYER_STATISTICAL_GROUP = 0x16,
MLX5_INFINIBAND_PORT_COUNTERS_GROUP = 0x20,
};
+enum {
+ MLX5_PCIE_PERFORMANCE_COUNTERS_GROUP = 0x0,
+};
+
static inline u16 mlx5_to_sw_pkey_sz(int pkey_sz)
{
if (pkey_sz > MLX5_MAX_LOG_PKEY_TABLE)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3a309f6..1bc4641 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -121,10 +121,15 @@ enum {
MLX5_REG_PVLC = 0x500f,
MLX5_REG_PCMR = 0x5041,
MLX5_REG_PMLP = 0x5002,
+ MLX5_REG_PCAM = 0x507f,
MLX5_REG_NODE_DESC = 0x6001,
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MPCNT = 0x9051,
+ MLX5_REG_MTPPS = 0x9053,
+ MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MCAM = 0x907f,
};
enum mlx5_dcbx_oper_mode {
@@ -172,6 +177,7 @@ enum mlx5_dev_event {
MLX5_DEV_EVENT_PKEY_CHANGE,
MLX5_DEV_EVENT_GUID_CHANGE,
MLX5_DEV_EVENT_CLIENT_REREG,
+ MLX5_DEV_EVENT_PPS,
};
enum mlx5_port_status {
@@ -732,8 +738,12 @@ struct mlx5_core_dev {
char board_id[MLX5_BOARD_ID_LEN];
struct mlx5_cmd cmd;
struct mlx5_port_caps port_caps[MLX5_MAX_PORTS];
- u32 hca_caps_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
- u32 hca_caps_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ struct {
+ u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
+ u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
+ u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ } caps;
phys_addr_t iseg_base;
struct mlx5_init_seg __iomem *iseg;
enum mlx5_device_state state;
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 37327f6..a919dfb 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -365,8 +365,8 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 ip_protocol[0x8];
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
- u8 vlan_tag[0x1];
- u8 reserved_at_91[0x1];
+ u8 cvlan_tag[0x1];
+ u8 svlan_tag[0x1];
u8 frag[0x1];
u8 reserved_at_93[0x4];
u8 tcp_flags[0x9];
@@ -398,9 +398,11 @@ struct mlx5_ifc_fte_match_set_misc_bits {
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0xc];
- u8 outer_second_vlan_tag[0x1];
- u8 inner_second_vlan_tag[0x1];
- u8 reserved_at_62[0xe];
+ u8 outer_second_cvlan_tag[0x1];
+ u8 inner_second_cvlan_tag[0x1];
+ u8 outer_second_svlan_tag[0x1];
+ u8 inner_second_svlan_tag[0x1];
+ u8 reserved_at_64[0xc];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
@@ -545,7 +547,9 @@ struct mlx5_ifc_e_switch_cap_bits {
struct mlx5_ifc_qos_cap_bits {
u8 packet_pacing[0x1];
u8 esw_scheduling[0x1];
- u8 reserved_at_2[0x1e];
+ u8 esw_bw_share[0x1];
+ u8 esw_rate_limit[0x1];
+ u8 reserved_at_4[0x1c];
u8 reserved_at_20[0x20];
@@ -824,7 +828,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
u8 early_vf_enable[0x1];
- u8 reserved_at_1a9[0x2];
+ u8 mcam_reg[0x1];
+ u8 pcam_reg[0x1];
u8 local_ca_ack_delay[0x5];
u8 port_module_event[0x1];
u8 reserved_at_1b1[0x1];
@@ -835,7 +840,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_at_1c0[0x3];
+ u8 reserved_at_1c0[0x1];
+ u8 pps[0x1];
+ u8 pps_modify[0x1];
u8 log_max_msg[0x5];
u8 reserved_at_1c8[0x4];
u8 max_tc[0x4];
@@ -1379,6 +1386,42 @@ struct mlx5_ifc_phys_layer_cntrs_bits {
u8 reserved_at_640[0x180];
};
+struct mlx5_ifc_phys_layer_statistical_cntrs_bits {
+ u8 time_since_last_clear_high[0x20];
+
+ u8 time_since_last_clear_low[0x20];
+
+ u8 phy_received_bits_high[0x20];
+
+ u8 phy_received_bits_low[0x20];
+
+ u8 phy_symbol_errors_high[0x20];
+
+ u8 phy_symbol_errors_low[0x20];
+
+ u8 phy_corrected_bits_high[0x20];
+
+ u8 phy_corrected_bits_low[0x20];
+
+ u8 phy_corrected_bits_lane0_high[0x20];
+
+ u8 phy_corrected_bits_lane0_low[0x20];
+
+ u8 phy_corrected_bits_lane1_high[0x20];
+
+ u8 phy_corrected_bits_lane1_low[0x20];
+
+ u8 phy_corrected_bits_lane2_high[0x20];
+
+ u8 phy_corrected_bits_lane2_low[0x20];
+
+ u8 phy_corrected_bits_lane3_high[0x20];
+
+ u8 phy_corrected_bits_lane3_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
+};
+
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 symbol_error_counter[0x10];
@@ -1761,6 +1804,30 @@ struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
u8 reserved_at_4c0[0x300];
};
+struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits {
+ u8 life_time_counter_high[0x20];
+
+ u8 life_time_counter_low[0x20];
+
+ u8 rx_errors[0x20];
+
+ u8 tx_errors[0x20];
+
+ u8 l0_to_recovery_eieos[0x20];
+
+ u8 l0_to_recovery_ts[0x20];
+
+ u8 l0_to_recovery_framing[0x20];
+
+ u8 l0_to_recovery_retrain[0x20];
+
+ u8 crc_error_dllp[0x20];
+
+ u8 crc_error_tlp[0x20];
+
+ u8 reserved_at_140[0x680];
+};
+
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
@@ -2923,6 +2990,12 @@ union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits ib_port_cntrs_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
+ struct mlx5_ifc_phys_layer_statistical_cntrs_bits phys_layer_statistical_cntrs;
+ u8 reserved_at_0[0x7c0];
+};
+
+union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits {
+ struct mlx5_ifc_pcie_perf_cntrs_grp_data_layout_bits pcie_perf_cntrs_grp_data_layout;
u8 reserved_at_0[0x7c0];
};
@@ -7248,6 +7321,18 @@ struct mlx5_ifc_ppcnt_reg_bits {
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
+struct mlx5_ifc_mpcnt_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 pcie_index[0x8];
+ u8 reserved_at_10[0xa];
+ u8 grp[0x6];
+
+ u8 clr[0x1];
+ u8 reserved_at_21[0x1f];
+
+ union mlx5_ifc_pcie_cntrs_grp_data_layout_auto_bits counter_set;
+};
+
struct mlx5_ifc_ppad_reg_bits {
u8 reserved_at_0[0x3];
u8 single_mac[0x1];
@@ -7477,6 +7562,63 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7e];
+
+ u8 ppcnt_discard_group[0x1];
+ u8 ppcnt_statistical_group[0x1];
+};
+
+struct mlx5_ifc_pcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } port_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_pcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } feature_cap_mask;
+
+ u8 reserved_at_1c0[0xc0];
+};
+
+struct mlx5_ifc_mcam_enhanced_features_bits {
+ u8 reserved_at_0[0x7f];
+
+ u8 pcie_performance_group[0x1];
+};
+
+struct mlx5_ifc_mcam_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 feature_group[0x8];
+ u8 reserved_at_10[0x8];
+ u8 access_reg_group[0x8];
+
+ u8 reserved_at_20[0x20];
+
+ union {
+ u8 reserved_at_0[0x80];
+ } mng_access_reg_cap_mask;
+
+ u8 reserved_at_c0[0x80];
+
+ union {
+ struct mlx5_ifc_mcam_enhanced_features_bits enhanced_features;
+ u8 reserved_at_0[0x80];
+ } mng_feature_cap_mask;
+
+ u8 reserved_at_1c0[0x80];
+};
+
struct mlx5_ifc_pcap_reg_bits {
u8 reserved_at_0[0x8];
u8 local_port[0x8];
@@ -7821,6 +7963,60 @@ struct mlx5_ifc_initial_seg_bits {
u8 reserved_at_80a0[0x17fc0];
};
+struct mlx5_ifc_mtpps_reg_bits {
+ u8 reserved_at_0[0xc];
+ u8 cap_number_of_pps_pins[0x4];
+ u8 reserved_at_10[0x4];
+ u8 cap_max_num_of_pps_in_pins[0x4];
+ u8 reserved_at_18[0x4];
+ u8 cap_max_num_of_pps_out_pins[0x4];
+
+ u8 reserved_at_20[0x24];
+ u8 cap_pin_3_mode[0x4];
+ u8 reserved_at_48[0x4];
+ u8 cap_pin_2_mode[0x4];
+ u8 reserved_at_50[0x4];
+ u8 cap_pin_1_mode[0x4];
+ u8 reserved_at_58[0x4];
+ u8 cap_pin_0_mode[0x4];
+
+ u8 reserved_at_60[0x4];
+ u8 cap_pin_7_mode[0x4];
+ u8 reserved_at_68[0x4];
+ u8 cap_pin_6_mode[0x4];
+ u8 reserved_at_70[0x4];
+ u8 cap_pin_5_mode[0x4];
+ u8 reserved_at_78[0x4];
+ u8 cap_pin_4_mode[0x4];
+
+ u8 reserved_at_80[0x80];
+
+ u8 enable[0x1];
+ u8 reserved_at_101[0xb];
+ u8 pattern[0x4];
+ u8 reserved_at_110[0x4];
+ u8 pin_mode[0x4];
+ u8 pin[0x8];
+
+ u8 reserved_at_120[0x20];
+
+ u8 time_stamp[0x40];
+
+ u8 out_pulse_duration[0x10];
+ u8 out_periodic_adjustment[0x10];
+
+ u8 reserved_at_1a0[0x60];
+};
+
+struct mlx5_ifc_mtppse_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 pin[0x8];
+ u8 event_arm[0x1];
+ u8 reserved_at_21[0x1b];
+ u8 event_generation_mode[0x4];
+ u8 reserved_at_40[0x40];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -7853,6 +8049,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pmtu_reg_bits pmtu_reg;
struct mlx5_ifc_ppad_reg_bits ppad_reg;
struct mlx5_ifc_ppcnt_reg_bits ppcnt_reg;
+ struct mlx5_ifc_mpcnt_reg_bits mpcnt_reg;
struct mlx5_ifc_pplm_reg_bits pplm_reg;
struct mlx5_ifc_pplr_reg_bits pplr_reg;
struct mlx5_ifc_ppsc_reg_bits ppsc_reg;
@@ -7865,6 +8062,8 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
struct mlx5_ifc_sltp_reg_bits sltp_reg;
+ struct mlx5_ifc_mtpps_reg_bits mtpps_reg;
+ struct mlx5_ifc_mtppse_reg_bits mtppse_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index ec35157..656c70b 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -51,6 +51,7 @@ int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
u16 vport, u8 *addr);
int mlx5_query_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 *min_inline);
+void mlx5_query_min_inline(struct mlx5_core_dev *mdev, u8 *min_inline);
int mlx5_modify_nic_vport_min_inline(struct mlx5_core_dev *mdev,
u16 vport, u8 min_inline);
int mlx5_modify_nic_vport_mac_address(struct mlx5_core_dev *dev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index fe6b403..b84615b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1210,8 +1210,8 @@ int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
struct vm_area_struct *vma);
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp);
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp);
int follow_pfn(struct vm_area_struct *vma, unsigned long address,
unsigned long *pfn);
int follow_phys(struct vm_area_struct *vma, unsigned long address,
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 71613e8..41d376e 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -39,7 +39,7 @@ static __always_inline void update_lru_size(struct lruvec *lruvec,
{
__update_lru_size(lruvec, lru, zid, nr_pages);
#ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
+ mem_cgroup_update_lru_size(lruvec, lru, zid, nr_pages);
#endif
}
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 36d9896..f4aac87 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -972,12 +972,16 @@ static __always_inline struct zoneref *next_zones_zonelist(struct zoneref *z,
* @zonelist - The zonelist to search for a suitable zone
* @highest_zoneidx - The zone index of the highest zone to return
* @nodes - An optional nodemask to filter the zonelist with
- * @zone - The first suitable zone found is returned via this parameter
+ * @return - Zoneref pointer for the first suitable zone found (see below)
*
* This function returns the first zone at or below a given zone index that is
* within the allowed nodemask. The zoneref returned is a cursor that can be
* used to iterate the zonelist with next_zones_zonelist by advancing it by
* one before calling.
+ *
+ * When no eligible zone is found, zoneref->zone is NULL (zoneref itself is
+ * never NULL). This may happen either genuinely, or due to a concurrent
+ * nodemask update caused by a cpuset modification.
*/
static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
enum zone_type highest_zoneidx,
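To illustrate the documented return convention, a minimal sketch of a caller; my_preferred_zone is hypothetical, not part of this patch:

static struct zone *my_preferred_zone(struct zonelist *zonelist,
				      enum zone_type highest_zoneidx,
				      nodemask_t *nodes)
{
	struct zoneref *z = first_zones_zonelist(zonelist,
						 highest_zoneidx, nodes);

	/* z itself is never NULL; z->zone is NULL when nothing qualifies */
	return z->zone;
}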
diff --git a/include/linux/mroute.h b/include/linux/mroute.h
index f019b62..d7f6333 100644
--- a/include/linux/mroute.h
+++ b/include/linux/mroute.h
@@ -3,6 +3,7 @@
#include <linux/in.h>
#include <linux/pim.h>
+#include <linux/rhashtable.h>
#include <net/sock.h>
#include <uapi/linux/mroute.h>
@@ -60,7 +61,6 @@ struct vif_device {
#define VIFF_STATIC 0x8000
#define VIF_EXISTS(_mrt, _idx) ((_mrt)->vif_table[_idx].dev != NULL)
-#define MFC_LINES 64
struct mr_table {
struct list_head list;
@@ -69,8 +69,9 @@ struct mr_table {
struct sock __rcu *mroute_sk;
struct timer_list ipmr_expire_timer;
struct list_head mfc_unres_queue;
- struct list_head mfc_cache_array[MFC_LINES];
struct vif_device vif_table[MAXVIFS];
+ struct rhltable mfc_hash;
+ struct list_head mfc_cache_list;
int maxvif;
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
@@ -85,17 +86,48 @@ enum {
MFC_STATIC = BIT(0),
};
+struct mfc_cache_cmp_arg {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+};
+
+/**
+ * struct mfc_cache - multicast routing entries
+ * @mnode: rhashtable list
+ * @mfc_mcastgrp: destination multicast group address
+ * @mfc_origin: source address
+ * @cmparg: used for rhashtable comparisons
+ * @mfc_parent: source interface (iif)
+ * @mfc_flags: entry flags
+ * @expires: unresolved entry expire time
+ * @unresolved: unresolved cached skbs
+ * @last_assert: time of last assert
+ * @minvif: minimum VIF id
+ * @maxvif: maximum VIF id
+ * @bytes: bytes that have passed for this entry
+ * @pkt: packets that have passed for this entry
+ * @wrong_if: number of wrong source interface hits
+ * @lastuse: time of last use of the group (traffic or update)
+ * @ttls: OIF TTL threshold array
+ * @list: global entry list
+ * @rcu: used for entry destruction
+ */
struct mfc_cache {
- struct list_head list;
- __be32 mfc_mcastgrp; /* Group the entry belongs to */
- __be32 mfc_origin; /* Source of packet */
- vifi_t mfc_parent; /* Source interface */
- int mfc_flags; /* Flags on line */
+ struct rhlist_head mnode;
+ union {
+ struct {
+ __be32 mfc_mcastgrp;
+ __be32 mfc_origin;
+ };
+ struct mfc_cache_cmp_arg cmparg;
+ };
+ vifi_t mfc_parent;
+ int mfc_flags;
union {
struct {
unsigned long expires;
- struct sk_buff_head unresolved; /* Unresolved buffers */
+ struct sk_buff_head unresolved;
} unres;
struct {
unsigned long last_assert;
@@ -105,18 +137,13 @@ struct mfc_cache {
unsigned long pkt;
unsigned long wrong_if;
unsigned long lastuse;
- unsigned char ttls[MAXVIFS]; /* TTL thresholds */
+ unsigned char ttls[MAXVIFS];
} res;
} mfc_un;
+ struct list_head list;
struct rcu_head rcu;
};
-#ifdef __BIG_ENDIAN
-#define MFC_HASH(a,b) (((((__force u32)(__be32)a)>>24)^(((__force u32)(__be32)b)>>26))&(MFC_LINES-1))
-#else
-#define MFC_HASH(a,b) ((((__force u32)(__be32)a)^(((__force u32)(__be32)b)>>2))&(MFC_LINES-1))
-#endif
-
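A minimal sketch of the lookup that the rhltable replaces MFC_HASH with; ipmr_rht_params stands in for the rhashtable parameters defined in ipmr.c (keyed on struct mfc_cache_cmp_arg), and my_mfc_find is hypothetical:

static struct mfc_cache *my_mfc_find(struct mr_table *mrt,
				     __be32 origin, __be32 mcastgrp)
{
	struct mfc_cache_cmp_arg arg = {
		.mfc_mcastgrp = mcastgrp,
		.mfc_origin = origin,
	};
	struct rhlist_head *tmp, *list;
	struct mfc_cache *c;

	list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
	rhl_for_each_entry_rcu(c, tmp, list, mnode)
		return c;	/* first entry matching (origin, group) */

	return NULL;
}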
struct rtmsg;
int ipmr_get_route(struct net *net, struct sk_buff *skb,
__be32 saddr, __be32 daddr,
diff --git a/include/linux/mroute6.h b/include/linux/mroute6.h
index 19a1c0c..ce44e3e 100644
--- a/include/linux/mroute6.h
+++ b/include/linux/mroute6.h
@@ -116,7 +116,7 @@ struct mfc6_cache {
struct rtmsg;
extern int ip6mr_get_route(struct net *net, struct sk_buff *skb,
- struct rtmsg *rtm, int nowait, u32 portid);
+ struct rtmsg *rtm, u32 portid);
#ifdef CONFIG_IPV6_MROUTE
extern struct sock *mroute6_socket(struct net *net, struct sk_buff *skb);
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b14ad9c..9511e5a 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -964,11 +964,12 @@ struct netdev_xdp {
* with PF and querying it may introduce a theoretical security risk.
* int (*ndo_set_vf_rss_query_en)(struct net_device *dev, int vf, bool setting);
* int (*ndo_get_vf_port)(struct net_device *dev, int vf, struct sk_buff *skb);
- * int (*ndo_setup_tc)(struct net_device *dev, u8 tc)
- * Called to setup 'tc' number of traffic classes in the net device. This
- * is always called from the stack with the rtnl lock held and netif tx
- * queues stopped. This allows the netdevice to perform queue management
- * safely.
+ * int (*ndo_setup_tc)(struct net_device *dev, u32 handle,
+ * __be16 protocol, struct tc_to_netdev *tc);
+ * Called to set up any 'tc' scheduler, classifier or action on @dev.
+ * This is always called from the stack with the rtnl lock held and netif
+ * tx queues stopped. This allows the netdevice to perform queue
+ * management safely.
*
* Fiber Channel over Ethernet (FCoE) offload functions.
* int (*ndo_fcoe_enable)(struct net_device *dev);
@@ -1551,7 +1552,6 @@ enum netdev_priv_flags {
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
*
- * @last_rx: Time of last Rx
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
*
@@ -1777,8 +1777,6 @@ struct net_device {
/*
* Cache lines mostly used on receive path (including eth_type_trans())
*/
- unsigned long last_rx;
-
/* Interface address info used in eth_type_trans() */
unsigned char *dev_addr;
@@ -2477,14 +2475,19 @@ static inline int skb_gro_header_hard(struct sk_buff *skb, unsigned int hlen)
return NAPI_GRO_CB(skb)->frag0_len < hlen;
}
+static inline void skb_gro_frag0_invalidate(struct sk_buff *skb)
+{
+ NAPI_GRO_CB(skb)->frag0 = NULL;
+ NAPI_GRO_CB(skb)->frag0_len = 0;
+}
+
static inline void *skb_gro_header_slow(struct sk_buff *skb, unsigned int hlen,
unsigned int offset)
{
if (!pskb_may_pull(skb, hlen))
return NULL;
- NAPI_GRO_CB(skb)->frag0 = NULL;
- NAPI_GRO_CB(skb)->frag0_len = 0;
+ skb_gro_frag0_invalidate(skb);
return skb->data + offset;
}
@@ -3101,7 +3104,19 @@ static inline bool netif_subqueue_stopped(const struct net_device *dev,
return __netif_subqueue_stopped(dev, skb_get_queue_mapping(skb));
}
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index);
+/**
+ * netif_wake_subqueue - allow sending packets on subqueue
+ * @dev: network device
+ * @queue_index: sub queue index
+ *
+ * Resume an individual transmit queue of a device with multiple transmit queues.
+ */
+static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
+{
+ struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
+
+ netif_tx_wake_queue(txq);
+}
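Typical use would be from a driver's TX-completion path; a hedged fragment, where priv, qid and MY_WAKE_THRESHOLD are hypothetical:

	if (__netif_subqueue_stopped(dev, qid) &&
	    priv->tx_free[qid] >= MY_WAKE_THRESHOLD)
		netif_wake_subqueue(dev, qid);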
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
@@ -4332,6 +4347,15 @@ do { \
})
#endif
+/* if @cond then downgrade to debug, else print at @level */
+#define netif_cond_dbg(priv, type, netdev, cond, level, fmt, args...) \
+ do { \
+ if (cond) \
+ netif_dbg(priv, type, netdev, fmt, ##args); \
+ else \
+ netif_ ## level(priv, type, netdev, fmt, ##args); \
+ } while (0)
+
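A usage sketch, assuming hypothetical priv, netdev and retrying: on a retryable failure the message is demoted to debug, otherwise it prints at err level:

	netif_cond_dbg(priv, tx_err, netdev, retrying, err,
		       "DMA map of skb failed%s\n",
		       retrying ? ", will retry" : "");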
#if defined(VERBOSE_DEBUG)
#define netif_vdbg netif_dbg
#else
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index aacca82..0a3fadc 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -110,6 +110,7 @@ extern int watchdog_user_enabled;
extern int watchdog_thresh;
extern unsigned long watchdog_enabled;
extern unsigned long *watchdog_cpumask_bits;
+extern atomic_t watchdog_park_in_progress;
#ifdef CONFIG_SMP
extern int sysctl_softlockup_all_cpu_backtrace;
extern int sysctl_hardlockup_all_cpu_backtrace;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index e2d1a12..adbc859 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -885,7 +885,6 @@ void pcibios_setup_bridge(struct pci_bus *bus, unsigned long type);
void pci_sort_breadthfirst(void);
#define dev_is_pci(d) ((d)->bus == &pci_bus_type)
#define dev_is_pf(d) ((dev_is_pci(d) ? to_pci_dev(d)->is_physfn : false))
-#define dev_num_vf(d) ((dev_is_pci(d) ? pci_num_vf(to_pci_dev(d)) : 0))
/* Generic PCI functions exported to card drivers */
@@ -1630,7 +1629,6 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#define dev_is_pci(d) (false)
#define dev_is_pf(d) (false)
-#define dev_num_vf(d) (0)
#endif /* CONFIG_PCI */
/* Include architecture-dependent settings and functions */
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 4741ecd..78ed810 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1259,6 +1259,7 @@ extern void perf_event_disable(struct perf_event *event);
extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
+extern int perf_event_account_interrupt(struct perf_event *event);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
diff --git a/include/linux/phy.h b/include/linux/phy.h
index f7d95f6..43474f3 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -25,7 +25,6 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
-#include <linux/phy_led_triggers.h>
#include <linux/atomic.h>
@@ -158,11 +157,7 @@ static inline const char *phy_modes(phy_interface_t interface)
/* Used when trying to connect to a specific phy (mii bus id:phy device id) */
#define PHY_ID_FMT "%s:%02x"
-/*
- * Need to be a little smaller than phydev->dev.bus_id to leave room
- * for the ":%02x"
- */
-#define MII_BUS_ID_SIZE (20 - 3)
+#define MII_BUS_ID_SIZE 61
/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips. */
@@ -632,7 +627,7 @@ struct phy_driver {
/* A Structure for boards to register fixups with the PHY Lib */
struct phy_fixup {
struct list_head list;
- char bus_id[20];
+ char bus_id[MII_BUS_ID_SIZE + 3];
u32 phy_uid;
u32 phy_uid_mask;
int (*run)(struct phy_device *phydev);
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index a2daea0..b37b05b 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -18,11 +18,11 @@ struct phy_device;
#ifdef CONFIG_LED_TRIGGER_PHY
#include <linux/leds.h>
+#include <linux/phy.h>
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 10
-#define PHY_MII_BUS_ID_SIZE (20 - 3)
-#define PHY_LINK_LED_TRIGGER_NAME_SIZE (PHY_MII_BUS_ID_SIZE + \
+#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
FIELD_SIZEOF(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 321f9ed..01f71e1 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -444,6 +444,10 @@ bool __rcu_is_watching(void);
#error "Unknown RCU implementation specified to kernel configuration"
#endif
+#define RCU_SCHEDULER_INACTIVE 0
+#define RCU_SCHEDULER_INIT 1
+#define RCU_SCHEDULER_RUNNING 2
+
/*
* init_rcu_head_on_stack()/destroy_rcu_head_on_stack() are needed for dynamic
* initialization and destruction of rcu_head on the stack. rcu_head structures
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index e2f3a32..8265d35 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -408,7 +408,8 @@ enum rproc_crash_type {
* @crash_comp: completion used to sync crash handler and the rproc reload
* @recovery_disabled: flag that state if recovery was disabled
* @max_notifyid: largest allocated notify id.
- * @table_ptr: our copy of the resource table
+ * @table_ptr: pointer to the resource table in effect
+ * @cached_table: copy of the resource table
* @has_iommu: flag to indicate if remote processor is behind an MMU
*/
struct rproc {
@@ -440,6 +441,7 @@ struct rproc {
bool recovery_disabled;
int max_notifyid;
struct resource_table *table_ptr;
+ struct resource_table *cached_table;
bool has_iommu;
bool auto_boot;
};
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4d19052..ad3ec9e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -854,6 +854,16 @@ struct signal_struct {
#define SIGNAL_UNKILLABLE 0x00000040 /* for init: ignore fatal signals */
+#define SIGNAL_STOP_MASK (SIGNAL_CLD_MASK | SIGNAL_STOP_STOPPED | \
+ SIGNAL_STOP_CONTINUED)
+
+static inline void signal_set_stop_flags(struct signal_struct *sig,
+ unsigned int flags)
+{
+ WARN_ON(sig->flags & (SIGNAL_GROUP_EXIT|SIGNAL_GROUP_COREDUMP));
+ sig->flags = (sig->flags & ~SIGNAL_STOP_MASK) | flags;
+}
+
/* If true, all threads except ->group_exit_task have pending SIGKILL */
static inline int signal_group_exit(const struct signal_struct *sig)
{
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index fcb4c36..a9e7906 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -108,6 +108,7 @@ typedef enum {
/* Use hex, as defined in ADDIP sec. 3.1 */
SCTP_CID_ASCONF = 0xC1,
SCTP_CID_ASCONF_ACK = 0x80,
+ SCTP_CID_RECONF = 0x82,
} sctp_cid_t; /* enum */
@@ -199,6 +200,13 @@ typedef enum {
SCTP_PARAM_SUCCESS_REPORT = cpu_to_be16(0xc005),
SCTP_PARAM_ADAPTATION_LAYER_IND = cpu_to_be16(0xc006),
+ /* RE-CONFIG. Section 4 */
+ SCTP_PARAM_RESET_OUT_REQUEST = cpu_to_be16(0x000d),
+ SCTP_PARAM_RESET_IN_REQUEST = cpu_to_be16(0x000e),
+ SCTP_PARAM_RESET_TSN_REQUEST = cpu_to_be16(0x000f),
+ SCTP_PARAM_RESET_RESPONSE = cpu_to_be16(0x0010),
+ SCTP_PARAM_RESET_ADD_OUT_STREAMS = cpu_to_be16(0x0011),
+ SCTP_PARAM_RESET_ADD_IN_STREAMS = cpu_to_be16(0x0012),
} sctp_param_t; /* enum */
@@ -710,4 +718,23 @@ struct sctp_infox {
struct sctp_association *asoc;
};
+struct sctp_reconf_chunk {
+ sctp_chunkhdr_t chunk_hdr;
+ __u8 params[0];
+} __packed;
+
+struct sctp_strreset_outreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u32 response_seq;
+ __u32 send_reset_at_tsn;
+ __u16 list_of_streams[0];
+} __packed;
+
+struct sctp_strreset_inreq {
+ sctp_paramhdr_t param_hdr;
+ __u32 request_seq;
+ __u16 list_of_streams[0];
+} __packed;
+
#endif /* __LINUX_SCTP_H__ */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 3149a88..6f63b7e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2485,7 +2485,7 @@ static inline struct sk_buff *netdev_alloc_skb_ip_align(struct net_device *dev,
static inline void skb_free_frag(void *addr)
{
- __free_page_frag(addr);
+ page_frag_free(addr);
}
void *napi_alloc_frag(unsigned int fragsz);
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 084b12b..4c53635 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -226,7 +226,7 @@ static inline const char *__check_heap_object(const void *ptr,
* (PAGE_SIZE*2). Larger requests are passed to the page allocator.
*/
#define KMALLOC_SHIFT_HIGH (PAGE_SHIFT + 1)
-#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT)
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
@@ -239,7 +239,7 @@ static inline const char *__check_heap_object(const void *ptr,
* be allocated from the same page.
*/
#define KMALLOC_SHIFT_HIGH PAGE_SHIFT
-#define KMALLOC_SHIFT_MAX 30
+#define KMALLOC_SHIFT_MAX (MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW 3
#endif
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index e5d1934..7440290 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -66,6 +66,7 @@ struct svc_xprt {
#define XPT_LISTENER 10 /* listening endpoint */
#define XPT_CACHE_AUTH 11 /* cache auth info */
#define XPT_LOCAL 12 /* connection from loopback interface */
+#define XPT_KILL_TEMP 13 /* call xpo_kill_temp_xprt before closing */
struct svc_serv *xpt_server; /* service for transport */
atomic_t xpt_reserved; /* space on outq that is rsvd */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 0c729c3..d971837 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -194,8 +194,6 @@ struct platform_freeze_ops {
};
#ifdef CONFIG_SUSPEND
-extern suspend_state_t mem_sleep_default;
-
/**
* suspend_set_ops - set platform dependent suspend operations
* @ops: The new suspend operations to set.
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 09f4be1..7f47b70 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -150,8 +150,9 @@ enum {
SWP_FILE = (1 << 7), /* set after swap_activate success */
SWP_AREA_DISCARD = (1 << 8), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 9), /* freed swap page-cluster discards */
+ SWP_STABLE_WRITES = (1 << 10), /* no overwrite PG_writeback pages */
/* add others here before... */
- SWP_SCANNING = (1 << 10), /* refcount in scan_swap_map */
+ SWP_SCANNING = (1 << 11), /* refcount in scan_swap_map */
};
#define SWAP_CLUSTER_MAX 32UL
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index fc5848d..f88f464 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -62,8 +62,13 @@ static inline unsigned int tcp_optlen(const struct sk_buff *skb)
/* TCP Fast Open Cookie as stored in memory */
struct tcp_fastopen_cookie {
+ union {
+ u8 val[TCP_FASTOPEN_COOKIE_MAX];
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr addr;
+#endif
+ };
s8 len;
- u8 val[TCP_FASTOPEN_COOKIE_MAX];
bool exp; /* In RFC6994 experimental option format */
};
@@ -207,6 +212,8 @@ struct tcp_sock {
/* Information of the most recently (s)acked skb */
struct tcp_rack {
struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+ u32 rtt_us; /* Associated RTT */
+ u32 end_seq; /* Ending TCP sequence of the skb */
u8 advanced; /* mstamp advanced since last lost marking */
u8 reord; /* reordering detected */
} rack;
@@ -215,15 +222,15 @@ struct tcp_sock {
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
rate_app_limited:1, /* rate_{delivered,interval_us} limited? */
- unused:5;
+ fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
+ unused:4;
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
- thin_dupack : 1,/* Fast retransmit on first dupack */
+ unused1 : 1,
repair : 1,
frto : 1;/* F-RTO (RFC5682) activated in CA_Loss */
u8 repair_queue;
- u8 do_early_retrans:1,/* Enable RFC5827 early-retransmit */
- syn_data:1, /* SYN includes data */
+ u8 syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
@@ -305,7 +312,6 @@ struct tcp_sock {
*/
int lost_cnt_hint;
- u32 retransmit_high; /* L-bits may be on up to this seqno */
u32 prior_ssthresh; /* ssthresh saved at recovery start */
u32 high_seq; /* snd_nxt at onset of congestion */
diff --git a/include/linux/timerfd.h b/include/linux/timerfd.h
index bd36ce4..bab0b1a 100644
--- a/include/linux/timerfd.h
+++ b/include/linux/timerfd.h
@@ -8,23 +8,7 @@
#ifndef _LINUX_TIMERFD_H
#define _LINUX_TIMERFD_H
-/* For O_CLOEXEC and O_NONBLOCK */
-#include <linux/fcntl.h>
-
-/* For _IO helpers */
-#include <linux/ioctl.h>
-
-/*
- * CAREFUL: Check include/asm-generic/fcntl.h when defining
- * new flags, since they might collide with O_* ones. We want
- * to re-use O_* flags that couldn't possibly have a meaning
- * from eventfd, in order to leave a free define-space for
- * shared O_* flags.
- */
-#define TFD_TIMER_ABSTIME (1 << 0)
-#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
-#define TFD_CLOEXEC O_CLOEXEC
-#define TFD_NONBLOCK O_NONBLOCK
+#include <uapi/linux/timerfd.h>
#define TFD_SHARED_FCNTL_FLAGS (TFD_CLOEXEC | TFD_NONBLOCK)
/* Flags for timerfd_create. */
@@ -32,6 +16,4 @@
/* Flags for timerfd_settime. */
#define TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_TIMER_CANCEL_ON_SET)
-#define TFD_IOC_SET_TICKS _IOW('T', 0, u64)
-
#endif /* _LINUX_TIMERFD_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index be00761..cfa475a 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -33,7 +33,8 @@ const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
unsigned int bitmask_size);
const char *trace_print_hex_seq(struct trace_seq *p,
- const unsigned char *buf, int len);
+ const unsigned char *buf, int len,
+ bool spacing);
const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 6620400..5209b5e 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -56,7 +56,8 @@ static inline int virtio_net_hdr_to_skb(struct sk_buff *skb,
static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
struct virtio_net_hdr *hdr,
- bool little_endian)
+ bool little_endian,
+ bool has_data_valid)
{
memset(hdr, 0, sizeof(*hdr)); /* no info leak */
@@ -91,7 +92,8 @@ static inline int virtio_net_hdr_from_skb(const struct sk_buff *skb,
skb_checksum_start_offset(skb));
hdr->csum_offset = __cpu_to_virtio16(little_endian,
skb->csum_offset);
- } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
+ } else if (has_data_valid &&
+ skb->ip_summed == CHECKSUM_UNNECESSARY) {
hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
} /* else everything is zero */
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 1d71644..cfa2ae3 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -41,6 +41,7 @@ struct tc_action {
struct rcu_head tcfa_rcu;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
+ struct tc_cookie *act_cookie;
};
#define tcf_head common.tcfa_head
#define tcf_index common.tcfa_index
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 8f998af..17c6fd8 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -88,9 +88,7 @@ int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
u32 banned_flags);
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
-int ipv4_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
- bool match_wildcard);
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 814be4b..b7aba6e 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -311,6 +311,34 @@ struct ieee80211_supported_band {
struct ieee80211_sta_vht_cap vht_cap;
};
+/**
+ * wiphy_read_of_freq_limits - read frequency limits from device tree
+ *
+ * @wiphy: the wireless device to get extra limits for
+ *
+ * Some devices may have extra limitations specified in DT. This may be useful
+ * for chipsets that normally support more bands but are limited due to board
+ * design (e.g. by antennas or external power amplifier).
+ *
+ * This function reads info from DT and uses it to *modify* channels (disable
+ * unavailable ones). It's usually a *bad* idea to use it in drivers with
+ * shared channel data, as DT limitations are device specific. You should make
+ * sure to call it only if the channels in the wiphy are copied and can be
+ * modified without affecting other devices.
+ *
+ * As this function accesses the device node, it has to be called after
+ * set_wiphy_dev(). It also modifies channels, so they have to be set first.
+ * If using this helper, call it before wiphy_register().
+ */
+#ifdef CONFIG_OF
+void wiphy_read_of_freq_limits(struct wiphy *wiphy);
+#else /* !CONFIG_OF */
+static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+}
+#endif /* CONFIG_OF */
+
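A minimal sketch of the call ordering the comment prescribes; my_wiphy_setup and my_2ghz_channels are hypothetical:

static int my_wiphy_setup(struct wiphy *wiphy, struct device *dev)
{
	struct ieee80211_supported_band *sband =
		wiphy->bands[NL80211_BAND_2GHZ];

	/* channels must be a per-device copy, since the DT read below
	 * modifies them in place */
	sband->channels = kmemdup(my_2ghz_channels,
				  sizeof(my_2ghz_channels), GFP_KERNEL);
	if (!sband->channels)
		return -ENOMEM;
	sband->n_channels = ARRAY_SIZE(my_2ghz_channels);

	set_wiphy_dev(wiphy, dev);		/* must come first */
	wiphy_read_of_freq_limits(wiphy);	/* then apply DT limits */
	return wiphy_register(wiphy);		/* register last */
}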
/*
* Wireless hardware/device configuration structures and methods
*/
@@ -1592,6 +1620,17 @@ struct cfg80211_sched_scan_plan {
};
/**
+ * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
+ *
+ * @band: band of BSS which should match for RSSI level adjustment.
+ * @delta: value of RSSI level adjustment.
+ */
+struct cfg80211_bss_select_adjust {
+ enum nl80211_band band;
+ s8 delta;
+};
+
+/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
@@ -1626,6 +1665,16 @@ struct cfg80211_sched_scan_plan {
* cycle. The driver may ignore this parameter and start
* immediately (or at any other time), if this feature is not
* supported.
+ * @relative_rssi_set: Indicates whether @relative_rssi is set or not.
+ * @relative_rssi: Relative RSSI threshold in dB to restrict scan result
+ * reporting in connected state to cases where a matching BSS is determined
+ * to have better or slightly worse RSSI than the current connected BSS.
+ * The relative RSSI threshold values are ignored in disconnected state.
+ * @rssi_adjust: delta dB of RSSI preference to be given to the BSSs that belong
+ * to the specified band while deciding whether a better BSS is reported
+ * using @relative_rssi. If delta is a negative number, the BSSs that
+ * belong to the specified band will be penalized by delta dB in relative
+ * comparisons.
*/
struct cfg80211_sched_scan_request {
struct cfg80211_ssid *ssids;
@@ -1645,6 +1694,10 @@ struct cfg80211_sched_scan_request {
u8 mac_addr[ETH_ALEN] __aligned(2);
u8 mac_addr_mask[ETH_ALEN] __aligned(2);
+ bool relative_rssi_set;
+ s8 relative_rssi;
+ struct cfg80211_bss_select_adjust rssi_adjust;
+
/* internal */
struct wiphy *wiphy;
struct net_device *dev;
@@ -1953,17 +2006,6 @@ struct cfg80211_ibss_params {
};
/**
- * struct cfg80211_bss_select_adjust - BSS selection with RSSI adjustment.
- *
- * @band: band of BSS which should match for RSSI level adjustment.
- * @delta: value of RSSI level adjustment.
- */
-struct cfg80211_bss_select_adjust {
- enum nl80211_band band;
- s8 delta;
-};
-
-/**
* struct cfg80211_bss_selection - connection parameters for BSS selection.
*
* @behaviour: requested BSS selection behaviour.
@@ -3837,6 +3879,9 @@ struct cfg80211_cached_keys;
* @conn: (private) cfg80211 software SME connection state machine data
* @connect_keys: (private) keys to set after connection is established
* @conn_bss_type: connecting/connected BSS type
+ * @conn_owner_nlportid: (private) connection owner socket port ID
+ * @disconnect_wk: (private) auto-disconnect work
+ * @disconnect_bssid: (private) the BSSID to use for auto-disconnect
* @ibss_fixed: (private) IBSS is using fixed BSSID
* @ibss_dfs_possible: (private) IBSS may change to a DFS channel
* @event_list: (private) list for internal event processing
@@ -3868,6 +3913,10 @@ struct wireless_dev {
struct cfg80211_conn *conn;
struct cfg80211_cached_keys *connect_keys;
enum ieee80211_bss_type conn_bss_type;
+ u32 conn_owner_nlportid;
+
+ struct work_struct disconnect_wk;
+ u8 disconnect_bssid[ETH_ALEN];
struct list_head event_list;
spinlock_t event_lock;
@@ -3955,26 +4004,15 @@ int ieee80211_channel_to_frequency(int chan, enum nl80211_band band);
*/
int ieee80211_frequency_to_channel(int freq);
-/*
- * Name indirection necessary because the ieee80211 code also has
- * a function named "ieee80211_get_channel", so if you include
- * cfg80211's header file you get cfg80211's version, if you try
- * to include both header files you'll (rightfully!) get a symbol
- * clash.
- */
-struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
- int freq);
/**
* ieee80211_get_channel - get channel struct from wiphy for specified frequency
+ *
* @wiphy: the struct wiphy to get the channel for
* @freq: the center frequency of the channel
+ *
* Return: The channel struct from @wiphy at @freq.
*/
-static inline struct ieee80211_channel *
-ieee80211_get_channel(struct wiphy *wiphy, int freq)
-{
- return __ieee80211_get_channel(wiphy, freq);
-}
+struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq);
/**
* ieee80211_get_response_rate - get basic rate for a given rate
@@ -5048,20 +5086,32 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
- * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
- * the real status code for failures.
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
* @gfp: allocation flags
- *
- * It should be called by the underlying driver whenever connect() has
- * succeeded. This is similar to cfg80211_connect_result(), but with the
- * option of identifying the exact bss entry for the connection. Only one of
- * these functions should be called.
+ * @timeout_reason: reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ *
+ * It should be called by the underlying driver once the connection request
+ * from connect() has been completed. This is similar to
+ * cfg80211_connect_result(), but with the option of identifying the exact bss
+ * entry for the connection. Only one of these functions should be called.
*/
void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
struct cfg80211_bss *bss, const u8 *req_ie,
size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp);
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason);
/**
* cfg80211_connect_result - notify cfg80211 of connection result
@@ -5072,13 +5122,15 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @status: status code, 0 for successful connection, use
+ * @status: status code, %WLAN_STATUS_SUCCESS for successful connection, use
* %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
* the real status code for failures.
* @gfp: allocation flags
*
- * It should be called by the underlying driver whenever connect() has
- * succeeded.
+ * It should be called by the underlying driver once the connection request
+ * from connect() has been completed. This is similar to
+ * cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
+ * one of these functions should be called.
*/
static inline void
cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5087,7 +5139,8 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
u16 status, gfp_t gfp)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, resp_ie,
- resp_ie_len, status, gfp);
+ resp_ie_len, status, gfp,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
/**
@@ -5098,6 +5151,7 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* @req_ie: association request IEs (maybe be %NULL)
* @req_ie_len: association request IEs length
* @gfp: allocation flags
+ * @timeout_reason: reason for connection timeout.
*
* It should be called by the underlying driver whenever connect() has failed
* in a sequence where no explicit authentication/association rejection was
@@ -5107,10 +5161,11 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
*/
static inline void
cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len, gfp_t gfp)
+ const u8 *req_ie, size_t req_ie_len, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
{
cfg80211_connect_bss(dev, bssid, NULL, req_ie, req_ie_len, NULL, 0, -1,
- gfp);
+ gfp, timeout_reason);
}
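A sketch of the two reporting paths a driver now chooses between; bssid, req_ie/resp_ie and auth_timed_out are hypothetical driver state, and NL80211_TIMEOUT_AUTH is assumed to be one of the nl80211_timeout_reason values this series introduces:

	if (auth_timed_out)	/* no response frame from the AP at all */
		cfg80211_connect_timeout(dev, bssid, req_ie, req_ie_len,
					 GFP_KERNEL, NL80211_TIMEOUT_AUTH);
	else			/* explicit result from the AP */
		cfg80211_connect_result(dev, bssid, req_ie, req_ie_len,
					resp_ie, resp_ie_len,
					WLAN_STATUS_SUCCESS, GFP_KERNEL);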
/**
diff --git a/include/net/checksum.h b/include/net/checksum.h
index 35d0fab..aef2b2b 100644
--- a/include/net/checksum.h
+++ b/include/net/checksum.h
@@ -179,7 +179,7 @@ static inline __wsum remcsum_adjust(void *ptr, __wsum csum,
static inline void remcsum_unadjust(__sum16 *psum, __wsum delta)
{
- *psum = csum_fold(csum_sub(delta, *psum));
+ *psum = csum_fold(csum_sub(delta, (__force __wsum)*psum));
}
#endif
diff --git a/include/net/dsa.h b/include/net/dsa.h
index b94d1f2..2cb77e6 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -20,6 +20,8 @@
#include <linux/phy_fixed.h>
#include <linux/ethtool.h>
+struct tc_action;
+
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = 0,
DSA_TAG_PROTO_DSA,
@@ -124,7 +126,7 @@ struct dsa_switch_tree {
/*
* The switch and port to which the CPU is attached.
*/
- s8 cpu_switch;
+ struct dsa_switch *cpu_switch;
s8 cpu_port;
/*
@@ -139,11 +141,36 @@ struct dsa_switch_tree {
const struct dsa_device_ops *tag_ops;
};
+/* TC matchall action types, only mirroring for now */
+enum dsa_port_mall_action_type {
+ DSA_PORT_MALL_MIRROR,
+};
+
+/* TC mirroring entry */
+struct dsa_mall_mirror_tc_entry {
+ u8 to_local_port;
+ bool ingress;
+};
+
+/* TC matchall entry */
+struct dsa_mall_tc_entry {
+ struct list_head list;
+ unsigned long cookie;
+ enum dsa_port_mall_action_type type;
+ union {
+ struct dsa_mall_mirror_tc_entry mirror;
+ };
+};
+
struct dsa_port {
+ struct dsa_switch *ds;
+ unsigned int index;
struct net_device *netdev;
struct device_node *dn;
unsigned int ageing_time;
u8 stp_state;
+ struct net_device *bridge_dev;
};
struct dsa_switch {
@@ -178,14 +205,6 @@ struct dsa_switch {
*/
s8 rtable[DSA_MAX_SWITCHES];
-#ifdef CONFIG_NET_DSA_HWMON
- /*
- * Hardware monitoring information
- */
- char hwmon_name[IFNAMSIZ + 8];
- struct device *hwmon_dev;
-#endif
-
/*
* The lower device this switch uses to talk to the host
*/
@@ -198,13 +217,16 @@ struct dsa_switch {
u32 cpu_port_mask;
u32 enabled_port_mask;
u32 phys_mii_mask;
- struct dsa_port ports[DSA_MAX_PORTS];
struct mii_bus *slave_mii_bus;
+
+ /* Dynamically allocated ports, keep last */
+ size_t num_ports;
+ struct dsa_port ports[];
};
static inline bool dsa_is_cpu_port(struct dsa_switch *ds, int p)
{
- return !!(ds->index == ds->dst->cpu_switch && p == ds->dst->cpu_port);
+ return !!(ds == ds->dst->cpu_switch && p == ds->dst->cpu_port);
}
static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
@@ -227,10 +249,10 @@ static inline u8 dsa_upstream_port(struct dsa_switch *ds)
* Else return the (DSA) port number that connects to the
* switch that is one hop closer to the cpu.
*/
- if (dst->cpu_switch == ds->index)
+ if (dst->cpu_switch == ds)
return dst->cpu_port;
else
- return ds->rtable[dst->cpu_switch];
+ return ds->rtable[dst->cpu_switch->index];
}
struct switchdev_trans;
@@ -307,14 +329,6 @@ struct dsa_switch_ops {
int (*get_eee)(struct dsa_switch *ds, int port,
struct ethtool_eee *e);
-#ifdef CONFIG_NET_DSA_HWMON
- /* Hardware monitoring */
- int (*get_temp)(struct dsa_switch *ds, int *temp);
- int (*get_temp_limit)(struct dsa_switch *ds, int *temp);
- int (*set_temp_limit)(struct dsa_switch *ds, int temp);
- int (*get_temp_alarm)(struct dsa_switch *ds, bool *alarm);
-#endif
-
/* EEPROM access */
int (*get_eeprom_len)(struct dsa_switch *ds);
int (*get_eeprom)(struct dsa_switch *ds,
@@ -335,7 +349,8 @@ struct dsa_switch_ops {
int (*set_ageing_time)(struct dsa_switch *ds, unsigned int msecs);
int (*port_bridge_join)(struct dsa_switch *ds, int port,
struct net_device *bridge);
- void (*port_bridge_leave)(struct dsa_switch *ds, int port);
+ void (*port_bridge_leave)(struct dsa_switch *ds, int port,
+ struct net_device *bridge);
void (*port_stp_state_set)(struct dsa_switch *ds, int port,
u8 state);
void (*port_fast_age)(struct dsa_switch *ds, int port);
@@ -386,6 +401,23 @@ struct dsa_switch_ops {
int (*port_mdb_dump)(struct dsa_switch *ds, int port,
struct switchdev_obj_port_mdb *mdb,
int (*cb)(struct switchdev_obj *obj));
+
+ /*
+ * RXNFC
+ */
+ int (*get_rxnfc)(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs);
+ int (*set_rxnfc)(struct dsa_switch *ds, int port,
+ struct ethtool_rxnfc *nfc);
+
+ /*
+ * TC integration
+ */
+ int (*port_mirror_add)(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror,
+ bool ingress);
+ void (*port_mirror_del)(struct dsa_switch *ds, int port,
+ struct dsa_mall_mirror_tc_entry *mirror);
};
struct dsa_switch_driver {
@@ -402,8 +434,9 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
return dst->rcv != NULL;
}
+struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
-int dsa_register_switch(struct dsa_switch *ds, struct device_node *np);
+int dsa_register_switch(struct dsa_switch *ds, struct device *dev);
#ifdef CONFIG_PM_SLEEP
int dsa_switch_suspend(struct dsa_switch *ds);
int dsa_switch_resume(struct dsa_switch *ds);
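With ports now allocated dynamically, a probe path allocates the switch first
and then registers it against the device; a minimal sketch (my_priv and
my_switch_ops are hypothetical driver objects):

	static int my_switch_probe(struct device *dev)
	{
		struct dsa_switch *ds;

		/* Reserve room for DSA_MAX_PORTS dsa_port entries */
		ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
		if (!ds)
			return -ENOMEM;

		ds->priv = my_priv;
		ds->ops = &my_switch_ops;

		/* Registration now takes a struct device, not a device_node */
		return dsa_register_switch(ds, dev);
	}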
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index a0d443c..8a2b66d 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -46,19 +46,12 @@ static inline int dst_entries_get_fast(struct dst_ops *dst)
static inline int dst_entries_get_slow(struct dst_ops *dst)
{
- int res;
-
- local_bh_disable();
- res = percpu_counter_sum_positive(&dst->pcpuc_entries);
- local_bh_enable();
- return res;
+ return percpu_counter_sum_positive(&dst->pcpuc_entries);
}
static inline void dst_entries_add(struct dst_ops *dst, int val)
{
- local_bh_disable();
percpu_counter_add(&dst->pcpuc_entries, val);
- local_bh_enable();
}
static inline int dst_entries_init(struct dst_ops *dst)
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d896a33..ac97030 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -89,6 +89,24 @@ struct flow_dissector_key_addrs {
};
/**
+ * flow_dissector_key_arp:
+ * Operation, source and target addresses for an ARP header
+ * for Ethernet hardware addresses and IPv4 protocol addresses
+ * @sip: Sender IP address
+ * @tip: Target IP address
+ * @op: Operation
+ * @sha: Sender hardware address
+ * @tha: Target hardware address
+ */
+struct flow_dissector_key_arp {
+ __u32 sip;
+ __u32 tip;
+ __u8 op;
+ unsigned char sha[ETH_ALEN];
+ unsigned char tha[ETH_ALEN];
+};
+
+/**
* flow_dissector_key_tp_ports:
* @ports: port numbers of Transport header
* src: source port number
@@ -141,6 +159,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC_ADDRS, /* struct flow_dissector_key_tipc_addrs */
+ FLOW_DISSECTOR_KEY_ARP, /* struct flow_dissector_key_arp */
FLOW_DISSECTOR_KEY_VLAN, /* struct flow_dissector_key_flow_vlan */
FLOW_DISSECTOR_KEY_FLOW_LABEL, /* struct flow_dissector_key_flow_tags */
FLOW_DISSECTOR_KEY_GRE_KEYID, /* struct flow_dissector_key_keyid */
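A dissector user that wants the new ARP key declares interest in
FLOW_DISSECTOR_KEY_ARP and fetches it from its target container; a sketch
using the existing helpers (flow_dissector and target_container stand in for
the caller's usual dissector state):

	if (dissector_uses_key(flow_dissector, FLOW_DISSECTOR_KEY_ARP)) {
		struct flow_dissector_key_arp *key_arp;

		key_arp = skb_flow_dissector_target(flow_dissector,
						    FLOW_DISSECTOR_KEY_ARP,
						    target_container);
		/* key_arp->op, ->sip, ->tip, ->sha, ->tha are now available */
	}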
diff --git a/include/net/if_inet6.h b/include/net/if_inet6.h
index 0fa4c32..f656f90 100644
--- a/include/net/if_inet6.h
+++ b/include/net/if_inet6.h
@@ -205,7 +205,6 @@ struct inet6_dev {
__s32 rs_interval; /* in jiffies */
__u8 rs_probes;
- __u8 addr_gen_mode;
unsigned long tstamp; /* ipv6InterfaceTable update timestamp */
struct rcu_head rcu;
};
diff --git a/include/net/inet6_connection_sock.h b/include/net/inet6_connection_sock.h
index 3212b39..8ec87b6 100644
--- a/include/net/inet6_connection_sock.h
+++ b/include/net/inet6_connection_sock.h
@@ -15,16 +15,11 @@
#include <linux/types.h>
-struct inet_bind_bucket;
struct request_sock;
struct sk_buff;
struct sock;
struct sockaddr;
-int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax,
- bool soreuseport_ok);
-
struct dst_entry *inet6_csk_route_req(const struct sock *sk, struct flowi6 *fl6,
const struct request_sock *req, u8 proto);
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 5d68342..b7952d5 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -17,7 +17,7 @@ int inet_release(struct socket *sock);
int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags);
+ int addr_len, int flags, int is_sendmsg);
int inet_dgram_connect(struct socket *sock, struct sockaddr *uaddr,
int addr_len, int flags);
int inet_accept(struct socket *sock, struct socket *newsock, int flags);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 85ee387..826f198 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -62,9 +62,6 @@ struct inet_connection_sock_af_ops {
char __user *optval, int __user *optlen);
#endif
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
- int (*bind_conflict)(const struct sock *sk,
- const struct inet_bind_bucket *tb,
- bool relax, bool soreuseport_ok);
void (*mtu_reduced)(struct sock *sk);
};
@@ -144,6 +141,7 @@ struct inet_connection_sock {
#define ICSK_TIME_PROBE0 3 /* Zero window probe timer */
#define ICSK_TIME_EARLY_RETRANS 4 /* Early retransmit timer */
#define ICSK_TIME_LOSS_PROBE 5 /* Tail loss probe timer */
+#define ICSK_TIME_REO_TIMEOUT 6 /* Reordering timer */
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
@@ -234,7 +232,8 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
}
if (what == ICSK_TIME_RETRANS || what == ICSK_TIME_PROBE0 ||
- what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE) {
+ what == ICSK_TIME_EARLY_RETRANS || what == ICSK_TIME_LOSS_PROBE ||
+ what == ICSK_TIME_REO_TIMEOUT) {
icsk->icsk_pending = what;
icsk->icsk_timeout = jiffies + when;
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, icsk->icsk_timeout);
@@ -261,9 +260,6 @@ inet_csk_rto_backoff(const struct inet_connection_sock *icsk,
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err);
-int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax,
- bool soreuseport_ok);
int inet_csk_get_port(struct sock *sk, unsigned short snum);
struct dst_entry *inet_csk_route_req(const struct sock *sk, struct flowi4 *fl4,
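ICSK_TIME_REO_TIMEOUT shares the retransmit timer infrastructure, so arming
the RACK reordering timer looks like any other xmit timer; a sketch, where
timeout is assumed to be the remaining reordering window in jiffies:

	inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
				  timeout, TCP_RTO_MAX);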
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 909972a..5894730 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -164,13 +164,7 @@ static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
{
- unsigned int res;
-
- local_bh_disable();
- res = percpu_counter_sum_positive(&nf->mem);
- local_bh_enable();
-
- return res;
+ return percpu_counter_sum_positive(&nf->mem);
}
/* RFC 3168 support :
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index 0574493..1178931 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -74,13 +74,21 @@ struct inet_ehash_bucket {
* users logged onto your box, isn't it nice to know that new data
* ports are created in O(1) time? I thought so. ;-) -DaveM
*/
+#define FASTREUSEPORT_ANY 1
+#define FASTREUSEPORT_STRICT 2
+
struct inet_bind_bucket {
possible_net_t ib_net;
unsigned short port;
signed char fastreuse;
signed char fastreuseport;
kuid_t fastuid;
- int num_owners;
+#if IS_ENABLED(CONFIG_IPV6)
+ struct in6_addr fast_v6_rcv_saddr;
+#endif
+ __be32 fast_rcv_saddr;
+ unsigned short fast_sk_family;
+ bool fast_ipv6_only;
struct hlist_node node;
struct hlist_head owners;
};
@@ -203,10 +211,7 @@ void inet_hashinfo_init(struct inet_hashinfo *h);
bool inet_ehash_insert(struct sock *sk, struct sock *osk);
bool inet_ehash_nolisten(struct sock *sk, struct sock *osk);
-int __inet_hash(struct sock *sk, struct sock *osk,
- int (*saddr_same)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard));
+int __inet_hash(struct sock *sk, struct sock *osk);
int inet_hash(struct sock *sk);
void inet_unhash(struct sock *sk);
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index c9cff97..aa95053 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -206,7 +206,11 @@ struct inet_sock {
transparent:1,
mc_all:1,
nodefrag:1;
- __u8 bind_address_no_port:1;
+ __u8 bind_address_no_port:1,
+ defer_connect:1; /* Indicates that fastopen_connect is set
+ * and cookie exists so we defer connect
+ * until first data frame is written
+ */
__u8 rcv_tos;
__u8 convert_csum;
int uc_index;
diff --git a/include/net/ip.h b/include/net/ip.h
index ab6761a..bf264a8 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -263,11 +263,21 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}
+static inline int inet_prot_sock(struct net *net)
+{
+ return net->ipv4.sysctl_ip_prot_sock;
+}
+
#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
return 0;
}
+
+static inline int inet_prot_sock(struct net *net)
+{
+ return PROT_SOCK;
+}
#endif
__be32 inet_current_timestamp(void);
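With the threshold now per-namespace, a privileged-port check consults
inet_prot_sock() instead of the PROT_SOCK constant; a sketch of the intended
bind-time use:

	if (snum && snum < inet_prot_sock(net) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;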
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 487e573..7afe991 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -871,7 +871,7 @@ int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
* upper-layer output functions
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass);
+ __u32 mark, struct ipv6_txoptions *opt, int tclass);
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr);
diff --git a/include/net/iw_handler.h b/include/net/iw_handler.h
index e0f4109..2509728 100644
--- a/include/net/iw_handler.h
+++ b/include/net/iw_handler.h
@@ -505,25 +505,8 @@ static inline int iwe_stream_event_len_adjust(struct iw_request_info *info,
/*
* Wrapper to add a Wireless Event to a stream of events.
*/
-static inline char *
-iwe_stream_add_event(struct iw_request_info *info, char *stream, char *ends,
- struct iw_event *iwe, int event_len)
-{
- int lcp_len = iwe_stream_lcp_len(info);
-
- event_len = iwe_stream_event_len_adjust(info, event_len);
-
- /* Check if it's possible */
- if(likely((stream + event_len) < ends)) {
- iwe->len = event_len;
- /* Beware of alignement issues on 64 bits */
- memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
- memcpy(stream + lcp_len, &iwe->u,
- event_len - lcp_len);
- stream += event_len;
- }
- return stream;
-}
+char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len);
static inline char *
iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
@@ -541,26 +524,8 @@ iwe_stream_add_event_check(struct iw_request_info *info, char *stream,
* Wrapper to add a short Wireless Event containing a pointer to a
* stream of events.
*/
-static inline char *
-iwe_stream_add_point(struct iw_request_info *info, char *stream, char *ends,
- struct iw_event *iwe, char *extra)
-{
- int event_len = iwe_stream_point_len(info) + iwe->u.data.length;
- int point_len = iwe_stream_point_len(info);
- int lcp_len = iwe_stream_lcp_len(info);
-
- /* Check if it's possible */
- if(likely((stream + event_len) < ends)) {
- iwe->len = event_len;
- memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
- memcpy(stream + lcp_len,
- ((char *) &iwe->u) + IW_EV_POINT_OFF,
- IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
- memcpy(stream + point_len, extra, iwe->u.data.length);
- stream += event_len;
- }
- return stream;
-}
+char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra);
static inline char *
iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
@@ -579,25 +544,8 @@ iwe_stream_add_point_check(struct iw_request_info *info, char *stream,
* Be careful, this one is tricky to use properly :
* At the first run, you need to have (value = event + IW_EV_LCP_LEN).
*/
-static inline char *
-iwe_stream_add_value(struct iw_request_info *info, char *event, char *value,
- char *ends, struct iw_event *iwe, int event_len)
-{
- int lcp_len = iwe_stream_lcp_len(info);
-
- /* Don't duplicate LCP */
- event_len -= IW_EV_LCP_LEN;
-
- /* Check if it's possible */
- if(likely((value + event_len) < ends)) {
- /* Add new value */
- memcpy(value, &iwe->u, event_len);
- value += event_len;
- /* Patch LCP */
- iwe->len = value - event;
- memcpy(event, (char *) iwe, lcp_len);
- }
- return value;
-}
+char *iwe_stream_add_value(struct iw_request_info *info, char *event,
+ char *value, char *ends, struct iw_event *iwe,
+ int event_len);
#endif /* _IW_HANDLER_H */
diff --git a/include/net/lwtunnel.h b/include/net/lwtunnel.h
index d4c1c75..45399ed 100644
--- a/include/net/lwtunnel.h
+++ b/include/net/lwtunnel.h
@@ -33,7 +33,7 @@ struct lwtunnel_state {
};
struct lwtunnel_encap_ops {
- int (*build_state)(struct net_device *dev, struct nlattr *encap,
+ int (*build_state)(struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts);
void (*destroy_state)(struct lwtunnel_state *lws);
@@ -44,6 +44,8 @@ struct lwtunnel_encap_ops {
int (*get_encap_size)(struct lwtunnel_state *lwtstate);
int (*cmp_encap)(struct lwtunnel_state *a, struct lwtunnel_state *b);
int (*xmit)(struct sk_buff *skb);
+
+ struct module *owner;
};
#ifdef CONFIG_LWTUNNEL
@@ -105,7 +107,9 @@ int lwtunnel_encap_add_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
unsigned int num);
-int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+int lwtunnel_valid_encap_type(u16 encap_type);
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len);
+int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws);
@@ -168,7 +172,16 @@ static inline int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *op,
return -EOPNOTSUPP;
}
-static inline int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+static inline int lwtunnel_valid_encap_type(u16 encap_type)
+{
+ return -EOPNOTSUPP;
+}
+static inline int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int len)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap,
unsigned int family, const void *cfg,
struct lwtunnel_state **lws)
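Callers are expected to validate the encap type up front so that unsupported
types fail early instead of deep inside state construction; a sketch:

	err = lwtunnel_valid_encap_type(encap_type);
	if (err < 0)
		return err;

	/* build_state no longer needs the net_device */
	err = lwtunnel_build_state(encap_type, encap, family, cfg, &lws);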
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 5345d35..86967b8 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -147,7 +147,6 @@ enum ieee80211_ac_numbers {
IEEE80211_AC_BE = 2,
IEEE80211_AC_BK = 3,
};
-#define IEEE80211_NUM_ACS 4
/**
* struct ieee80211_tx_queue_params - transmit queue configuration
@@ -1018,7 +1017,7 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_DECRYPTED: This frame was decrypted in hardware.
* @RX_FLAG_MMIC_STRIPPED: the Michael MIC is stripped off this frame,
* verification has been done by the hardware.
- * @RX_FLAG_IV_STRIPPED: The IV/ICV are stripped from this frame.
+ * @RX_FLAG_IV_STRIPPED: The IV and ICV are stripped from this frame.
* If this flag is set, the stack cannot do any replay detection
* hence the driver or hardware will have to do that.
* @RX_FLAG_PN_VALIDATED: Currently only valid for CCMP/GCMP frames, this
@@ -1089,6 +1088,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_ALLOW_SAME_PN: Allow the same PN as same packet before.
* This is used for AMSDU subframes which can have the same PN as
* the first subframe.
+ * @RX_FLAG_ICV_STRIPPED: The ICV is stripped from this frame. CRC checking must
+ * be done in the hardware.
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1124,6 +1125,7 @@ enum mac80211_rx_flags {
RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
+ RX_FLAG_ICV_STRIPPED = BIT_ULL(34),
};
#define RX_FLAG_STBC_SHIFT 26
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 924325c..7dfdb51 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -207,9 +207,9 @@ struct nft_set_iter {
unsigned int skip;
int err;
int (*fn)(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem);
+ struct nft_set_elem *elem);
};
/**
@@ -301,7 +301,7 @@ struct nft_set_ops {
void (*remove)(const struct nft_set *set,
const struct nft_set_elem *elem);
void (*walk)(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
struct nft_set_iter *iter);
unsigned int (*privsize)(const struct nlattr * const nla[]);
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index cbedda0..5ceb220 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -9,6 +9,12 @@ struct nft_fib {
extern const struct nla_policy nft_fib_policy[];
+static inline bool
+nft_fib_is_loopback(const struct sk_buff *skb, const struct net_device *in)
+{
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
+}
+
int nft_fib_dump(struct sk_buff *skb, const struct nft_expr *expr);
int nft_fib_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
const struct nlattr * const tb[]);
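The new helper centralizes the loopback test the fib expressions perform
before doing a route lookup; in an eval path this reduces to the following
(nft_in() is the existing accessor for the input device):

	/* Loopback traffic is never subject to a FIB lookup */
	if (nft_fib_is_loopback(pkt->skb, nft_in(pkt)))
		return;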
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 8e3f5b6..622d2da 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -124,6 +124,10 @@ struct netns_ipv4 {
struct inet_timewait_death_row tcp_death_row;
int sysctl_max_syn_backlog;
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ int sysctl_udp_l3mdev_accept;
+#endif
+
int sysctl_igmp_max_memberships;
int sysctl_igmp_max_msf;
int sysctl_igmp_llm_reports;
@@ -135,6 +139,7 @@ struct netns_ipv4 {
#ifdef CONFIG_SYSCTL
unsigned long *sysctl_local_reserved_ports;
+ int sysctl_ip_prot_sock;
#endif
#ifdef CONFIG_IP_MROUTE
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index c501d67..b7871d0 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -118,6 +118,9 @@ struct netns_sctp {
/* Flag to indicate if PR-SCTP is enabled. */
int prsctp_enable;
+ /* Flag to indicate if RE-CONFIG is enabled. */
+ int reconf_enable;
+
/* Flag to indicate if SCTP-AUTH is enabled */
int auth_enable;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index f0a0514..b43077e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -515,4 +515,12 @@ struct tc_cls_bpf_offload {
u32 gen_flags;
};
+
+/* This structure holds the cookie that is passed from user space
+ * to the kernel for actions and classifiers
+ */
+struct tc_cookie {
+ u8 *data;
+ u32 len;
+};
#endif
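User space hands the cookie over as an opaque netlink attribute; duplicating
it into a tc_cookie might look like this (attr is a hypothetical netlink
attribute carrying the cookie bytes):

	struct tc_cookie *ck;

	ck = kzalloc(sizeof(*ck), GFP_KERNEL);
	if (!ck)
		return -ENOMEM;

	ck->data = kmemdup(nla_data(attr), nla_len(attr), GFP_KERNEL);
	if (!ck->data) {
		kfree(ck);
		return -ENOMEM;
	}
	ck->len = nla_len(attr);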
diff --git a/include/net/psample.h b/include/net/psample.h
new file mode 100644
index 0000000..8888b0e
--- /dev/null
+++ b/include/net/psample.h
@@ -0,0 +1,36 @@
+#ifndef __NET_PSAMPLE_H
+#define __NET_PSAMPLE_H
+
+#include <uapi/linux/psample.h>
+#include <linux/module.h>
+#include <linux/list.h>
+
+struct psample_group {
+ struct list_head list;
+ struct net *net;
+ u32 group_num;
+ u32 refcount;
+ u32 seq;
+};
+
+struct psample_group *psample_group_get(struct net *net, u32 group_num);
+void psample_group_put(struct psample_group *group);
+
+#if IS_ENABLED(CONFIG_PSAMPLE)
+
+void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
+ u32 trunc_size, int in_ifindex, int out_ifindex,
+ u32 sample_rate);
+
+#else
+
+static inline void psample_sample_packet(struct psample_group *group,
+ struct sk_buff *skb, u32 trunc_size,
+ int in_ifindex, int out_ifindex,
+ u32 sample_rate)
+{
+}
+
+#endif
+
+#endif /* __NET_PSAMPLE_H */
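A typical producer resolves the group once, samples packets against it, and
drops the reference when done; a minimal sketch:

	struct psample_group *group;

	group = psample_group_get(net, group_num);
	if (!group)
		return -ENOMEM;

	/* Copy at most trunc_size bytes of the packet to user space */
	psample_sample_packet(group, skb, trunc_size,
			      in_ifindex, out_ifindex, sample_rate);

	psample_group_put(group);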
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 4113916..106de5f 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -139,6 +139,10 @@ struct rtnl_af_ops {
const struct nlattr *attr);
int (*set_link_af)(struct net_device *dev,
const struct nlattr *attr);
+
+ int (*fill_stats_af)(struct sk_buff *skb,
+ const struct net_device *dev);
+ size_t (*get_stats_af_size)(const struct net_device *dev);
};
void __rtnl_af_unregister(struct rtnl_af_ops *ops);
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 5b847e4..3567c97 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -90,6 +90,7 @@ typedef enum {
SCTP_EVENT_TIMEOUT_T4_RTO,
SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ SCTP_EVENT_TIMEOUT_RECONF,
SCTP_EVENT_TIMEOUT_SACK,
SCTP_EVENT_TIMEOUT_AUTOCLOSE,
} sctp_event_timeout_t;
@@ -113,9 +114,10 @@ typedef enum {
SCTP_PRIMITIVE_SEND,
SCTP_PRIMITIVE_REQUESTHEARTBEAT,
SCTP_PRIMITIVE_ASCONF,
+ SCTP_PRIMITIVE_RECONF,
} sctp_event_primitive_t;
-#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_ASCONF
+#define SCTP_EVENT_PRIMITIVE_MAX SCTP_PRIMITIVE_RECONF
#define SCTP_NUM_PRIMITIVE_TYPES (SCTP_EVENT_PRIMITIVE_MAX + 1)
/* We define here a utility type for manipulating subtypes.
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 598d938..3cfd365b 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -141,6 +141,8 @@ int sctp_primitive_ABORT(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_SEND(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_REQUESTHEARTBEAT(struct net *, struct sctp_association *, void *arg);
int sctp_primitive_ASCONF(struct net *, struct sctp_association *, void *arg);
+int sctp_primitive_RECONF(struct net *net, struct sctp_association *asoc,
+ void *arg);
/*
* sctp/input.c
@@ -192,6 +194,12 @@ void sctp_remaddr_proc_exit(struct net *net);
int sctp_offload_init(void);
/*
+ * sctp/stream.c
+ */
+int sctp_send_reset_streams(struct sctp_association *asoc,
+ struct sctp_reset_streams *params);
+
+/*
* Module global variables
*/
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index ca6c971..430ed13 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -157,6 +157,7 @@ sctp_state_fn_t sctp_sf_error_shutdown;
sctp_state_fn_t sctp_sf_ignore_primitive;
sctp_state_fn_t sctp_sf_do_prm_requestheartbeat;
sctp_state_fn_t sctp_sf_do_prm_asconf;
+sctp_state_fn_t sctp_sf_do_prm_reconf;
/* Prototypes for other event state functions. */
sctp_state_fn_t sctp_sf_do_no_pending_tsn;
@@ -167,6 +168,7 @@ sctp_state_fn_t sctp_sf_cookie_wait_icmp_abort;
/* Prototypes for timeout event state functions. */
sctp_state_fn_t sctp_sf_do_6_3_3_rtx;
+sctp_state_fn_t sctp_sf_send_reconf;
sctp_state_fn_t sctp_sf_do_6_2_sack;
sctp_state_fn_t sctp_sf_autoclose_timer_expire;
@@ -259,7 +261,10 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
__u32 new_cum_tsn, size_t nstreams,
struct sctp_fwdtsn_skip *skiplist);
struct sctp_chunk *sctp_make_auth(const struct sctp_association *asoc);
-
+struct sctp_chunk *sctp_make_strreset_req(
+ const struct sctp_association *asoc,
+ __u16 stream_num, __u16 *stream_list,
+ bool out, bool in);
void sctp_chunk_assign_tsn(struct sctp_chunk *);
void sctp_chunk_assign_ssn(struct sctp_chunk *);
@@ -275,6 +280,7 @@ int sctp_do_sm(struct net *net, sctp_event_t event_type, sctp_subtype_t subtype,
/* 2nd level prototypes */
void sctp_generate_t3_rtx_event(unsigned long peer);
void sctp_generate_heartbeat_event(unsigned long peer);
+void sctp_generate_reconf_event(unsigned long peer);
void sctp_generate_proto_unreach_event(unsigned long peer);
void sctp_ootb_pkt_free(struct sctp_packet *);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 4741ec2..231fa9ac 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -877,6 +877,9 @@ struct sctp_transport {
/* Timer to handle ICMP proto unreachable events */
struct timer_list proto_unreach_timer;
+ /* Timer to handle reconf chunk rtx */
+ struct timer_list reconf_timer;
+
/* Since we're using per-destination retransmission timers
* (see above), we're also using per-destination "transmitted"
* queues. This probably ought to be a private struct
@@ -935,6 +938,7 @@ void sctp_transport_pmtu(struct sctp_transport *, struct sock *sk);
void sctp_transport_free(struct sctp_transport *);
void sctp_transport_reset_t3_rtx(struct sctp_transport *);
void sctp_transport_reset_hb_timer(struct sctp_transport *);
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport);
int sctp_transport_hold(struct sctp_transport *);
void sctp_transport_put(struct sctp_transport *);
void sctp_transport_update_rto(struct sctp_transport *, __u32);
@@ -1251,7 +1255,10 @@ struct sctp_endpoint {
struct list_head endpoint_shared_keys;
__u16 active_key_id;
__u8 auth_enable:1,
- prsctp_enable:1;
+ prsctp_enable:1,
+ reconf_enable:1;
+
+ __u8 strreset_enable;
};
/* Recover the outer endpoint structure. */
@@ -1504,6 +1511,7 @@ struct sctp_association {
hostname_address:1, /* Peer understands DNS addresses? */
asconf_capable:1, /* Does peer support ADDIP? */
prsctp_capable:1, /* Can peer do PR-SCTP? */
+ reconf_capable:1, /* Can peer do RE-CONFIG? */
auth_capable:1; /* Is peer doing SCTP-AUTH? */
/* sack_needed : This flag indicates if the next received
@@ -1863,7 +1871,16 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
- prsctp_enable:1;
+ prsctp_enable:1,
+ reconf_enable:1;
+
+ __u8 strreset_enable;
+ __u8 strreset_outstanding; /* request params still in flight */
+
+ __u32 strreset_outseq; /* Update after receiving response */
+ __u32 strreset_inseq; /* Update after receiving request */
+
+ struct sctp_chunk *strreset_chunk; /* save request chunk */
struct sctp_priv_assoc_stats stats;
diff --git a/include/net/sock.h b/include/net/sock.h
index 389a0a6..94e65fd 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -544,8 +544,7 @@ static inline struct sock *sk_nulls_head(const struct hlist_nulls_head *head)
static inline struct sock *sk_next(const struct sock *sk)
{
- return sk->sk_node.next ?
- hlist_entry(sk->sk_node.next, struct sock, sk_node) : NULL;
+ return hlist_entry_safe(sk->sk_node.next, struct sock, sk_node);
}
static inline struct sock *sk_nulls_next(const struct sock *sk)
@@ -1535,7 +1534,7 @@ void sock_efree(struct sk_buff *skb);
#ifdef CONFIG_INET
void sock_edemux(struct sk_buff *skb);
#else
-#define sock_edemux(skb) sock_efree(skb)
+#define sock_edemux sock_efree
#endif
int sock_setsockopt(struct socket *sock, int level, int op,
diff --git a/include/net/tc_act/tc_sample.h b/include/net/tc_act/tc_sample.h
new file mode 100644
index 0000000..89e9305
--- /dev/null
+++ b/include/net/tc_act/tc_sample.h
@@ -0,0 +1,50 @@
+#ifndef __NET_TC_SAMPLE_H
+#define __NET_TC_SAMPLE_H
+
+#include <net/act_api.h>
+#include <linux/tc_act/tc_sample.h>
+#include <net/psample.h>
+
+struct tcf_sample {
+ struct tc_action common;
+ u32 rate;
+ bool truncate;
+ u32 trunc_size;
+ struct psample_group __rcu *psample_group;
+ u32 psample_group_num;
+ struct list_head tcfm_list;
+ struct rcu_head rcu;
+};
+#define to_sample(a) ((struct tcf_sample *)a)
+
+static inline bool is_tcf_sample(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return a->ops && a->ops->type == TCA_ACT_SAMPLE;
+#else
+ return false;
+#endif
+}
+
+static inline __u32 tcf_sample_rate(const struct tc_action *a)
+{
+ return to_sample(a)->rate;
+}
+
+static inline bool tcf_sample_truncate(const struct tc_action *a)
+{
+ return to_sample(a)->truncate;
+}
+
+static inline int tcf_sample_trunc_size(const struct tc_action *a)
+{
+ return to_sample(a)->trunc_size;
+}
+
+static inline struct psample_group *
+tcf_sample_psample_group(const struct tc_action *a)
+{
+ return rcu_dereference(to_sample(a)->psample_group);
+}
+
+#endif /* __NET_TC_SAMPLE_H */
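A driver offloading the sample action reads the parameters through the
accessors, taking rcu_read_lock() around the psample_group dereference
(my_hw_enable_sampling is a hypothetical device hook, not part of this patch):

	if (is_tcf_sample(a)) {
		u32 rate = tcf_sample_rate(a);
		struct psample_group *group;

		rcu_read_lock();
		group = tcf_sample_psample_group(a);
		if (group)
			my_hw_enable_sampling(port, rate, group->group_num);
		rcu_read_unlock();
	}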
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 1da0aa7..6ec4ea6 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -143,6 +143,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
* for local resources.
*/
+#define TCP_REO_TIMEOUT_MIN (2000) /* Min RACK reordering timeout in usec */
#define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
#define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
@@ -261,6 +262,9 @@ extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
+extern int sysctl_tcp_recovery;
+#define TCP_RACK_LOSS_DETECTION 0x1 /* Use RACK to detect losses */
+
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
@@ -397,6 +401,7 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
int tcp_child_process(struct sock *parent, struct sock *child,
struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
@@ -541,6 +546,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
+void tcp_enter_recovery(struct sock *sk, bool ece_ack);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);
@@ -559,7 +565,6 @@ void tcp_skb_collapse_tstamp(struct sk_buff *skb,
const struct sk_buff *next_skb);
/* tcp_input.c */
-void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
@@ -1031,23 +1036,6 @@ static inline void tcp_enable_fack(struct tcp_sock *tp)
tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}
-/* TCP early-retransmit (ER) is similar to but more conservative than
- * the thin-dupack feature. Enable ER only if thin-dupack is disabled.
- */
-static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
-{
- struct net *net = sock_net((struct sock *)tp);
-
- tp->do_early_retrans = sysctl_tcp_early_retrans &&
- sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
- net->ipv4.sysctl_tcp_reordering == 3;
-}
-
-static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
-{
- tp->do_early_retrans = 0;
-}
-
static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
return tp->sacked_out + tp->lost_out;
@@ -1505,6 +1493,9 @@ struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
struct tcp_fastopen_cookie *foc,
struct dst_entry *dst);
void tcp_fastopen_init_key_once(bool publish);
+bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie);
+bool tcp_fastopen_defer_connect(struct sock *sk, int *err);
#define TCP_FASTOPEN_KEY_LENGTH 16
/* Fastopen key context */
@@ -1856,17 +1847,11 @@ void tcp_v4_init(void);
void tcp_init(void);
/* tcp_recovery.c */
-
-/* Flags to enable various loss recovery features. See below */
-extern int sysctl_tcp_recovery;
-
-/* Use TCP RACK to detect (some) tail and retransmit losses */
-#define TCP_RACK_LOST_RETRANS 0x1
-
-extern int tcp_rack_mark_lost(struct sock *sk);
-
-extern void tcp_rack_advance(struct tcp_sock *tp,
- const struct skb_mstamp *xmit_time, u8 sacked);
+extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
+extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+ const struct skb_mstamp *xmit_time,
+ const struct skb_mstamp *ack_time);
+extern void tcp_rack_reo_timeout(struct sock *sk);
/*
* Save and compile IPv4 options, return a pointer to it
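Loss recovery now gates RACK on the consolidated sysctl, so the check reduces
to a flag test; a sketch, where now is assumed to be the caller's skb_mstamp
of the ACK time:

	/* Use RACK to detect losses only when the flag is enabled */
	if (sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION)
		tcp_rack_mark_lost(sk, &now);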
diff --git a/include/net/udp.h b/include/net/udp.h
index 1661791..c9d8b8e 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -204,7 +204,6 @@ static inline void udp_lib_close(struct sock *sk, long timeout)
}
int udp_lib_get_port(struct sock *sk, unsigned short snum,
- int (*)(const struct sock *, const struct sock *, bool),
unsigned int hash2_nulladdr);
u32 udp_flow_hashrnd(void);
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 31947b9..d9a81dc 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -213,6 +213,8 @@ struct xfrm_state {
/* Last used time */
unsigned long lastused;
+ struct page_frag xfrag;
+
/* Reference to data common to all the instances of this
* transformer. */
const struct xfrm_type *type;
@@ -343,7 +345,7 @@ struct xfrm_state_afinfo {
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family);
-void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo);
+struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family);
struct xfrm_input_afinfo {
unsigned int family;
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 958a24d..b567e44 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -352,6 +352,20 @@ static inline int ib_mtu_enum_to_int(enum ib_mtu mtu)
}
}
+static inline enum ib_mtu ib_mtu_int_to_enum(int mtu)
+{
+ if (mtu >= 4096)
+ return IB_MTU_4096;
+ else if (mtu >= 2048)
+ return IB_MTU_2048;
+ else if (mtu >= 1024)
+ return IB_MTU_1024;
+ else if (mtu >= 512)
+ return IB_MTU_512;
+ else
+ return IB_MTU_256;
+}
+
enum ib_port_state {
IB_PORT_NOP = 0,
IB_PORT_DOWN = 1,
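The new inverse helper rounds an integer MTU down to the nearest enum value,
for instance when deriving port attributes from a netdev; a sketch:

	/* A 1500-byte Ethernet MTU rounds down to IB_MTU_1024 */
	attr->max_mtu = ib_mtu_int_to_enum(netdev->mtu);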
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 96dd0b3..da5033d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -809,11 +809,11 @@ static inline void fc_set_wwnn(struct fc_lport *lport, u64 wwnn)
/**
* fc_set_wwpn() - Set the World Wide Port Name of a local port
* @lport: The local port whose WWPN is to be set
- * @wwnn: The new WWPN
+ * @wwpn: The new WWPN
*/
-static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwnn)
+static inline void fc_set_wwpn(struct fc_lport *lport, u64 wwpn)
{
- lport->wwpn = wwnn;
+ lport->wwpn = wwpn;
}
/**
diff --git a/include/sound/hdmi-codec.h b/include/sound/hdmi-codec.h
index 530c57b..915c435 100644
--- a/include/sound/hdmi-codec.h
+++ b/include/sound/hdmi-codec.h
@@ -36,10 +36,10 @@ struct hdmi_codec_daifmt {
HDMI_AC97,
HDMI_SPDIF,
} fmt;
- int bit_clk_inv:1;
- int frame_clk_inv:1;
- int bit_clk_master:1;
- int frame_clk_master:1;
+ unsigned int bit_clk_inv:1;
+ unsigned int frame_clk_inv:1;
+ unsigned int bit_clk_master:1;
+ unsigned int frame_clk_master:1;
};
/*
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 2b502f6..b86168a 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -813,6 +813,7 @@ struct snd_soc_component {
unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
+ struct list_head card_aux_list; /* for auxiliary bound components */
struct list_head card_list;
struct snd_soc_dai_driver *dai_drv;
@@ -1152,6 +1153,7 @@ struct snd_soc_card {
*/
struct snd_soc_aux_dev *aux_dev;
int num_aux_devs;
+ struct list_head aux_comp_list;
const struct snd_kcontrol_new *controls;
int num_controls;
@@ -1547,6 +1549,7 @@ static inline void snd_soc_initialize_card_lists(struct snd_soc_card *card)
INIT_LIST_HEAD(&card->widgets);
INIT_LIST_HEAD(&card->paths);
INIT_LIST_HEAD(&card->dapm_list);
+ INIT_LIST_HEAD(&card->aux_comp_list);
INIT_LIST_HEAD(&card->component_dev_list);
}
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 29e6858..43edf82 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -174,6 +174,10 @@ enum tcm_sense_reason_table {
TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED = R(0x16),
TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED = R(0x17),
TCM_COPY_TARGET_DEVICE_NOT_REACHABLE = R(0x18),
+ TCM_TOO_MANY_TARGET_DESCS = R(0x19),
+ TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE = R(0x1a),
+ TCM_TOO_MANY_SEGMENT_DESCS = R(0x1b),
+ TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE = R(0x1c),
#undef R
};
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
new file mode 100644
index 0000000..c3a53fd
--- /dev/null
+++ b/include/trace/events/bpf.h
@@ -0,0 +1,347 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM bpf
+
+#if !defined(_TRACE_BPF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_BPF_H
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/fs.h>
+#include <linux/tracepoint.h>
+
+#define __PROG_TYPE_MAP(FN) \
+ FN(SOCKET_FILTER) \
+ FN(KPROBE) \
+ FN(SCHED_CLS) \
+ FN(SCHED_ACT) \
+ FN(TRACEPOINT) \
+ FN(XDP) \
+ FN(PERF_EVENT) \
+ FN(CGROUP_SKB) \
+ FN(CGROUP_SOCK) \
+ FN(LWT_IN) \
+ FN(LWT_OUT) \
+ FN(LWT_XMIT)
+
+#define __MAP_TYPE_MAP(FN) \
+ FN(HASH) \
+ FN(ARRAY) \
+ FN(PROG_ARRAY) \
+ FN(PERF_EVENT_ARRAY) \
+ FN(PERCPU_HASH) \
+ FN(PERCPU_ARRAY) \
+ FN(STACK_TRACE) \
+ FN(CGROUP_ARRAY) \
+ FN(LRU_HASH) \
+ FN(LRU_PERCPU_HASH) \
+ FN(LPM_TRIE)
+
+#define __PROG_TYPE_TP_FN(x) \
+ TRACE_DEFINE_ENUM(BPF_PROG_TYPE_##x);
+#define __PROG_TYPE_SYM_FN(x) \
+ { BPF_PROG_TYPE_##x, #x },
+#define __PROG_TYPE_SYM_TAB \
+ __PROG_TYPE_MAP(__PROG_TYPE_SYM_FN) { -1, 0 }
+__PROG_TYPE_MAP(__PROG_TYPE_TP_FN)
+
+#define __MAP_TYPE_TP_FN(x) \
+ TRACE_DEFINE_ENUM(BPF_MAP_TYPE_##x);
+#define __MAP_TYPE_SYM_FN(x) \
+ { BPF_MAP_TYPE_##x, #x },
+#define __MAP_TYPE_SYM_TAB \
+ __MAP_TYPE_MAP(__MAP_TYPE_SYM_FN) { -1, 0 }
+__MAP_TYPE_MAP(__MAP_TYPE_TP_FN)
+
+DECLARE_EVENT_CLASS(bpf_prog_event,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(u32, type)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __entry->type = prg->type;
+ ),
+
+ TP_printk("prog=%s type=%s",
+ __print_hex_str(__entry->prog_tag, 8),
+ __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB))
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_get_type,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg)
+);
+
+DEFINE_EVENT(bpf_prog_event, bpf_prog_put_rcu,
+
+ TP_PROTO(const struct bpf_prog *prg),
+
+ TP_ARGS(prg)
+);
+
+TRACE_EVENT(bpf_prog_load,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd),
+
+ TP_ARGS(prg, ufd),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(u32, type)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __entry->type = prg->type;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("prog=%s type=%s ufd=%d",
+ __print_hex_str(__entry->prog_tag, 8),
+ __print_symbolic(__entry->type, __PROG_TYPE_SYM_TAB),
+ __entry->ufd)
+);
+
+TRACE_EVENT(bpf_map_create,
+
+ TP_PROTO(const struct bpf_map *map, int ufd),
+
+ TP_ARGS(map, ufd),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, size_key)
+ __field(u32, size_value)
+ __field(u32, max_entries)
+ __field(u32, flags)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ __entry->type = map->map_type;
+ __entry->size_key = map->key_size;
+ __entry->size_value = map->value_size;
+ __entry->max_entries = map->max_entries;
+ __entry->flags = map->map_flags;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=%u val=%u max=%u flags=%x",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd, __entry->size_key, __entry->size_value,
+ __entry->max_entries, __entry->flags)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname),
+
+ TP_STRUCT__entry(
+ __array(u8, prog_tag, 8)
+ __field(int, ufd)
+ __string(path, pname->name)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(prg->tag));
+ memcpy(__entry->prog_tag, prg->tag, sizeof(prg->tag));
+ __assign_str(path, pname->name);
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("prog=%s path=%s ufd=%d",
+ __print_hex_str(__entry->prog_tag, 8),
+ __get_str(path), __entry->ufd)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_pin_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_prog, bpf_obj_get_prog,
+
+ TP_PROTO(const struct bpf_prog *prg, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(prg, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_obj_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(int, ufd)
+ __string(path, pname->name)
+ ),
+
+ TP_fast_assign(
+ __assign_str(path, pname->name);
+ __entry->type = map->map_type;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d path=%s",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd, __get_str(path))
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_pin_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname)
+);
+
+DEFINE_EVENT(bpf_obj_map, bpf_obj_get_map,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const struct filename *pname),
+
+ TP_ARGS(map, ufd, pname)
+);
+
+DECLARE_EVENT_CLASS(bpf_map_keyval,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __field(bool, key_trunc)
+ __field(u32, val_len)
+ __dynamic_array(u8, val, map->value_size)
+ __field(bool, val_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ memcpy(__get_dynamic_array(val), val, map->value_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->val_len = min(map->value_size, 16U);
+ __entry->val_trunc = map->value_size != __entry->val_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s] val=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "",
+ __print_hex(__get_dynamic_array(val), __entry->val_len),
+ __entry->val_trunc ? " ..." : "")
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_lookup_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val)
+);
+
+DEFINE_EVENT(bpf_map_keyval, bpf_map_update_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *val),
+
+ TP_ARGS(map, ufd, key, val)
+);
+
+TRACE_EVENT(bpf_map_delete_elem,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key),
+
+ TP_ARGS(map, ufd, key),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __field(bool, key_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "")
+);
+
+TRACE_EVENT(bpf_map_next_key,
+
+ TP_PROTO(const struct bpf_map *map, int ufd,
+ const void *key, const void *key_next),
+
+ TP_ARGS(map, ufd, key, key_next),
+
+ TP_STRUCT__entry(
+ __field(u32, type)
+ __field(u32, key_len)
+ __dynamic_array(u8, key, map->key_size)
+ __dynamic_array(u8, nxt, map->key_size)
+ __field(bool, key_trunc)
+ __field(int, ufd)
+ ),
+
+ TP_fast_assign(
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
+ __entry->type = map->map_type;
+ __entry->key_len = min(map->key_size, 16U);
+ __entry->key_trunc = map->key_size != __entry->key_len;
+ __entry->ufd = ufd;
+ ),
+
+ TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
+ __print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
+ __entry->ufd,
+ __print_hex(__get_dynamic_array(key), __entry->key_len),
+ __entry->key_trunc ? " ..." : "",
+ __print_hex(__get_dynamic_array(nxt), __entry->key_len),
+ __entry->key_trunc ? " ..." : "")
+);
+
+#endif /* _TRACE_BPF_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index c14bed4..88d18a8ce 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -130,8 +130,8 @@ DECLARE_EVENT_CLASS(btrfs__inode,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), gen = %llu, ino = %lu, blocks = %llu, "
- "disk_i_size = %llu, last_trans = %llu, logged_trans = %llu",
+ TP_printk_btrfs("root=%llu(%s) gen=%llu ino=%lu blocks=%llu "
+ "disk_i_size=%llu last_trans=%llu logged_trans=%llu",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->generation,
(unsigned long)__entry->ino,
@@ -184,14 +184,16 @@ DEFINE_EVENT(btrfs__inode, btrfs_inode_evict,
TRACE_EVENT_CONDITION(btrfs_get_extent,
- TP_PROTO(struct btrfs_root *root, struct extent_map *map),
+ TP_PROTO(struct btrfs_root *root, struct inode *inode,
+ struct extent_map *map),
- TP_ARGS(root, map),
+ TP_ARGS(root, inode, map),
TP_CONDITION(map),
TP_STRUCT__entry_btrfs(
__field( u64, root_objectid )
+ __field( u64, ino )
__field( u64, start )
__field( u64, len )
__field( u64, orig_start )
@@ -204,7 +206,8 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TP_fast_assign_btrfs(root->fs_info,
__entry->root_objectid = root->root_key.objectid;
- __entry->start = map->start;
+ __entry->ino = btrfs_ino(inode);
+ __entry->start = map->start;
__entry->len = map->len;
__entry->orig_start = map->orig_start;
__entry->block_start = map->block_start;
@@ -214,11 +217,12 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->compress_type = map->compress_type;
),
- TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu, "
- "orig_start = %llu, block_start = %llu(%s), "
- "block_len = %llu, flags = %s, refs = %u, "
- "compress_type = %u",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu start=%llu len=%llu "
+ "orig_start=%llu block_start=%llu(%s) "
+ "block_len=%llu flags=%s refs=%u "
+ "compress_type=%u",
show_root_type(__entry->root_objectid),
+ (unsigned long long)__entry->ino,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->orig_start,
@@ -259,6 +263,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__field( int, compress_type )
__field( int, refs )
__field( u64, root_objectid )
+ __field( u64, truncated_len )
),
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
@@ -273,18 +278,21 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->refs = atomic_read(&ordered->refs);
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
+ __entry->truncated_len = ordered->truncated_len;
),
- TP_printk_btrfs("root = %llu(%s), ino = %llu, file_offset = %llu, "
- "start = %llu, len = %llu, disk_len = %llu, "
- "bytes_left = %llu, flags = %s, compress_type = %d, "
- "refs = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%llu file_offset=%llu "
+ "start=%llu len=%llu disk_len=%llu "
+ "truncated_len=%llu "
+ "bytes_left=%llu flags=%s compress_type=%d "
+ "refs=%d",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->ino,
(unsigned long long)__entry->file_offset,
(unsigned long long)__entry->start,
(unsigned long long)__entry->len,
(unsigned long long)__entry->disk_len,
+ (unsigned long long)__entry->truncated_len,
(unsigned long long)__entry->bytes_left,
show_ordered_flags(__entry->flags),
__entry->compress_type, __entry->refs)
@@ -354,10 +362,10 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, "
- "nr_to_write = %ld, pages_skipped = %ld, range_start = %llu, "
- "range_end = %llu, for_kupdate = %d, "
- "for_reclaim = %d, range_cyclic = %d, writeback_index = %lu",
+ TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu "
+ "nr_to_write=%ld pages_skipped=%ld range_start=%llu "
+ "range_end=%llu for_kupdate=%d "
+ "for_reclaim=%d range_cyclic=%d writeback_index=%lu",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, __entry->index,
__entry->nr_to_write, __entry->pages_skipped,
@@ -400,8 +408,8 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
BTRFS_I(page->mapping->host)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %lu, page_index = %lu, start = %llu, "
- "end = %llu, uptodate = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%lu page_index=%lu start=%llu "
+ "end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->index,
(unsigned long long)__entry->start,
@@ -433,7 +441,7 @@ TRACE_EVENT(btrfs_sync_file,
BTRFS_I(inode)->root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), ino = %ld, parent = %ld, datasync = %d",
+ TP_printk_btrfs("root=%llu(%s) ino=%ld parent=%ld datasync=%d",
show_root_type(__entry->root_objectid),
(unsigned long)__entry->ino, (unsigned long)__entry->parent,
__entry->datasync)
@@ -484,9 +492,9 @@ TRACE_EVENT(btrfs_add_block_group,
__entry->create = create;
),
- TP_printk("%pU: block_group offset = %llu, size = %llu, "
- "flags = %llu(%s), bytes_used = %llu, bytes_super = %llu, "
- "create = %d", __entry->fsid,
+ TP_printk("%pU: block_group offset=%llu size=%llu "
+ "flags=%llu(%s) bytes_used=%llu bytes_super=%llu "
+ "create=%d", __entry->fsid,
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
(unsigned long long)__entry->flags,
@@ -535,9 +543,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_tree_ref,
__entry->seq = ref->seq;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
- "parent = %llu(%s), ref_root = %llu(%s), level = %d, "
- "type = %s, seq = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+ "parent=%llu(%s) ref_root=%llu(%s) level=%d "
+ "type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -600,9 +608,9 @@ DECLARE_EVENT_CLASS(btrfs_delayed_data_ref,
__entry->seq = ref->seq;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, "
- "parent = %llu(%s), ref_root = %llu(%s), owner = %llu, "
- "offset = %llu, type = %s, seq = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s "
+ "parent=%llu(%s) ref_root=%llu(%s) owner=%llu "
+ "offset=%llu type=%s seq=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -657,7 +665,7 @@ DECLARE_EVENT_CLASS(btrfs_delayed_ref_head,
__entry->is_data = head_ref->is_data;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, action = %s, is_data = %d",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu action=%s is_data=%d",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes,
show_ref_action(__entry->action),
@@ -721,8 +729,8 @@ DECLARE_EVENT_CLASS(btrfs__chunk,
__entry->root_objectid = fs_info->chunk_root->root_key.objectid;
),
- TP_printk_btrfs("root = %llu(%s), offset = %llu, size = %llu, "
- "num_stripes = %d, sub_stripes = %d, type = %s",
+ TP_printk_btrfs("root=%llu(%s) offset=%llu size=%llu "
+ "num_stripes=%d sub_stripes=%d type=%s",
show_root_type(__entry->root_objectid),
(unsigned long long)__entry->offset,
(unsigned long long)__entry->size,
@@ -771,8 +779,8 @@ TRACE_EVENT(btrfs_cow_block,
__entry->cow_level = btrfs_header_level(cow);
),
- TP_printk_btrfs("root = %llu(%s), refs = %d, orig_buf = %llu "
- "(orig_level = %d), cow_buf = %llu (cow_level = %d)",
+ TP_printk_btrfs("root=%llu(%s) refs=%d orig_buf=%llu "
+ "(orig_level=%d) cow_buf=%llu (cow_level=%d)",
show_root_type(__entry->root_objectid),
__entry->refs,
(unsigned long long)__entry->buf_start,
@@ -836,7 +844,7 @@ TRACE_EVENT(btrfs_trigger_flush,
__assign_str(reason, reason)
),
- TP_printk("%pU: %s: flush = %d(%s), flags = %llu(%s), bytes = %llu",
+ TP_printk("%pU: %s: flush=%d(%s) flags=%llu(%s) bytes=%llu",
__entry->fsid, __get_str(reason), __entry->flush,
show_flush_action(__entry->flush),
(unsigned long long)__entry->flags,
@@ -879,8 +887,8 @@ TRACE_EVENT(btrfs_flush_space,
__entry->ret = ret;
),
- TP_printk("%pU: state = %d(%s), flags = %llu(%s), num_bytes = %llu, "
- "orig_bytes = %llu, ret = %d", __entry->fsid, __entry->state,
+ TP_printk("%pU: state=%d(%s) flags=%llu(%s) num_bytes=%llu "
+ "orig_bytes=%llu ret=%d", __entry->fsid, __entry->state,
show_flush_state(__entry->state),
(unsigned long long)__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -905,7 +913,7 @@ DECLARE_EVENT_CLASS(btrfs__reserved_extent,
__entry->len = len;
),
- TP_printk_btrfs("root = %llu(%s), start = %llu, len = %llu",
+ TP_printk_btrfs("root=%llu(%s) start=%llu len=%llu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
(unsigned long long)__entry->start,
(unsigned long long)__entry->len)
@@ -944,7 +952,7 @@ TRACE_EVENT(find_free_extent,
__entry->data = data;
),
- TP_printk_btrfs("root = %Lu(%s), len = %Lu, empty_size = %Lu, flags = %Lu(%s)",
+ TP_printk_btrfs("root=%Lu(%s) len=%Lu empty_size=%Lu flags=%Lu(%s)",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->num_bytes, __entry->empty_size, __entry->data,
__print_flags((unsigned long)__entry->data, "|",
@@ -973,8 +981,8 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
__entry->len = len;
),
- TP_printk_btrfs("root = %Lu(%s), block_group = %Lu, flags = %Lu(%s), "
- "start = %Lu, len = %Lu",
+ TP_printk_btrfs("root=%Lu(%s) block_group=%Lu flags=%Lu(%s) "
+ "start=%Lu len=%Lu",
show_root_type(BTRFS_EXTENT_TREE_OBJECTID),
__entry->bg_objectid,
__entry->flags, __print_flags((unsigned long)__entry->flags,
@@ -1025,8 +1033,8 @@ TRACE_EVENT(btrfs_find_cluster,
__entry->min_bytes = min_bytes;
),
- TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), start = %Lu, len = %Lu,"
- " empty_size = %Lu, min_bytes = %Lu", __entry->bg_objectid,
+ TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) start=%Lu len=%Lu "
+ "empty_size=%Lu min_bytes=%Lu", __entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
BTRFS_GROUP_FLAGS), __entry->start,
@@ -1047,7 +1055,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
__entry->bg_objectid = block_group->key.objectid;
),
- TP_printk_btrfs("block_group = %Lu", __entry->bg_objectid)
+ TP_printk_btrfs("block_group=%Lu", __entry->bg_objectid)
);
TRACE_EVENT(btrfs_setup_cluster,
@@ -1075,8 +1083,8 @@ TRACE_EVENT(btrfs_setup_cluster,
__entry->bitmap = bitmap;
),
- TP_printk_btrfs("block_group = %Lu, flags = %Lu(%s), window_start = %Lu, "
- "size = %Lu, max_size = %Lu, bitmap = %d",
+ TP_printk_btrfs("block_group=%Lu flags=%Lu(%s) window_start=%Lu "
+ "size=%Lu max_size=%Lu bitmap=%d",
__entry->bg_objectid,
__entry->flags,
__print_flags((unsigned long)__entry->flags, "|",
@@ -1103,7 +1111,7 @@ TRACE_EVENT(alloc_extent_state,
__entry->ip = IP
),
- TP_printk("state=%p; mask = %s; caller = %pS", __entry->state,
+ TP_printk("state=%p mask=%s caller=%pS", __entry->state,
show_gfp_flags(__entry->mask), (void *)__entry->ip)
);
@@ -1123,7 +1131,7 @@ TRACE_EVENT(free_extent_state,
__entry->ip = IP
),
- TP_printk(" state=%p; caller = %pS", __entry->state,
+ TP_printk("state=%p caller=%pS", __entry->state,
(void *)__entry->ip)
);
@@ -1151,28 +1159,32 @@ DECLARE_EVENT_CLASS(btrfs__work,
__entry->normal_work = &work->normal_work;
),
- TP_printk_btrfs("work=%p (normal_work=%p), wq=%p, func=%pf, ordered_func=%p,"
- " ordered_free=%p",
+ TP_printk_btrfs("work=%p (normal_work=%p) wq=%p func=%pf ordered_func=%p "
+ "ordered_free=%p",
__entry->work, __entry->normal_work, __entry->wq,
__entry->func, __entry->ordered_func, __entry->ordered_free)
);
-/* For situiations that the work is freed */
+/*
+ * For situations when the work is freed, we pass fs_info and a tag that
+ * matches the address of the work structure so it can be paired with the
+ * scheduling event.
+ */
DECLARE_EVENT_CLASS(btrfs__work__done,
- TP_PROTO(struct btrfs_work *work),
+ TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
- TP_ARGS(work),
+ TP_ARGS(fs_info, wtag),
TP_STRUCT__entry_btrfs(
- __field( void *, work )
+ __field( void *, wtag )
),
- TP_fast_assign_btrfs(btrfs_work_owner(work),
- __entry->work = work;
+ TP_fast_assign_btrfs(fs_info,
+ __entry->wtag = wtag;
),
- TP_printk_btrfs("work->%p", __entry->work)
+ TP_printk_btrfs("work->%p", __entry->wtag)
);
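The rework above changes the pairing model: the queued/sched events still log the work pointer, while the done event now logs fs_info plus an opaque tag. A minimal caller sketch, assuming a helper along these lines in async-thread.c (the function is illustrative; only btrfs_work_owner() and the tracepoint come from the patch):

/* Illustrative: snapshot fs_info and the work's address before the
 * handler runs, because the handler may free 'work'. The tag is never
 * dereferenced; it only lets the done event pair with queued/sched.
 */
static void run_work_sketch(struct btrfs_work *work)
{
	struct btrfs_fs_info *fs_info = btrfs_work_owner(work);
	void *wtag = work;

	work->func(work);			/* may free 'work' */
	trace_btrfs_all_work_done(fs_info, wtag);
}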
DEFINE_EVENT(btrfs__work, btrfs_work_queued,
@@ -1191,9 +1203,9 @@ DEFINE_EVENT(btrfs__work, btrfs_work_sched,
DEFINE_EVENT(btrfs__work__done, btrfs_all_work_done,
- TP_PROTO(struct btrfs_work *work),
+ TP_PROTO(struct btrfs_fs_info *fs_info, void *wtag),
- TP_ARGS(work)
+ TP_ARGS(fs_info, wtag)
);
DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
@@ -1221,7 +1233,7 @@ DECLARE_EVENT_CLASS(btrfs__workqueue,
__entry->high = high;
),
- TP_printk_btrfs("name=%s%s, wq=%p", __get_str(name),
+ TP_printk_btrfs("name=%s%s wq=%p", __get_str(name),
__print_flags(__entry->high, "",
{(WQ_HIGHPRI), "-high"}),
__entry->wq)
@@ -1276,7 +1288,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_data_map,
__entry->free_reserved = free_reserved;
),
- TP_printk_btrfs("rootid=%llu, ino=%lu, free_reserved=%llu",
+ TP_printk_btrfs("rootid=%llu ino=%lu free_reserved=%llu",
__entry->rootid, __entry->ino, __entry->free_reserved)
);
@@ -1323,7 +1335,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_rsv_data,
__entry->op = op;
),
- TP_printk_btrfs("root=%llu, ino=%lu, start=%llu, len=%llu, reserved=%llu, op=%s",
+ TP_printk_btrfs("root=%llu ino=%lu start=%llu len=%llu reserved=%llu op=%s",
__entry->rootid, __entry->ino, __entry->start, __entry->len,
__entry->reserved,
__print_flags((unsigned long)__entry->op, "",
@@ -1361,7 +1373,7 @@ DECLARE_EVENT_CLASS(btrfs__qgroup_delayed_ref,
__entry->reserved = reserved;
),
- TP_printk_btrfs("root=%llu, reserved=%llu, op=free",
+ TP_printk_btrfs("root=%llu reserved=%llu op=free",
__entry->ref_root, __entry->reserved)
);
@@ -1388,7 +1400,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
__entry->num_bytes = rec->num_bytes;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
(unsigned long long)__entry->bytenr,
(unsigned long long)__entry->num_bytes)
);
@@ -1430,8 +1442,8 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
__entry->nr_new_roots = nr_new_roots;
),
- TP_printk_btrfs("bytenr = %llu, num_bytes = %llu, nr_old_roots = %llu, "
- "nr_new_roots = %llu",
+ TP_printk_btrfs("bytenr=%llu num_bytes=%llu nr_old_roots=%llu "
+ "nr_new_roots=%llu",
__entry->bytenr,
__entry->num_bytes,
__entry->nr_old_roots,
@@ -1457,7 +1469,7 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_new_count = cur_new_count;
),
- TP_printk_btrfs("qgid = %llu, cur_old_count = %llu, cur_new_count = %llu",
+ TP_printk_btrfs("qgid=%llu cur_old_count=%llu cur_new_count=%llu",
__entry->qgid,
__entry->cur_old_count,
__entry->cur_new_count)
diff --git a/include/trace/events/mmflags.h b/include/trace/events/mmflags.h
index 9e687ca..15bf875 100644
--- a/include/trace/events/mmflags.h
+++ b/include/trace/events/mmflags.h
@@ -47,8 +47,7 @@
{(unsigned long)__GFP_WRITE, "__GFP_WRITE"}, \
{(unsigned long)__GFP_RECLAIM, "__GFP_RECLAIM"}, \
{(unsigned long)__GFP_DIRECT_RECLAIM, "__GFP_DIRECT_RECLAIM"},\
- {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"},\
- {(unsigned long)__GFP_OTHER_NODE, "__GFP_OTHER_NODE"} \
+ {(unsigned long)__GFP_KSWAPD_RECLAIM, "__GFP_KSWAPD_RECLAIM"}\
#define show_gfp_flags(flags) \
(flags) ? __print_flags(flags, "|", \
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
new file mode 100644
index 0000000..1b61357
--- /dev/null
+++ b/include/trace/events/xdp.h
@@ -0,0 +1,53 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM xdp
+
+#if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_XDP_H
+
+#include <linux/netdevice.h>
+#include <linux/filter.h>
+#include <linux/tracepoint.h>
+
+#define __XDP_ACT_MAP(FN) \
+ FN(ABORTED) \
+ FN(DROP) \
+ FN(PASS) \
+ FN(TX)
+
+#define __XDP_ACT_TP_FN(x) \
+ TRACE_DEFINE_ENUM(XDP_##x);
+#define __XDP_ACT_SYM_FN(x) \
+ { XDP_##x, #x },
+#define __XDP_ACT_SYM_TAB \
+ __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+__XDP_ACT_MAP(__XDP_ACT_TP_FN)
+
+TRACE_EVENT(xdp_exception,
+
+ TP_PROTO(const struct net_device *dev,
+ const struct bpf_prog *xdp, u32 act),
+
+ TP_ARGS(dev, xdp, act),
+
+ TP_STRUCT__entry(
+ __string(name, dev->name)
+ __array(u8, prog_tag, 8)
+ __field(u32, act)
+ ),
+
+ TP_fast_assign(
+ BUILD_BUG_ON(sizeof(__entry->prog_tag) != sizeof(xdp->tag));
+ memcpy(__entry->prog_tag, xdp->tag, sizeof(xdp->tag));
+ __assign_str(name, dev->name);
+ __entry->act = act;
+ ),
+
+ TP_printk("prog=%s device=%s action=%s",
+ __print_hex_str(__entry->prog_tag, 8),
+ __get_str(name),
+ __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB))
+);
+
+#endif /* _TRACE_XDP_H */
+
+#include <trace/define_trace.h>
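To show the intended call site, here is a hedged sketch of a driver RX path emitting the new tracepoint on an unknown verdict; only trace_xdp_exception(), bpf_prog_run_xdp() and the XDP_* actions are real, the surrounding function is hypothetical:

#include <linux/filter.h>
#include <linux/bpf_trace.h>	/* declares trace_xdp_exception() */

static u32 my_rx_handle_xdp(struct net_device *dev, struct bpf_prog *prog,
			    struct xdp_buff *xdp)
{
	u32 act = bpf_prog_run_xdp(prog, xdp);

	switch (act) {
	case XDP_PASS:
	case XDP_TX:
	case XDP_DROP:
		return act;		/* handled by the caller */
	default:
		trace_xdp_exception(dev, prog, act);
		return XDP_ABORTED;
	}
}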
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 467e12f..9f68462 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -297,7 +297,12 @@ TRACE_MAKE_SYSTEM_STR();
#endif
#undef __print_hex
-#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)
+#define __print_hex(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, true)
+
+#undef __print_hex_str
+#define __print_hex_str(buf, buf_len) \
+ trace_print_hex_seq(p, buf, buf_len, false)
#undef __print_array
#define __print_array(array, count, el_size) \
@@ -711,6 +716,7 @@ static inline void ftrace_test_probe_##call(void) \
#undef __print_flags
#undef __print_symbolic
#undef __print_hex
+#undef __print_hex_str
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index a8b93e6..486e050 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -64,6 +64,7 @@ header-y += auto_fs.h
header-y += auxvec.h
header-y += ax25.h
header-y += b1lli.h
+header-y += batman_adv.h
header-y += baycom.h
header-y += bcm933xx_hcs.h
header-y += bfs_fs.h
@@ -305,6 +306,7 @@ header-y += netrom.h
header-y += net_namespace.h
header-y += net_tstamp.h
header-y += nfc.h
+header-y += psample.h
header-y += nfs2.h
header-y += nfs3.h
header-y += nfs4.h
@@ -379,6 +381,10 @@ header-y += sctp.h
header-y += sdla.h
header-y += seccomp.h
header-y += securebits.h
+header-y += seg6_genl.h
+header-y += seg6.h
+header-y += seg6_hmac.h
+header-y += seg6_iptunnel.h
header-y += selinux_netlink.h
header-y += sem.h
header-y += serial_core.h
@@ -414,6 +420,7 @@ header-y += telephony.h
header-y += termios.h
header-y += thermal.h
header-y += time.h
+header-y += timerfd.h
header-y += times.h
header-y += timex.h
header-y += tiocl.h
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 734fe83..a83ddb7 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0eb0e87..e07fd5a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -63,6 +63,12 @@ struct bpf_insn {
__s32 imm; /* signed immediate constant */
};
+/* Key of a BPF_MAP_TYPE_LPM_TRIE entry */
+struct bpf_lpm_trie_key {
+ __u32 prefixlen; /* up to 32 for AF_INET, 128 for AF_INET6 */
+ __u8 data[0]; /* Arbitrary size */
+};
+
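As a quick illustration of the layout, a hypothetical userspace helper; the data length is fixed by the map's key_size (16 bytes for AF_INET6 here):

#include <linux/bpf.h>
#include <arpa/inet.h>
#include <stdlib.h>

/* Encode e.g. 2001:db8::/32: prefixlen counts leading bits, and data[]
 * is big endian, so inet_pton()'s network-order output fits directly.
 */
static struct bpf_lpm_trie_key *make_v6_key(const char *prefix, __u32 plen)
{
	struct bpf_lpm_trie_key *key = malloc(sizeof(*key) + 16);

	if (!key)
		return NULL;
	key->prefixlen = plen;			/* up to 128 */
	inet_pton(AF_INET6, prefix, key->data);
	return key;
}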
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -89,6 +95,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_CGROUP_ARRAY,
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
+ BPF_MAP_TYPE_LPM_TRIE,
};
enum bpf_prog_type {
@@ -430,6 +437,18 @@ union bpf_attr {
* @xdp_md: pointer to xdp_md
* @delta: A positive/negative integer to be added to xdp_md.data
* Return: 0 on success or negative on error
+ *
+ * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * Copy a NUL-terminated string from an unsafe address. If the string
+ * length is smaller than size, the target is not padded with further
+ * NUL bytes. If the string length is larger than size, only size - 1
+ * bytes are copied and the last byte is set to NUL.
+ * @dst: destination address
+ * @size: maximum number of bytes to copy, including the trailing NUL
+ * @unsafe_ptr: unsafe address
+ * Return:
+ * > 0 length of the string including the trailing NUL on success
+ * < 0 error
*/
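A sketch of how a tracing program might call the new helper, in the samples/bpf style of declaring helpers as function pointers (the event struct and the surrounding kprobe plumbing are assumed):

#include <uapi/linux/bpf.h>

/* samples/bpf-style stub resolving to helper id BPF_FUNC_probe_read_str */
static int (*bpf_probe_read_str)(void *dst, int size, const void *unsafe_ptr) =
	(void *) BPF_FUNC_probe_read_str;

struct event {
	char comm[64];
};

/* Returns the copied length including the trailing NUL, or 0 on error.
 * Note the tail of e->comm past the string is left unwritten.
 */
static inline int read_name(struct event *e, const char *unsafe)
{
	int len = bpf_probe_read_str(e->comm, sizeof(e->comm), unsafe);

	return len > 0 ? len : 0;
}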
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -476,7 +495,8 @@ union bpf_attr {
FN(set_hash_invalid), \
FN(get_numa_node_id), \
FN(skb_change_head), \
- FN(xdp_adjust_head),
+ FN(xdp_adjust_head), \
+ FN(probe_read_str),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -502,6 +522,7 @@ enum bpf_func_id {
/* BPF_FUNC_l4_csum_replace flags. */
#define BPF_F_PSEUDO_HDR (1ULL << 4)
#define BPF_F_MARK_MANGLED_0 (1ULL << 5)
+#define BPF_F_MARK_ENFORCE (1ULL << 6)
/* BPF_FUNC_clone_redirect and BPF_FUNC_redirect flags. */
#define BPF_F_INGRESS (1ULL << 0)
diff --git a/include/uapi/linux/can/netlink.h b/include/uapi/linux/can/netlink.h
index 94ffe0c..fdf75f7 100644
--- a/include/uapi/linux/can/netlink.h
+++ b/include/uapi/linux/can/netlink.h
@@ -127,9 +127,16 @@ enum {
IFLA_CAN_BERR_COUNTER,
IFLA_CAN_DATA_BITTIMING,
IFLA_CAN_DATA_BITTIMING_CONST,
+ IFLA_CAN_TERMINATION,
+ IFLA_CAN_TERMINATION_CONST,
+ IFLA_CAN_BITRATE_CONST,
+ IFLA_CAN_DATA_BITRATE_CONST,
__IFLA_CAN_MAX
};
#define IFLA_CAN_MAX (__IFLA_CAN_MAX - 1)
+/* u16 termination range: 1..65535 Ohms */
+#define CAN_TERMINATION_DISABLED 0
+
#endif /* !_UAPI_CAN_NETLINK_H */
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 3cbc327..c451eec 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -1665,14 +1665,15 @@ static inline void cec_msg_report_current_latency(struct cec_msg *msg,
__u8 audio_out_compensated,
__u8 audio_out_delay)
{
- msg->len = 7;
+ msg->len = 6;
msg->msg[0] |= 0xf; /* broadcast */
msg->msg[1] = CEC_MSG_REPORT_CURRENT_LATENCY;
msg->msg[2] = phys_addr >> 8;
msg->msg[3] = phys_addr & 0xff;
msg->msg[4] = video_latency;
msg->msg[5] = (low_latency_mode << 2) | audio_out_compensated;
- msg->msg[6] = audio_out_delay;
+ if (audio_out_compensated == 3)
+ msg->msg[msg->len++] = audio_out_delay;
}
static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
@@ -1686,7 +1687,10 @@ static inline void cec_ops_report_current_latency(const struct cec_msg *msg,
*video_latency = msg->msg[4];
*low_latency_mode = (msg->msg[5] >> 2) & 1;
*audio_out_compensated = msg->msg[5] & 3;
- *audio_out_delay = msg->msg[6];
+ if (*audio_out_compensated == 3 && msg->len >= 7)
+ *audio_out_delay = msg->msg[6];
+ else
+ *audio_out_delay = 0;
}
static inline void cec_msg_request_current_latency(struct cec_msg *msg,
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 6b13e59..b9aa564 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -321,6 +321,7 @@ enum {
IFLA_BRPORT_MULTICAST_ROUTER,
IFLA_BRPORT_PAD,
IFLA_BRPORT_MCAST_FLOOD,
+ IFLA_BRPORT_MCAST_TO_UCAST,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -847,6 +848,7 @@ enum {
IFLA_STATS_LINK_XSTATS,
IFLA_STATS_LINK_XSTATS_SLAVE,
IFLA_STATS_LINK_OFFLOAD_XSTATS,
+ IFLA_STATS_AF_SPEC,
__IFLA_STATS_MAX,
};
diff --git a/include/uapi/linux/igmp.h b/include/uapi/linux/igmp.h
index ccbb32a..a97f9a7 100644
--- a/include/uapi/linux/igmp.h
+++ b/include/uapi/linux/igmp.h
@@ -53,7 +53,7 @@ struct igmpv3_grec {
struct igmpv3_report {
__u8 type;
__u8 resv1;
- __be16 csum;
+ __sum16 csum;
__be16 resv2;
__be16 ngrec;
struct igmpv3_grec grec[0];
@@ -62,7 +62,7 @@ struct igmpv3_report {
struct igmpv3_query {
__u8 type;
__u8 code;
- __be16 csum;
+ __sum16 csum;
__be32 group;
#if defined(__LITTLE_ENDIAN_BITFIELD)
__u8 qrv:3,
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index eaf65dc..8ef9e75 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -182,6 +182,7 @@ enum {
DEVCONF_SEG6_ENABLED,
DEVCONF_SEG6_REQUIRE_HMAC,
DEVCONF_ENHANCED_DAD,
+ DEVCONF_ADDR_GEN_MODE,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/mpls.h b/include/uapi/linux/mpls.h
index 24a6cb1..77a19df 100644
--- a/include/uapi/linux/mpls.h
+++ b/include/uapi/linux/mpls.h
@@ -43,4 +43,34 @@ struct mpls_label {
#define MPLS_LABEL_FIRST_UNRESERVED 16 /* RFC3032 */
+/* These are embedded into IFLA_STATS_AF_SPEC:
+ * [IFLA_STATS_AF_SPEC]
+ * -> [AF_MPLS]
+ * -> [MPLS_STATS_xxx]
+ *
+ * Attributes:
+ * [MPLS_STATS_LINK] = {
+ * struct mpls_link_stats
+ * }
+ */
+enum {
+ MPLS_STATS_UNSPEC, /* also used as 64bit pad attribute */
+ MPLS_STATS_LINK,
+ __MPLS_STATS_MAX,
+};
+
+#define MPLS_STATS_MAX (__MPLS_STATS_MAX - 1)
+
+struct mpls_link_stats {
+ __u64 rx_packets; /* total packets received */
+ __u64 tx_packets; /* total packets transmitted */
+ __u64 rx_bytes; /* total bytes received */
+ __u64 tx_bytes; /* total bytes transmitted */
+ __u64 rx_errors; /* bad packets received */
+ __u64 tx_errors; /* packet transmit problems */
+ __u64 rx_dropped; /* packet dropped on receive */
+ __u64 tx_dropped; /* packet dropped on transmit */
+ __u64 rx_noroute; /* no route for packet dest */
+};
+
#endif /* _UAPI_MPLS_H */
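A hypothetical userspace walk over that nesting, using raw rtattr iteration (the caller is assumed to have already located the AF_MPLS nest inside IFLA_STATS_AF_SPEC of an RTM_GETSTATS reply):

#include <linux/rtnetlink.h>
#include <linux/mpls.h>
#include <stdio.h>

static void show_mpls_link_stats(struct rtattr *mpls_nest)
{
	int len = RTA_PAYLOAD(mpls_nest);
	struct rtattr *a;

	for (a = RTA_DATA(mpls_nest); RTA_OK(a, len); a = RTA_NEXT(a, len)) {
		if (a->rta_type == MPLS_STATS_LINK) {
			struct mpls_link_stats *s = RTA_DATA(a);

			printf("rx %llu tx %llu rx_noroute %llu\n",
			       (unsigned long long)s->rx_packets,
			       (unsigned long long)s->tx_packets,
			       (unsigned long long)s->rx_noroute);
		}
	}
}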
diff --git a/include/uapi/linux/netfilter/nf_log.h b/include/uapi/linux/netfilter/nf_log.h
index 8be21e0..d0b5fa9 100644
--- a/include/uapi/linux/netfilter/nf_log.h
+++ b/include/uapi/linux/netfilter/nf_log.h
@@ -9,4 +9,6 @@
#define NF_LOG_MACDECODE 0x20 /* Decode MAC header */
#define NF_LOG_MASK 0x2f
+#define NF_LOG_PREFIXLEN 128
+
#endif /* _NETFILTER_NF_LOG_H */
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 881d49e..e3f27e0 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -235,7 +235,7 @@ enum nft_rule_compat_flags {
/**
* enum nft_rule_compat_attributes - nf_tables rule compat attributes
*
- * @NFTA_RULE_COMPAT_PROTO: numerice value of handled protocol (NLA_U32)
+ * @NFTA_RULE_COMPAT_PROTO: numeric value of handled protocol (NLA_U32)
* @NFTA_RULE_COMPAT_FLAGS: bitmask of enum nft_rule_compat_flags (NLA_U32)
*/
enum nft_rule_compat_attributes {
@@ -499,7 +499,7 @@ enum nft_bitwise_attributes {
* enum nft_byteorder_ops - nf_tables byteorder operators
*
* @NFT_BYTEORDER_NTOH: network to host operator
- * @NFT_BYTEORDER_HTON: host to network opertaor
+ * @NFT_BYTEORDER_HTON: host to network operator
*/
enum nft_byteorder_ops {
NFT_BYTEORDER_NTOH,
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 6b76e3b..d6c62ee 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -1772,7 +1772,9 @@ enum nl80211_commands {
*
* @NL80211_ATTR_OPMODE_NOTIF: Operating mode field from Operating Mode
* Notification Element based on association request when used with
- * %NL80211_CMD_NEW_STATION; u8 attribute.
+ * %NL80211_CMD_NEW_STATION or %NL80211_CMD_SET_STATION (only when
+ * %NL80211_FEATURE_FULL_AP_CLIENT_STATE is supported, or with TDLS);
+ * u8 attribute.
*
* @NL80211_ATTR_VENDOR_ID: The vendor ID, either a 24-bit OUI or, if
* %NL80211_VENDOR_ID_IS_LINUX is set, a special Linux ID (not used yet)
@@ -1820,6 +1822,8 @@ enum nl80211_commands {
* and remove functions. NAN notifications will be sent in unicast to that
* socket. Without this attribute, any socket can add functions and the
* notifications will be sent to the %NL80211_MCGRP_NAN multicast group.
+ * If set during %NL80211_CMD_ASSOCIATE or %NL80211_CMD_CONNECT the
+ * station will deauthenticate when the socket is closed.
*
* @NL80211_ATTR_TDLS_INITIATOR: flag attribute indicating the current end is
* the TDLS link initiator.
@@ -1980,6 +1984,24 @@ enum nl80211_commands {
* @NL80211_ATTR_BSSID: The BSSID of the AP. Note that %NL80211_ATTR_MAC is also
* used in various commands/events for specifying the BSSID.
*
+ * @NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI: Relative RSSI threshold by which
+ * other BSSs have to be better or slightly worse than the currently
+ * connected BSS so that they get reported to user space.
+ * This will give userspace an opportunity to consider connecting to
+ * other matching BSSs which have better or slightly worse RSSI than
+ * the currently connected BSS by using an offloaded operation to avoid
+ * unnecessary wakeups.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST: When present, the RSSI level for BSSs in
+ * the specified band is to be adjusted before doing
+ * %NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI based comparison to figure out
+ * better BSSs. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
+ *
+ * @NL80211_ATTR_TIMEOUT_REASON: The reason for which an operation timed out.
+ * u32 attribute with an &enum nl80211_timeout_reason value. This is used,
+ * e.g., with %NL80211_CMD_CONNECT event.
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2386,6 +2408,11 @@ enum nl80211_attrs {
NL80211_ATTR_BSSID,
+ NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+ NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+
+ NL80211_ATTR_TIMEOUT_REASON,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3078,6 +3105,13 @@ enum nl80211_reg_rule_attr {
* how this API was implemented in the past. Also, due to the same problem,
* the only way to create a matchset with only an RSSI filter (with this
* attribute) is if there's only a single matchset with the RSSI attribute.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI: Flag indicating whether
+ * %NL80211_SCHED_SCAN_MATCH_ATTR_RSSI is to be used as an absolute RSSI or
+ * relative to the current BSS's RSSI.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST: When present, the RSSI level for
+ * BSSs in the specified band is to be adjusted before doing
+ * RSSI-based BSS selection. The attribute value is a packed structure
+ * value as specified by &struct nl80211_bss_select_rssi_adjust.
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3087,6 +3121,8 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_SSID,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
+ NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
+ NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -4697,6 +4733,13 @@ enum nl80211_feature_flags {
* configuration (AP/mesh) with VHT rates.
* @NL80211_EXT_FEATURE_FILS_STA: This driver supports Fast Initial Link Setup
* with user space SME (NL80211_CMD_AUTHENTICATE) in station mode.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA: This driver supports randomized TA
+ * in @NL80211_CMD_FRAME while not associated.
+ * @NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED: This driver supports
+ * randomized TA in @NL80211_CMD_FRAME while associated.
+ * @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
+ * for reporting BSSs with better RSSI than the current connected BSS
+ * (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4712,6 +4755,9 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_BEACON_RATE_HT,
NL80211_EXT_FEATURE_BEACON_RATE_VHT,
NL80211_EXT_FEATURE_FILS_STA,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
+ NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4751,6 +4797,21 @@ enum nl80211_connect_failed_reason {
};
/**
+ * enum nl80211_timeout_reason - timeout reasons
+ *
+ * @NL80211_TIMEOUT_UNSPECIFIED: Timeout reason unspecified.
+ * @NL80211_TIMEOUT_SCAN: Scan (AP discovery) timed out.
+ * @NL80211_TIMEOUT_AUTH: Authentication timed out.
+ * @NL80211_TIMEOUT_ASSOC: Association timed out.
+ */
+enum nl80211_timeout_reason {
+ NL80211_TIMEOUT_UNSPECIFIED,
+ NL80211_TIMEOUT_SCAN,
+ NL80211_TIMEOUT_AUTH,
+ NL80211_TIMEOUT_ASSOC,
+};
+
+/**
* enum nl80211_scan_flags - scan request control flags
*
* Scan request control flags are used to control the handling
@@ -4964,8 +5025,9 @@ enum nl80211_sched_scan_plan {
/**
* struct nl80211_bss_select_rssi_adjust - RSSI adjustment parameters.
*
- * @band: band of BSS that must match for RSSI value adjustment.
- * @delta: value used to adjust the RSSI value of matching BSS.
+ * @band: band of BSS that must match for RSSI value adjustment. The value
+ * of this field is according to &enum nl80211_band.
+ * @delta: value used to adjust the RSSI value of matching BSS in dB.
*/
struct nl80211_bss_select_rssi_adjust {
__u8 band;
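Filling the packed structure from userspace might look as follows (illustrative values; how the bytes are attached as NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST is left to the netlink library in use):

#include <linux/nl80211.h>
#include <string.h>

/* Treat 5 GHz scan results as 10 dB stronger before the
 * relative-RSSI comparison (illustrative policy).
 */
static void fill_rssi_adjust(void *attr_payload)
{
	struct nl80211_bss_select_rssi_adjust adj = {
		.band  = NL80211_BAND_5GHZ,	/* from enum nl80211_band */
		.delta = 10,			/* dB */
	};

	memcpy(attr_payload, &adj, sizeof(adj));
}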
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index a081efb..345551e 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -4,6 +4,8 @@
#include <linux/types.h>
#include <linux/pkt_sched.h>
+#define TC_COOKIE_MAX_SIZE 16
+
/* Action attributes */
enum {
TCA_ACT_UNSPEC,
@@ -12,6 +14,7 @@ enum {
TCA_ACT_INDEX,
TCA_ACT_STATS,
TCA_ACT_PAD,
+ TCA_ACT_COOKIE,
__TCA_ACT_MAX
};
@@ -342,7 +345,7 @@ enum {
TCA_BPF_NAME,
TCA_BPF_FLAGS,
TCA_BPF_FLAGS_GEN,
- TCA_BPF_DIGEST,
+ TCA_BPF_TAG,
__TCA_BPF_MAX,
};
@@ -416,6 +419,17 @@ enum {
TCA_FLOWER_KEY_ICMPV6_TYPE, /* u8 */
TCA_FLOWER_KEY_ICMPV6_TYPE_MASK,/* u8 */
+ TCA_FLOWER_KEY_ARP_SIP, /* be32 */
+ TCA_FLOWER_KEY_ARP_SIP_MASK, /* be32 */
+ TCA_FLOWER_KEY_ARP_TIP, /* be32 */
+ TCA_FLOWER_KEY_ARP_TIP_MASK, /* be32 */
+ TCA_FLOWER_KEY_ARP_OP, /* u8 */
+ TCA_FLOWER_KEY_ARP_OP_MASK, /* u8 */
+ TCA_FLOWER_KEY_ARP_SHA, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_SHA_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
+ TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
+
__TCA_FLOWER_MAX,
};
diff --git a/include/uapi/linux/psample.h b/include/uapi/linux/psample.h
new file mode 100644
index 0000000..ed48996
--- /dev/null
+++ b/include/uapi/linux/psample.h
@@ -0,0 +1,35 @@
+#ifndef __UAPI_PSAMPLE_H
+#define __UAPI_PSAMPLE_H
+
+enum {
+ /* sampled packet metadata */
+ PSAMPLE_ATTR_IIFINDEX,
+ PSAMPLE_ATTR_OIFINDEX,
+ PSAMPLE_ATTR_ORIGSIZE,
+ PSAMPLE_ATTR_SAMPLE_GROUP,
+ PSAMPLE_ATTR_GROUP_SEQ,
+ PSAMPLE_ATTR_SAMPLE_RATE,
+ PSAMPLE_ATTR_DATA,
+
+ /* command attributes */
+ PSAMPLE_ATTR_GROUP_REFCOUNT,
+
+ __PSAMPLE_ATTR_MAX
+};
+
+enum psample_command {
+ PSAMPLE_CMD_SAMPLE,
+ PSAMPLE_CMD_GET_GROUP,
+ PSAMPLE_CMD_NEW_GROUP,
+ PSAMPLE_CMD_DEL_GROUP,
+};
+
+/* Can be overridden at runtime by module option */
+#define PSAMPLE_ATTR_MAX (__PSAMPLE_ATTR_MAX - 1)
+
+#define PSAMPLE_NL_MCGRP_CONFIG_NAME "config"
+#define PSAMPLE_NL_MCGRP_SAMPLE_NAME "packets"
+#define PSAMPLE_GENL_NAME "psample"
+#define PSAMPLE_GENL_VERSION 1
+
+#endif
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index a406adc..03c27ce 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -115,6 +115,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_PR_SUPPORTED 113
#define SCTP_DEFAULT_PRINFO 114
#define SCTP_PR_ASSOC_STATUS 115
+#define SCTP_ENABLE_STREAM_RESET 118
+#define SCTP_RESET_STREAMS 119
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -138,6 +140,15 @@ typedef __s32 sctp_assoc_t;
#define SCTP_PR_RTX_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_RTX)
#define SCTP_PR_PRIO_ENABLED(x) (SCTP_PR_POLICY(x) == SCTP_PR_SCTP_PRIO)
+/* For enable stream reset */
+#define SCTP_ENABLE_RESET_STREAM_REQ 0x01
+#define SCTP_ENABLE_RESET_ASSOC_REQ 0x02
+#define SCTP_ENABLE_CHANGE_ASSOC_REQ 0x04
+#define SCTP_ENABLE_STRRESET_MASK 0x07
+
+#define SCTP_STREAM_RESET_INCOMING 0x01
+#define SCTP_STREAM_RESET_OUTGOING 0x02
+
/* These are bit fields for msghdr->msg_flags. See section 5.1. */
/* On user space Linux, these live in <bits/socket.h> as an enum. */
enum sctp_msg_flags {
@@ -1008,4 +1019,11 @@ struct sctp_info {
__u32 __reserved3;
};
+struct sctp_reset_streams {
+ sctp_assoc_t srs_assoc_id;
+ uint16_t srs_flags;
+ uint16_t srs_number_streams; /* 0 == ALL */
+ uint16_t srs_stream_list[]; /* list if srs_number_streams is not 0 */
+};
+
#endif /* _UAPI_SCTP_H */
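A hedged userspace sketch of the two new options together, assuming the enable option takes a struct sctp_assoc_value whose assoc_value carries the SCTP_ENABLE_* mask (error handling trimmed):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>
#include <stdint.h>
#include <stdlib.h>

static int reset_two_outgoing(int fd, sctp_assoc_t assoc)
{
	struct sctp_assoc_value av = {
		.assoc_id    = assoc,
		.assoc_value = SCTP_ENABLE_RESET_STREAM_REQ,
	};
	size_t len = sizeof(struct sctp_reset_streams) + 2 * sizeof(uint16_t);
	struct sctp_reset_streams *srs;
	int ret;

	if (setsockopt(fd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
		       &av, sizeof(av)) < 0)
		return -1;

	srs = calloc(1, len);
	if (!srs)
		return -1;
	srs->srs_assoc_id = assoc;
	srs->srs_flags = SCTP_STREAM_RESET_OUTGOING;
	srs->srs_number_streams = 2;
	srs->srs_stream_list[0] = 1;
	srs->srs_stream_list[1] = 2;
	ret = setsockopt(fd, IPPROTO_SCTP, SCTP_RESET_STREAMS, srs, len);
	free(srs);
	return ret;
}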
diff --git a/include/uapi/linux/seg6.h b/include/uapi/linux/seg6.h
index c396a80..3349659 100644
--- a/include/uapi/linux/seg6.h
+++ b/include/uapi/linux/seg6.h
@@ -14,6 +14,8 @@
#ifndef _UAPI_LINUX_SEG6_H
#define _UAPI_LINUX_SEG6_H
+#include <linux/types.h>
+
/*
* SRH
*/
diff --git a/include/uapi/linux/seg6_hmac.h b/include/uapi/linux/seg6_hmac.h
index b652dfd..e691c75 100644
--- a/include/uapi/linux/seg6_hmac.h
+++ b/include/uapi/linux/seg6_hmac.h
@@ -1,6 +1,7 @@
#ifndef _UAPI_LINUX_SEG6_HMAC_H
#define _UAPI_LINUX_SEG6_HMAC_H
+#include <linux/types.h>
#include <linux/seg6.h>
#define SEG6_HMAC_SECRET_LEN 64
diff --git a/include/uapi/linux/seg6_iptunnel.h b/include/uapi/linux/seg6_iptunnel.h
index 0f7dbd2..7a7183d 100644
--- a/include/uapi/linux/seg6_iptunnel.h
+++ b/include/uapi/linux/seg6_iptunnel.h
@@ -33,6 +33,8 @@ enum {
SEG6_IPTUN_MODE_ENCAP,
};
+#ifdef __KERNEL__
+
static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
{
int encap = (tuninfo->mode == SEG6_IPTUN_MODE_ENCAP);
@@ -42,3 +44,5 @@ static inline size_t seg6_lwt_headroom(struct seg6_iptunnel_encap *tuninfo)
}
#endif
+
+#endif
diff --git a/include/uapi/linux/tc_act/Kbuild b/include/uapi/linux/tc_act/Kbuild
index e3db740..ba62ddf 100644
--- a/include/uapi/linux/tc_act/Kbuild
+++ b/include/uapi/linux/tc_act/Kbuild
@@ -4,6 +4,7 @@ header-y += tc_defact.h
header-y += tc_gact.h
header-y += tc_ipt.h
header-y += tc_mirred.h
+header-y += tc_sample.h
header-y += tc_nat.h
header-y += tc_pedit.h
header-y += tc_skbedit.h
diff --git a/include/uapi/linux/tc_act/tc_bpf.h b/include/uapi/linux/tc_act/tc_bpf.h
index a6b88a6..975b50d 100644
--- a/include/uapi/linux/tc_act/tc_bpf.h
+++ b/include/uapi/linux/tc_act/tc_bpf.h
@@ -27,7 +27,7 @@ enum {
TCA_ACT_BPF_FD,
TCA_ACT_BPF_NAME,
TCA_ACT_BPF_PAD,
- TCA_ACT_BPF_DIGEST,
+ TCA_ACT_BPF_TAG,
__TCA_ACT_BPF_MAX,
};
#define TCA_ACT_BPF_MAX (__TCA_ACT_BPF_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_sample.h b/include/uapi/linux/tc_act/tc_sample.h
new file mode 100644
index 0000000..edc9058
--- /dev/null
+++ b/include/uapi/linux/tc_act/tc_sample.h
@@ -0,0 +1,26 @@
+#ifndef __LINUX_TC_SAMPLE_H
+#define __LINUX_TC_SAMPLE_H
+
+#include <linux/types.h>
+#include <linux/pkt_cls.h>
+#include <linux/if_ether.h>
+
+#define TCA_ACT_SAMPLE 26
+
+struct tc_sample {
+ tc_gen;
+};
+
+enum {
+ TCA_SAMPLE_UNSPEC,
+ TCA_SAMPLE_TM,
+ TCA_SAMPLE_PARMS,
+ TCA_SAMPLE_RATE,
+ TCA_SAMPLE_TRUNC_SIZE,
+ TCA_SAMPLE_PSAMPLE_GROUP,
+ TCA_SAMPLE_PAD,
+ __TCA_SAMPLE_MAX
+};
+#define TCA_SAMPLE_MAX (__TCA_SAMPLE_MAX - 1)
+
+#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index c53de26..38a2b07 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -116,6 +116,7 @@ enum {
#define TCP_SAVE_SYN 27 /* Record SYN headers for new connections */
#define TCP_SAVED_SYN 28 /* Get SYN headers recorded for connection */
#define TCP_REPAIR_WINDOW 29 /* Get/set window parameters */
+#define TCP_FASTOPEN_CONNECT 30 /* Attempt FastOpen with connect */
struct tcp_repair_opt {
__u32 opt_code;
@@ -226,6 +227,8 @@ enum {
TCP_NLA_BUSY, /* Time (usec) busy sending data */
TCP_NLA_RWND_LIMITED, /* Time (usec) limited by receive window */
TCP_NLA_SNDBUF_LIMITED, /* Time (usec) limited by send buffer */
+ TCP_NLA_DATA_SEGS_OUT, /* Data pkts sent including retransmission */
+ TCP_NLA_TOTAL_RETRANS, /* Data pkts retransmitted */
};
/* for TCP_MD5SIG socket option */
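A sketch of the intended client-side flow for the new option: set it before connect(), after which the first write carries the data in the SYN once a Fast Open cookie is cached (the fallback define assumes libc headers predate the option):

#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/tcp.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30	/* value from the hunk above */
#endif

static int connect_fastopen(int fd, const struct sockaddr *sa, socklen_t len)
{
	int one = 1;

	if (setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT,
		       &one, sizeof(one)) < 0)
		return -1;
	/* connect() returns quickly; the first write()/send() emits the
	 * SYN, with data attached once a Fast Open cookie is available.
	 */
	return connect(fd, sa, len);
}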
diff --git a/include/uapi/linux/timerfd.h b/include/uapi/linux/timerfd.h
new file mode 100644
index 0000000..6fcfaa8
--- /dev/null
+++ b/include/uapi/linux/timerfd.h
@@ -0,0 +1,36 @@
+/*
+ * include/linux/timerfd.h
+ *
+ * Copyright (C) 2007 Davide Libenzi <davidel@xmailserver.org>
+ *
+ */
+
+#ifndef _UAPI_LINUX_TIMERFD_H
+#define _UAPI_LINUX_TIMERFD_H
+
+#include <linux/types.h>
+
+/* For O_CLOEXEC and O_NONBLOCK */
+#include <linux/fcntl.h>
+
+/* For _IO helpers */
+#include <linux/ioctl.h>
+
+/*
+ * CAREFUL: Check include/asm-generic/fcntl.h when defining
+ * new flags, since they might collide with O_* ones. We want
+ * to re-use O_* flags that couldn't possibly have a meaning
+ * from eventfd, in order to leave a free define-space for
+ * shared O_* flags.
+ *
+ * Also make sure to update the masks in include/linux/timerfd.h
+ * when adding new flags.
+ */
+#define TFD_TIMER_ABSTIME (1 << 0)
+#define TFD_TIMER_CANCEL_ON_SET (1 << 1)
+#define TFD_CLOEXEC O_CLOEXEC
+#define TFD_NONBLOCK O_NONBLOCK
+
+#define TFD_IOC_SET_TICKS _IOW('T', 0, __u64)
+
+#endif /* _UAPI_LINUX_TIMERFD_H */
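These flags back the familiar userspace API; a minimal sketch using the glibc wrappers (error handling trimmed):

#include <sys/timerfd.h>
#include <time.h>
#include <stdint.h>
#include <unistd.h>

static int wait_until(time_t deadline)
{
	struct itimerspec its = { .it_value.tv_sec = deadline };
	uint64_t expirations;
	int fd = timerfd_create(CLOCK_REALTIME, TFD_CLOEXEC);

	if (fd < 0 || timerfd_settime(fd, TFD_TIMER_ABSTIME, &its, NULL) < 0)
		return -1;
	/* blocks (no TFD_NONBLOCK) until the absolute deadline passes */
	read(fd, &expirations, sizeof(expirations));
	close(fd);
	return 0;
}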
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index bf049e8..5351b08 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -1,7 +1,7 @@
/*
* include/uapi/linux/tipc.h: Header for TIPC socket interface
*
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2015-2016 Ericsson AB
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
*
@@ -220,7 +220,7 @@ struct sockaddr_tipc {
#define TIPC_DESTNAME 3 /* destination name */
/*
- * TIPC-specific socket option values
+ * TIPC-specific socket option names
*/
#define TIPC_IMPORTANCE 127 /* Default: TIPC_LOW_IMPORTANCE */
@@ -229,6 +229,8 @@ struct sockaddr_tipc {
#define TIPC_CONN_TIMEOUT 130 /* Default: 8000 (ms) */
#define TIPC_NODE_RECVQ_DEPTH 131 /* Default: none (read only) */
#define TIPC_SOCK_RECVQ_DEPTH 132 /* Default: none (read only) */
+#define TIPC_MCAST_BROADCAST 133 /* Default: TIPC selects. No arg */
+#define TIPC_MCAST_REPLICAST 134 /* Default: TIPC selects. No arg */
/*
* Maximum sizes of TIPC bearer-related names (including terminating NULL)
diff --git a/include/uapi/rdma/Kbuild b/include/uapi/rdma/Kbuild
index 82bdf56..bb68cb1 100644
--- a/include/uapi/rdma/Kbuild
+++ b/include/uapi/rdma/Kbuild
@@ -16,3 +16,4 @@ header-y += nes-abi.h
header-y += ocrdma-abi.h
header-y += hns-abi.h
header-y += vmw_pvrdma-abi.h
+header-y += qedr-abi.h
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
index 48a19bd..d24eee1 100644
--- a/include/uapi/rdma/cxgb3-abi.h
+++ b/include/uapi/rdma/cxgb3-abi.h
@@ -30,7 +30,7 @@
* SOFTWARE.
*/
#ifndef CXGB3_ABI_USER_H
-#define CXBG3_ABI_USER_H
+#define CXGB3_ABI_USER_H
#include <linux/types.h>
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 85dc966..da7cd62 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -90,6 +90,17 @@ enum mlx5_user_cmds_supp_uhw {
MLX5_USER_CMDS_SUPP_UHW_CREATE_AH = 1 << 1,
};
+/* The eth_min_inline response value is set to off-by-one vs the FW
+ * returned value to allow user-space to deal with older kernels.
+ */
+enum mlx5_user_inline_mode {
+ MLX5_USER_INLINE_MODE_NA,
+ MLX5_USER_INLINE_MODE_NONE,
+ MLX5_USER_INLINE_MODE_L2,
+ MLX5_USER_INLINE_MODE_IP,
+ MLX5_USER_INLINE_MODE_TCP_UDP,
+};
+
struct mlx5_ib_alloc_ucontext_resp {
__u32 qp_tab_size;
__u32 bf_reg_size;
@@ -106,7 +117,8 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 response_length;
__u8 cqe_version;
__u8 cmds_supp_uhw;
- __u16 reserved2;
+ __u8 eth_min_inline;
+ __u8 reserved2;
__u64 hca_core_clock_offset;
__u32 log_uar_size;
__u32 num_uars_per_page;
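Given that encoding, a consumer might decode the field like this (hypothetical helper; MLX5_USER_INLINE_MODE_NA doubles as "kernel too old to report", since older kernels leave the former reserved byte zeroed):

/* Hypothetical decode: the response is the FW mode plus one. */
static int eth_min_inline_mode(__u8 eth_min_inline)
{
	if (eth_min_inline == MLX5_USER_INLINE_MODE_NA)
		return -1;		/* unknown / old kernel */
	return eth_min_inline - 1;	/* FW inline mode */
}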
diff --git a/init/Kconfig b/init/Kconfig
index 223b734..e1a93734 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1176,6 +1176,10 @@ config CGROUP_DEBUG
Say N.
+config SOCK_CGROUP_DATA
+ bool
+ default n
+
endif # CGROUPS
config CHECKPOINT_RESTORE
diff --git a/ipc/sem.c b/ipc/sem.c
index e08b948..3ec5742 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -1977,7 +1977,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
}
rcu_read_lock();
- sem_lock(sma, sops, nsops);
+ locknum = sem_lock(sma, sops, nsops);
if (!ipc_valid_object(&sma->sem_perm))
goto out_unlock_free;
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 1276474..e1ce4f4 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,7 +1,7 @@
obj-y := core.o
obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o
-obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o
+obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o
ifeq ($(CONFIG_PERF_EVENTS),y)
obj-$(CONFIG_BPF_SYSCALL) += stackmap.o
endif
diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
index a2ac051..3d55d95 100644
--- a/kernel/bpf/arraymap.c
+++ b/kernel/bpf/arraymap.c
@@ -11,7 +11,6 @@
*/
#include <linux/bpf.h>
#include <linux/err.h>
-#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
@@ -56,7 +55,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
attr->value_size == 0 || attr->map_flags)
return ERR_PTR(-EINVAL);
- if (attr->value_size >= 1 << (KMALLOC_SHIFT_MAX - 1))
+ if (attr->value_size > KMALLOC_MAX_SIZE)
/* if value_size is bigger, the user space won't be able to
* access the elements.
*/
@@ -74,14 +73,10 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
if (array_size >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-ENOMEM);
-
/* allocate all map elements and zero-initialize them */
- array = kzalloc(array_size, GFP_USER | __GFP_NOWARN);
- if (!array) {
- array = vzalloc(array_size);
- if (!array)
- return ERR_PTR(-ENOMEM);
- }
+ array = bpf_map_area_alloc(array_size);
+ if (!array)
+ return ERR_PTR(-ENOMEM);
/* copy mandatory map attributes */
array->map.map_type = attr->map_type;
@@ -97,7 +92,7 @@ static struct bpf_map *array_map_alloc(union bpf_attr *attr)
if (array_size >= U32_MAX - PAGE_SIZE ||
elem_size > PCPU_MIN_UNIT_SIZE || bpf_array_alloc_percpu(array)) {
- kvfree(array);
+ bpf_map_area_free(array);
return ERR_PTR(-ENOMEM);
}
out:
@@ -262,7 +257,7 @@ static void array_map_free(struct bpf_map *map)
if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
bpf_array_free_percpu(array);
- kvfree(array);
+ bpf_map_area_free(array);
}
static const struct bpf_map_ops array_ops = {
@@ -319,7 +314,8 @@ static void fd_array_map_free(struct bpf_map *map)
/* make sure it's empty */
for (i = 0; i < array->map.max_entries; i++)
BUG_ON(array->ptrs[i] != NULL);
- kvfree(array);
+
+ bpf_map_area_free(array);
}
static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 1eb4f13..fddd76b 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -146,10 +146,11 @@ void __bpf_prog_free(struct bpf_prog *fp)
vfree(fp);
}
-int bpf_prog_calc_digest(struct bpf_prog *fp)
+int bpf_prog_calc_tag(struct bpf_prog *fp)
{
const u32 bits_offset = SHA_MESSAGE_BYTES - sizeof(__be64);
- u32 raw_size = bpf_prog_digest_scratch_size(fp);
+ u32 raw_size = bpf_prog_tag_scratch_size(fp);
+ u32 digest[SHA_DIGEST_WORDS];
u32 ws[SHA_WORKSPACE_WORDS];
u32 i, bsize, psize, blocks;
struct bpf_insn *dst;
@@ -162,7 +163,7 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
if (!raw)
return -ENOMEM;
- sha_init(fp->digest);
+ sha_init(digest);
memset(ws, 0, sizeof(ws));
/* We need to take out the map fd for the digest calculation
@@ -204,13 +205,14 @@ int bpf_prog_calc_digest(struct bpf_prog *fp)
*bits = cpu_to_be64((psize - 1) << 3);
while (blocks--) {
- sha_transform(fp->digest, todo, ws);
+ sha_transform(digest, todo, ws);
todo += SHA_MESSAGE_BYTES;
}
- result = (__force __be32 *)fp->digest;
+ result = (__force __be32 *)digest;
for (i = 0; i < SHA_DIGEST_WORDS; i++)
- result[i] = cpu_to_be32(fp->digest[i]);
+ result[i] = cpu_to_be32(digest[i]);
+ memcpy(fp->tag, result, sizeof(fp->tag));
vfree(raw);
return 0;
@@ -1171,3 +1173,12 @@ int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
{
return -EFAULT;
}
+
+/* All definitions of tracepoints related to BPF. */
+#define CREATE_TRACE_POINTS
+#include <linux/bpf_trace.h>
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(xdp_exception);
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_get_type);
+EXPORT_TRACEPOINT_SYMBOL_GPL(bpf_prog_put_rcu);
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 34debc1..a753bbe 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -13,7 +13,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
@@ -103,7 +102,7 @@ static void htab_free_elems(struct bpf_htab *htab)
free_percpu(pptr);
}
free_elems:
- vfree(htab->elems);
+ bpf_map_area_free(htab->elems);
}
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
@@ -125,7 +124,8 @@ static int prealloc_init(struct bpf_htab *htab)
{
int err = -ENOMEM, i;
- htab->elems = vzalloc(htab->elem_size * htab->map.max_entries);
+ htab->elems = bpf_map_area_alloc(htab->elem_size *
+ htab->map.max_entries);
if (!htab->elems)
return -ENOMEM;
@@ -274,7 +274,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
*/
goto free_htab;
- if (htab->map.value_size >= (1 << (KMALLOC_SHIFT_MAX - 1)) -
+ if (htab->map.value_size >= KMALLOC_MAX_SIZE -
MAX_BPF_STACK - sizeof(struct htab_elem))
/* if value_size is bigger, the user space won't be able to
* access the elements via bpf syscall. This check also makes
@@ -320,14 +320,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
goto free_htab;
err = -ENOMEM;
- htab->buckets = kmalloc_array(htab->n_buckets, sizeof(struct bucket),
- GFP_USER | __GFP_NOWARN);
-
- if (!htab->buckets) {
- htab->buckets = vmalloc(htab->n_buckets * sizeof(struct bucket));
- if (!htab->buckets)
- goto free_htab;
- }
+ htab->buckets = bpf_map_area_alloc(htab->n_buckets *
+ sizeof(struct bucket));
+ if (!htab->buckets)
+ goto free_htab;
for (i = 0; i < htab->n_buckets; i++) {
INIT_HLIST_HEAD(&htab->buckets[i].head);
@@ -354,7 +350,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
free_extra_elems:
free_percpu(htab->extra_elems);
free_buckets:
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
free_htab:
kfree(htab);
return ERR_PTR(err);
@@ -1014,7 +1010,7 @@ static void htab_map_free(struct bpf_map *map)
prealloc_destroy(htab);
free_percpu(htab->extra_elems);
- kvfree(htab->buckets);
+ bpf_map_area_free(htab->buckets);
kfree(htab);
}
diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c
index 0b030c9..fddcae8 100644
--- a/kernel/bpf/inode.c
+++ b/kernel/bpf/inode.c
@@ -21,6 +21,7 @@
#include <linux/parser.h>
#include <linux/filter.h>
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
enum bpf_type {
BPF_TYPE_UNSPEC = 0,
@@ -281,6 +282,13 @@ int bpf_obj_pin_user(u32 ufd, const char __user *pathname)
ret = bpf_obj_do_pin(pname, raw, type);
if (ret != 0)
bpf_any_put(raw, type);
+ if ((trace_bpf_obj_pin_prog_enabled() ||
+ trace_bpf_obj_pin_map_enabled()) && !ret) {
+ if (type == BPF_TYPE_PROG)
+ trace_bpf_obj_pin_prog(raw, ufd, pname);
+ if (type == BPF_TYPE_MAP)
+ trace_bpf_obj_pin_map(raw, ufd, pname);
+ }
out:
putname(pname);
return ret;
@@ -342,8 +350,15 @@ int bpf_obj_get_user(const char __user *pathname)
else
goto out;
- if (ret < 0)
+ if (ret < 0) {
bpf_any_put(raw, type);
+ } else if (trace_bpf_obj_get_prog_enabled() ||
+ trace_bpf_obj_get_map_enabled()) {
+ if (type == BPF_TYPE_PROG)
+ trace_bpf_obj_get_prog(raw, ret, pname);
+ if (type == BPF_TYPE_MAP)
+ trace_bpf_obj_get_map(raw, ret, pname);
+ }
out:
putname(pname);
return ret;
diff --git a/kernel/bpf/lpm_trie.c b/kernel/bpf/lpm_trie.c
new file mode 100644
index 0000000..144e976
--- /dev/null
+++ b/kernel/bpf/lpm_trie.c
@@ -0,0 +1,503 @@
+/*
+ * Longest prefix match list implementation
+ *
+ * Copyright (c) 2016,2017 Daniel Mack
+ * Copyright (c) 2016 David Herrmann
+ *
+ * This file is subject to the terms and conditions of version 2 of the GNU
+ * General Public License. See the file COPYING in the main directory of the
+ * Linux distribution for more details.
+ */
+
+#include <linux/bpf.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/vmalloc.h>
+#include <net/ipv6.h>
+
+/* Intermediate node */
+#define LPM_TREE_NODE_FLAG_IM BIT(0)
+
+struct lpm_trie_node;
+
+struct lpm_trie_node {
+ struct rcu_head rcu;
+ struct lpm_trie_node __rcu *child[2];
+ u32 prefixlen;
+ u32 flags;
+ u8 data[0];
+};
+
+struct lpm_trie {
+ struct bpf_map map;
+ struct lpm_trie_node __rcu *root;
+ size_t n_entries;
+ size_t max_prefixlen;
+ size_t data_size;
+ raw_spinlock_t lock;
+};
+
+/* This trie implements a longest prefix match algorithm that can be used to
+ * match IP addresses to a stored set of ranges.
+ *
+ * Data stored in @data of struct bpf_lpm_trie_key and struct lpm_trie_node is
+ * interpreted as big endian, so data[0] stores the most significant byte.
+ *
+ * Match ranges are internally stored in instances of struct lpm_trie_node
+ * which each contain their prefix length as well as two pointers that may
+ * lead to more nodes containing more specific matches. Each node also stores
+ * a value that is defined by and returned to userspace via the update_elem
+ * and lookup functions.
+ *
+ * For instance, let's start with a trie that was created with a prefix length
+ * of 32, so it can be used for IPv4 addresses, and one single element that
+ * matches 192.168.0.0/16. The data array would hence contain
+ * [0xc0, 0xa8, 0x00, 0x00] in big-endian notation. This documentation will
+ * stick to IP-address notation for readability though.
+ *
+ * As the trie is empty initially, the new node (1) will be placed as the root
+ * node, denoted as (R) in the example below. As there are no other nodes, both
+ * child pointers are %NULL.
+ *
+ * +----------------+
+ * | (1) (R) |
+ * | 192.168.0.0/16 |
+ * | value: 1 |
+ * | [0] [1] |
+ * +----------------+
+ *
+ * Next, let's add a new node (2) matching 192.168.0.0/24. As there is already
+ * a node with the same data and a smaller prefix (ie, a less specific one),
+ * node (2) will become a child of (1). Its child index depends on the next bit
+ * that is outside of what (1) matches, and that bit is 0, so (2) will be
+ * child[0] of (1):
+ *
+ * +----------------+
+ * | (1) (R) |
+ * | 192.168.0.0/16 |
+ * | value: 1 |
+ * | [0] [1] |
+ * +----------------+
+ * |
+ * +----------------+
+ * | (2) |
+ * | 192.168.0.0/24 |
+ * | value: 2 |
+ * | [0] [1] |
+ * +----------------+
+ *
+ * The child[1] slot of (1) could be filled with another node which has bit #17
+ * (the next bit after the ones that (1) matches on) set to 1. For instance,
+ * 192.168.128.0/24:
+ *
+ * +----------------+
+ * | (1) (R) |
+ * | 192.168.0.0/16 |
+ * | value: 1 |
+ * | [0] [1] |
+ * +----------------+
+ * | |
+ * +----------------+ +------------------+
+ * | (2) | | (3) |
+ * | 192.168.0.0/24 | | 192.168.128.0/24 |
+ * | value: 2 | | value: 3 |
+ * | [0] [1] | | [0] [1] |
+ * +----------------+ +------------------+
+ *
+ * Let's add another node (4) to the game for 192.168.1.0/24. In order to place
+ * it, node (1) is looked at first, and because of the semantics laid out
+ * above (bit #17 is 0), (4) would normally be attached to (1) as child[0].
+ * However, that slot is already allocated, so a new node is needed in between.
+ * That node does not have a value attached to it and it will never be
+ * returned to users as result of a lookup. It is only there to differentiate
+ * the traversal further. It will get a prefix as wide as necessary to
+ * distinguish its two children:
+ *
+ * +----------------+
+ * | (1) (R) |
+ * | 192.168.0.0/16 |
+ * | value: 1 |
+ * | [0] [1] |
+ * +----------------+
+ * | |
+ * +----------------+ +------------------+
+ * | (4) (I) | | (3) |
+ * | 192.168.0.0/23 | | 192.168.128.0/24 |
+ * | value: --- | | value: 3 |
+ * | [0] [1] | | [0] [1] |
+ * +----------------+ +------------------+
+ * | |
+ * +----------------+ +----------------+
+ * | (2) | | (5) |
+ * | 192.168.0.0/24 | | 192.168.1.0/24 |
+ * | value: 2 | | value: 5 |
+ * | [0] [1] | | [0] [1] |
+ * +----------------+ +----------------+
+ *
+ * 192.168.1.1/32 would be a child of (5) etc.
+ *
+ * An intermediate node will be turned into a 'real' node on demand. In the
+ * example above, (4) would be re-used if 192.168.0.0/23 is added to the trie.
+ *
+ * A fully populated trie would have a height of 32 nodes, as the trie was
+ * created with a prefix length of 32.
+ *
+ * The lookup starts at the root node. If the current node matches and if there
+ * is a child that can be used to become more specific, the trie is traversed
+ * downwards. The last node in the traversal that is a non-intermediate one is
+ * returned.
+ */
+
+static inline int extract_bit(const u8 *data, size_t index)
+{
+ return !!(data[index / 8] & (1 << (7 - (index % 8))));
+}
+
+/**
+ * longest_prefix_match() - determine the longest prefix
+ * @trie: The trie to get internal sizes from
+ * @node: The node to operate on
+ * @key: The key to compare to @node
+ *
+ * Determine the longest prefix of @node that matches the bits in @key.
+ */
+static size_t longest_prefix_match(const struct lpm_trie *trie,
+ const struct lpm_trie_node *node,
+ const struct bpf_lpm_trie_key *key)
+{
+ size_t prefixlen = 0;
+ size_t i;
+
+ for (i = 0; i < trie->data_size; i++) {
+ size_t b;
+
+ b = 8 - fls(node->data[i] ^ key->data[i]);
+ prefixlen += b;
+
+ if (prefixlen >= node->prefixlen || prefixlen >= key->prefixlen)
+ return min(node->prefixlen, key->prefixlen);
+
+ if (b < 8)
+ break;
+ }
+
+ return prefixlen;
+}
+
+/* Called from syscall or from eBPF program */
+static void *trie_lookup_elem(struct bpf_map *map, void *_key)
+{
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node *node, *found = NULL;
+ struct bpf_lpm_trie_key *key = _key;
+
+ /* Start walking the trie from the root node ... */
+
+ for (node = rcu_dereference(trie->root); node;) {
+ unsigned int next_bit;
+ size_t matchlen;
+
+ /* Determine the longest prefix of @node that matches @key.
+ * If it's the maximum possible prefix for this trie, we have
+ * an exact match and can return it directly.
+ */
+ matchlen = longest_prefix_match(trie, node, key);
+ if (matchlen == trie->max_prefixlen) {
+ found = node;
+ break;
+ }
+
+ /* If the number of bits that match is smaller than the prefix
+ * length of @node, bail out and return the node we have seen
+ * last in the traversal (ie, the parent).
+ */
+ if (matchlen < node->prefixlen)
+ break;
+
+ /* Consider this node as return candidate unless it is an
+ * artificially added intermediate one.
+ */
+ if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+ found = node;
+
+ /* If the node match is fully satisfied, let's see if we can
+ * become more specific. Determine the next bit in the key and
+ * traverse down.
+ */
+ next_bit = extract_bit(key->data, node->prefixlen);
+ node = rcu_dereference(node->child[next_bit]);
+ }
+
+ if (!found)
+ return NULL;
+
+ return found->data + trie->data_size;
+}
+
+static struct lpm_trie_node *lpm_trie_node_alloc(const struct lpm_trie *trie,
+ const void *value)
+{
+ struct lpm_trie_node *node;
+ size_t size = sizeof(struct lpm_trie_node) + trie->data_size;
+
+ if (value)
+ size += trie->map.value_size;
+
+ node = kmalloc(size, GFP_ATOMIC | __GFP_NOWARN);
+ if (!node)
+ return NULL;
+
+ node->flags = 0;
+
+ if (value)
+ memcpy(node->data + trie->data_size, value,
+ trie->map.value_size);
+
+ return node;
+}
+
+/* Called from syscall or from eBPF program */
+static int trie_update_elem(struct bpf_map *map,
+ void *_key, void *value, u64 flags)
+{
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node *node, *im_node = NULL, *new_node = NULL;
+ struct lpm_trie_node __rcu **slot;
+ struct bpf_lpm_trie_key *key = _key;
+ unsigned long irq_flags;
+ unsigned int next_bit;
+ size_t matchlen = 0;
+ int ret = 0;
+
+ if (unlikely(flags > BPF_EXIST))
+ return -EINVAL;
+
+ if (key->prefixlen > trie->max_prefixlen)
+ return -EINVAL;
+
+ raw_spin_lock_irqsave(&trie->lock, irq_flags);
+
+ /* Allocate and fill a new node */
+
+ if (trie->n_entries == trie->map.max_entries) {
+ ret = -ENOSPC;
+ goto out;
+ }
+
+ new_node = lpm_trie_node_alloc(trie, value);
+ if (!new_node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ trie->n_entries++;
+
+ new_node->prefixlen = key->prefixlen;
+ RCU_INIT_POINTER(new_node->child[0], NULL);
+ RCU_INIT_POINTER(new_node->child[1], NULL);
+ memcpy(new_node->data, key->data, trie->data_size);
+
+ /* Now find a slot to attach the new node. To do that, walk the tree
+ * from the root and match as many bits as possible for each node until
+ * we either find an empty slot or a slot that needs to be replaced by
+ * an intermediate node.
+ */
+ slot = &trie->root;
+
+ while ((node = rcu_dereference_protected(*slot,
+ lockdep_is_held(&trie->lock)))) {
+ matchlen = longest_prefix_match(trie, node, key);
+
+ if (node->prefixlen != matchlen ||
+ node->prefixlen == key->prefixlen ||
+ node->prefixlen == trie->max_prefixlen)
+ break;
+
+ next_bit = extract_bit(key->data, node->prefixlen);
+ slot = &node->child[next_bit];
+ }
+
+ /* If the slot is empty (a free child pointer or an empty root),
+ * simply assign the @new_node to that slot and be done.
+ */
+ if (!node) {
+ rcu_assign_pointer(*slot, new_node);
+ goto out;
+ }
+
+ /* If the slot we picked already exists, replace it with @new_node
+ * which already has the correct data array set.
+ */
+ if (node->prefixlen == matchlen) {
+ new_node->child[0] = node->child[0];
+ new_node->child[1] = node->child[1];
+
+ if (!(node->flags & LPM_TREE_NODE_FLAG_IM))
+ trie->n_entries--;
+
+ rcu_assign_pointer(*slot, new_node);
+ kfree_rcu(node, rcu);
+
+ goto out;
+ }
+
+ /* If the new node matches the prefix completely, it must be inserted
+ * as an ancestor. Simply insert it between @node and *@slot.
+ */
+ if (matchlen == key->prefixlen) {
+ next_bit = extract_bit(node->data, matchlen);
+ rcu_assign_pointer(new_node->child[next_bit], node);
+ rcu_assign_pointer(*slot, new_node);
+ goto out;
+ }
+
+ im_node = lpm_trie_node_alloc(trie, NULL);
+ if (!im_node) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ im_node->prefixlen = matchlen;
+ im_node->flags |= LPM_TREE_NODE_FLAG_IM;
+ memcpy(im_node->data, node->data, trie->data_size);
+
+ /* Now determine which child to install in which slot */
+ if (extract_bit(key->data, matchlen)) {
+ rcu_assign_pointer(im_node->child[0], node);
+ rcu_assign_pointer(im_node->child[1], new_node);
+ } else {
+ rcu_assign_pointer(im_node->child[0], new_node);
+ rcu_assign_pointer(im_node->child[1], node);
+ }
+
+ /* Finally, assign the intermediate node to the determined spot */
+ rcu_assign_pointer(*slot, im_node);
+
+out:
+ if (ret) {
+ if (new_node)
+ trie->n_entries--;
+
+ kfree(new_node);
+ kfree(im_node);
+ }
+
+ raw_spin_unlock_irqrestore(&trie->lock, irq_flags);
+
+ return ret;
+}
+
+static int trie_delete_elem(struct bpf_map *map, void *key)
+{
+ /* TODO */
+ return -ENOSYS;
+}
+
+static struct bpf_map *trie_alloc(union bpf_attr *attr)
+{
+ size_t cost, cost_per_node;
+ struct lpm_trie *trie;
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
+
+ /* check sanity of attributes */
+ if (attr->max_entries == 0 ||
+ attr->map_flags != BPF_F_NO_PREALLOC ||
+ attr->key_size < sizeof(struct bpf_lpm_trie_key) + 1 ||
+ attr->key_size > sizeof(struct bpf_lpm_trie_key) + 256 ||
+ attr->value_size == 0)
+ return ERR_PTR(-EINVAL);
+
+ trie = kzalloc(sizeof(*trie), GFP_USER | __GFP_NOWARN);
+ if (!trie)
+ return ERR_PTR(-ENOMEM);
+
+ /* copy mandatory map attributes */
+ trie->map.map_type = attr->map_type;
+ trie->map.key_size = attr->key_size;
+ trie->map.value_size = attr->value_size;
+ trie->map.max_entries = attr->max_entries;
+ trie->data_size = attr->key_size -
+ offsetof(struct bpf_lpm_trie_key, data);
+ trie->max_prefixlen = trie->data_size * 8;
+
+ cost_per_node = sizeof(struct lpm_trie_node) +
+ attr->value_size + trie->data_size;
+ cost = sizeof(*trie) + attr->max_entries * cost_per_node;
+ trie->map.pages = round_up(cost, PAGE_SIZE) >> PAGE_SHIFT;
+
+ ret = bpf_map_precharge_memlock(trie->map.pages);
+ if (ret) {
+ kfree(trie);
+ return ERR_PTR(ret);
+ }
+
+ raw_spin_lock_init(&trie->lock);
+
+ return &trie->map;
+}
+
+static void trie_free(struct bpf_map *map)
+{
+ struct lpm_trie *trie = container_of(map, struct lpm_trie, map);
+ struct lpm_trie_node __rcu **slot;
+ struct lpm_trie_node *node;
+
+ raw_spin_lock(&trie->lock);
+
+ /* Always start at the root and walk down to a node that has no
+ * children. Then free that node, nullify its reference in the parent
+ * and start over.
+ */
+
+ for (;;) {
+ slot = &trie->root;
+
+ for (;;) {
+ node = rcu_dereference_protected(*slot,
+ lockdep_is_held(&trie->lock));
+ if (!node)
+ goto unlock;
+
+ if (rcu_access_pointer(node->child[0])) {
+ slot = &node->child[0];
+ continue;
+ }
+
+ if (rcu_access_pointer(node->child[1])) {
+ slot = &node->child[1];
+ continue;
+ }
+
+ kfree(node);
+ RCU_INIT_POINTER(*slot, NULL);
+ break;
+ }
+ }
+
+unlock:
+ raw_spin_unlock(&trie->lock);
+}
+
+static const struct bpf_map_ops trie_ops = {
+ .map_alloc = trie_alloc,
+ .map_free = trie_free,
+ .map_lookup_elem = trie_lookup_elem,
+ .map_update_elem = trie_update_elem,
+ .map_delete_elem = trie_delete_elem,
+};
+
+static struct bpf_map_type_list trie_type __read_mostly = {
+ .ops = &trie_ops,
+ .type = BPF_MAP_TYPE_LPM_TRIE,
+};
+
+static int __init register_trie_map(void)
+{
+ bpf_register_map_type(&trie_type);
+ return 0;
+}
+late_initcall(register_trie_map);
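Pulling the pieces together, a hypothetical userspace exercise of the new map type via the raw bpf(2) syscall (no library assumed; error handling trimmed). Note trie_alloc() above insists on BPF_F_NO_PREALLOC, and the zero-length data[0] member lets the key be embedded directly:

#include <linux/bpf.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

static int sys_bpf(int cmd, union bpf_attr *attr)
{
	return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
}

int lpm_map_demo(void)
{
	struct {
		struct bpf_lpm_trie_key hdr;
		__u8 addr[4];
	} key = {};
	union bpf_attr create = {
		.map_type    = BPF_MAP_TYPE_LPM_TRIE,
		.key_size    = sizeof(key),		/* 4 + 4 bytes */
		.value_size  = sizeof(__u64),
		.max_entries = 16,
		.map_flags   = BPF_F_NO_PREALLOC,	/* required by trie_alloc() */
	};
	union bpf_attr update = {};
	__u64 value = 42;
	int fd;

	fd = sys_bpf(BPF_MAP_CREATE, &create);
	if (fd < 0)
		return -1;

	key.hdr.prefixlen = 24;				/* 192.168.1.0/24 */
	memcpy(key.addr, "\xc0\xa8\x01\x00", 4);

	update.map_fd = fd;
	update.key    = (__u64)(unsigned long)&key;
	update.value  = (__u64)(unsigned long)&value;
	update.flags  = BPF_ANY;
	return sys_bpf(BPF_MAP_UPDATE_ELEM, &update);
}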
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 732ae16..be85191 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -7,7 +7,6 @@
#include <linux/bpf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
-#include <linux/vmalloc.h>
#include <linux/stacktrace.h>
#include <linux/perf_event.h>
#include "percpu_freelist.h"
@@ -32,7 +31,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
u32 elem_size = sizeof(struct stack_map_bucket) + smap->map.value_size;
int err;
- smap->elems = vzalloc(elem_size * smap->map.max_entries);
+ smap->elems = bpf_map_area_alloc(elem_size * smap->map.max_entries);
if (!smap->elems)
return -ENOMEM;
@@ -45,7 +44,7 @@ static int prealloc_elems_and_freelist(struct bpf_stack_map *smap)
return 0;
free_elems:
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
return err;
}
@@ -76,12 +75,9 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
if (cost >= U32_MAX - PAGE_SIZE)
return ERR_PTR(-E2BIG);
- smap = kzalloc(cost, GFP_USER | __GFP_NOWARN);
- if (!smap) {
- smap = vzalloc(cost);
- if (!smap)
- return ERR_PTR(-ENOMEM);
- }
+ smap = bpf_map_area_alloc(cost);
+ if (!smap)
+ return ERR_PTR(-ENOMEM);
err = -E2BIG;
cost += n_buckets * (value_size + sizeof(struct stack_map_bucket));
@@ -112,7 +108,7 @@ static struct bpf_map *stack_map_alloc(union bpf_attr *attr)
put_buffers:
put_callchain_buffers();
free_smap:
- kvfree(smap);
+ bpf_map_area_free(smap);
return ERR_PTR(err);
}
@@ -262,9 +258,9 @@ static void stack_map_free(struct bpf_map *map)
/* wait for bpf programs to complete before freeing stack map */
synchronize_rcu();
- vfree(smap->elems);
+ bpf_map_area_free(smap->elems);
pcpu_freelist_destroy(&smap->freelist);
- kvfree(smap);
+ bpf_map_area_free(smap);
put_callchain_buffers();
}
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index e89acea..08a4d28 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -10,8 +10,11 @@
* General Public License for more details.
*/
#include <linux/bpf.h>
+#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
@@ -49,6 +52,30 @@ void bpf_register_map_type(struct bpf_map_type_list *tl)
list_add(&tl->list_node, &bpf_map_types);
}
+void *bpf_map_area_alloc(size_t size)
+{
+ /* We definitely need __GFP_NORETRY, so the OOM killer doesn't
+ * trigger under memory pressure; we really just want the
+ * allocation to fail instead.
+ */
+ const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
+ void *area;
+
+ if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
+ area = kmalloc(size, GFP_USER | flags);
+ if (area != NULL)
+ return area;
+ }
+
+ return __vmalloc(size, GFP_KERNEL | __GFP_HIGHMEM | flags,
+ PAGE_KERNEL);
+}
+
+void bpf_map_area_free(void *area)
+{
+ kvfree(area);
+}
+
int bpf_map_precharge_memlock(u32 pages)
{
struct user_struct *user = get_current_user();
@@ -215,6 +242,7 @@ static int map_create(union bpf_attr *attr)
/* failed to allocate fd */
goto free_map;
+ trace_bpf_map_create(map, err);
return err;
free_map:
@@ -339,6 +367,7 @@ static int map_lookup_elem(union bpf_attr *attr)
if (copy_to_user(uvalue, value, value_size) != 0)
goto free_value;
+ trace_bpf_map_lookup_elem(map, ufd, key, value);
err = 0;
free_value:
@@ -421,6 +450,8 @@ static int map_update_elem(union bpf_attr *attr)
__this_cpu_dec(bpf_prog_active);
preempt_enable();
+ if (!err)
+ trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
kfree(value);
free_key:
@@ -466,6 +497,8 @@ static int map_delete_elem(union bpf_attr *attr)
__this_cpu_dec(bpf_prog_active);
preempt_enable();
+ if (!err)
+ trace_bpf_map_delete_elem(map, ufd, key);
free_key:
kfree(key);
err_put:
@@ -518,6 +551,7 @@ static int map_get_next_key(union bpf_attr *attr)
if (copy_to_user(unext_key, next_key, map->key_size) != 0)
goto free_next_key;
+ trace_bpf_map_next_key(map, ufd, key, next_key);
err = 0;
free_next_key:
@@ -671,8 +705,10 @@ static void __bpf_prog_put_rcu(struct rcu_head *rcu)
void bpf_prog_put(struct bpf_prog *prog)
{
- if (atomic_dec_and_test(&prog->aux->refcnt))
+ if (atomic_dec_and_test(&prog->aux->refcnt)) {
+ trace_bpf_prog_put_rcu(prog);
call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
+ }
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
@@ -688,17 +724,17 @@ static int bpf_prog_release(struct inode *inode, struct file *filp)
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
const struct bpf_prog *prog = filp->private_data;
- char prog_digest[sizeof(prog->digest) * 2 + 1] = { };
+ char prog_tag[sizeof(prog->tag) * 2 + 1] = { };
- bin2hex(prog_digest, prog->digest, sizeof(prog->digest));
+ bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
seq_printf(m,
"prog_type:\t%u\n"
"prog_jited:\t%u\n"
- "prog_digest:\t%s\n"
+ "prog_tag:\t%s\n"
"memlock:\t%llu\n",
prog->type,
prog->jited,
- prog_digest,
+ prog_tag,
prog->pages * 1ULL << PAGE_SHIFT);
}
#endif
@@ -781,7 +817,11 @@ struct bpf_prog *bpf_prog_get(u32 ufd)
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
- return __bpf_prog_get(ufd, &type);
+ struct bpf_prog *prog = __bpf_prog_get(ufd, &type);
+
+ if (!IS_ERR(prog))
+ trace_bpf_prog_get_type(prog);
+ return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);
@@ -863,6 +903,7 @@ static int bpf_prog_load(union bpf_attr *attr)
/* failed to allocate fd */
goto free_used_maps;
+ trace_bpf_prog_load(prog, err);
return err;
free_used_maps:
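
A sketch of how a map implementation is expected to use the two helpers
added above; the function and field names here are illustrative, not
from the patch.

    static void *my_map_alloc_table(u32 max_entries, u32 elem_size)
    {
        u64 size = (u64)max_entries * elem_size;

        /* Guard against overflow before narrowing, as callers do. */
        if (size >= U32_MAX - PAGE_SIZE)
            return NULL;

        /* Zeroed memory; small requests come from kmalloc, large
         * ones fall back to vmalloc, never waking the OOM killer.
         */
        return bpf_map_area_alloc(size);
    }

    static void my_map_free_table(void *table)
    {
        bpf_map_area_free(table);   /* kvfree() does the right thing */
    }
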
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 2efdc91..fb3513b 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1566,22 +1566,54 @@ static int evaluate_reg_imm_alu(struct bpf_verifier_env *env,
struct bpf_reg_state *dst_reg = &regs[insn->dst_reg];
struct bpf_reg_state *src_reg = &regs[insn->src_reg];
u8 opcode = BPF_OP(insn->code);
+ u64 dst_imm = dst_reg->imm;
- /* dst_reg->type == CONST_IMM here, simulate execution of 'add'/'or'
- * insn. Don't care about overflow or negative values, just add them
+ /* dst_reg->type == CONST_IMM here. Simulate execution of insns
+ * containing ALU ops. Don't care about overflow or negative
+ * values, just add/sub/... them; the registers are u64.
*/
- if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K)
- dst_reg->imm += insn->imm;
- else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
- src_reg->type == CONST_IMM)
- dst_reg->imm += src_reg->imm;
- else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K)
- dst_reg->imm |= insn->imm;
- else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
- src_reg->type == CONST_IMM)
- dst_reg->imm |= src_reg->imm;
- else
+ if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm += insn->imm;
+ } else if (opcode == BPF_ADD && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm += src_reg->imm;
+ } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm -= insn->imm;
+ } else if (opcode == BPF_SUB && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm -= src_reg->imm;
+ } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm *= insn->imm;
+ } else if (opcode == BPF_MUL && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm *= src_reg->imm;
+ } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm |= insn->imm;
+ } else if (opcode == BPF_OR && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm |= src_reg->imm;
+ } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm &= insn->imm;
+ } else if (opcode == BPF_AND && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm &= src_reg->imm;
+ } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm >>= insn->imm;
+ } else if (opcode == BPF_RSH && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm >>= src_reg->imm;
+ } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_K) {
+ dst_imm <<= insn->imm;
+ } else if (opcode == BPF_LSH && BPF_SRC(insn->code) == BPF_X &&
+ src_reg->type == CONST_IMM) {
+ dst_imm <<= src_reg->imm;
+ } else {
mark_reg_unknown_value(regs, insn->dst_reg);
+ goto out;
+ }
+
+ dst_reg->imm = dst_imm;
+out:
return 0;
}
@@ -2225,14 +2257,8 @@ static int check_ld_imm(struct bpf_verifier_env *env, struct bpf_insn *insn)
return err;
if (insn->src_reg == 0) {
- /* generic move 64-bit immediate into a register,
- * only analyzer needs to collect the ld_imm value.
- */
u64 imm = ((u64)(insn + 1)->imm << 32) | (u32)insn->imm;
- if (!env->analyzer_ops)
- return 0;
-
regs[insn->dst_reg].type = CONST_IMM;
regs[insn->dst_reg].imm = imm;
return 0;
@@ -3016,7 +3042,7 @@ static int replace_map_fd_with_map_ptr(struct bpf_verifier_env *env)
int insn_cnt = env->prog->len;
int i, j, err;
- err = bpf_prog_calc_digest(env->prog);
+ err = bpf_prog_calc_tag(env->prog);
if (err)
return err;
@@ -3165,10 +3191,14 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
insn = env->prog->insnsi + delta;
for (i = 0; i < insn_cnt; i++, insn++) {
- if (insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
+ if (insn->code == (BPF_LDX | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_LDX | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_LDX | BPF_MEM | BPF_W) ||
insn->code == (BPF_LDX | BPF_MEM | BPF_DW))
type = BPF_READ;
- else if (insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
+ else if (insn->code == (BPF_STX | BPF_MEM | BPF_B) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_H) ||
+ insn->code == (BPF_STX | BPF_MEM | BPF_W) ||
insn->code == (BPF_STX | BPF_MEM | BPF_DW))
type = BPF_WRITE;
else
@@ -3177,8 +3207,7 @@ static int convert_ctx_accesses(struct bpf_verifier_env *env)
if (env->insn_aux_data[i].ptr_type != PTR_TO_CTX)
continue;
- cnt = ops->convert_ctx_access(type, insn->dst_reg, insn->src_reg,
- insn->off, insn_buf, env->prog);
+ cnt = ops->convert_ctx_access(type, insn, insn_buf, env->prog);
if (cnt == 0 || cnt >= ARRAY_SIZE(insn_buf)) {
verbose("bpf verifier is misconfigured\n");
return -EINVAL;
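
The constant folding above can also be read as an opcode-to-operation
table. A compact user-space sketch of the BPF_K case, using the BPF_OP
encodings from uapi/linux/bpf_common.h as literal values:

    #include <stdint.h>

    uint64_t fold_alu_k(uint8_t op, uint64_t dst_imm, int32_t imm, int *known)
    {
        *known = 1;
        switch (op) {
        case 0x00: return dst_imm + imm;    /* BPF_ADD */
        case 0x10: return dst_imm - imm;    /* BPF_SUB */
        case 0x20: return dst_imm * imm;    /* BPF_MUL */
        case 0x40: return dst_imm | imm;    /* BPF_OR  */
        case 0x50: return dst_imm & imm;    /* BPF_AND */
        case 0x60: return dst_imm << imm;   /* BPF_LSH */
        case 0x70: return dst_imm >> imm;   /* BPF_RSH */
        default:
            *known = 0;     /* caller marks the register unknown */
            return 0;
        }
    }
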
diff --git a/kernel/capability.c b/kernel/capability.c
index a98e814..f97fe77 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -318,6 +318,7 @@ bool has_capability(struct task_struct *t, int cap)
{
return has_ns_capability(t, &init_user_ns, cap);
}
+EXPORT_SYMBOL(has_capability);
/**
* has_ns_capability_noaudit - Does a task have a capability (unaudited)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f75c4d0..0a5f630 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -764,7 +764,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
{
struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
int prev_state, ret = 0;
- bool hasdied = false;
if (num_online_cpus() == 1)
return -EBUSY;
@@ -809,7 +808,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
cpuhp_kick_ap_work(cpu);
}
- hasdied = prev_state != st->state && st->state == CPUHP_OFFLINE;
out:
cpu_hotplug_done();
return ret;
@@ -1302,10 +1300,24 @@ static int cpuhp_cb_check(enum cpuhp_state state)
*/
static int cpuhp_reserve_state(enum cpuhp_state state)
{
- enum cpuhp_state i;
+ enum cpuhp_state i, end;
+ struct cpuhp_step *step;
- for (i = CPUHP_AP_ONLINE_DYN; i <= CPUHP_AP_ONLINE_DYN_END; i++) {
- if (!cpuhp_ap_states[i].name)
+ switch (state) {
+ case CPUHP_AP_ONLINE_DYN:
+ step = cpuhp_ap_states + CPUHP_AP_ONLINE_DYN;
+ end = CPUHP_AP_ONLINE_DYN_END;
+ break;
+ case CPUHP_BP_PREPARE_DYN:
+ step = cpuhp_bp_states + CPUHP_BP_PREPARE_DYN;
+ end = CPUHP_BP_PREPARE_DYN_END;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ for (i = state; i <= end; i++, step++) {
+ if (!step->name)
return i;
}
WARN(1, "No more dynamic states available for CPU hotplug\n");
@@ -1323,7 +1335,7 @@ static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
mutex_lock(&cpuhp_state_mutex);
- if (state == CPUHP_AP_ONLINE_DYN) {
+ if (state == CPUHP_AP_ONLINE_DYN || state == CPUHP_BP_PREPARE_DYN) {
ret = cpuhp_reserve_state(state);
if (ret < 0)
goto out;
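
With CPUHP_BP_PREPARE_DYN now handled by cpuhp_reserve_state(), a
subsystem can reserve a dynamic PREPARE-stage state the same way dynamic
ONLINE states are reserved. A sketch with placeholder callbacks:

    static int my_cpu_prepare(unsigned int cpu)
    {
        /* allocate per-CPU resources before the CPU is brought up */
        return 0;
    }

    static int my_cpu_dead(unsigned int cpu)
    {
        /* release them once the CPU is down */
        return 0;
    }

    static int __init my_subsys_init(void)
    {
        int state;

        state = cpuhp_setup_state(CPUHP_BP_PREPARE_DYN, "mysubsys:prepare",
                                  my_cpu_prepare, my_cpu_dead);
        /* a return value >= 0 is the dynamically reserved state */
        return state < 0 ? state : 0;
    }
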
diff --git a/kernel/events/core.c b/kernel/events/core.c
index ab15509..110b38a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2249,7 +2249,7 @@ static int __perf_install_in_context(void *info)
struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
- bool activate = true;
+ bool reprogram = true;
int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
@@ -2257,27 +2257,26 @@ static int __perf_install_in_context(void *info)
raw_spin_lock(&ctx->lock);
task_ctx = ctx;
- /* If we're on the wrong CPU, try again */
- if (task_cpu(ctx->task) != smp_processor_id()) {
- ret = -ESRCH;
- goto unlock;
- }
+ reprogram = (ctx->task == current);
/*
- * If we're on the right CPU, see if the task we target is
- * current, if not we don't have to activate the ctx, a future
- * context switch will do that for us.
+ * If the task is running, it must be running on this CPU,
+ * otherwise we cannot reprogram things.
+ *
+ * If it's not running, we don't care; ctx->lock will
+ * serialize against it becoming runnable.
*/
- if (ctx->task != current)
- activate = false;
- else
- WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+ if (task_curr(ctx->task) && !reprogram) {
+ ret = -ESRCH;
+ goto unlock;
+ }
+ WARN_ON_ONCE(reprogram && cpuctx->task_ctx && cpuctx->task_ctx != ctx);
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
- if (activate) {
+ if (reprogram) {
ctx_sched_out(ctx, cpuctx, EVENT_TIME);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx);
@@ -2328,13 +2327,36 @@ perf_install_in_context(struct perf_event_context *ctx,
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
+ *
+ * Instead we use task_curr(), which tells us if the task is running.
+ * However, since we use task_curr() outside of rq::lock, we can race
+ * against the actual state. This means the result can be wrong.
+ *
+ * If we get a false positive, we retry; this is harmless.
+ *
+ * If we get a false negative, things are complicated. If we are after
+ * perf_event_context_sched_in(), ctx::lock will serialize us, and the
+ * value must be correct. If we're before, it doesn't matter since
+ * perf_event_context_sched_in() will program the counter.
+ *
+ * However, this hinges on the remote context switch having observed
+ * our task->perf_event_ctxp[] store, such that it will in fact take
+ * ctx::lock in perf_event_context_sched_in().
+ *
+ * We do this via task_function_call(); if the IPI fails to hit the
+ * task, we know any future context switch of the task must see the
+ * perf_event_ctxp[] store.
*/
-again:
+
/*
- * Cannot use task_function_call() because we need to run on the task's
- * CPU regardless of whether its current or not.
+ * This smp_mb() orders the task->perf_event_ctxp[] store with the
+ * task_cpu() load, such that if the IPI then does not find the task
+ * running, a future context switch of that task must observe the
+ * store.
*/
- if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+ smp_mb();
+again:
+ if (!task_function_call(task, __perf_install_in_context, event))
return;
raw_spin_lock_irq(&ctx->lock);
@@ -2348,12 +2370,16 @@ again:
raw_spin_unlock_irq(&ctx->lock);
return;
}
- raw_spin_unlock_irq(&ctx->lock);
/*
- * Since !ctx->is_active doesn't mean anything, we must IPI
- * unconditionally.
+ * If the task is not running, ctx->lock will prevent it from
+ * becoming so; thus we can safely install the event.
*/
- goto again;
+ if (task_curr(task)) {
+ raw_spin_unlock_irq(&ctx->lock);
+ goto again;
+ }
+ add_event_to_ctx(event, ctx);
+ raw_spin_unlock_irq(&ctx->lock);
}
/*
@@ -7034,25 +7060,12 @@ static void perf_log_itrace_start(struct perf_event *event)
perf_output_end(&handle);
}
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event,
- int throttle, struct perf_sample_data *data,
- struct pt_regs *regs)
+static int
+__perf_event_account_interrupt(struct perf_event *event, int throttle)
{
- int events = atomic_read(&event->event_limit);
struct hw_perf_event *hwc = &event->hw;
- u64 seq;
int ret = 0;
-
- /*
- * Non-sampling counters might still use the PMI to fold short
- * hardware counters, ignore those.
- */
- if (unlikely(!is_sampling_event(event)))
- return 0;
+ u64 seq;
seq = __this_cpu_read(perf_throttled_seq);
if (seq != hwc->interrupts_seq) {
@@ -7080,6 +7093,34 @@ static int __perf_event_overflow(struct perf_event *event,
perf_adjust_period(event, delta, hwc->last_period, true);
}
+ return ret;
+}
+
+int perf_event_account_interrupt(struct perf_event *event)
+{
+ return __perf_event_account_interrupt(event, 1);
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event,
+ int throttle, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int events = atomic_read(&event->event_limit);
+ int ret = 0;
+
+ /*
+ * Non-sampling counters might still use the PMI to fold short
+ * hardware counters, ignore those.
+ */
+ if (unlikely(!is_sampling_event(event)))
+ return 0;
+
+ ret = __perf_event_account_interrupt(event, throttle);
+
/*
* XXX event_limit might not quite work as expected on inherited
* events
@@ -9503,6 +9544,37 @@ static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id)
return 0;
}
+/*
+ * Variation on perf_event_ctx_lock_nested(), except we take two context
+ * mutexes.
+ */
+static struct perf_event_context *
+__perf_event_ctx_lock_double(struct perf_event *group_leader,
+ struct perf_event_context *ctx)
+{
+ struct perf_event_context *gctx;
+
+again:
+ rcu_read_lock();
+ gctx = READ_ONCE(group_leader->ctx);
+ if (!atomic_inc_not_zero(&gctx->refcount)) {
+ rcu_read_unlock();
+ goto again;
+ }
+ rcu_read_unlock();
+
+ mutex_lock_double(&gctx->mutex, &ctx->mutex);
+
+ if (group_leader->ctx != gctx) {
+ mutex_unlock(&ctx->mutex);
+ mutex_unlock(&gctx->mutex);
+ put_ctx(gctx);
+ goto again;
+ }
+
+ return gctx;
+}
+
/**
* sys_perf_event_open - open a performance event, associate it to a task/cpu
*
@@ -9746,12 +9818,31 @@ SYSCALL_DEFINE5(perf_event_open,
}
if (move_group) {
- gctx = group_leader->ctx;
- mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ gctx = __perf_event_ctx_lock_double(group_leader, ctx);
+
if (gctx->task == TASK_TOMBSTONE) {
err = -ESRCH;
goto err_locked;
}
+
+ /*
+ * Check if we raced against another sys_perf_event_open() call
+ * moving the software group underneath us.
+ */
+ if (!(group_leader->group_caps & PERF_EV_CAP_SOFTWARE)) {
+ /*
+ * If someone moved the group out from under us, check
+ * if this new event wound up on the same ctx; if so,
+ * it's the regular !move_group case, otherwise fail.
+ */
+ if (gctx != ctx) {
+ err = -EINVAL;
+ goto err_locked;
+ } else {
+ perf_event_ctx_unlock(group_leader, gctx);
+ move_group = 0;
+ }
+ }
} else {
mutex_lock(&ctx->mutex);
}
@@ -9853,7 +9944,7 @@ SYSCALL_DEFINE5(perf_event_open,
perf_unpin_context(ctx);
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
if (task) {
@@ -9879,7 +9970,7 @@ SYSCALL_DEFINE5(perf_event_open,
err_locked:
if (move_group)
- mutex_unlock(&gctx->mutex);
+ perf_event_ctx_unlock(group_leader, gctx);
mutex_unlock(&ctx->mutex);
/* err_file: */
fput(event_file);
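
A two-CPU sketch of the ordering argument the comments above make; this
is illustrative shorthand, not code from the patch:

    /*
     *  CPU0: perf_install_in_context()   CPU1: context switch of task
     *  -------------------------------   -----------------------------
     *  store task->perf_event_ctxp[]     store rq->curr = task
     *  smp_mb()                          full barrier in schedule()
     *  load task_cpu() / task_curr()     load task->perf_event_ctxp[]
     *
     * At least one side must observe the other's store: either the IPI
     * finds the task running and reprograms the PMU, or the next
     * context switch sees the new ctx pointer and takes ctx->lock in
     * perf_event_context_sched_in(). Both cannot miss.
     */
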
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 93ad6c1..a9b8cf5 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -182,6 +182,13 @@ void static_key_slow_dec_deferred(struct static_key_deferred *key)
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
+void static_key_deferred_flush(struct static_key_deferred *key)
+{
+ STATIC_KEY_CHECK_USE();
+ flush_delayed_work(&key->work);
+}
+EXPORT_SYMBOL_GPL(static_key_deferred_flush);
+
void jump_label_rate_limit(struct static_key_deferred *key,
unsigned long rl)
{
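
static_key_deferred_flush() exists so that users of rate-limited static
keys can wait out the delayed decrement before the key's storage goes
away. A sketch of the intended pairing, with a placeholder key:

    static struct static_key_deferred my_key;   /* illustrative */

    static void my_subsys_exit(void)
    {
        static_key_slow_dec_deferred(&my_key);
        /* make sure the delayed decrement is no longer queued ... */
        static_key_deferred_flush(&my_key);
        /* ... before the object containing the key can be freed */
    }
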
diff --git a/kernel/memremap.c b/kernel/memremap.c
index b501e39..9ecedc2 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -246,7 +246,9 @@ static void devm_memremap_pages_release(struct device *dev, void *data)
/* pages are dead and unused, undo the arch mapping */
align_start = res->start & ~(SECTION_SIZE - 1);
align_size = ALIGN(resource_size(res), SECTION_SIZE);
+ mem_hotplug_begin();
arch_remove_memory(align_start, align_size);
+ mem_hotplug_done();
untrack_pfn(NULL, PHYS_PFN(align_start), align_size);
pgmap_radix_release(res);
dev_WARN_ONCE(dev, pgmap->altmap && pgmap->altmap->alloc,
@@ -358,7 +360,9 @@ void *devm_memremap_pages(struct device *dev, struct resource *res,
if (error)
goto err_pfn_remap;
+ mem_hotplug_begin();
error = arch_add_memory(nid, align_start, align_size, true);
+ mem_hotplug_done();
if (error)
goto err_add_memory;
diff --git a/kernel/module.c b/kernel/module.c
index 5088784c..38d4270 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1145,7 +1145,7 @@ static size_t module_flags_taint(struct module *mod, char *buf)
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
if (taint_flags[i].module && test_bit(i, &mod->taints))
- buf[l++] = taint_flags[i].true;
+ buf[l++] = taint_flags[i].c_true;
}
return l;
diff --git a/kernel/panic.c b/kernel/panic.c
index c51edaa..08aa88d 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -249,7 +249,7 @@ void panic(const char *fmt, ...)
* Delay timeout seconds before rebooting the machine.
* We can't use the "normal" timers since we just panicked.
*/
- pr_emerg("Rebooting in %d seconds..", panic_timeout);
+ pr_emerg("Rebooting in %d seconds..\n", panic_timeout);
for (i = 0; i < panic_timeout * 1000; i += PANIC_TIMER_STEP) {
touch_nmi_watchdog();
@@ -355,7 +355,7 @@ const char *print_tainted(void)
for (i = 0; i < TAINT_FLAGS_COUNT; i++) {
const struct taint_flag *t = &taint_flags[i];
*s++ = test_bit(i, &tainted_mask) ?
- t->true : t->false;
+ t->c_true : t->c_false;
}
*s = 0;
} else
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index df9e8e9..eef2ce9 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -151,8 +151,12 @@ out:
static void delayed_free_pidns(struct rcu_head *p)
{
- kmem_cache_free(pid_ns_cachep,
- container_of(p, struct pid_namespace, rcu));
+ struct pid_namespace *ns = container_of(p, struct pid_namespace, rcu);
+
+ dec_pid_namespaces(ns->ucounts);
+ put_user_ns(ns->user_ns);
+
+ kmem_cache_free(pid_ns_cachep, ns);
}
static void destroy_pid_namespace(struct pid_namespace *ns)
@@ -162,8 +166,6 @@ static void destroy_pid_namespace(struct pid_namespace *ns)
ns_free_inum(&ns->ns);
for (i = 0; i < PIDMAP_ENTRIES; i++)
kfree(ns->pidmap[i].page);
- dec_pid_namespaces(ns->ucounts);
- put_user_ns(ns->user_ns);
call_rcu(&ns->rcu, delayed_free_pidns);
}
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index f67ceb7..15e6bae 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -46,7 +46,7 @@ static const char * const mem_sleep_labels[] = {
const char *mem_sleep_states[PM_SUSPEND_MAX];
suspend_state_t mem_sleep_current = PM_SUSPEND_FREEZE;
-suspend_state_t mem_sleep_default = PM_SUSPEND_MAX;
+static suspend_state_t mem_sleep_default = PM_SUSPEND_MEM;
unsigned int pm_suspend_global_flags;
EXPORT_SYMBOL_GPL(pm_suspend_global_flags);
@@ -168,7 +168,7 @@ void suspend_set_ops(const struct platform_suspend_ops *ops)
}
if (valid_state(PM_SUSPEND_MEM)) {
mem_sleep_states[PM_SUSPEND_MEM] = mem_sleep_labels[PM_SUSPEND_MEM];
- if (mem_sleep_default >= PM_SUSPEND_MEM)
+ if (mem_sleep_default == PM_SUSPEND_MEM)
mem_sleep_current = PM_SUSPEND_MEM;
}
diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
index 80adef7..0d6ff3e 100644
--- a/kernel/rcu/rcu.h
+++ b/kernel/rcu/rcu.h
@@ -136,6 +136,7 @@ int rcu_jiffies_till_stall_check(void);
#define TPS(x) tracepoint_string(x)
void rcu_early_boot_tests(void);
+void rcu_test_sync_prims(void);
/*
* This function really isn't for public consumption, but RCU is special in
diff --git a/kernel/rcu/tiny.c b/kernel/rcu/tiny.c
index 1898559..b23a4d0 100644
--- a/kernel/rcu/tiny.c
+++ b/kernel/rcu/tiny.c
@@ -185,9 +185,6 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
* benefits of doing might_sleep() to reduce latency.)
*
* Cool, huh? (Due to Josh Triplett.)
- *
- * But we want to make this a static inline later. The cond_resched()
- * currently makes this problematic.
*/
void synchronize_sched(void)
{
@@ -195,7 +192,6 @@ void synchronize_sched(void)
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_sched() in RCU read-side critical section");
- cond_resched();
}
EXPORT_SYMBOL_GPL(synchronize_sched);
diff --git a/kernel/rcu/tiny_plugin.h b/kernel/rcu/tiny_plugin.h
index 196f030..c64b827 100644
--- a/kernel/rcu/tiny_plugin.h
+++ b/kernel/rcu/tiny_plugin.h
@@ -60,12 +60,17 @@ EXPORT_SYMBOL_GPL(rcu_scheduler_active);
/*
* During boot, we forgive RCU lockdep issues. After this function is
- * invoked, we start taking RCU lockdep issues seriously.
+ * invoked, we start taking RCU lockdep issues seriously. Note that unlike
+ * Tree RCU, Tiny RCU transitions directly from RCU_SCHEDULER_INACTIVE
+ * to RCU_SCHEDULER_RUNNING, skipping the RCU_SCHEDULER_INIT stage.
+ * The reason for this is that Tiny RCU does not need kthreads, so it
+ * does not have to care about the fact that the scheduler is
+ * half-initialized at a certain phase of the boot process.
*/
void __init rcu_scheduler_starting(void)
{
WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
+ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 96c52e4..cb4e205 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -127,13 +127,16 @@ int rcu_num_nodes __read_mostly = NUM_RCU_NODES; /* Total # rcu_nodes in use. */
int sysctl_panic_on_rcu_stall __read_mostly;
/*
- * The rcu_scheduler_active variable transitions from zero to one just
- * before the first task is spawned. So when this variable is zero, RCU
- * can assume that there is but one task, allowing RCU to (for example)
+ * The rcu_scheduler_active variable is initialized to the value
+ * RCU_SCHEDULER_INACTIVE and transitions to RCU_SCHEDULER_INIT just before
+ * the first task is spawned. So when this variable is RCU_SCHEDULER_INACTIVE,
+ * RCU can assume that there is but one task, allowing RCU to (for example)
* optimize synchronize_rcu() to a simple barrier(). When this variable
- * is one, RCU must actually do all the hard work required to detect real
- * grace periods. This variable is also used to suppress boot-time false
- * positives from lockdep-RCU error checking.
+ * is RCU_SCHEDULER_INIT, RCU must actually do all the hard work required
+ * to detect real grace periods. This variable is also used to suppress
+ * boot-time false positives from lockdep-RCU error checking. Finally, it
+ * transitions from RCU_SCHEDULER_INIT to RCU_SCHEDULER_RUNNING after RCU
+ * is fully initialized, including all of its kthreads having been spawned.
*/
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
@@ -3980,18 +3983,22 @@ static int __init rcu_spawn_gp_kthread(void)
early_initcall(rcu_spawn_gp_kthread);
/*
- * This function is invoked towards the end of the scheduler's initialization
- * process. Before this is called, the idle task might contain
- * RCU read-side critical sections (during which time, this idle
- * task is booting the system). After this function is called, the
- * idle tasks are prohibited from containing RCU read-side critical
- * sections. This function also enables RCU lockdep checking.
+ * This function is invoked towards the end of the scheduler's
+ * initialization process. Before this is called, the idle task might
+ * contain synchronous grace-period primitives (during which time, this idle
+ * task is booting the system, and such primitives are no-ops). After this
+ * function is called, any synchronous grace-period primitives are run as
+ * expedited, with the requesting task driving the grace period forward.
+ * A later core_initcall(), rcu_exp_runtime_mode(), will switch to full
+ * runtime RCU functionality.
*/
void rcu_scheduler_starting(void)
{
WARN_ON(num_online_cpus() != 1);
WARN_ON(nr_context_switches() > 0);
- rcu_scheduler_active = 1;
+ rcu_test_sync_prims();
+ rcu_scheduler_active = RCU_SCHEDULER_INIT;
+ rcu_test_sync_prims();
}
/*
diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
index d3053e9..e59e184 100644
--- a/kernel/rcu/tree_exp.h
+++ b/kernel/rcu/tree_exp.h
@@ -532,18 +532,28 @@ struct rcu_exp_work {
};
/*
+ * Common code to drive an expedited grace period forward, used by
+ * workqueues and mid-boot-time tasks.
+ */
+static void rcu_exp_sel_wait_wake(struct rcu_state *rsp,
+ smp_call_func_t func, unsigned long s)
+{
+ /* Initialize the rcu_node tree in preparation for the wait. */
+ sync_rcu_exp_select_cpus(rsp, func);
+
+ /* Wait and clean up, including waking everyone. */
+ rcu_exp_wait_wake(rsp, s);
+}
+
+/*
* Work-queue handler to drive an expedited grace period forward.
*/
static void wait_rcu_exp_gp(struct work_struct *wp)
{
struct rcu_exp_work *rewp;
- /* Initialize the rcu_node tree in preparation for the wait. */
rewp = container_of(wp, struct rcu_exp_work, rew_work);
- sync_rcu_exp_select_cpus(rewp->rew_rsp, rewp->rew_func);
-
- /* Wait and clean up, including waking everyone. */
- rcu_exp_wait_wake(rewp->rew_rsp, rewp->rew_s);
+ rcu_exp_sel_wait_wake(rewp->rew_rsp, rewp->rew_func, rewp->rew_s);
}
/*
@@ -569,12 +579,18 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
if (exp_funnel_lock(rsp, s))
return; /* Someone else did our work for us. */
- /* Marshall arguments and schedule the expedited grace period. */
- rew.rew_func = func;
- rew.rew_rsp = rsp;
- rew.rew_s = s;
- INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
- schedule_work(&rew.rew_work);
+ /* Ensure that load happens before action based on it. */
+ if (unlikely(rcu_scheduler_active == RCU_SCHEDULER_INIT)) {
+ /* Direct call during scheduler init and early_initcalls(). */
+ rcu_exp_sel_wait_wake(rsp, func, s);
+ } else {
+ /* Marshal arguments & schedule the expedited grace period. */
+ rew.rew_func = func;
+ rew.rew_rsp = rsp;
+ rew.rew_s = s;
+ INIT_WORK_ONSTACK(&rew.rew_work, wait_rcu_exp_gp);
+ schedule_work(&rew.rew_work);
+ }
/* Wait for expedited grace period to complete. */
rdp = per_cpu_ptr(rsp->rda, raw_smp_processor_id());
@@ -676,6 +692,8 @@ void synchronize_rcu_expedited(void)
{
struct rcu_state *rsp = rcu_state_p;
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
+ return;
_synchronize_rcu_expedited(rsp, sync_rcu_exp_handler);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
@@ -693,3 +711,15 @@ void synchronize_rcu_expedited(void)
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+
+/*
+ * Switch to run-time mode once Tree RCU has fully initialized.
+ */
+static int __init rcu_exp_runtime_mode(void)
+{
+ rcu_test_sync_prims();
+ rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
+ rcu_test_sync_prims();
+ return 0;
+}
+core_initcall(rcu_exp_runtime_mode);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 85c5a88..56583e7 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -670,7 +670,7 @@ void synchronize_rcu(void)
lock_is_held(&rcu_lock_map) ||
lock_is_held(&rcu_sched_lock_map),
"Illegal synchronize_rcu() in RCU read-side critical section");
- if (!rcu_scheduler_active)
+ if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
return;
if (rcu_gp_is_expedited())
synchronize_rcu_expedited();
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index f19271d..4f6db7e 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -121,11 +121,14 @@ EXPORT_SYMBOL(rcu_read_lock_sched_held);
* Should expedited grace-period primitives always fall back to their
* non-expedited counterparts? Intended for use within RCU. Note
* that if the user specifies both rcu_expedited and rcu_normal, then
- * rcu_normal wins.
+ * rcu_normal wins. (Except during the boot-time window between when
+ * the first task is spawned and when the rcu_exp_runtime_mode()
+ * core_initcall() is invoked, during which everything is expedited.)
*/
bool rcu_gp_is_normal(void)
{
- return READ_ONCE(rcu_normal);
+ return READ_ONCE(rcu_normal) &&
+ rcu_scheduler_active != RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_normal);
@@ -135,13 +138,14 @@ static atomic_t rcu_expedited_nesting =
/*
* Should normal grace-period primitives be expedited? Intended for
* use within RCU. Note that this function takes the rcu_expedited
- * sysfs/boot variable into account as well as the rcu_expedite_gp()
- * nesting. So looping on rcu_unexpedite_gp() until rcu_gp_is_expedited()
- * returns false is a -really- bad idea.
+ * sysfs/boot variable and rcu_scheduler_active into account as well
+ * as the rcu_expedite_gp() nesting. So looping on rcu_unexpedite_gp()
+ * until rcu_gp_is_expedited() returns false is a -really- bad idea.
*/
bool rcu_gp_is_expedited(void)
{
- return rcu_expedited || atomic_read(&rcu_expedited_nesting);
+ return rcu_expedited || atomic_read(&rcu_expedited_nesting) ||
+ rcu_scheduler_active == RCU_SCHEDULER_INIT;
}
EXPORT_SYMBOL_GPL(rcu_gp_is_expedited);
@@ -257,7 +261,7 @@ EXPORT_SYMBOL_GPL(rcu_callback_map);
int notrace debug_lockdep_rcu_enabled(void)
{
- return rcu_scheduler_active && debug_locks &&
+ return rcu_scheduler_active != RCU_SCHEDULER_INACTIVE && debug_locks &&
current->lockdep_recursion == 0;
}
EXPORT_SYMBOL_GPL(debug_lockdep_rcu_enabled);
@@ -591,7 +595,7 @@ EXPORT_SYMBOL_GPL(call_rcu_tasks);
void synchronize_rcu_tasks(void)
{
/* Complain if the scheduler has not started. */
- RCU_LOCKDEP_WARN(!rcu_scheduler_active,
+ RCU_LOCKDEP_WARN(rcu_scheduler_active == RCU_SCHEDULER_INACTIVE,
"synchronize_rcu_tasks called too soon");
/* Wait for the grace period. */
@@ -813,6 +817,23 @@ static void rcu_spawn_tasks_kthread(void)
#endif /* #ifdef CONFIG_TASKS_RCU */
+/*
+ * Test each non-SRCU synchronous grace-period wait API. This is
+ * useful just after a change in mode for these primitives, and
+ * during early boot.
+ */
+void rcu_test_sync_prims(void)
+{
+ if (!IS_ENABLED(CONFIG_PROVE_RCU))
+ return;
+ synchronize_rcu();
+ synchronize_rcu_bh();
+ synchronize_sched();
+ synchronize_rcu_expedited();
+ synchronize_rcu_bh_expedited();
+ synchronize_sched_expedited();
+}
+
#ifdef CONFIG_PROVE_RCU
/*
@@ -865,6 +886,7 @@ void rcu_early_boot_tests(void)
early_boot_test_call_rcu_bh();
if (rcu_self_test_sched)
early_boot_test_call_rcu_sched();
+ rcu_test_sync_prims();
}
static int rcu_verify_early_boot_tests(void)
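
For reference, the three boot phases the RCU changes above keep
referring to, summarized as a sketch (the enum itself lives in the RCU
headers):

    /*
     *  RCU_SCHEDULER_INACTIVE: one task total; synchronous grace-period
     *                          primitives may degenerate to no-ops.
     *  RCU_SCHEDULER_INIT:     scheduler running, RCU kthreads not yet
     *                          spawned; synchronous primitives run
     *                          expedited, driven by the requesting task.
     *  RCU_SCHEDULER_RUNNING:  fully initialized runtime behavior.
     *
     * rcu_scheduler_starting() moves INACTIVE -> INIT, and the
     * rcu_exp_runtime_mode() core_initcall() moves INIT -> RUNNING, with
     * rcu_test_sync_prims() exercising the primitives around each step.
     */
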
diff --git a/kernel/signal.c b/kernel/signal.c
index ff046b7..3603d93 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -346,7 +346,7 @@ static bool task_participate_group_stop(struct task_struct *task)
* fresh group stop. Read comment in do_signal_stop() for details.
*/
if (!sig->group_stop_count && !(sig->flags & SIGNAL_STOP_STOPPED)) {
- sig->flags = SIGNAL_STOP_STOPPED;
+ signal_set_stop_flags(sig, SIGNAL_STOP_STOPPED);
return true;
}
return false;
@@ -843,7 +843,7 @@ static bool prepare_signal(int sig, struct task_struct *p, bool force)
* will take ->siglock, notice SIGNAL_CLD_MASK, and
* notify its parent. See get_signal_to_deliver().
*/
- signal->flags = why | SIGNAL_STOP_CONTINUED;
+ signal_set_stop_flags(signal, why | SIGNAL_STOP_CONTINUED);
signal->group_stop_count = 0;
signal->group_exit_code = 0;
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8dbaec0..1aea594 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -2475,6 +2475,7 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
break;
if (neg)
continue;
+ val = convmul * val / convdiv;
if ((min && val < *min) || (max && val > *max))
continue;
*i = val;
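
A worked instance of the conversion fix above: the
proc_doulongvec_ms_jiffies_minmax() path passes convmul = HZ and
convdiv = 1000, so user space writes milliseconds while the table
stores jiffies. Assuming HZ = 250:

    /*
     *  user writes "2000"                   (milliseconds)
     *  val = convmul * val / convdiv
     *      = 250 * 2000 / 1000 = 500        (jiffies)
     *
     * The added line performs this conversion before the *min / *max
     * comparison, so bounds expressed in kernel units are checked
     * against the converted value rather than the raw user input.
     */
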
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 2c115fd..74e0388 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -767,7 +767,7 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
tick = expires;
/* Skip reprogram of the event if it has not changed */
- if (ts->tick_stopped && (expires == dev->next_event))
+ if (ts->tick_stopped && (expires == ts->next_tick))
goto out;
/*
@@ -787,6 +787,8 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
trace_tick_stop(1, TICK_DEP_MASK_NONE);
}
+ ts->next_tick = tick;
+
/*
* If the expiration time == KTIME_MAX, then we simply stop
* the tick timer.
@@ -802,7 +804,10 @@ static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
else
tick_program_event(tick, 1);
out:
- /* Update the estimated sleep length */
+ /*
+ * Update the estimated sleep length until the next timer
+ * (not only the tick).
+ */
ts->sleep_length = ktime_sub(dev->next_event, now);
return tick;
}
diff --git a/kernel/time/tick-sched.h b/kernel/time/tick-sched.h
index bf38226..075444e 100644
--- a/kernel/time/tick-sched.h
+++ b/kernel/time/tick-sched.h
@@ -27,6 +27,7 @@ enum tick_nohz_mode {
* timer is modified for nohz sleeps. This is necessary
* to resume the tick timer operation in the timeline
* when the CPU returns from nohz sleep.
+ * @next_tick: Next tick to be fired when in dynticks mode.
* @tick_stopped: Indicator that the idle tick has been stopped
* @idle_jiffies: jiffies at the entry to idle for idle time accounting
* @idle_calls: Total number of idle calls
@@ -44,6 +45,7 @@ struct tick_sched {
unsigned long check_clocks;
enum tick_nohz_mode nohz_mode;
ktime_t last_tick;
+ ktime_t next_tick;
int inidle;
int tick_stopped;
unsigned long idle_jiffies;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index f883c43..424daa4 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -395,6 +395,36 @@ static const struct bpf_func_proto bpf_current_task_under_cgroup_proto = {
.arg2_type = ARG_ANYTHING,
};
+BPF_CALL_3(bpf_probe_read_str, void *, dst, u32, size,
+ const void *, unsafe_ptr)
+{
+ int ret;
+
+ /*
+ * The strncpy_from_unsafe() call will likely not fill the entire
+ * buffer, but that's okay in this circumstance: we're probing
+ * arbitrary memory anyway, similar to bpf_probe_read(), and might
+ * as well probe the stack. Thus, memory is explicitly cleared
+ * only in the error case, so that improper users who ignore the
+ * return code altogether don't copy garbage; otherwise the length
+ * of the string is returned, which can be used for
+ * bpf_perf_event_output() et al.
+ */
+ ret = strncpy_from_unsafe(dst, unsafe_ptr, size);
+ if (unlikely(ret < 0))
+ memset(dst, 0, size);
+
+ return ret;
+}
+
+static const struct bpf_func_proto bpf_probe_read_str_proto = {
+ .func = bpf_probe_read_str,
+ .gpl_only = true,
+ .ret_type = RET_INTEGER,
+ .arg1_type = ARG_PTR_TO_UNINIT_MEM,
+ .arg2_type = ARG_CONST_SIZE,
+ .arg3_type = ARG_ANYTHING,
+};
+
static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -432,6 +462,8 @@ static const struct bpf_func_proto *tracing_func_proto(enum bpf_func_id func_id)
return &bpf_current_task_under_cgroup_proto;
case BPF_FUNC_get_prandom_u32:
return &bpf_get_prandom_u32_proto;
+ case BPF_FUNC_probe_read_str:
+ return &bpf_probe_read_str_proto;
default:
return NULL;
}
@@ -459,6 +491,13 @@ static bool kprobe_prog_is_valid_access(int off, int size, enum bpf_access_type
return false;
if (off % size != 0)
return false;
+ /*
+ * Assertion for 32 bit to make sure the last 8-byte access
+ * (BPF_DW) to the last 4-byte member is disallowed.
+ */
+ if (off + size > sizeof(struct pt_regs))
+ return false;
+
return true;
}
@@ -540,6 +579,8 @@ static bool tp_prog_is_valid_access(int off, int size, enum bpf_access_type type
return false;
if (off % size != 0)
return false;
+
+ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(__u64));
return true;
}
@@ -572,28 +613,29 @@ static bool pe_prog_is_valid_access(int off, int size, enum bpf_access_type type
return true;
}
-static u32 pe_prog_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 pe_prog_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct bpf_perf_event_data, sample_period):
BUILD_BUG_ON(FIELD_SIZEOF(struct perf_sample_data, period) != sizeof(u64));
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
- data), dst_reg, src_reg,
+ data), si->dst_reg, si->src_reg,
offsetof(struct bpf_perf_event_data_kern, data));
- *insn++ = BPF_LDX_MEM(BPF_DW, dst_reg, dst_reg,
+ *insn++ = BPF_LDX_MEM(BPF_DW, si->dst_reg, si->dst_reg,
offsetof(struct perf_sample_data, period));
break;
default:
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct bpf_perf_event_data_kern,
- regs), dst_reg, src_reg,
+ regs), si->dst_reg, si->src_reg,
offsetof(struct bpf_perf_event_data_kern, regs));
- *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), dst_reg, dst_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(long), si->dst_reg, si->dst_reg,
+ si->off);
break;
}
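
A sketch of a tracing program using the new helper, written in the
samples/bpf style of this era; the attach point, map definition, and
buffer size are illustrative:

    #include <uapi/linux/bpf.h>
    #include <uapi/linux/ptrace.h>
    #include "bpf_helpers.h"

    struct bpf_map_def SEC("maps") events = {
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size = sizeof(int),
        .value_size = sizeof(u32),
        .max_entries = 64,      /* >= number of possible CPUs */
    };

    SEC("kprobe/do_sys_open")
    int trace_open(struct pt_regs *ctx)
    {
        char buf[64];
        int len;

        /* arg2 of do_sys_open() is the user filename pointer */
        len = bpf_probe_read_str(buf, sizeof(buf),
                                 (void *)PT_REGS_PARM2(ctx));
        if (len > 0)    /* the length includes the trailing NUL */
            bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU,
                                  buf, len);
        return 0;
    }

    char _license[] SEC("license") = "GPL";
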
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 5d33a73..30a144b1 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -163,14 +163,15 @@ trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
EXPORT_SYMBOL_GPL(trace_print_bitmask_seq);
const char *
-trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
+trace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len,
+ bool spacing)
{
int i;
const char *ret = trace_seq_buffer_ptr(p);
for (i = 0; i < buf_len; i++)
- trace_seq_printf(p, "%s%2.2x", i == 0 ? "" : " ", buf[i]);
-
+ trace_seq_printf(p, "%s%2.2x", !spacing || i == 0 ? "" : " ",
+ buf[i]);
trace_seq_putc(p, 0);
return ret;
diff --git a/kernel/ucount.c b/kernel/ucount.c
index 9d20d5d..4bbd38e 100644
--- a/kernel/ucount.c
+++ b/kernel/ucount.c
@@ -128,10 +128,10 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
struct ucounts *ucounts, *new;
- spin_lock(&ucounts_lock);
+ spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
if (!ucounts) {
- spin_unlock(&ucounts_lock);
+ spin_unlock_irq(&ucounts_lock);
new = kzalloc(sizeof(*new), GFP_KERNEL);
if (!new)
@@ -141,7 +141,7 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
new->uid = uid;
atomic_set(&new->count, 0);
- spin_lock(&ucounts_lock);
+ spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
if (ucounts) {
kfree(new);
@@ -152,16 +152,18 @@ static struct ucounts *get_ucounts(struct user_namespace *ns, kuid_t uid)
}
if (!atomic_add_unless(&ucounts->count, 1, INT_MAX))
ucounts = NULL;
- spin_unlock(&ucounts_lock);
+ spin_unlock_irq(&ucounts_lock);
return ucounts;
}
static void put_ucounts(struct ucounts *ucounts)
{
+ unsigned long flags;
+
if (atomic_dec_and_test(&ucounts->count)) {
- spin_lock(&ucounts_lock);
+ spin_lock_irqsave(&ucounts_lock, flags);
hlist_del_init(&ucounts->node);
- spin_unlock(&ucounts_lock);
+ spin_unlock_irqrestore(&ucounts_lock, flags);
kfree(ucounts);
}
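
A sketch of the lock inversion the _irq variants above guard against:
with the pid-namespace change earlier in this series, put_ucounts() can
now run from an RCU callback, i.e. in softirq context on the same CPU:

    /*
     *  process context                   interrupt on the same CPU
     *  ---------------                   -------------------------
     *  get_ucounts()
     *    spin_lock(&ucounts_lock)
     *      <softirq: RCU callback>
     *        delayed_free_pidns()
     *          ...
     *            put_ucounts()
     *              spin_lock(&ucounts_lock)  <- deadlock, same lock
     *
     * spin_lock_irq() in get_ucounts() closes the window, and
     * put_ucounts() uses spin_lock_irqsave() since its callers may
     * already have interrupts disabled.
     */
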
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index d4b0fa0..63177be 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -49,6 +49,8 @@ unsigned long *watchdog_cpumask_bits = cpumask_bits(&watchdog_cpumask);
#define for_each_watchdog_cpu(cpu) \
for_each_cpu_and((cpu), cpu_online_mask, &watchdog_cpumask)
+atomic_t watchdog_park_in_progress = ATOMIC_INIT(0);
+
/*
* The 'watchdog_running' variable is set to 1 when the watchdog threads
* are registered/started and is set to 0 when the watchdog threads are
@@ -260,6 +262,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
int duration;
int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return HRTIMER_NORESTART;
+
/* kick the hardlockup detector */
watchdog_interrupt_count();
@@ -467,12 +472,16 @@ static int watchdog_park_threads(void)
{
int cpu, ret = 0;
+ atomic_set(&watchdog_park_in_progress, 1);
+
for_each_watchdog_cpu(cpu) {
ret = kthread_park(per_cpu(softlockup_watchdog, cpu));
if (ret)
break;
}
+ atomic_set(&watchdog_park_in_progress, 0);
+
return ret;
}
diff --git a/kernel/watchdog_hld.c b/kernel/watchdog_hld.c
index 84016c8..12b8dd6 100644
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -84,6 +84,9 @@ static void watchdog_overflow_callback(struct perf_event *event,
/* Ensure the watchdog never gets throttled */
event->hw.interrupts = 0;
+ if (atomic_read(&watchdog_park_in_progress) != 0)
+ return;
+
if (__this_cpu_read(watchdog_nmi_touch) == true) {
__this_cpu_write(watchdog_nmi_touch, false);
return;
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 3d2515a..15969ab 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -164,7 +164,7 @@ config DEBUG_INFO_REDUCED
config DEBUG_INFO_SPLIT
bool "Produce split debuginfo in .dwo files"
- depends on DEBUG_INFO
+ depends on DEBUG_INFO && !FRV
help
Generate debug info into separate .dwo files. This significantly
reduces the build directory size for builds with DEBUG_INFO,
diff --git a/lib/ioremap.c b/lib/ioremap.c
index 86c8911..a3e14ce 100644
--- a/lib/ioremap.c
+++ b/lib/ioremap.c
@@ -144,4 +144,3 @@ int ioremap_page_range(unsigned long addr,
return err;
}
-EXPORT_SYMBOL_GPL(ioremap_page_range);
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 25f5723..e68604a 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -730,43 +730,50 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
}
EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
+static inline void pipe_truncate(struct iov_iter *i)
+{
+ struct pipe_inode_info *pipe = i->pipe;
+ if (pipe->nrbufs) {
+ size_t off = i->iov_offset;
+ int idx = i->idx;
+ int nrbufs = (idx - pipe->curbuf) & (pipe->buffers - 1);
+ if (off) {
+ pipe->bufs[idx].len = off - pipe->bufs[idx].offset;
+ idx = next_idx(idx, pipe);
+ nrbufs++;
+ }
+ while (pipe->nrbufs > nrbufs) {
+ pipe_buf_release(pipe, &pipe->bufs[idx]);
+ idx = next_idx(idx, pipe);
+ pipe->nrbufs--;
+ }
+ }
+}
+
static void pipe_advance(struct iov_iter *i, size_t size)
{
struct pipe_inode_info *pipe = i->pipe;
- struct pipe_buffer *buf;
- int idx = i->idx;
- size_t off = i->iov_offset, orig_sz;
-
if (unlikely(i->count < size))
size = i->count;
- orig_sz = size;
-
if (size) {
+ struct pipe_buffer *buf;
+ size_t off = i->iov_offset, left = size;
+ int idx = i->idx;
if (off) /* make it relative to the beginning of the buffer */
- size += off - pipe->bufs[idx].offset;
+ left += off - pipe->bufs[idx].offset;
while (1) {
buf = &pipe->bufs[idx];
- if (size <= buf->len)
+ if (left <= buf->len)
break;
- size -= buf->len;
+ left -= buf->len;
idx = next_idx(idx, pipe);
}
- buf->len = size;
i->idx = idx;
- off = i->iov_offset = buf->offset + size;
- }
- if (off)
- idx = next_idx(idx, pipe);
- if (pipe->nrbufs) {
- int unused = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
- /* [curbuf,unused) is in use. Free [idx,unused) */
- while (idx != unused) {
- pipe_buf_release(pipe, &pipe->bufs[idx]);
- idx = next_idx(idx, pipe);
- pipe->nrbufs--;
- }
+ i->iov_offset = buf->offset + left;
}
- i->count -= orig_sz;
+ i->count -= size;
+ /* ... and discard everything past that point */
+ pipe_truncate(i);
}
void iov_iter_advance(struct iov_iter *i, size_t size)
@@ -826,6 +833,7 @@ void iov_iter_pipe(struct iov_iter *i, int direction,
size_t count)
{
BUG_ON(direction != ITER_PIPE);
+ WARN_ON(pipe->nrbufs == pipe->buffers);
i->type = direction;
i->pipe = pipe;
i->idx = (pipe->curbuf + pipe->nrbufs) & (pipe->buffers - 1);
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 0b92d60..84812a9 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -769,7 +769,7 @@ static void radix_tree_free_nodes(struct radix_tree_node *node)
struct radix_tree_node *old = child;
offset = child->offset + 1;
child = child->parent;
- WARN_ON_ONCE(!list_empty(&node->private_list));
+ WARN_ON_ONCE(!list_empty(&old->private_list));
radix_tree_node_free(old);
if (old == entry_to_node(node))
return;
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 975b8fc..a8d74a7 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -483,11 +483,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
: 1UL << (BITS_PER_LONG - IO_TLB_SHIFT);
/*
- * For mappings greater than a page, we limit the stride (and
- * hence alignment) to a page size.
+ * For mappings greater than or equal to a page, we limit the stride
+ * (and hence alignment) to a page size.
*/
nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- if (size > PAGE_SIZE)
+ if (size >= PAGE_SIZE)
stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
else
stride = 1;
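
Worked numbers for the boundary case the fix addresses, assuming
IO_TLB_SHIFT = 11 (2 KiB slots) and PAGE_SHIFT = 12:

    /*
     *  size = PAGE_SIZE = 4096
     *  nslots = ALIGN(4096, 1 << 11) >> 11 = 2
     *
     * With the old "size > PAGE_SIZE" test, stride stayed 1, so the
     * two slots could start at an odd slot index and straddle a page
     * boundary in the bounce buffer. With "size >= PAGE_SIZE", the
     * stride becomes 1 << (PAGE_SHIFT - IO_TLB_SHIFT) = 2, keeping
     * page-sized mappings page-aligned.
     */
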
diff --git a/mm/filemap.c b/mm/filemap.c
index d0e4d10..b772a33 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -138,7 +138,7 @@ static int page_cache_tree_insert(struct address_space *mapping,
dax_radix_locked_entry(0, RADIX_DAX_EMPTY));
/* Wakeup waiters for exceptional entry lock */
dax_wake_mapping_entry_waiter(mapping, page->index, p,
- false);
+ true);
}
}
__radix_tree_replace(&mapping->page_tree, node, slot, page,
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 10eedbf..5f3ad65c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,6 +783,12 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
assert_spin_locked(pmd_lockptr(mm, pmd));
+ /*
+ * When we COW a devmap PMD entry, we split it into PTEs, so we should
+ * not be in this function with `flags & FOLL_COW` set.
+ */
+ WARN_ONCE(flags & FOLL_COW, "mm: In follow_devmap_pmd with FOLL_COW set");
+
if (flags & FOLL_WRITE && !pmd_write(*pmd))
return NULL;
@@ -883,15 +889,17 @@ void huge_pmd_set_accessed(struct vm_fault *vmf, pmd_t orig_pmd)
{
pmd_t entry;
unsigned long haddr;
+ bool write = vmf->flags & FAULT_FLAG_WRITE;
vmf->ptl = pmd_lock(vmf->vma->vm_mm, vmf->pmd);
if (unlikely(!pmd_same(*vmf->pmd, orig_pmd)))
goto unlock;
entry = pmd_mkyoung(orig_pmd);
+ if (write)
+ entry = pmd_mkdirty(entry);
haddr = vmf->address & HPAGE_PMD_MASK;
- if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry,
- vmf->flags & FAULT_FLAG_WRITE))
+ if (pmdp_set_access_flags(vmf->vma, haddr, vmf->pmd, entry, write))
update_mmu_cache_pmd(vmf->vma, vmf->address, vmf->pmd);
unlock:
@@ -919,8 +927,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
- pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
- __GFP_OTHER_NODE, vma,
+ pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE, vma,
vmf->address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_try_charge(pages[i], vma->vm_mm,
@@ -1127,6 +1134,16 @@ out_unlock:
return ret;
}
+/*
+ * FOLL_FORCE can write to even unwritable PMDs, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pmd(pmd_t pmd, unsigned int flags)
+{
+ return pmd_write(pmd) ||
+ ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pmd_dirty(pmd));
+}
+
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -1137,7 +1154,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
assert_spin_locked(pmd_lockptr(mm, pmd));
- if (flags & FOLL_WRITE && !pmd_write(*pmd))
+ if (flags & FOLL_WRITE && !can_follow_write_pmd(*pmd, flags))
goto out;
/* Avoid dumping huge zero page */
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 3edb759..c7025c1 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1773,23 +1773,32 @@ free:
}
/*
- * When releasing a hugetlb pool reservation, any surplus pages that were
- * allocated to satisfy the reservation must be explicitly freed if they were
- * never used.
- * Called with hugetlb_lock held.
+ * This routine has two main purposes:
+ * 1) Decrement the reservation count (resv_huge_pages) by the value passed
+ * in unused_resv_pages. This corresponds to the prior adjustments made
+ * to the associated reservation map.
+ * 2) Free any unused surplus pages that may have been allocated to satisfy
+ * the reservation. As many as unused_resv_pages may be freed.
+ *
+ * Called with hugetlb_lock held. However, the lock could be dropped (and
+ * reacquired) during calls to cond_resched_lock. Whenever dropping the lock,
+ * we must make sure nobody else can claim pages we are in the process of
+ * freeing. Do this by ensuring resv_huge_pages is always greater than
+ * the number of huge pages we plan to free whenever we drop the lock.
*/
static void return_unused_surplus_pages(struct hstate *h,
unsigned long unused_resv_pages)
{
unsigned long nr_pages;
- /* Uncommit the reservation */
- h->resv_huge_pages -= unused_resv_pages;
-
/* Cannot return gigantic pages currently */
if (hstate_is_gigantic(h))
- return;
+ goto out;
+ /*
+ * Part (or even all) of the reservation could have been backed
+ * by pre-allocated pages. Only free surplus pages.
+ */
nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
/*
@@ -1799,12 +1808,22 @@ static void return_unused_surplus_pages(struct hstate *h,
* when the nodes with surplus pages have no free pages.
* free_pool_huge_page() will balance the freed pages across the
* on-line nodes with memory and will handle the hstate accounting.
+ *
+ * Note that we decrement resv_huge_pages as we free the pages. If
+ * we drop the lock, resv_huge_pages will still be sufficiently large
+ * to cover subsequent pages we may free.
*/
while (nr_pages--) {
+ h->resv_huge_pages--;
+ unused_resv_pages--;
if (!free_pool_huge_page(h, &node_states[N_MEMORY], 1))
- break;
+ goto out;
cond_resched_lock(&hugetlb_lock);
}
+
+out:
+ /* Fully uncommit the reservation */
+ h->resv_huge_pages -= unused_resv_pages;
}
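
A worked run of the rewritten loop above, assuming unused_resv_pages = 5
and h->surplus_huge_pages = 3 at entry:

    /*
     *  nr_pages = min(5, 3) = 3
     *
     *  iterations 1..3: resv_huge_pages--, unused_resv_pages--, then
     *  one surplus page is freed; cond_resched_lock() may drop
     *  hugetlb_lock, but resv_huge_pages still covers the pages not
     *  yet freed, so nobody can claim them in the window.
     *
     *  out: resv_huge_pages -= 2 (the remainder was backed by
     *  pre-allocated pool pages), for a total decrement of 5, matching
     *  the old single-step "resv_huge_pages -= unused_resv_pages".
     */
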
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index e32389a..77ae323 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -943,7 +943,7 @@ static void collapse_huge_page(struct mm_struct *mm,
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
/* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_OTHER_NODE | __GFP_THISNODE;
+ gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
/*
* Before allocating the hugepage, release the mmap_sem read lock.
@@ -1242,7 +1242,6 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
struct vm_area_struct *vma;
unsigned long addr;
pmd_t *pmd, _pmd;
- bool deposited = false;
i_mmap_lock_write(mapping);
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1267,26 +1266,10 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
spinlock_t *ptl = pmd_lock(vma->vm_mm, pmd);
/* assume page table is clear */
_pmd = pmdp_collapse_flush(vma, addr, pmd);
- /*
- * now deposit the pgtable for arch that need it
- * otherwise free it.
- */
- if (arch_needs_pgtable_deposit()) {
- /*
- * The deposit should be visibile only after
- * collapse is seen by others.
- */
- smp_wmb();
- pgtable_trans_huge_deposit(vma->vm_mm, pmd,
- pmd_pgtable(_pmd));
- deposited = true;
- }
spin_unlock(ptl);
up_write(&vma->vm_mm->mmap_sem);
- if (!deposited) {
- atomic_long_dec(&vma->vm_mm->nr_ptes);
- pte_free(vma->vm_mm, pmd_pgtable(_pmd));
- }
+ atomic_long_dec(&vma->vm_mm->nr_ptes);
+ pte_free(vma->vm_mm, pmd_pgtable(_pmd));
}
}
i_mmap_unlock_write(mapping);
@@ -1326,8 +1309,7 @@ static void collapse_shmem(struct mm_struct *mm,
VM_BUG_ON(start & (HPAGE_PMD_NR - 1));
/* Only allocate from the target node */
- gfp = alloc_hugepage_khugepaged_gfpmask() |
- __GFP_OTHER_NODE | __GFP_THISNODE;
+ gfp = alloc_hugepage_khugepaged_gfpmask() | __GFP_THISNODE;
new_page = khugepaged_alloc_page(hpage, gfp, node);
if (!new_page) {
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 4048897..b822e15 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -625,8 +625,8 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
int nid, unsigned int lru_mask)
{
+ struct lruvec *lruvec = mem_cgroup_lruvec(NODE_DATA(nid), memcg);
unsigned long nr = 0;
- struct mem_cgroup_per_node *mz;
enum lru_list lru;
VM_BUG_ON((unsigned)nid >= nr_node_ids);
@@ -634,8 +634,7 @@ unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg,
for_each_lru(lru) {
if (!(BIT(lru) & lru_mask))
continue;
- mz = mem_cgroup_nodeinfo(memcg, nid);
- nr += mz->lru_size[lru];
+ nr += mem_cgroup_get_lru_size(lruvec, lru);
}
return nr;
}
@@ -1002,6 +1001,7 @@ out:
* mem_cgroup_update_lru_size - account for adding or removing an lru page
* @lruvec: mem_cgroup per zone lru vector
* @lru: index of lru list the page is sitting on
+ * @zid: zone id of the accounted pages
* @nr_pages: positive when adding or negative when removing
*
* This function must be called under lru_lock, just before a page is added
@@ -1009,27 +1009,25 @@ out:
* so as to allow it to check that lru_size 0 is consistent with list_empty).
*/
void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
- int nr_pages)
+ int zid, int nr_pages)
{
struct mem_cgroup_per_node *mz;
unsigned long *lru_size;
long size;
- bool empty;
if (mem_cgroup_disabled())
return;
mz = container_of(lruvec, struct mem_cgroup_per_node, lruvec);
- lru_size = mz->lru_size + lru;
- empty = list_empty(lruvec->lists + lru);
+ lru_size = &mz->lru_zone_size[zid][lru];
if (nr_pages < 0)
*lru_size += nr_pages;
size = *lru_size;
- if (WARN_ONCE(size < 0 || empty != !size,
- "%s(%p, %d, %d): lru_size %ld but %sempty\n",
- __func__, lruvec, lru, nr_pages, size, empty ? "" : "not ")) {
+ if (WARN_ONCE(size < 0,
+ "%s(%p, %d, %d): lru_size %ld\n",
+ __func__, lruvec, lru, nr_pages, size)) {
VM_BUG_ON(1);
*lru_size = 0;
}
@@ -4355,9 +4353,9 @@ static int mem_cgroup_do_precharge(unsigned long count)
return ret;
}
- /* Try charges one by one with reclaim */
+ /* Try charges one by one with reclaim, but do not retry */
while (count--) {
- ret = try_charge(mc.to, GFP_KERNEL & ~__GFP_NORETRY, 1);
+ ret = try_charge(mc.to, GFP_KERNEL | __GFP_NORETRY, 1);
if (ret)
return ret;
mc.precharge++;
diff --git a/mm/memory.c b/mm/memory.c
index 9f2c15c..6bf2b471 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3772,8 +3772,8 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
}
#endif /* __PAGETABLE_PMD_FOLDED */
-static int __follow_pte(struct mm_struct *mm, unsigned long address,
- pte_t **ptepp, spinlock_t **ptlp)
+static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
pgd_t *pgd;
pud_t *pud;
@@ -3790,11 +3790,20 @@ static int __follow_pte(struct mm_struct *mm, unsigned long address,
pmd = pmd_offset(pud, address);
VM_BUG_ON(pmd_trans_huge(*pmd));
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- goto out;
- /* We cannot handle huge page PFN maps. Luckily they don't exist. */
- if (pmd_huge(*pmd))
+ if (pmd_huge(*pmd)) {
+ if (!pmdpp)
+ goto out;
+
+ *ptlp = pmd_lock(mm, pmd);
+ if (pmd_huge(*pmd)) {
+ *pmdpp = pmd;
+ return 0;
+ }
+ spin_unlock(*ptlp);
+ }
+
+ if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
goto out;
ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
@@ -3810,16 +3819,30 @@ out:
return -EINVAL;
}
-int follow_pte(struct mm_struct *mm, unsigned long address, pte_t **ptepp,
- spinlock_t **ptlp)
+static inline int follow_pte(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, spinlock_t **ptlp)
+{
+ int res;
+
+ /* (void) is needed to make gcc happy */
+ (void) __cond_lock(*ptlp,
+ !(res = __follow_pte_pmd(mm, address, ptepp, NULL,
+ ptlp)));
+ return res;
+}
+
+int follow_pte_pmd(struct mm_struct *mm, unsigned long address,
+ pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp)
{
int res;
/* (void) is needed to make gcc happy */
(void) __cond_lock(*ptlp,
- !(res = __follow_pte(mm, address, ptepp, ptlp)));
+ !(res = __follow_pte_pmd(mm, address, ptepp, pmdpp,
+ ptlp)));
return res;
}
+EXPORT_SYMBOL(follow_pte_pmd);
/**
* follow_pfn - look up PFN at a user virtual address
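
follow_pte_pmd() generalizes follow_pte(): on success it returns with *ptlp held and exactly one of *ptepp/*pmdpp set, depending on whether the address is covered by a huge pmd. A hypothetical caller, sketched in kernel style (addr_is_writably_mapped is not from this patch):

static bool addr_is_writably_mapped(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pte_t *ptep = NULL;
	pmd_t *pmdp = NULL;
	bool writable;

	if (follow_pte_pmd(mm, addr, &ptep, &pmdp, &ptl))
		return false;			/* nothing mapped at addr */

	if (pmdp) {				/* huge mapping: pmd lock held */
		writable = pmd_write(*pmdp);
		spin_unlock(ptl);
	} else {				/* pte mapping: pte lock held */
		writable = pte_write(*ptep);
		pte_unmap_unlock(ptep, ptl);
	}
	return writable;
}
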
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index e43142c1..ca2723d 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1033,36 +1033,39 @@ static void node_states_set_node(int node, struct memory_notify *arg)
node_set_state(node, N_MEMORY);
}
-int zone_can_shift(unsigned long pfn, unsigned long nr_pages,
- enum zone_type target)
+bool zone_can_shift(unsigned long pfn, unsigned long nr_pages,
+ enum zone_type target, int *zone_shift)
{
struct zone *zone = page_zone(pfn_to_page(pfn));
enum zone_type idx = zone_idx(zone);
int i;
+ *zone_shift = 0;
+
if (idx < target) {
/* pages must be at end of current zone */
if (pfn + nr_pages != zone_end_pfn(zone))
- return 0;
+ return false;
/* no zones in use between current zone and target */
for (i = idx + 1; i < target; i++)
if (zone_is_initialized(zone - idx + i))
- return 0;
+ return false;
}
if (target < idx) {
/* pages must be at beginning of current zone */
if (pfn != zone->zone_start_pfn)
- return 0;
+ return false;
/* no zones in use between current zone and target */
for (i = target + 1; i < idx; i++)
if (zone_is_initialized(zone - idx + i))
- return 0;
+ return false;
}
- return target - idx;
+ *zone_shift = target - idx;
+ return true;
}
/* Must be protected by mem_hotplug_begin() */
@@ -1089,10 +1092,13 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
!can_online_high_movable(zone))
return -EINVAL;
- if (online_type == MMOP_ONLINE_KERNEL)
- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_NORMAL);
- else if (online_type == MMOP_ONLINE_MOVABLE)
- zone_shift = zone_can_shift(pfn, nr_pages, ZONE_MOVABLE);
+ if (online_type == MMOP_ONLINE_KERNEL) {
+ if (!zone_can_shift(pfn, nr_pages, ZONE_NORMAL, &zone_shift))
+ return -EINVAL;
+ } else if (online_type == MMOP_ONLINE_MOVABLE) {
+ if (!zone_can_shift(pfn, nr_pages, ZONE_MOVABLE, &zone_shift))
+ return -EINVAL;
+ }
zone = move_pfn_range(zone_shift, pfn, pfn + nr_pages);
if (!zone)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2e34664..1e7873e 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -2017,8 +2017,8 @@ retry_cpuset:
nmask = policy_nodemask(gfp, pol);
zl = policy_zonelist(gfp, pol, node);
- mpol_cond_put(pol);
page = __alloc_pages_nodemask(gfp, order, zl, nmask);
+ mpol_cond_put(pol);
out:
if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
goto retry_cpuset;
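
Moving mpol_cond_put() below the allocation matters because nmask and zl are derived from pol; dropping the conditional reference first leaves __alloc_pages_nodemask() dereferencing memory that a concurrent policy teardown may free. The bug class in miniature, as a toy user-space sketch (stand-in types, not kernel code):

#include <stdlib.h>

struct policy { int refs; int *nodemask; };

static void put_policy(struct policy *p)
{
	if (--p->refs == 0) {
		free(p->nodemask);	/* derived data dies with the object */
		free(p);
	}
}

int main(void)
{
	struct policy *pol = malloc(sizeof(*pol));
	pol->refs = 1;
	pol->nodemask = calloc(4, sizeof(int));

	int *nmask = pol->nodemask;	/* like policy_nodemask(gfp, pol) */

	/* Old order: put_policy(pol); nmask[0] = 1;  -- use after free. */
	nmask[0] = 1;			/* new order: use first ...        */
	put_policy(pol);		/* ... then drop the reference.    */
	return 0;
}
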
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2c6d5f6..f3e0c69 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1864,14 +1864,14 @@ int move_freepages(struct zone *zone,
#endif
for (page = start_page; page <= end_page;) {
- /* Make sure we are not inadvertently changing nodes */
- VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
-
if (!pfn_valid_within(page_to_pfn(page))) {
page++;
continue;
}
+ /* Make sure we are not inadvertently changing nodes */
+ VM_BUG_ON_PAGE(page_to_nid(page) != zone_to_nid(zone), page);
+
if (!PageBuddy(page)) {
page++;
continue;
@@ -2583,30 +2583,22 @@ int __isolate_free_page(struct page *page, unsigned int order)
* Update NUMA hit/miss statistics
*
* Must be called with interrupts disabled.
- *
- * When __GFP_OTHER_NODE is set assume the node of the preferred
- * zone is the local node. This is useful for daemons who allocate
- * memory on behalf of other processes.
*/
-static inline void zone_statistics(struct zone *preferred_zone, struct zone *z,
- gfp_t flags)
+static inline void zone_statistics(struct zone *preferred_zone, struct zone *z)
{
#ifdef CONFIG_NUMA
- int local_nid = numa_node_id();
enum zone_stat_item local_stat = NUMA_LOCAL;
- if (unlikely(flags & __GFP_OTHER_NODE)) {
+ if (z->node != numa_node_id())
local_stat = NUMA_OTHER;
- local_nid = preferred_zone->node;
- }
- if (z->node == local_nid) {
+ if (z->node == preferred_zone->node)
__inc_zone_state(z, NUMA_HIT);
- __inc_zone_state(z, local_stat);
- } else {
+ else {
__inc_zone_state(z, NUMA_MISS);
__inc_zone_state(preferred_zone, NUMA_FOREIGN);
}
+ __inc_zone_state(z, local_stat);
#endif
}
@@ -2674,7 +2666,7 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
}
__count_zid_vm_events(PGALLOC, page_zonenum(page), 1 << order);
- zone_statistics(preferred_zone, zone, gfp_flags);
+ zone_statistics(preferred_zone, zone);
local_irq_restore(flags);
VM_BUG_ON_PAGE(bad_range(zone, page), page);
@@ -3531,12 +3523,13 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
struct page *page = NULL;
unsigned int alloc_flags;
unsigned long did_some_progress;
- enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
+ enum compact_priority compact_priority;
enum compact_result compact_result;
- int compaction_retries = 0;
- int no_progress_loops = 0;
+ int compaction_retries;
+ int no_progress_loops;
unsigned long alloc_start = jiffies;
unsigned int stall_timeout = 10 * HZ;
+ unsigned int cpuset_mems_cookie;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -3557,6 +3550,23 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
(__GFP_ATOMIC|__GFP_DIRECT_RECLAIM)))
gfp_mask &= ~__GFP_ATOMIC;
+retry_cpuset:
+ compaction_retries = 0;
+ no_progress_loops = 0;
+ compact_priority = DEF_COMPACT_PRIORITY;
+ cpuset_mems_cookie = read_mems_allowed_begin();
+ /*
+ * We need to recalculate the starting point for the zonelist iterator
+ * because we might have used a different nodemask in the fast path, or
+ * there was a cpuset modification and we are retrying - otherwise we
+ * could end up iterating over non-eligible zones endlessly.
+ */
+ ac->preferred_zoneref = first_zones_zonelist(ac->zonelist,
+ ac->high_zoneidx, ac->nodemask);
+ if (!ac->preferred_zoneref->zone)
+ goto nopage;
+
/*
* The fast path uses conservative alloc_flags to succeed only until
* kswapd needs to be woken up, and to avoid the cost of setting up
@@ -3716,6 +3726,13 @@ retry:
&compaction_retries))
goto retry;
+ /*
+ * It's possible we raced with cpuset update so the OOM would be
+ * premature (see below the nopage: label for full explanation).
+ */
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ goto retry_cpuset;
+
/* Reclaim has failed us, start killing things */
page = __alloc_pages_may_oom(gfp_mask, order, ac, &did_some_progress);
if (page)
@@ -3728,6 +3745,16 @@ retry:
}
nopage:
+ /*
+ * When updating a task's mems_allowed or mempolicy nodemask, it is
+ * possible to race with parallel threads in such a way that our
+ * allocation can fail while the mask is being updated. If we are about
+ * to fail, check if the cpuset changed during allocation and if so,
+ * retry.
+ */
+ if (read_mems_allowed_retry(cpuset_mems_cookie))
+ goto retry_cpuset;
+
warn_alloc(gfp_mask,
"page allocation failure: order:%u", order);
got_pg:
@@ -3742,7 +3769,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, nodemask_t *nodemask)
{
struct page *page;
- unsigned int cpuset_mems_cookie;
unsigned int alloc_flags = ALLOC_WMARK_LOW;
gfp_t alloc_mask = gfp_mask; /* The gfp_t that was actually used for allocation */
struct alloc_context ac = {
@@ -3779,9 +3805,6 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (IS_ENABLED(CONFIG_CMA) && ac.migratetype == MIGRATE_MOVABLE)
alloc_flags |= ALLOC_CMA;
-retry_cpuset:
- cpuset_mems_cookie = read_mems_allowed_begin();
-
/* Dirty zone balancing only done in the fast path */
ac.spread_dirty_pages = (gfp_mask & __GFP_WRITE);
@@ -3792,8 +3815,13 @@ retry_cpuset:
*/
ac.preferred_zoneref = first_zones_zonelist(ac.zonelist,
ac.high_zoneidx, ac.nodemask);
- if (!ac.preferred_zoneref) {
+ if (!ac.preferred_zoneref->zone) {
page = NULL;
+ /*
+ * This might be due to a race with a cpuset_current_mems_allowed
+ * update, so make sure we retry with the original nodemask in the
+ * slow path.
+ */
goto no_zone;
}
@@ -3802,6 +3830,7 @@ retry_cpuset:
if (likely(page))
goto out;
+no_zone:
/*
* Runtime PM, block IO and its error handling path can deadlock
* because I/O on the device might not complete.
@@ -3813,21 +3842,10 @@ retry_cpuset:
* Restore the original nodemask if it was potentially replaced with
* &cpuset_current_mems_allowed to optimize the fast-path attempt.
*/
- if (cpusets_enabled())
+ if (unlikely(ac.nodemask != nodemask))
ac.nodemask = nodemask;
- page = __alloc_pages_slowpath(alloc_mask, order, &ac);
-no_zone:
- /*
- * When updating a task's mems_allowed, it is possible to race with
- * parallel threads in such a way that an allocation can fail while
- * the mask is being updated. If a page allocation is about to fail,
- * check if the cpuset changed during allocation and if so, retry.
- */
- if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie))) {
- alloc_mask = gfp_mask;
- goto retry_cpuset;
- }
+ page = __alloc_pages_slowpath(alloc_mask, order, &ac);
out:
if (memcg_kmem_enabled() && (gfp_mask & __GFP_ACCOUNT) && page &&
@@ -3904,8 +3922,8 @@ EXPORT_SYMBOL(free_pages);
* drivers to provide a backing region of memory for use as either an
* sk_buff->head, or to be used in the "frags" portion of skb_shared_info.
*/
-static struct page *__page_frag_refill(struct page_frag_cache *nc,
- gfp_t gfp_mask)
+static struct page *__page_frag_cache_refill(struct page_frag_cache *nc,
+ gfp_t gfp_mask)
{
struct page *page = NULL;
gfp_t gfp = gfp_mask;
@@ -3925,22 +3943,23 @@ static struct page *__page_frag_refill(struct page_frag_cache *nc,
return page;
}
-void __page_frag_drain(struct page *page, unsigned int order,
- unsigned int count)
+void __page_frag_cache_drain(struct page *page, unsigned int count)
{
VM_BUG_ON_PAGE(page_ref_count(page) == 0, page);
if (page_ref_sub_and_test(page, count)) {
+ unsigned int order = compound_order(page);
+
if (order == 0)
free_hot_cold_page(page, false);
else
__free_pages_ok(page, order);
}
}
-EXPORT_SYMBOL(__page_frag_drain);
+EXPORT_SYMBOL(__page_frag_cache_drain);
-void *__alloc_page_frag(struct page_frag_cache *nc,
- unsigned int fragsz, gfp_t gfp_mask)
+void *page_frag_alloc(struct page_frag_cache *nc,
+ unsigned int fragsz, gfp_t gfp_mask)
{
unsigned int size = PAGE_SIZE;
struct page *page;
@@ -3948,7 +3967,7 @@ void *__alloc_page_frag(struct page_frag_cache *nc,
if (unlikely(!nc->va)) {
refill:
- page = __page_frag_refill(nc, gfp_mask);
+ page = __page_frag_cache_refill(nc, gfp_mask);
if (!page)
return NULL;
@@ -3991,19 +4010,19 @@ refill:
return nc->va + offset;
}
-EXPORT_SYMBOL(__alloc_page_frag);
+EXPORT_SYMBOL(page_frag_alloc);
/*
* Frees a page fragment allocated out of either a compound or order 0 page.
*/
-void __free_page_frag(void *addr)
+void page_frag_free(void *addr)
{
struct page *page = virt_to_head_page(addr);
if (unlikely(put_page_testzero(page)))
__free_pages_ok(page, compound_order(page));
}
-EXPORT_SYMBOL(__free_page_frag);
+EXPORT_SYMBOL(page_frag_free);
static void *make_alloc_exact(unsigned long addr, unsigned int order,
size_t size)
@@ -7255,6 +7274,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
.zone = page_zone(pfn_to_page(start)),
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
+ .gfp_mask = GFP_KERNEL,
};
INIT_LIST_HEAD(&cc.migratepages);
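
These page_alloc.c hunks rename the page-fragment API: __alloc_page_frag() becomes page_frag_alloc(), __free_page_frag() becomes page_frag_free(), and __page_frag_drain() becomes __page_frag_cache_drain(), which now derives the order from compound_order() instead of taking it as a parameter. A kernel-style usage sketch with the new names (illustrative only; real users typically keep the cache per-CPU):

static struct page_frag_cache frag_cache;	/* zeroed: refilled on first use */

static void *grab_small_buffer(unsigned int len)
{
	/* Carves len bytes out of the cached page, refilling on demand. */
	return page_frag_alloc(&frag_cache, len, GFP_ATOMIC);
}

static void release_small_buffer(void *buf)
{
	/* Drops one reference on the backing order-0 or compound page. */
	page_frag_free(buf);
}
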
diff --git a/mm/slab.c b/mm/slab.c
index 29bc6c0..4f2ec6b 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2457,7 +2457,6 @@ union freelist_init_state {
unsigned int pos;
unsigned int *list;
unsigned int count;
- unsigned int rand;
};
struct rnd_state rnd_state;
};
@@ -2483,8 +2482,7 @@ static bool freelist_state_initialize(union freelist_init_state *state,
} else {
state->list = cachep->random_seq;
state->count = count;
- state->pos = 0;
- state->rand = rand;
+ state->pos = rand % count;
ret = true;
}
return ret;
@@ -2493,7 +2491,9 @@ static bool freelist_state_initialize(union freelist_init_state *state,
/* Get the next entry on the list and randomize it using a random shift */
static freelist_idx_t next_random_slot(union freelist_init_state *state)
{
- return (state->list[state->pos++] + state->rand) % state->count;
+ if (state->pos >= state->count)
+ state->pos = 0;
+ return state->list[state->pos++];
}
/* Swap two freelist entries */
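
The freelist-randomization change above drops the per-cache random offset added on every lookup and instead enters the precomputed permutation at a random position, walking it with wraparound; each slot index is still produced exactly once per slab. A user-space model of the new scheme (stand-in types, not the slab code itself):

#include <stdio.h>

struct state { unsigned pos, count; const unsigned *list; };

static unsigned next_random_slot(struct state *s)
{
	if (s->pos >= s->count)
		s->pos = 0;			/* wrap around */
	return s->list[s->pos++];
}

int main(void)
{
	const unsigned perm[5] = { 3, 0, 4, 1, 2 };	/* precomputed permutation */
	struct state s = { .pos = 7 % 5, .count = 5, .list = perm };

	for (unsigned i = 0; i < s.count; i++)
		printf("%u ", next_random_slot(&s));	/* prints: 4 1 2 3 0 */
	printf("\n");
	return 0;
}
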
diff --git a/mm/slub.c b/mm/slub.c
index 067598a..7aa6f43 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -496,10 +496,11 @@ static inline int check_valid_pointer(struct kmem_cache *s,
return 1;
}
-static void print_section(char *text, u8 *addr, unsigned int length)
+static void print_section(char *level, char *text, u8 *addr,
+ unsigned int length)
{
metadata_access_enable();
- print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
+ print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
length, 1);
metadata_access_disable();
}
@@ -636,14 +637,15 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
p, p - addr, get_freepointer(s, p));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p - s->red_left_pad, s->red_left_pad);
+ print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
+ s->red_left_pad);
else if (p > addr + 16)
- print_section("Bytes b4 ", p - 16, 16);
+ print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);
- print_section("Object ", p, min_t(unsigned long, s->object_size,
- PAGE_SIZE));
+ print_section(KERN_ERR, "Object ", p,
+ min_t(unsigned long, s->object_size, PAGE_SIZE));
if (s->flags & SLAB_RED_ZONE)
- print_section("Redzone ", p + s->object_size,
+ print_section(KERN_ERR, "Redzone ", p + s->object_size,
s->inuse - s->object_size);
if (s->offset)
@@ -658,7 +660,8 @@ static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
if (off != size_from_object(s))
/* Beginning of the filler is the free pointer */
- print_section("Padding ", p + off, size_from_object(s) - off);
+ print_section(KERN_ERR, "Padding ", p + off,
+ size_from_object(s) - off);
dump_stack();
}
@@ -820,7 +823,7 @@ static int slab_pad_check(struct kmem_cache *s, struct page *page)
end--;
slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
- print_section("Padding ", end - remainder, remainder);
+ print_section(KERN_ERR, "Padding ", end - remainder, remainder);
restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
return 0;
@@ -973,7 +976,7 @@ static void trace(struct kmem_cache *s, struct page *page, void *object,
page->freelist);
if (!alloc)
- print_section("Object ", (void *)object,
+ print_section(KERN_INFO, "Object ", (void *)object,
s->object_size);
dump_stack();
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 1c6e032..4761701 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -943,11 +943,25 @@ bool reuse_swap_page(struct page *page, int *total_mapcount)
count = page_trans_huge_mapcount(page, total_mapcount);
if (count <= 1 && PageSwapCache(page)) {
count += page_swapcount(page);
- if (count == 1 && !PageWriteback(page)) {
+ if (count != 1)
+ goto out;
+ if (!PageWriteback(page)) {
delete_from_swap_cache(page);
SetPageDirty(page);
+ } else {
+ swp_entry_t entry;
+ struct swap_info_struct *p;
+
+ entry.val = page_private(page);
+ p = swap_info_get(entry);
+ if (p->flags & SWP_STABLE_WRITES) {
+ spin_unlock(&p->lock);
+ return false;
+ }
+ spin_unlock(&p->lock);
}
}
+out:
return count <= 1;
}
@@ -2448,6 +2462,10 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
error = -ENOMEM;
goto bad_swap;
}
+
+ if (bdi_cap_stable_pages_required(inode_to_bdi(inode)))
+ p->flags |= SWP_STABLE_WRITES;
+
if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) {
int cpu;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 6aa5b01..532a2a7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -242,6 +242,16 @@ unsigned long lruvec_lru_size(struct lruvec *lruvec, enum lru_list lru)
return node_page_state(lruvec_pgdat(lruvec), NR_LRU_BASE + lru);
}
+unsigned long lruvec_zone_lru_size(struct lruvec *lruvec, enum lru_list lru,
+ int zone_idx)
+{
+ if (!mem_cgroup_disabled())
+ return mem_cgroup_get_zone_lru_size(lruvec, lru, zone_idx);
+
+ return zone_page_state(&lruvec_pgdat(lruvec)->node_zones[zone_idx],
+ NR_ZONE_LRU_BASE + lru);
+}
+
/*
* Add a shrinker callback to be called from the vm.
*/
@@ -1382,8 +1392,7 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode)
* be complete before mem_cgroup_update_lru_size due to a sanity check.
*/
static __always_inline void update_lru_sizes(struct lruvec *lruvec,
- enum lru_list lru, unsigned long *nr_zone_taken,
- unsigned long nr_taken)
+ enum lru_list lru, unsigned long *nr_zone_taken)
{
int zid;
@@ -1392,11 +1401,11 @@ static __always_inline void update_lru_sizes(struct lruvec *lruvec,
continue;
__update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
- }
-
#ifdef CONFIG_MEMCG
- mem_cgroup_update_lru_size(lruvec, lru, -nr_taken);
+ mem_cgroup_update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]);
#endif
+ }
}
/*
@@ -1501,7 +1510,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
*nr_scanned = scan;
trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, scan,
nr_taken, mode, is_file_lru(lru));
- update_lru_sizes(lruvec, lru, nr_zone_taken, nr_taken);
+ update_lru_sizes(lruvec, lru, nr_zone_taken);
return nr_taken;
}
@@ -2047,10 +2056,8 @@ static bool inactive_list_is_low(struct lruvec *lruvec, bool file,
if (!managed_zone(zone))
continue;
- inactive_zone = zone_page_state(zone,
- NR_ZONE_LRU_BASE + (file * LRU_FILE));
- active_zone = zone_page_state(zone,
- NR_ZONE_LRU_BASE + (file * LRU_FILE) + LRU_ACTIVE);
+ inactive_zone = lruvec_zone_lru_size(lruvec, file * LRU_FILE, zid);
+ active_zone = lruvec_zone_lru_size(lruvec, (file * LRU_FILE) + LRU_ACTIVE, zid);
inactive -= min(inactive, inactive_zone);
active -= min(active, active_zone);
diff --git a/net/6lowpan/nhc.c b/net/6lowpan/nhc.c
index 7008d53..4fa2fdd 100644
--- a/net/6lowpan/nhc.c
+++ b/net/6lowpan/nhc.c
@@ -27,8 +27,8 @@ static int lowpan_nhc_insert(struct lowpan_nhc *nhc)
/* Figure out where to put new node */
while (*new) {
- struct lowpan_nhc *this = container_of(*new, struct lowpan_nhc,
- node);
+ struct lowpan_nhc *this = rb_entry(*new, struct lowpan_nhc,
+ node);
int result, len_dif, len;
len_dif = nhc->idlen - this->idlen;
@@ -69,8 +69,8 @@ static struct lowpan_nhc *lowpan_nhc_by_nhcid(const struct sk_buff *skb)
const u8 *nhcid_skb_ptr = skb->data;
while (node) {
- struct lowpan_nhc *nhc = container_of(node, struct lowpan_nhc,
- node);
+ struct lowpan_nhc *nhc = rb_entry(node, struct lowpan_nhc,
+ node);
u8 nhcid_skb_ptr_masked[LOWPAN_NHC_MAX_ID_LEN];
int result, i;
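
The container_of() to rb_entry() conversions here are purely cosmetic: rb_entry() is defined as container_of() over the embedded rb_node, so behavior is identical and only the intent becomes explicit. A self-contained demo with stand-in definitions mirroring the kernel's:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

struct rb_node { struct rb_node *left, *right; };	/* stand-in */
struct nhc_demo { int idlen; struct rb_node node; };

int main(void)
{
	struct nhc_demo nhc = { .idlen = 2 };
	struct rb_node *n = &nhc.node;

	/* Both expressions recover the enclosing structure. */
	printf("%d %d\n",
	       container_of(n, struct nhc_demo, node)->idlen,
	       rb_entry(n, struct nhc_demo, node)->idlen);
	return 0;
}
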
diff --git a/net/Kconfig b/net/Kconfig
index 2e9ee61..ce4aee6 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -259,10 +259,6 @@ config XPS
config HWBM
bool
-config SOCK_CGROUP_DATA
- bool
- default n
-
config CGROUP_NET_PRIO
bool "Network priority cgroup"
depends on CGROUPS
@@ -394,6 +390,7 @@ source "net/9p/Kconfig"
source "net/caif/Kconfig"
source "net/ceph/Kconfig"
source "net/nfc/Kconfig"
+source "net/psample/Kconfig"
config LWTUNNEL
bool "Network light weight tunnels"
diff --git a/net/Makefile b/net/Makefile
index 5d6e0e5f..7d41de4 100644
--- a/net/Makefile
+++ b/net/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_DNS_RESOLVER) += dns_resolver/
obj-$(CONFIG_CEPH_LIB) += ceph/
obj-$(CONFIG_BATMAN_ADV) += batman-adv/
obj-$(CONFIG_NFC) += nfc/
+obj-$(CONFIG_PSAMPLE) += psample/
obj-$(CONFIG_OPENVSWITCH) += openvswitch/
obj-$(CONFIG_VSOCKETS) += vmw_vsock/
obj-$(CONFIG_MPLS) += mpls/
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 4855d18..038b109 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,7 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
{
ax25_clear_queues(ax25);
- if (!sock_flag(ax25->sk, SOCK_DESTROY))
+ if (!ax25->sk || !sock_flag(ax25->sk, SOCK_DESTROY))
ax25_stop_heartbeat(ax25);
ax25_stop_t1timer(ax25);
ax25_stop_t2timer(ax25);
diff --git a/net/batman-adv/Makefile b/net/batman-adv/Makefile
index f724d3c..915987b 100644
--- a/net/batman-adv/Makefile
+++ b/net/batman-adv/Makefile
@@ -1,5 +1,5 @@
#
-# Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+# Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
#
# Marek Lindner, Simon Wunderlich
#
diff --git a/net/batman-adv/bat_algo.c b/net/batman-adv/bat_algo.c
index 623d043..44fd073 100644
--- a/net/batman-adv/bat_algo.c
+++ b/net/batman-adv/bat_algo.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_algo.h b/net/batman-adv/bat_algo.h
index 3b5b69c..29f6312 100644
--- a/net/batman-adv/bat_algo.h
+++ b/net/batman-adv/bat_algo.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
*
diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
index f00f666..7c3d994e 100644
--- a/net/batman-adv/bat_iv_ogm.c
+++ b/net/batman-adv/bat_iv_ogm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_iv_ogm.h b/net/batman-adv/bat_iv_ogm.h
index b9f3550..ae2ab52 100644
--- a/net/batman-adv/bat_iv_ogm.h
+++ b/net/batman-adv/bat_iv_ogm.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/bat_v.c b/net/batman-adv/bat_v.c
index 2ac612d7..0acd081 100644
--- a/net/batman-adv/bat_v.c
+++ b/net/batman-adv/bat_v.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
diff --git a/net/batman-adv/bat_v.h b/net/batman-adv/bat_v.h
index 83b7763..dd7c4b6 100644
--- a/net/batman-adv/bat_v.h
+++ b/net/batman-adv/bat_v.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Linus Lüssing
*
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index f2fb2f0..b90c990 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
diff --git a/net/batman-adv/bat_v_elp.h b/net/batman-adv/bat_v_elp.h
index be17c0b..376ead2 100644
--- a/net/batman-adv/bat_v_elp.h
+++ b/net/batman-adv/bat_v_elp.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing, Marek Lindner
*
diff --git a/net/batman-adv/bat_v_ogm.c b/net/batman-adv/bat_v_ogm.c
index 38b9aab..03a35c9 100644
--- a/net/batman-adv/bat_v_ogm.c
+++ b/net/batman-adv/bat_v_ogm.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
diff --git a/net/batman-adv/bat_v_ogm.h b/net/batman-adv/bat_v_ogm.h
index 4c4d45c..2068770 100644
--- a/net/batman-adv/bat_v_ogm.h
+++ b/net/batman-adv/bat_v_ogm.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
diff --git a/net/batman-adv/bitarray.c b/net/batman-adv/bitarray.c
index 0322714..2b070c7 100644
--- a/net/batman-adv/bitarray.c
+++ b/net/batman-adv/bitarray.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bitarray.h b/net/batman-adv/bitarray.h
index 0e6e9d0..cc262c9 100644
--- a/net/batman-adv/bitarray.h
+++ b/net/batman-adv/bitarray.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index e7f690b..ba8420d 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -449,7 +449,6 @@ static void batadv_bla_send_claim(struct batadv_priv *bat_priv, u8 *mac,
batadv_inc_counter(bat_priv, BATADV_CNT_RX);
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- soft_iface->last_rx = jiffies;
netif_rx(skb);
out:
diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h
index 1ae93e4..e157986 100644
--- a/net/batman-adv/bridge_loop_avoidance.h
+++ b/net/batman-adv/bridge_loop_avoidance.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
@@ -20,6 +20,8 @@
#include "main.h"
+#include <linux/compiler.h>
+#include <linux/stddef.h>
#include <linux/types.h>
struct net_device;
@@ -27,6 +29,22 @@ struct netlink_callback;
struct seq_file;
struct sk_buff;
+/**
+ * batadv_bla_is_loopdetect_mac - check if the mac address is from a loop detect
+ * frame sent by bridge loop avoidance
+ * @mac: mac address to check
+ *
+ * Return: true if it looks like a loop detect frame
+ * (mac starts with BA:BE), false otherwise
+ */
+static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac)
+{
+ if (mac[0] == 0xba && mac[1] == 0xbe)
+ return true;
+
+ return false;
+}
+
#ifdef CONFIG_BATMAN_ADV_BLA
bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb,
unsigned short vid, bool is_bcast);
diff --git a/net/batman-adv/debugfs.c b/net/batman-adv/debugfs.c
index 7792550..e32ad47 100644
--- a/net/batman-adv/debugfs.c
+++ b/net/batman-adv/debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
@@ -19,7 +19,7 @@
#include "main.h"
#include <linux/debugfs.h>
-#include <linux/device.h>
+#include <linux/err.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/fs.h>
diff --git a/net/batman-adv/debugfs.h b/net/batman-adv/debugfs.h
index e49121e..9c5d4a6 100644
--- a/net/batman-adv/debugfs.h
+++ b/net/batman-adv/debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index 49576c5..1bfd1db 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
@@ -1050,7 +1050,6 @@ bool batadv_dat_snoop_outgoing_arp_request(struct batadv_priv *bat_priv,
bat_priv->soft_iface);
bat_priv->stats.rx_packets++;
bat_priv->stats.rx_bytes += skb->len + ETH_HLEN + hdr_size;
- bat_priv->soft_iface->last_rx = jiffies;
netif_rx(skb_new);
batadv_dbg(BATADV_DBG_DAT, bat_priv, "ARP request replied locally\n");
diff --git a/net/batman-adv/distributed-arp-table.h b/net/batman-adv/distributed-arp-table.h
index 813ecea..ec364a3 100644
--- a/net/batman-adv/distributed-arp-table.h
+++ b/net/batman-adv/distributed-arp-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2011-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2011-2017 B.A.T.M.A.N. contributors:
*
* Antonio Quartulli
*
diff --git a/net/batman-adv/fragmentation.c b/net/batman-adv/fragmentation.c
index 9c561e6..ead18ca 100644
--- a/net/batman-adv/fragmentation.c
+++ b/net/batman-adv/fragmentation.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
@@ -474,7 +474,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
primary_if = batadv_primary_if_get_selected(bat_priv);
if (!primary_if) {
ret = -EINVAL;
- goto put_primary_if;
+ goto free_skb;
}
/* Create one header to be copied to all fragments */
@@ -502,7 +502,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
skb_fragment = batadv_frag_create(skb, &frag_header, mtu);
if (!skb_fragment) {
ret = -ENOMEM;
- goto free_skb;
+ goto put_primary_if;
}
batadv_inc_counter(bat_priv, BATADV_CNT_FRAG_TX);
@@ -511,7 +511,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
ret = batadv_send_unicast_skb(skb_fragment, neigh_node);
if (ret != NET_XMIT_SUCCESS) {
ret = NET_XMIT_DROP;
- goto free_skb;
+ goto put_primary_if;
}
frag_header.no++;
@@ -519,7 +519,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
/* The initial check in this function should cover this case */
if (frag_header.no == BATADV_FRAG_MAX_FRAGMENTS - 1) {
ret = -EINVAL;
- goto free_skb;
+ goto put_primary_if;
}
}
@@ -527,7 +527,7 @@ int batadv_frag_send_packet(struct sk_buff *skb,
if (batadv_skb_head_push(skb, header_size) < 0 ||
pskb_expand_head(skb, header_size + ETH_HLEN, 0, GFP_ATOMIC) < 0) {
ret = -ENOMEM;
- goto free_skb;
+ goto put_primary_if;
}
memcpy(skb->data, &frag_header, header_size);
diff --git a/net/batman-adv/fragmentation.h b/net/batman-adv/fragmentation.h
index b95f619..1a2d6c3 100644
--- a/net/batman-adv/fragmentation.h
+++ b/net/batman-adv/fragmentation.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2013-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2013-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll <martin@hundeboll.net>
*
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index 52b8bd6..de9955d 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_client.h b/net/batman-adv/gateway_client.h
index 859166d..3baa3d4 100644
--- a/net/batman-adv/gateway_client.h
+++ b/net/batman-adv/gateway_client.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.c b/net/batman-adv/gateway_common.c
index 2118481..5db2e43 100644
--- a/net/batman-adv/gateway_common.c
+++ b/net/batman-adv/gateway_common.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/gateway_common.h b/net/batman-adv/gateway_common.h
index 8a5e1dd..0a6a97d 100644
--- a/net/batman-adv/gateway_common.h
+++ b/net/batman-adv/gateway_common.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 61a431a..e348f76 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/hard-interface.h b/net/batman-adv/hard-interface.h
index d6309a4..9f9890f 100644
--- a/net/batman-adv/hard-interface.h
+++ b/net/batman-adv/hard-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/hash.c b/net/batman-adv/hash.c
index a0a0fdb..b5f7e13 100644
--- a/net/batman-adv/hash.c
+++ b/net/batman-adv/hash.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/hash.h b/net/batman-adv/hash.h
index 557a704..0c905e9 100644
--- a/net/batman-adv/hash.h
+++ b/net/batman-adv/hash.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2006-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2006-2017 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich, Marek Lindner
*
diff --git a/net/batman-adv/icmp_socket.c b/net/batman-adv/icmp_socket.c
index b310f381a..6308c9f 100644
--- a/net/batman-adv/icmp_socket.c
+++ b/net/batman-adv/icmp_socket.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/icmp_socket.h b/net/batman-adv/icmp_socket.h
index e44a7da..f3fec40 100644
--- a/net/batman-adv/icmp_socket.h
+++ b/net/batman-adv/icmp_socket.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/log.c b/net/batman-adv/log.c
index c73c317..4ef4bde 100644
--- a/net/batman-adv/log.c
+++ b/net/batman-adv/log.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/log.h b/net/batman-adv/log.h
index 3284a7b..7a2b9f4 100644
--- a/net/batman-adv/log.h
+++ b/net/batman-adv/log.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c
index d46415e..5000c54 100644
--- a/net/batman-adv/main.c
+++ b/net/batman-adv/main.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h
index a6cc804..57a8103 100644
--- a/net/batman-adv/main.h
+++ b/net/batman-adv/main.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -24,7 +24,7 @@
#define BATADV_DRIVER_DEVICE "batman-adv"
#ifndef BATADV_SOURCE_VERSION
-#define BATADV_SOURCE_VERSION "2016.5"
+#define BATADV_SOURCE_VERSION "2017.0"
#endif
/* B.A.T.M.A.N. parameters */
diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c
index 090a69f..952ba81 100644
--- a/net/batman-adv/multicast.c
+++ b/net/batman-adv/multicast.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h
index 2cddaf5..2a78cdd 100644
--- a/net/batman-adv/multicast.h
+++ b/net/batman-adv/multicast.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2014-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2014-2017 B.A.T.M.A.N. contributors:
*
* Linus Lüssing
*
diff --git a/net/batman-adv/netlink.c b/net/batman-adv/netlink.c
index 0627381..ab13b4d 100644
--- a/net/batman-adv/netlink.c
+++ b/net/batman-adv/netlink.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
diff --git a/net/batman-adv/netlink.h b/net/batman-adv/netlink.h
index 52eb162..f1cd8c5 100644
--- a/net/batman-adv/netlink.h
+++ b/net/batman-adv/netlink.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2017 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index ab5a3bf..e1f6fc7 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
diff --git a/net/batman-adv/network-coding.h b/net/batman-adv/network-coding.h
index d6d7fb4..c66efb8 100644
--- a/net/batman-adv/network-coding.h
+++ b/net/batman-adv/network-coding.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Martin Hundebøll, Jeppe Ledet-Pedersen
*
diff --git a/net/batman-adv/originator.c b/net/batman-adv/originator.c
index 8f3b296..8e2a4b2 100644
--- a/net/batman-adv/originator.c
+++ b/net/batman-adv/originator.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2009-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2009-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/originator.h b/net/batman-adv/originator.h
index ebc5618..d94220a 100644
--- a/net/batman-adv/originator.h
+++ b/net/batman-adv/originator.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/packet.h b/net/batman-adv/packet.h
index 7a36bcf..8e8a5db 100644
--- a/net/batman-adv/packet.h
+++ b/net/batman-adv/packet.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c
index 6713bdf..7fd740b 100644
--- a/net/batman-adv/routing.c
+++ b/net/batman-adv/routing.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -719,20 +719,19 @@ static int batadv_route_unicast_packet(struct sk_buff *skb,
len = skb->len;
res = batadv_send_skb_to_orig(skb, orig_node, recv_if);
- if (res == NET_XMIT_SUCCESS)
- ret = NET_RX_SUCCESS;
-
- /* skb was consumed */
- skb = NULL;
/* translate transmit result into receive result */
if (res == NET_XMIT_SUCCESS) {
+ ret = NET_RX_SUCCESS;
/* skb was transmitted and consumed */
batadv_inc_counter(bat_priv, BATADV_CNT_FORWARD);
batadv_add_counter(bat_priv, BATADV_CNT_FORWARD_BYTES,
len + ETH_HLEN);
}
+ /* skb was consumed */
+ skb = NULL;
+
put_orig_node:
batadv_orig_node_put(orig_node);
free_skb:
diff --git a/net/batman-adv/routing.h b/net/batman-adv/routing.h
index 05c3ff4..5ede16c 100644
--- a/net/batman-adv/routing.h
+++ b/net/batman-adv/routing.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/send.c b/net/batman-adv/send.c
index 49021b7..1489ec2 100644
--- a/net/batman-adv/send.c
+++ b/net/batman-adv/send.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -77,6 +77,7 @@ int batadv_send_skb_packet(struct sk_buff *skb,
{
struct batadv_priv *bat_priv;
struct ethhdr *ethhdr;
+ int ret;
bat_priv = netdev_priv(hard_iface->soft_iface);
@@ -115,7 +116,8 @@ int batadv_send_skb_packet(struct sk_buff *skb,
* congestion and traffic shaping, it drops and returns NET_XMIT_DROP
* (which is > 0). This will not be treated as an error.
*/
- return dev_queue_xmit(skb);
+ ret = dev_queue_xmit(skb);
+ return net_xmit_eval(ret);
send_skb_err:
kfree_skb(skb);
return NET_XMIT_DROP;
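
net_xmit_eval() folds NET_XMIT_CN (congestion notified, but the skb was queued) into success while leaving NET_XMIT_DROP as a positive return, so callers of batadv_send_skb_packet() no longer mistake congestion for an error. A small demo, with the flag values and macro copied here as stand-ins for the definitions in include/linux/netdevice.h and include/net/sock.h:

#include <stdio.h>

#define NET_XMIT_SUCCESS 0x00
#define NET_XMIT_DROP    0x01
#define NET_XMIT_CN      0x02
#define net_xmit_eval(e) ((e) == NET_XMIT_CN ? 0 : (e))

int main(void)
{
	printf("%d %d %d\n",
	       net_xmit_eval(NET_XMIT_SUCCESS),	/* 0 */
	       net_xmit_eval(NET_XMIT_DROP),	/* 1: still an error */
	       net_xmit_eval(NET_XMIT_CN));	/* 0: treated as sent */
	return 0;
}
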
diff --git a/net/batman-adv/send.h b/net/batman-adv/send.h
index a94e1e8..f21166d 100644
--- a/net/batman-adv/send.h
+++ b/net/batman-adv/send.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 7b3494a..5d099b2 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
@@ -258,7 +258,8 @@ static int batadv_interface_tx(struct sk_buff *skb,
ethhdr = eth_hdr(skb);
/* Register the client MAC in the transtable */
- if (!is_multicast_ether_addr(ethhdr->h_source)) {
+ if (!is_multicast_ether_addr(ethhdr->h_source) &&
+ !batadv_bla_is_loopdetect_mac(ethhdr->h_source)) {
client_added = batadv_tt_local_add(soft_iface, ethhdr->h_source,
vid, skb->skb_iif,
skb->mark);
@@ -481,8 +482,6 @@ void batadv_interface_rx(struct net_device *soft_iface,
batadv_add_counter(bat_priv, BATADV_CNT_RX_BYTES,
skb->len + ETH_HLEN);
- soft_iface->last_rx = jiffies;
-
/* Let the bridge loop avoidance check the packet. If it will
* not handle it, we can safely push it up.
*/
diff --git a/net/batman-adv/soft-interface.h b/net/batman-adv/soft-interface.h
index ec303dd..639c3ab 100644
--- a/net/batman-adv/soft-interface.h
+++ b/net/batman-adv/soft-interface.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 17c8441..0ae8b30 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/sysfs.h b/net/batman-adv/sysfs.h
index c76021b..e487412 100644
--- a/net/batman-adv/sysfs.h
+++ b/net/batman-adv/sysfs.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2010-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2010-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
diff --git a/net/batman-adv/tp_meter.c b/net/batman-adv/tp_meter.c
index 981e8c5..c94ebde 100644
--- a/net/batman-adv/tp_meter.c
+++ b/net/batman-adv/tp_meter.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
*
@@ -23,7 +23,7 @@
#include <linux/byteorder/generic.h>
#include <linux/cache.h>
#include <linux/compiler.h>
-#include <linux/device.h>
+#include <linux/err.h>
#include <linux/etherdevice.h>
#include <linux/fs.h>
#include <linux/if_ether.h>
diff --git a/net/batman-adv/tp_meter.h b/net/batman-adv/tp_meter.h
index ba922c4..a8ada5c 100644
--- a/net/batman-adv/tp_meter.h
+++ b/net/batman-adv/tp_meter.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2012-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2012-2017 B.A.T.M.A.N. contributors:
*
* Edo Monticelli, Antonio Quartulli
*
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 30ecbfb..6077a87 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
@@ -3714,7 +3714,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
{
struct batadv_hashtable *hash = bat_priv->tt.local_hash;
struct batadv_tt_common_entry *tt_common_entry;
- u16 changed_num = 0;
struct hlist_head *head;
u32 i;
@@ -3736,7 +3735,6 @@ static void batadv_tt_local_set_flags(struct batadv_priv *bat_priv, u16 flags,
continue;
tt_common_entry->flags &= ~flags;
}
- changed_num++;
if (!count)
continue;
diff --git a/net/batman-adv/translation-table.h b/net/batman-adv/translation-table.h
index 783fdba..411d586 100644
--- a/net/batman-adv/translation-table.h
+++ b/net/batman-adv/translation-table.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich, Antonio Quartulli
*
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index a783420..1d9e267 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/tvlv.h b/net/batman-adv/tvlv.h
index e4369b5..4d01400 100644
--- a/net/batman-adv/tvlv.h
+++ b/net/batman-adv/tvlv.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
index e913aee..8f64a5c 100644
--- a/net/batman-adv/types.h
+++ b/net/batman-adv/types.h
@@ -1,4 +1,4 @@
-/* Copyright (C) 2007-2016 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2017 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 7cb41ae..5a1f8ef 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -220,6 +220,31 @@ out:
}
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
+static void maybe_deliver_addr(struct net_bridge_port *p, struct sk_buff *skb,
+ const unsigned char *addr, bool local_orig)
+{
+ struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
+ const unsigned char *src = eth_hdr(skb)->h_source;
+
+ if (!should_deliver(p, skb))
+ return;
+
+ /* Even with hairpin, no soliloquies - prevent breaking IPv6 DAD */
+ if (skb->dev == p->dev && ether_addr_equal(src, addr))
+ return;
+
+ skb = skb_copy(skb, GFP_ATOMIC);
+ if (!skb) {
+ dev->stats.tx_dropped++;
+ return;
+ }
+
+ if (!is_broadcast_ether_addr(addr))
+ memcpy(eth_hdr(skb)->h_dest, addr, ETH_ALEN);
+
+ __br_forward(p, skb, local_orig);
+}
+
/* called with rcu_read_lock */
void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
struct sk_buff *skb,
@@ -241,10 +266,20 @@ void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
NULL;
- port = (unsigned long)lport > (unsigned long)rport ?
- lport : rport;
+ if ((unsigned long)lport > (unsigned long)rport) {
+ port = lport;
+
+ if (port->flags & BR_MULTICAST_TO_UNICAST) {
+ maybe_deliver_addr(lport, skb, p->eth_addr,
+ local_orig);
+ goto delivered;
+ }
+ } else {
+ port = rport;
+ }
prev = maybe_deliver(prev, port, skb, local_orig);
+delivered:
if (IS_ERR(prev))
goto out;
if (prev == port)
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 7dbc80d..056e6ac 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -531,7 +531,7 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
break;
}
- p = br_multicast_new_port_group(port, group, *pp, state);
+ p = br_multicast_new_port_group(port, group, *pp, state, NULL);
if (unlikely(!p))
return -ENOMEM;
rcu_assign_pointer(*pp, p);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index b30e77e..1de3438 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -43,12 +43,14 @@ static void br_multicast_add_router(struct net_bridge *br,
static void br_ip4_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
- __u16 vid);
+ __u16 vid,
+ const unsigned char *src);
+
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
- __u16 vid);
+ __u16 vid, const unsigned char *src);
#endif
unsigned int br_mdb_rehash_seq;
@@ -540,7 +542,7 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br,
break;
case 2:
mld2q = (struct mld2_query *)icmp6_hdr(skb);
- mld2q->mld2q_mrc = ntohs((u16)jiffies_to_msecs(interval));
+ mld2q->mld2q_mrc = htons((u16)jiffies_to_msecs(interval));
mld2q->mld2q_type = ICMPV6_MGM_QUERY;
mld2q->mld2q_code = 0;
mld2q->mld2q_cksum = 0;
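
mld2q_mrc is a big-endian wire field being written, so htons() is the correct conversion; ntohs() only worked by accident because on little-endian hosts both perform the identical byte swap (and on big-endian both are no-ops), but it reads backwards and trips sparse's __be16 checking. A quick demonstration:

#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	unsigned short host_val = 10000;	/* e.g. a max-response interval */

	/* Numerically identical on any host; the fix is about declaring
	 * the direction of the conversion for a value stored to the wire. */
	printf("htons: 0x%04x\n", htons(host_val));
	printf("ntohs: 0x%04x\n", ntohs(host_val));
	return 0;
}
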
@@ -711,7 +713,8 @@ struct net_bridge_port_group *br_multicast_new_port_group(
struct net_bridge_port *port,
struct br_ip *group,
struct net_bridge_port_group __rcu *next,
- unsigned char flags)
+ unsigned char flags,
+ const unsigned char *src)
{
struct net_bridge_port_group *p;
@@ -726,12 +729,32 @@ struct net_bridge_port_group *br_multicast_new_port_group(
hlist_add_head(&p->mglist, &port->mglist);
setup_timer(&p->timer, br_multicast_port_group_expired,
(unsigned long)p);
+
+ if (src)
+ memcpy(p->eth_addr, src, ETH_ALEN);
+ else
+ memset(p->eth_addr, 0xff, ETH_ALEN);
+
return p;
}
+static bool br_port_group_equal(struct net_bridge_port_group *p,
+ struct net_bridge_port *port,
+ const unsigned char *src)
+{
+ if (p->port != port)
+ return false;
+
+ if (!(port->flags & BR_MULTICAST_TO_UNICAST))
+ return true;
+
+ return ether_addr_equal(src, p->eth_addr);
+}
+
static int br_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
- struct br_ip *group)
+ struct br_ip *group,
+ const unsigned char *src)
{
struct net_bridge_port_group __rcu **pp;
struct net_bridge_port_group *p;
@@ -758,13 +781,13 @@ static int br_multicast_add_group(struct net_bridge *br,
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port == port)
+ if (br_port_group_equal(p, port, src))
goto found;
if ((unsigned long)p->port < (unsigned long)port)
break;
}
- p = br_multicast_new_port_group(port, group, *pp, 0);
+ p = br_multicast_new_port_group(port, group, *pp, 0, src);
if (unlikely(!p))
goto err;
rcu_assign_pointer(*pp, p);
@@ -783,7 +806,8 @@ err:
static int br_ip4_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
- __u16 vid)
+ __u16 vid,
+ const unsigned char *src)
{
struct br_ip br_group;
@@ -794,14 +818,15 @@ static int br_ip4_multicast_add_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IP);
br_group.vid = vid;
- return br_multicast_add_group(br, port, &br_group);
+ return br_multicast_add_group(br, port, &br_group, src);
}
#if IS_ENABLED(CONFIG_IPV6)
static int br_ip6_multicast_add_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
- __u16 vid)
+ __u16 vid,
+ const unsigned char *src)
{
struct br_ip br_group;
@@ -812,7 +837,7 @@ static int br_ip6_multicast_add_group(struct net_bridge *br,
br_group.proto = htons(ETH_P_IPV6);
br_group.vid = vid;
- return br_multicast_add_group(br, port, &br_group);
+ return br_multicast_add_group(br, port, &br_group, src);
}
#endif
@@ -1081,6 +1106,7 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src;
struct igmpv3_report *ih;
struct igmpv3_grec *grec;
int i;
@@ -1121,12 +1147,14 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
continue;
}
+ src = eth_hdr(skb)->h_source;
if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
type == IGMPV3_MODE_IS_INCLUDE) &&
ntohs(grec->grec_nsrcs) == 0) {
- br_ip4_multicast_leave_group(br, port, group, vid);
+ br_ip4_multicast_leave_group(br, port, group, vid, src);
} else {
- err = br_ip4_multicast_add_group(br, port, group, vid);
+ err = br_ip4_multicast_add_group(br, port, group, vid,
+ src);
if (err)
break;
}
@@ -1141,6 +1169,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
struct sk_buff *skb,
u16 vid)
{
+ const unsigned char *src;
struct icmp6hdr *icmp6h;
struct mld2_grec *grec;
int i;
@@ -1188,14 +1217,16 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
continue;
}
+ src = eth_hdr(skb)->h_source;
if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
ntohs(*nsrcs) == 0) {
br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
- vid);
+ vid, src);
} else {
err = br_ip6_multicast_add_group(br, port,
- &grec->grec_mca, vid);
+ &grec->grec_mca, vid,
+ src);
if (err)
break;
}
@@ -1511,7 +1542,8 @@ br_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
struct br_ip *group,
struct bridge_mcast_other_query *other_query,
- struct bridge_mcast_own_query *own_query)
+ struct bridge_mcast_own_query *own_query,
+ const unsigned char *src)
{
struct net_bridge_mdb_htable *mdb;
struct net_bridge_mdb_entry *mp;
@@ -1535,7 +1567,7 @@ br_multicast_leave_group(struct net_bridge *br,
for (pp = &mp->ports;
(p = mlock_dereference(*pp, br)) != NULL;
pp = &p->next) {
- if (p->port != port)
+ if (!br_port_group_equal(p, port, src))
continue;
rcu_assign_pointer(*pp, p->next);
@@ -1566,7 +1598,7 @@ br_multicast_leave_group(struct net_bridge *br,
for (p = mlock_dereference(mp->ports, br);
p != NULL;
p = mlock_dereference(p->next, br)) {
- if (p->port != port)
+ if (!br_port_group_equal(p, port, src))
continue;
if (!hlist_unhashed(&p->mglist) &&
@@ -1617,7 +1649,8 @@ out:
static void br_ip4_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
__be32 group,
- __u16 vid)
+ __u16 vid,
+ const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
@@ -1632,14 +1665,15 @@ static void br_ip4_multicast_leave_group(struct net_bridge *br,
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip4_other_query,
- own_query);
+ own_query, src);
}
#if IS_ENABLED(CONFIG_IPV6)
static void br_ip6_multicast_leave_group(struct net_bridge *br,
struct net_bridge_port *port,
const struct in6_addr *group,
- __u16 vid)
+ __u16 vid,
+ const unsigned char *src)
{
struct br_ip br_group;
struct bridge_mcast_own_query *own_query;
@@ -1654,7 +1688,7 @@ static void br_ip6_multicast_leave_group(struct net_bridge *br,
br_group.vid = vid;
br_multicast_leave_group(br, port, &br_group, &br->ip6_other_query,
- own_query);
+ own_query, src);
}
#endif
@@ -1712,6 +1746,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
u16 vid)
{
struct sk_buff *skb_trimmed = NULL;
+ const unsigned char *src;
struct igmphdr *ih;
int err;
@@ -1731,13 +1766,14 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
}
ih = igmp_hdr(skb);
+ src = eth_hdr(skb)->h_source;
BR_INPUT_SKB_CB(skb)->igmp = ih->type;
switch (ih->type) {
case IGMP_HOST_MEMBERSHIP_REPORT:
case IGMPV2_HOST_MEMBERSHIP_REPORT:
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
- err = br_ip4_multicast_add_group(br, port, ih->group, vid);
+ err = br_ip4_multicast_add_group(br, port, ih->group, vid, src);
break;
case IGMPV3_HOST_MEMBERSHIP_REPORT:
err = br_ip4_multicast_igmp3_report(br, port, skb_trimmed, vid);
@@ -1746,7 +1782,7 @@ static int br_multicast_ipv4_rcv(struct net_bridge *br,
err = br_ip4_multicast_query(br, port, skb_trimmed, vid);
break;
case IGMP_HOST_LEAVE_MESSAGE:
- br_ip4_multicast_leave_group(br, port, ih->group, vid);
+ br_ip4_multicast_leave_group(br, port, ih->group, vid, src);
break;
}
@@ -1766,6 +1802,7 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
u16 vid)
{
struct sk_buff *skb_trimmed = NULL;
+ const unsigned char *src;
struct mld_msg *mld;
int err;
@@ -1785,8 +1822,10 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
switch (mld->mld_type) {
case ICMPV6_MGM_REPORT:
+ src = eth_hdr(skb)->h_source;
BR_INPUT_SKB_CB(skb)->mrouters_only = 1;
- err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid);
+ err = br_ip6_multicast_add_group(br, port, &mld->mld_mca, vid,
+ src);
break;
case ICMPV6_MLD2_REPORT:
err = br_ip6_multicast_mld2_report(br, port, skb_trimmed, vid);
@@ -1795,7 +1834,8 @@ static int br_multicast_ipv6_rcv(struct net_bridge *br,
err = br_ip6_multicast_query(br, port, skb_trimmed, vid);
break;
case ICMPV6_MGM_REDUCTION:
- br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid);
+ src = eth_hdr(skb)->h_source;
+ br_ip6_multicast_leave_group(br, port, &mld->mld_mca, vid, src);
break;
}
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 71c7453..1ca2549 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -123,6 +123,7 @@ static inline size_t br_port_info_size(void)
+ nla_total_size(1) /* IFLA_BRPORT_GUARD */
+ nla_total_size(1) /* IFLA_BRPORT_PROTECT */
+ nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
+ + nla_total_size(1) /* IFLA_BRPORT_MCAST_TO_UCAST */
+ nla_total_size(1) /* IFLA_BRPORT_LEARNING */
+ nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
+ nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
@@ -173,6 +174,8 @@ static int br_port_fill_attrs(struct sk_buff *skb,
!!(p->flags & BR_ROOT_BLOCK)) ||
nla_put_u8(skb, IFLA_BRPORT_FAST_LEAVE,
!!(p->flags & BR_MULTICAST_FAST_LEAVE)) ||
+ nla_put_u8(skb, IFLA_BRPORT_MCAST_TO_UCAST,
+ !!(p->flags & BR_MULTICAST_TO_UNICAST)) ||
nla_put_u8(skb, IFLA_BRPORT_LEARNING, !!(p->flags & BR_LEARNING)) ||
nla_put_u8(skb, IFLA_BRPORT_UNICAST_FLOOD,
!!(p->flags & BR_FLOOD)) ||
@@ -586,6 +589,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
[IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
[IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
[IFLA_BRPORT_MULTICAST_ROUTER] = { .type = NLA_U8 },
+ [IFLA_BRPORT_MCAST_TO_UCAST] = { .type = NLA_U8 },
};
/* Change the state of the port and notify spanning tree */
@@ -636,6 +640,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])
br_set_port_flag(p, tb, IFLA_BRPORT_LEARNING, BR_LEARNING);
br_set_port_flag(p, tb, IFLA_BRPORT_UNICAST_FLOOD, BR_FLOOD);
br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_FLOOD, BR_MCAST_FLOOD);
+ br_set_port_flag(p, tb, IFLA_BRPORT_MCAST_TO_UCAST, BR_MULTICAST_TO_UNICAST);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP, BR_PROXYARP);
br_set_port_flag(p, tb, IFLA_BRPORT_PROXYARP_WIFI, BR_PROXYARP_WIFI);
@@ -781,20 +786,6 @@ static int br_validate(struct nlattr *tb[], struct nlattr *data[])
return 0;
}
-static int br_dev_newlink(struct net *src_net, struct net_device *dev,
- struct nlattr *tb[], struct nlattr *data[])
-{
- struct net_bridge *br = netdev_priv(dev);
-
- if (tb[IFLA_ADDRESS]) {
- spin_lock_bh(&br->lock);
- br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
- spin_unlock_bh(&br->lock);
- }
-
- return register_netdevice(dev);
-}
-
static int br_port_slave_changelink(struct net_device *brdev,
struct net_device *dev,
struct nlattr *tb[],
@@ -1115,6 +1106,25 @@ static int br_changelink(struct net_device *brdev, struct nlattr *tb[],
return 0;
}
+static int br_dev_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ struct net_bridge *br = netdev_priv(dev);
+ int err;
+
+ if (tb[IFLA_ADDRESS]) {
+ spin_lock_bh(&br->lock);
+ br_stp_change_bridge_id(br, nla_data(tb[IFLA_ADDRESS]));
+ spin_unlock_bh(&br->lock);
+ }
+
+ err = br_changelink(dev, tb, data);
+ if (err)
+ return err;
+
+ return register_netdevice(dev);
+}
+
static size_t br_get_size(const struct net_device *brdev)
{
return nla_total_size(sizeof(u32)) + /* IFLA_BR_FORWARD_DELAY */
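
The netlink plumbing above exposes the new flag as IFLA_BRPORT_MCAST_TO_UCAST, and relocating br_dev_newlink() below br_changelink() lets bridge options supplied at link creation be applied before register_netdevice(), instead of requiring a second RTM_NEWLINK. br_set_port_flag(), which br_setport() uses for every per-port flag, predates this patch and is assumed to follow the usual toggle pattern, roughly:

static void br_set_port_flag(struct net_bridge_port *p, struct nlattr *tb[],
			     int attrtype, unsigned long mask)
{
	if (!tb[attrtype])
		return;			/* attribute absent: leave flag as-is */

	if (nla_get_u8(tb[attrtype]))
		p->flags |= mask;
	else
		p->flags &= ~mask;
}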
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 8ce621e8..0b82a22 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -177,6 +177,7 @@ struct net_bridge_port_group {
struct timer_list timer;
struct br_ip addr;
unsigned char flags;
+ unsigned char eth_addr[ETH_ALEN];
};
struct net_bridge_mdb_entry
@@ -599,7 +600,7 @@ void br_multicast_free_pg(struct rcu_head *head);
struct net_bridge_port_group *
br_multicast_new_port_group(struct net_bridge_port *port, struct br_ip *group,
struct net_bridge_port_group __rcu *next,
- unsigned char flags);
+ unsigned char flags, const unsigned char *src);
void br_mdb_init(void);
void br_mdb_uninit(void);
void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,
diff --git a/net/bridge/br_sysfs_if.c b/net/bridge/br_sysfs_if.c
index 8bd5696..05e8946 100644
--- a/net/bridge/br_sysfs_if.c
+++ b/net/bridge/br_sysfs_if.c
@@ -188,6 +188,7 @@ static BRPORT_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
store_multicast_router);
BRPORT_ATTR_FLAG(multicast_fast_leave, BR_MULTICAST_FAST_LEAVE);
+BRPORT_ATTR_FLAG(multicast_to_unicast, BR_MULTICAST_TO_UNICAST);
#endif
static const struct brport_attribute *brport_attrs[] = {
@@ -214,6 +215,7 @@ static const struct brport_attribute *brport_attrs[] = {
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
&brport_attr_multicast_router,
&brport_attr_multicast_fast_leave,
+ &brport_attr_multicast_to_unicast,
#endif
&brport_attr_proxyarp,
&brport_attr_proxyarp_wifi,
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
index 3408ed5..1816fc9 100644
--- a/net/caif/chnl_net.c
+++ b/net/caif/chnl_net.c
@@ -44,7 +44,6 @@ enum caif_states {
struct chnl_net {
struct cflayer chnl;
- struct net_device_stats stats;
struct caif_connect_request conn_req;
struct list_head list_field;
struct net_device *netdev;
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 3949ce7..292e33b 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -214,7 +214,7 @@ static int ceph_aes_crypt(const struct ceph_crypto_key *key, bool encrypt,
SKCIPHER_REQUEST_ON_STACK(req, key->tfm);
struct sg_table sgt;
struct scatterlist prealloc_sg;
- char iv[AES_BLOCK_SIZE];
+ char iv[AES_BLOCK_SIZE] __aligned(8);
int pad_byte = AES_BLOCK_SIZE - (in_len & (AES_BLOCK_SIZE - 1));
int crypt_len = encrypt ? in_len + pad_byte : in_len;
int ret;
diff --git a/net/core/dev.c b/net/core/dev.c
index c143f13..727b6fd 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2408,28 +2408,6 @@ void netif_schedule_queue(struct netdev_queue *txq)
}
EXPORT_SYMBOL(netif_schedule_queue);
-/**
- * netif_wake_subqueue - allow sending packets on subqueue
- * @dev: network device
- * @queue_index: sub queue index
- *
- * Resume individual transmit queue of a device with multiple transmit queues.
- */
-void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
-{
- struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
-
- if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
- struct Qdisc *q;
-
- rcu_read_lock();
- q = rcu_dereference(txq->qdisc);
- __netif_schedule(q);
- rcu_read_unlock();
- }
-}
-EXPORT_SYMBOL(netif_wake_subqueue);
-
void netif_tx_wake_queue(struct netdev_queue *dev_queue)
{
if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
@@ -2680,11 +2658,12 @@ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
netdev_features_t features, bool tx_path)
{
+ struct sk_buff *segs;
+
if (unlikely(skb_needs_check(skb, tx_path))) {
int err;
- skb_warn_bad_offload(skb);
-
+ /* We are about to initialize ->check in the TCP or UDP header */
err = skb_cow_head(skb, 0);
if (err < 0)
return ERR_PTR(err);
@@ -2712,7 +2691,12 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
skb_reset_mac_header(skb);
skb_reset_mac_len(skb);
- return skb_mac_gso_segment(skb, features);
+ segs = skb_mac_gso_segment(skb, features);
+
+ if (unlikely(skb_needs_check(skb, tx_path)))
+ skb_warn_bad_offload(skb);
+
+ return segs;
}
EXPORT_SYMBOL(__skb_gso_segment);
@@ -2795,9 +2779,9 @@ static netdev_features_t harmonize_features(struct sk_buff *skb,
if (skb->ip_summed != CHECKSUM_NONE &&
!can_checksum_protocol(features, type)) {
features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
- } else if (illegal_highdma(skb->dev, skb)) {
- features &= ~NETIF_F_SG;
}
+ if (illegal_highdma(skb->dev, skb))
+ features &= ~NETIF_F_SG;
return features;
}
@@ -3983,9 +3967,7 @@ int netdev_rx_handler_register(struct net_device *dev,
rx_handler_func_t *rx_handler,
void *rx_handler_data)
{
- ASSERT_RTNL();
-
- if (dev->rx_handler)
+ if (netdev_is_rx_handler_busy(dev))
return -EBUSY;
/* Note: rx_handler_data must be set before rx_handler */
@@ -4437,7 +4419,9 @@ static void skb_gro_reset_offset(struct sk_buff *skb)
pinfo->nr_frags &&
!PageHighMem(skb_frag_page(frag0))) {
NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
- NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
+ NAPI_GRO_CB(skb)->frag0_len = min_t(unsigned int,
+ skb_frag_size(frag0),
+ skb->end - skb->tail);
}
}
@@ -4615,6 +4599,7 @@ static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
case GRO_MERGED_FREE:
if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD) {
skb_dst_drop(skb);
+ secpath_reset(skb);
kmem_cache_free(skbuff_head_cache, skb);
} else {
__kfree_skb(skb);
@@ -4655,6 +4640,7 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
skb->encapsulation = 0;
skb_shinfo(skb)->gso_type = 0;
skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ secpath_reset(skb);
napi->skb = skb;
}
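
Three independent fixes run through net/core/dev.c here: __skb_gso_segment() now warns about a bad offload only if the checksum is still unresolved after segmentation has run; harmonize_features() drops NETIF_F_SG for high-DMA pages even when the checksum branch was already taken; and the GRO fast path clamps frag0_len so it never covers more than the linear data actually present. The deleted netif_wake_subqueue() duplicated netif_tx_wake_queue(); the replacement is presumably a thin inline in netdevice.h along these lines (not part of this excerpt):

static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
{
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);

	netif_tx_wake_queue(txq);
}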
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index e23766c..6b3eee0 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1712,7 +1712,7 @@ static noinline_for_stack int ethtool_get_channels(struct net_device *dev,
static noinline_for_stack int ethtool_set_channels(struct net_device *dev,
void __user *useraddr)
{
- struct ethtool_channels channels, max;
+ struct ethtool_channels channels, max = { .cmd = ETHTOOL_GCHANNELS };
u32 max_rx_in_use = 0;
if (!dev->ethtool_ops->set_channels || !dev->ethtool_ops->get_channels)
@@ -1817,11 +1817,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
ret = __ethtool_get_sset_count(dev, gstrings.string_set);
if (ret < 0)
return ret;
+ if (ret > S32_MAX / ETH_GSTRING_LEN)
+ return -ENOMEM;
+ WARN_ON_ONCE(!ret);
gstrings.len = ret;
-
- data = kcalloc(gstrings.len, ETH_GSTRING_LEN, GFP_USER);
- if (!data)
+ data = vzalloc(gstrings.len * ETH_GSTRING_LEN);
+ if (gstrings.len && !data)
return -ENOMEM;
__ethtool_get_strings(dev, gstrings.string_set, data);
@@ -1830,12 +1832,13 @@ static int ethtool_get_strings(struct net_device *dev, void __user *useraddr)
if (copy_to_user(useraddr, &gstrings, sizeof(gstrings)))
goto out;
useraddr += sizeof(gstrings);
- if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
+ if (gstrings.len &&
+ copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN))
goto out;
ret = 0;
out:
- kfree(data);
+ vfree(data);
return ret;
}
@@ -1912,14 +1915,15 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
n_stats = ops->get_sset_count(dev, ETH_SS_STATS);
if (n_stats < 0)
return n_stats;
- WARN_ON(n_stats == 0);
-
+ if (n_stats > S32_MAX / sizeof(u64))
+ return -ENOMEM;
+ WARN_ON_ONCE(!n_stats);
if (copy_from_user(&stats, useraddr, sizeof(stats)))
return -EFAULT;
stats.n_stats = n_stats;
- data = kmalloc(n_stats * sizeof(u64), GFP_USER);
- if (!data)
+ data = vzalloc(n_stats * sizeof(u64));
+ if (n_stats && !data)
return -ENOMEM;
ops->get_ethtool_stats(dev, &stats, data);
@@ -1928,12 +1932,12 @@ static int ethtool_get_stats(struct net_device *dev, void __user *useraddr)
if (copy_to_user(useraddr, &stats, sizeof(stats)))
goto out;
useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
goto out;
ret = 0;
out:
- kfree(data);
+ vfree(data);
return ret;
}
@@ -1948,17 +1952,18 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
return -EOPNOTSUPP;
n_stats = phy_get_sset_count(phydev);
-
if (n_stats < 0)
return n_stats;
- WARN_ON(n_stats == 0);
+ if (n_stats > S32_MAX / sizeof(u64))
+ return -ENOMEM;
+ WARN_ON_ONCE(!n_stats);
if (copy_from_user(&stats, useraddr, sizeof(stats)))
return -EFAULT;
stats.n_stats = n_stats;
- data = kmalloc_array(n_stats, sizeof(u64), GFP_USER);
- if (!data)
+ data = vzalloc(n_stats * sizeof(u64));
+ if (n_stats && !data)
return -ENOMEM;
mutex_lock(&phydev->lock);
@@ -1969,12 +1974,12 @@ static int ethtool_get_phy_stats(struct net_device *dev, void __user *useraddr)
if (copy_to_user(useraddr, &stats, sizeof(stats)))
goto out;
useraddr += sizeof(stats);
- if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64)))
+ if (n_stats && copy_to_user(useraddr, data, n_stats * sizeof(u64)))
goto out;
ret = 0;
out:
- kfree(data);
+ vfree(data);
return ret;
}
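
The three ethtool ioctls above share one hardening pattern: bound the driver-reported count before multiplying, allocate with vzalloc() so large string/stats tables need no physically contiguous pages, and tolerate a zero count instead of tripping WARN_ON(). Distilled (variable names illustrative):

	n = ops->get_sset_count(dev, ETH_SS_STATS);
	if (n < 0)
		return n;
	if (n > S32_MAX / sizeof(u64))		/* n * sizeof(u64) must not overflow */
		return -ENOMEM;

	data = vzalloc(n * sizeof(u64));	/* NULL is fine when n == 0 */
	if (n && !data)
		return -ENOMEM;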
diff --git a/net/core/filter.c b/net/core/filter.c
index f4d16a9..1e00737 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -1522,10 +1522,11 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
{
bool is_pseudo = flags & BPF_F_PSEUDO_HDR;
bool is_mmzero = flags & BPF_F_MARK_MANGLED_0;
+ bool do_mforce = flags & BPF_F_MARK_ENFORCE;
__sum16 *ptr;
- if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_PSEUDO_HDR |
- BPF_F_HDR_FIELD_MASK)))
+ if (unlikely(flags & ~(BPF_F_MARK_MANGLED_0 | BPF_F_MARK_ENFORCE |
+ BPF_F_PSEUDO_HDR | BPF_F_HDR_FIELD_MASK)))
return -EINVAL;
if (unlikely(offset > 0xffff || offset & 1))
return -EFAULT;
@@ -1533,7 +1534,7 @@ BPF_CALL_5(bpf_l4_csum_replace, struct sk_buff *, skb, u32, offset,
return -EFAULT;
ptr = (__sum16 *)(skb->data + offset);
- if (is_mmzero && !*ptr)
+ if (is_mmzero && !do_mforce && !*ptr)
return 0;
switch (flags & BPF_F_HDR_FIELD_MASK) {
@@ -2598,7 +2599,7 @@ static const struct bpf_func_proto bpf_xdp_event_output_proto = {
};
static const struct bpf_func_proto *
-sk_filter_func_proto(enum bpf_func_id func_id)
+bpf_base_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
case BPF_FUNC_map_lookup_elem:
@@ -2626,6 +2627,17 @@ sk_filter_func_proto(enum bpf_func_id func_id)
}
static const struct bpf_func_proto *
+sk_filter_func_proto(enum bpf_func_id func_id)
+{
+ switch (func_id) {
+ case BPF_FUNC_skb_load_bytes:
+ return &bpf_skb_load_bytes_proto;
+ default:
+ return bpf_base_func_proto(func_id);
+ }
+}
+
+static const struct bpf_func_proto *
tc_cls_act_func_proto(enum bpf_func_id func_id)
{
switch (func_id) {
@@ -2680,7 +2692,7 @@ tc_cls_act_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return sk_filter_func_proto(func_id);
+ return bpf_base_func_proto(func_id);
}
}
@@ -2695,7 +2707,7 @@ xdp_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_xdp_adjust_head:
return &bpf_xdp_adjust_head_proto;
default:
- return sk_filter_func_proto(func_id);
+ return bpf_base_func_proto(func_id);
}
}
@@ -2706,7 +2718,7 @@ cg_skb_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skb_load_bytes:
return &bpf_skb_load_bytes_proto;
default:
- return sk_filter_func_proto(func_id);
+ return bpf_base_func_proto(func_id);
}
}
@@ -2733,7 +2745,7 @@ lwt_inout_func_proto(enum bpf_func_id func_id)
case BPF_FUNC_skb_under_cgroup:
return &bpf_skb_under_cgroup_proto;
default:
- return sk_filter_func_proto(func_id);
+ return bpf_base_func_proto(func_id);
}
}
@@ -2776,11 +2788,22 @@ static bool __is_valid_access(int off, int size)
{
if (off < 0 || off >= sizeof(struct __sk_buff))
return false;
+
/* The verifier guarantees that size > 0. */
if (off % size != 0)
return false;
- if (size != sizeof(__u32))
- return false;
+
+ switch (off) {
+ case offsetof(struct __sk_buff, cb[0]) ...
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
+ if (off + size >
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32))
+ return false;
+ break;
+ default:
+ if (size != sizeof(__u32))
+ return false;
+ }
return true;
}
@@ -2799,7 +2822,7 @@ static bool sk_filter_is_valid_access(int off, int size,
if (type == BPF_WRITE) {
switch (off) {
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
break;
default:
return false;
@@ -2823,7 +2846,7 @@ static bool lwt_is_valid_access(int off, int size,
case offsetof(struct __sk_buff, mark):
case offsetof(struct __sk_buff, priority):
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
break;
default:
return false;
@@ -2915,7 +2938,7 @@ static bool tc_cls_act_is_valid_access(int off, int size,
case offsetof(struct __sk_buff, tc_index):
case offsetof(struct __sk_buff, priority):
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
case offsetof(struct __sk_buff, tc_classid):
break;
default:
@@ -2972,32 +2995,33 @@ void bpf_warn_invalid_xdp_action(u32 act)
}
EXPORT_SYMBOL_GPL(bpf_warn_invalid_xdp_action);
-static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
- struct bpf_insn *insn_buf,
- struct bpf_prog *prog)
+static u32 bpf_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
+ struct bpf_insn *insn_buf,
+ struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
+ int off;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct __sk_buff, len):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, len));
break;
case offsetof(struct __sk_buff, protocol):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, protocol));
break;
case offsetof(struct __sk_buff, vlan_proto):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_proto) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, vlan_proto));
break;
@@ -3005,17 +3029,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, priority) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, priority));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, priority));
break;
case offsetof(struct __sk_buff, ingress_ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, skb_iif) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, skb_iif));
break;
@@ -3023,17 +3047,17 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, dev));
- *insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ *insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct net_device, ifindex));
break;
case offsetof(struct __sk_buff, hash):
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, hash));
break;
@@ -3041,63 +3065,77 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, mark));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, mark));
break;
case offsetof(struct __sk_buff, pkt_type):
- return convert_skb_access(SKF_AD_PKTTYPE, dst_reg, src_reg, insn);
+ return convert_skb_access(SKF_AD_PKTTYPE, si->dst_reg,
+ si->src_reg, insn);
case offsetof(struct __sk_buff, queue_mapping):
- return convert_skb_access(SKF_AD_QUEUE, dst_reg, src_reg, insn);
+ return convert_skb_access(SKF_AD_QUEUE, si->dst_reg,
+ si->src_reg, insn);
case offsetof(struct __sk_buff, vlan_present):
return convert_skb_access(SKF_AD_VLAN_TAG_PRESENT,
- dst_reg, src_reg, insn);
+ si->dst_reg, si->src_reg, insn);
case offsetof(struct __sk_buff, vlan_tci):
return convert_skb_access(SKF_AD_VLAN_TAG,
- dst_reg, src_reg, insn);
+ si->dst_reg, si->src_reg, insn);
case offsetof(struct __sk_buff, cb[0]) ...
- offsetof(struct __sk_buff, cb[4]):
+ offsetof(struct __sk_buff, cb[4]) + sizeof(__u32) - 1:
BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, data) < 20);
+ BUILD_BUG_ON((offsetof(struct sk_buff, cb) +
+ offsetof(struct qdisc_skb_cb, data)) %
+ sizeof(__u64));
prog->cb_access = 1;
- ctx_off -= offsetof(struct __sk_buff, cb[0]);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct qdisc_skb_cb, data);
+ off = si->off;
+ off -= offsetof(struct __sk_buff, cb[0]);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, data);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_STX_MEM(BPF_SIZE(si->code), si->dst_reg,
+ si->src_reg, off);
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_SIZE(si->code), si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, tc_classid):
- ctx_off -= offsetof(struct __sk_buff, tc_classid);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct qdisc_skb_cb, tc_classid);
+ BUILD_BUG_ON(FIELD_SIZEOF(struct qdisc_skb_cb, tc_classid) != 2);
+
+ off = si->off;
+ off -= offsetof(struct __sk_buff, tc_classid);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct qdisc_skb_cb, tc_classid);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg,
+ si->src_reg, off);
else
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg, ctx_off);
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, data),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, data));
break;
case offsetof(struct __sk_buff, data_end):
- ctx_off -= offsetof(struct __sk_buff, data_end);
- ctx_off += offsetof(struct sk_buff, cb);
- ctx_off += offsetof(struct bpf_skb_data_end, data_end);
- *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), dst_reg, src_reg,
- ctx_off);
+ off = si->off;
+ off -= offsetof(struct __sk_buff, data_end);
+ off += offsetof(struct sk_buff, cb);
+ off += offsetof(struct bpf_skb_data_end, data_end);
+ *insn++ = BPF_LDX_MEM(BPF_SIZEOF(void *), si->dst_reg,
+ si->src_reg, off);
break;
case offsetof(struct __sk_buff, tc_index):
@@ -3105,110 +3143,107 @@ static u32 sk_filter_convert_ctx_access(enum bpf_access_type type, int dst_reg,
BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, tc_index) != 2);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, tc_index));
else
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sk_buff, tc_index));
- break;
#else
if (type == BPF_WRITE)
- *insn++ = BPF_MOV64_REG(dst_reg, dst_reg);
+ *insn++ = BPF_MOV64_REG(si->dst_reg, si->dst_reg);
else
- *insn++ = BPF_MOV64_IMM(dst_reg, 0);
- break;
+ *insn++ = BPF_MOV64_IMM(si->dst_reg, 0);
#endif
+ break;
}
return insn - insn_buf;
}
static u32 sock_filter_convert_ctx_access(enum bpf_access_type type,
- int dst_reg, int src_reg,
- int ctx_off,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct bpf_sock, bound_dev_if):
BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_bound_dev_if) != 4);
if (type == BPF_WRITE)
- *insn++ = BPF_STX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_STX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_bound_dev_if));
else
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_bound_dev_if));
break;
case offsetof(struct bpf_sock, family):
BUILD_BUG_ON(FIELD_SIZEOF(struct sock, sk_family) != 2);
- *insn++ = BPF_LDX_MEM(BPF_H, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_H, si->dst_reg, si->src_reg,
offsetof(struct sock, sk_family));
break;
case offsetof(struct bpf_sock, type):
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_TYPE_MASK);
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_TYPE_SHIFT);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_TYPE_MASK);
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_TYPE_SHIFT);
break;
case offsetof(struct bpf_sock, protocol):
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
offsetof(struct sock, __sk_flags_offset));
- *insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, SK_FL_PROTO_MASK);
- *insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, SK_FL_PROTO_SHIFT);
+ *insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, SK_FL_PROTO_MASK);
+ *insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, SK_FL_PROTO_SHIFT);
break;
}
return insn - insn_buf;
}
-static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 tc_cls_act_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct __sk_buff, ifindex):
BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct sk_buff, dev),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct sk_buff, dev));
- *insn++ = BPF_LDX_MEM(BPF_W, dst_reg, dst_reg,
+ *insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->dst_reg,
offsetof(struct net_device, ifindex));
break;
default:
- return sk_filter_convert_ctx_access(type, dst_reg, src_reg,
- ctx_off, insn_buf, prog);
+ return bpf_convert_ctx_access(type, si, insn_buf, prog);
}
return insn - insn_buf;
}
-static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
- int src_reg, int ctx_off,
+static u32 xdp_convert_ctx_access(enum bpf_access_type type,
+ const struct bpf_insn *si,
struct bpf_insn *insn_buf,
struct bpf_prog *prog)
{
struct bpf_insn *insn = insn_buf;
- switch (ctx_off) {
+ switch (si->off) {
case offsetof(struct xdp_md, data):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct xdp_buff, data));
break;
case offsetof(struct xdp_md, data_end):
*insn++ = BPF_LDX_MEM(BPF_FIELD_SIZEOF(struct xdp_buff, data_end),
- dst_reg, src_reg,
+ si->dst_reg, si->src_reg,
offsetof(struct xdp_buff, data_end));
break;
}
@@ -3219,7 +3254,7 @@ static u32 xdp_convert_ctx_access(enum bpf_access_type type, int dst_reg,
static const struct bpf_verifier_ops sk_filter_ops = {
.get_func_proto = sk_filter_func_proto,
.is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_convert_ctx_access,
};
static const struct bpf_verifier_ops tc_cls_act_ops = {
@@ -3238,24 +3273,24 @@ static const struct bpf_verifier_ops xdp_ops = {
static const struct bpf_verifier_ops cg_skb_ops = {
.get_func_proto = cg_skb_func_proto,
.is_valid_access = sk_filter_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_convert_ctx_access,
};
static const struct bpf_verifier_ops lwt_inout_ops = {
.get_func_proto = lwt_inout_func_proto,
.is_valid_access = lwt_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_convert_ctx_access,
};
static const struct bpf_verifier_ops lwt_xmit_ops = {
.get_func_proto = lwt_xmit_func_proto,
.is_valid_access = lwt_is_valid_access,
- .convert_ctx_access = sk_filter_convert_ctx_access,
+ .convert_ctx_access = bpf_convert_ctx_access,
.gen_prologue = tc_cls_act_prologue,
};
static const struct bpf_verifier_ops cg_sock_ops = {
- .get_func_proto = sk_filter_func_proto,
+ .get_func_proto = bpf_base_func_proto,
.is_valid_access = sock_filter_is_valid_access,
.convert_ctx_access = sock_filter_convert_ctx_access,
};
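
Besides splitting bpf_base_func_proto() out of the socket-filter helper table and rewriting the ctx-conversion callbacks to take a struct bpf_insn, the filter.c changes widen __sk_buff->cb[] accesses from fixed 32-bit words to any of BPF_B/H/W/DW, with the range checks extended to cover partial words. A hypothetical BPF-C fragment that becomes verifiable with this patch (constant offsets, assuming a clang BPF target):

	__u8 *cb = (__u8 *)&skb->cb[0];

	cb[1] = 0xab;					/* one-byte store into cb[]   */
	*(__u64 *)&skb->cb[0] = 0x1122334455667788ULL;	/* 8-byte store over cb[0..1] */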
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index fe4e153..c35aae1 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -67,8 +67,8 @@ EXPORT_SYMBOL(skb_flow_dissector_init);
* The function will try to retrieve a be16 entity at
* offset poff
*/
-__be16 skb_flow_get_be16(const struct sk_buff *skb, int poff, void *data,
- int hlen)
+static __be16 skb_flow_get_be16(const struct sk_buff *skb, int poff,
+ void *data, int hlen)
{
__be16 *u, _u;
@@ -138,6 +138,7 @@ bool __skb_flow_dissect(const struct sk_buff *skb,
struct flow_dissector_key_control *key_control;
struct flow_dissector_key_basic *key_basic;
struct flow_dissector_key_addrs *key_addrs;
+ struct flow_dissector_key_arp *key_arp;
struct flow_dissector_key_ports *key_ports;
struct flow_dissector_key_icmp *key_icmp;
struct flow_dissector_key_tags *key_tags;
@@ -379,6 +380,62 @@ mpls:
nhoff += FCOE_HEADER_LEN;
goto out_good;
+
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_RARP): {
+ struct {
+ unsigned char ar_sha[ETH_ALEN];
+ unsigned char ar_sip[4];
+ unsigned char ar_tha[ETH_ALEN];
+ unsigned char ar_tip[4];
+ } *arp_eth, _arp_eth;
+ const struct arphdr *arp;
+ struct arphdr _arp;
+
+ arp = __skb_header_pointer(skb, nhoff, sizeof(_arp), data,
+ hlen, &_arp);
+ if (!arp)
+ goto out_bad;
+
+ if (arp->ar_hrd != htons(ARPHRD_ETHER) ||
+ arp->ar_pro != htons(ETH_P_IP) ||
+ arp->ar_hln != ETH_ALEN ||
+ arp->ar_pln != 4 ||
+ (arp->ar_op != htons(ARPOP_REPLY) &&
+ arp->ar_op != htons(ARPOP_REQUEST)))
+ goto out_bad;
+
+ arp_eth = __skb_header_pointer(skb, nhoff + sizeof(_arp),
+ sizeof(_arp_eth), data,
+ hlen,
+ &_arp_eth);
+ if (!arp_eth)
+ goto out_bad;
+
+ if (dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_ARP)) {
+
+ key_arp = skb_flow_dissector_target(flow_dissector,
+ FLOW_DISSECTOR_KEY_ARP,
+ target_container);
+
+ memcpy(&key_arp->sip, arp_eth->ar_sip,
+ sizeof(key_arp->sip));
+ memcpy(&key_arp->tip, arp_eth->ar_tip,
+ sizeof(key_arp->tip));
+
+ /* Only store the lower byte of the opcode;
+ * this covers ARPOP_REPLY and ARPOP_REQUEST.
+ */
+ key_arp->op = ntohs(arp->ar_op) & 0xff;
+
+ ether_addr_copy(key_arp->sha, arp_eth->ar_sha);
+ ether_addr_copy(key_arp->tha, arp_eth->ar_tha);
+ }
+
+ goto out_good;
+ }
+
default:
goto out_bad;
}
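
The new ETH_P_ARP/ETH_P_RARP case deliberately handles only Ethernet/IPv4 ARP (ar_hln == ETH_ALEN, ar_pln == 4) and only the REQUEST/REPLY opcodes; anything else falls through to out_bad. The key it fills is defined in include/net/flow_dissector.h and is assumed to look roughly like:

struct flow_dissector_key_arp {
	__u32 sip;			/* sender IPv4 address     */
	__u32 tip;			/* target IPv4 address     */
	__u8 op;			/* low byte of ar_op       */
	unsigned char sha[ETH_ALEN];	/* sender hardware address */
	unsigned char tha[ETH_ALEN];	/* target hardware address */
};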
diff --git a/net/core/lwt_bpf.c b/net/core/lwt_bpf.c
index 71bb3e2..0cfe7b0 100644
--- a/net/core/lwt_bpf.c
+++ b/net/core/lwt_bpf.c
@@ -237,7 +237,7 @@ static const struct nla_policy bpf_nl_policy[LWT_BPF_MAX + 1] = {
[LWT_BPF_XMIT_HEADROOM] = { .type = NLA_U32 },
};
-static int bpf_build_state(struct net_device *dev, struct nlattr *nla,
+static int bpf_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -352,7 +352,7 @@ static int bpf_encap_nlsize(struct lwtunnel_state *lwtstate)
0;
}
-int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
+static int bpf_lwt_prog_cmp(struct bpf_lwt_prog *a, struct bpf_lwt_prog *b)
{
/* FIXME:
* The LWT state is currently rebuilt for delete requests which
@@ -386,6 +386,7 @@ static const struct lwtunnel_encap_ops bpf_encap_ops = {
.fill_encap = bpf_fill_encap_info,
.get_encap_size = bpf_encap_nlsize,
.cmp_encap = bpf_encap_cmp,
+ .owner = THIS_MODULE,
};
static int __init bpf_lwt_init(void)
diff --git a/net/core/lwtunnel.c b/net/core/lwtunnel.c
index a5d4e86..6df9f8f 100644
--- a/net/core/lwtunnel.c
+++ b/net/core/lwtunnel.c
@@ -26,6 +26,7 @@
#include <net/lwtunnel.h>
#include <net/rtnetlink.h>
#include <net/ip6_fib.h>
+#include <net/nexthop.h>
#ifdef CONFIG_MODULES
@@ -100,7 +101,7 @@ int lwtunnel_encap_del_ops(const struct lwtunnel_encap_ops *ops,
}
EXPORT_SYMBOL(lwtunnel_encap_del_ops);
-int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
+int lwtunnel_build_state(u16 encap_type,
struct nlattr *encap, unsigned int family,
const void *cfg, struct lwtunnel_state **lws)
{
@@ -114,25 +115,77 @@ int lwtunnel_build_state(struct net_device *dev, u16 encap_type,
ret = -EOPNOTSUPP;
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
+ if (likely(ops && ops->build_state && try_module_get(ops->owner))) {
+ ret = ops->build_state(encap, family, cfg, lws);
+ if (ret)
+ module_put(ops->owner);
+ }
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(lwtunnel_build_state);
+
+int lwtunnel_valid_encap_type(u16 encap_type)
+{
+ const struct lwtunnel_encap_ops *ops;
+ int ret = -EINVAL;
+
+ if (encap_type == LWTUNNEL_ENCAP_NONE ||
+ encap_type > LWTUNNEL_ENCAP_MAX)
+ return ret;
+
+ rcu_read_lock();
+ ops = rcu_dereference(lwtun_encaps[encap_type]);
+ rcu_read_unlock();
#ifdef CONFIG_MODULES
if (!ops) {
const char *encap_type_str = lwtunnel_encap_str(encap_type);
if (encap_type_str) {
- rcu_read_unlock();
+ __rtnl_unlock();
request_module("rtnl-lwt-%s", encap_type_str);
+ rtnl_lock();
+
rcu_read_lock();
ops = rcu_dereference(lwtun_encaps[encap_type]);
+ rcu_read_unlock();
}
}
#endif
- if (likely(ops && ops->build_state))
- ret = ops->build_state(dev, encap, family, cfg, lws);
- rcu_read_unlock();
+ return ops ? 0 : -EOPNOTSUPP;
+}
+EXPORT_SYMBOL(lwtunnel_valid_encap_type);
- return ret;
+int lwtunnel_valid_encap_type_attr(struct nlattr *attr, int remaining)
+{
+ struct rtnexthop *rtnh = (struct rtnexthop *)attr;
+ struct nlattr *nla_entype;
+ struct nlattr *attrs;
+ struct nlattr *nla;
+ u16 encap_type;
+ int attrlen;
+
+ while (rtnh_ok(rtnh, remaining)) {
+ attrlen = rtnh_attrlen(rtnh);
+ if (attrlen > 0) {
+ attrs = rtnh_attrs(rtnh);
+ nla = nla_find(attrs, attrlen, RTA_ENCAP);
+ nla_entype = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
+
+ if (nla_entype) {
+ encap_type = nla_get_u16(nla_entype);
+
+ if (lwtunnel_valid_encap_type(encap_type) != 0)
+ return -EOPNOTSUPP;
+ }
+ }
+ rtnh = rtnh_next(rtnh, &remaining);
+ }
+
+ return 0;
}
-EXPORT_SYMBOL(lwtunnel_build_state);
+EXPORT_SYMBOL(lwtunnel_valid_encap_type_attr);
void lwtstate_free(struct lwtunnel_state *lws)
{
@@ -144,6 +197,7 @@ void lwtstate_free(struct lwtunnel_state *lws)
} else {
kfree(lws);
}
+ module_put(ops->owner);
}
EXPORT_SYMBOL(lwtstate_free);
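
Because an lwtunnel state can outlive the netlink transaction that created it, the core now pins the providing module: try_module_get(ops->owner) before build_state(), module_put() on build failure and again in lwtstate_free() when the state dies. Each encap provider therefore advertises its owner when registering, as lwt_bpf.c does above; sketched for a generic provider (name illustrative):

static const struct lwtunnel_encap_ops example_encap_ops = {
	/* .build_state, .fill_encap, .get_encap_size, .cmp_encap, ... */
	.owner = THIS_MODULE,	/* pinned while any lwtstate references us */
};

The separate lwtunnel_valid_encap_type() path also moves module autoloading out from under the RCU read lock, dropping and re-taking the RTNL around request_module().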
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 75e3ea7..adfb54b 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -837,8 +837,7 @@ static void copy_rtnl_link_stats(struct rtnl_link_stats *a,
static inline int rtnl_vfinfo_size(const struct net_device *dev,
u32 ext_filter_mask)
{
- if (dev->dev.parent && dev_is_pci(dev->dev.parent) &&
- (ext_filter_mask & RTEXT_FILTER_VF)) {
+ if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF)) {
int num_vfs = dev_num_vf(dev->dev.parent);
size_t size = nla_total_size(0);
size += num_vfs *
@@ -2571,7 +2570,7 @@ replay:
return -ENODEV;
}
- if (tb[IFLA_MAP] || tb[IFLA_MASTER] || tb[IFLA_PROTINFO])
+ if (tb[IFLA_MAP] || tb[IFLA_PROTINFO])
return -EOPNOTSUPP;
if (!ops) {
@@ -2653,6 +2652,11 @@ replay:
if (err < 0)
goto out_unregister;
}
+ if (tb[IFLA_MASTER]) {
+ err = do_set_master(dev, nla_get_u32(tb[IFLA_MASTER]));
+ if (err)
+ goto out_unregister;
+ }
out:
if (link_net)
put_net(link_net);
@@ -3829,6 +3833,39 @@ static int rtnl_fill_statsinfo(struct sk_buff *skb, struct net_device *dev,
*idxattr = 0;
}
+ if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, *idxattr)) {
+ struct rtnl_af_ops *af_ops;
+
+ *idxattr = IFLA_STATS_AF_SPEC;
+ attr = nla_nest_start(skb, IFLA_STATS_AF_SPEC);
+ if (!attr)
+ goto nla_put_failure;
+
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+ if (af_ops->fill_stats_af) {
+ struct nlattr *af;
+ int err;
+
+ af = nla_nest_start(skb, af_ops->family);
+ if (!af)
+ goto nla_put_failure;
+
+ err = af_ops->fill_stats_af(skb, dev);
+
+ if (err == -ENODATA)
+ nla_nest_cancel(skb, af);
+ else if (err < 0)
+ goto nla_put_failure;
+
+ nla_nest_end(skb, af);
+ }
+ }
+
+ nla_nest_end(skb, attr);
+
+ *idxattr = 0;
+ }
+
nlmsg_end(skb, nlh);
return 0;
@@ -3885,6 +3922,23 @@ static size_t if_nlmsg_stats_size(const struct net_device *dev,
if (stats_attr_valid(filter_mask, IFLA_STATS_LINK_OFFLOAD_XSTATS, 0))
size += rtnl_get_offload_stats_size(dev);
+ if (stats_attr_valid(filter_mask, IFLA_STATS_AF_SPEC, 0)) {
+ struct rtnl_af_ops *af_ops;
+
+ /* for IFLA_STATS_AF_SPEC */
+ size += nla_total_size(0);
+
+ list_for_each_entry(af_ops, &rtnl_af_ops, list) {
+ if (af_ops->get_stats_af_size) {
+ size += nla_total_size(
+ af_ops->get_stats_af_size(dev));
+
+ /* for AF_* */
+ size += nla_total_size(0);
+ }
+ }
+ }
+
return size;
}
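
RTM_GETSTATS gains a per-address-family nest, IFLA_STATS_AF_SPEC, sized and filled through two new rtnl_af_ops callbacks; a fill callback may return -ENODATA to have its empty nest cancelled rather than emitted. A family opting in would register something like (callback names beyond the two hooks are illustrative):

static const struct rtnl_af_ops example_af_ops = {
	.family			= AF_INET6,
	.fill_stats_af		= example_fill_stats_af,	/* emit the attributes */
	.get_stats_af_size	= example_stats_af_size,	/* worst-case size     */
};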
diff --git a/net/core/secure_seq.c b/net/core/secure_seq.c
index 3a9fcec..758f140b 100644
--- a/net/core/secure_seq.c
+++ b/net/core/secure_seq.c
@@ -122,7 +122,9 @@ u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
{
u64 seq;
net_secret_init();
- seq = siphash_3u32(saddr, daddr, (u32)sport << 16 | dport, &net_secret);
+ seq = siphash_3u32((__force u32)saddr, (__force u32)daddr,
+ (__force u32)sport << 16 | (__force u32)dport,
+ &net_secret);
seq += ktime_get_real_ns();
seq &= (1ull << 48) - 1;
return seq;
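
This hunk is purely a sparse annotation: siphash_3u32() takes host-order u32s, and saddr/daddr/ports are fed in as opaque bytes, so each cast is marked __force to record that the missing ntohl()/ntohs() is intentional, e.g.:

	u32 a = (__force u32)saddr;	/* __be32 used as raw hash input, no swap */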
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index adec4bf..26c1344 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -369,7 +369,7 @@ static void *__netdev_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, fragsz, gfp_mask);
+ data = page_frag_alloc(nc, fragsz, gfp_mask);
local_irq_restore(flags);
return data;
}
@@ -391,7 +391,7 @@ static void *__napi_alloc_frag(unsigned int fragsz, gfp_t gfp_mask)
{
struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
- return __alloc_page_frag(&nc->page, fragsz, gfp_mask);
+ return page_frag_alloc(&nc->page, fragsz, gfp_mask);
}
void *napi_alloc_frag(unsigned int fragsz)
@@ -441,7 +441,7 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
local_irq_save(flags);
nc = this_cpu_ptr(&netdev_alloc_cache);
- data = __alloc_page_frag(nc, len, gfp_mask);
+ data = page_frag_alloc(nc, len, gfp_mask);
pfmemalloc = nc->pfmemalloc;
local_irq_restore(flags);
@@ -505,7 +505,7 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
if (sk_memalloc_socks())
gfp_mask |= __GFP_MEMALLOC;
- data = __alloc_page_frag(&nc->page, len, gfp_mask);
+ data = page_frag_alloc(&nc->page, len, gfp_mask);
if (unlikely(!data))
return NULL;
@@ -1192,10 +1192,10 @@ EXPORT_SYMBOL(__pskb_copy_fclone);
int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
gfp_t gfp_mask)
{
- int i;
- u8 *data;
- int size = nhead + skb_end_offset(skb) + ntail;
+ int i, osize = skb_end_offset(skb);
+ int size = osize + nhead + ntail;
long off;
+ u8 *data;
BUG_ON(nhead < 0);
@@ -1257,6 +1257,14 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
skb->hdr_len = 0;
skb->nohdr = 0;
atomic_set(&skb_shinfo(skb)->dataref, 1);
+
+ /* It is not generally safe to change skb->truesize.
+ * For the moment, we only care about the rx path, or
+ * when the skb is orphaned (not attached to a socket).
+ */
+ if (!skb->sk || skb->destructor == sock_edemux)
+ skb->truesize += size - osize;
+
return 0;
nofrags:
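
Two things happen in skbuff.c: the per-CPU fragment allocator is renamed from __alloc_page_frag() to page_frag_alloc(), and pskb_expand_head() charges the head growth to skb->truesize when that is safe (rx path, or an orphaned skb). The allocator's prototype, as implied by the converted call sites:

/* Assumed prototype, inferred from the call sites above */
void *page_frag_alloc(struct page_frag_cache *nc, unsigned int fragsz,
		      gfp_t gfp_mask);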
diff --git a/net/core/sock.c b/net/core/sock.c
index 31f72f3..8b35debf 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -222,7 +222,7 @@ static const char *const af_family_key_strings[AF_MAX+1] = {
"sk_lock-AF_RXRPC" , "sk_lock-AF_ISDN" , "sk_lock-AF_PHONET" ,
"sk_lock-AF_IEEE802154", "sk_lock-AF_CAIF" , "sk_lock-AF_ALG" ,
"sk_lock-AF_NFC" , "sk_lock-AF_VSOCK" , "sk_lock-AF_KCM" ,
- "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
+ "sk_lock-AF_QIPCRTR", "sk_lock-AF_SMC" , "sk_lock-AF_MAX"
};
static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_UNSPEC", "slock-AF_UNIX" , "slock-AF_INET" ,
@@ -239,7 +239,7 @@ static const char *const af_family_slock_key_strings[AF_MAX+1] = {
"slock-AF_RXRPC" , "slock-AF_ISDN" , "slock-AF_PHONET" ,
"slock-AF_IEEE802154", "slock-AF_CAIF" , "slock-AF_ALG" ,
"slock-AF_NFC" , "slock-AF_VSOCK" ,"slock-AF_KCM" ,
- "slock-AF_SMC" , "slock-AF_MAX"
+ "slock-AF_QIPCRTR", "slock-AF_SMC" , "slock-AF_MAX"
};
static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_UNSPEC", "clock-AF_UNIX" , "clock-AF_INET" ,
@@ -256,7 +256,7 @@ static const char *const af_family_clock_key_strings[AF_MAX+1] = {
"clock-AF_RXRPC" , "clock-AF_ISDN" , "clock-AF_PHONET" ,
"clock-AF_IEEE802154", "clock-AF_CAIF" , "clock-AF_ALG" ,
"clock-AF_NFC" , "clock-AF_VSOCK" , "clock-AF_KCM" ,
- "closck-AF_smc" , "clock-AF_MAX"
+ "clock-AF_QIPCRTR", "clock-AF_SMC" , "clock-AF_MAX"
};
/*
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index d859a5c..b043ec8 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -904,7 +904,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv4_af_ops = {
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
- .bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index adfc790..cef60a4 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -227,7 +227,7 @@ static int dccp_v6_send_response(const struct sock *sk, struct request_sock *req
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -281,7 +281,7 @@ static void dccp_v6_ctl_send_reset(const struct sock *sk, struct sk_buff *rxskb)
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(skb, dst);
- ip6_xmit(ctl_sk, skb, &fl6, NULL, 0);
+ ip6_xmit(ctl_sk, skb, &fl6, 0, NULL, 0);
DCCP_INC_STATS(DCCP_MIB_OUTSEGS);
DCCP_INC_STATS(DCCP_MIB_OUTRSTS);
return;
@@ -937,7 +937,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_af_ops = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
@@ -958,7 +957,6 @@ static const struct inet_connection_sock_af_ops dccp_ipv6_mapped = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
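
Both dccp_v6 call sites now pass a mark between the flow and the options, implying that ip6_xmit() grew a parameter; the updated prototype is assumed to be:

int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
	     __u32 mark, struct ipv6_txoptions *opt, int tclass);

The .bind_conflict deletions in both af_ops tables presumably mirror the same callback being removed from struct inet_connection_sock_af_ops itself.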
diff --git a/net/dsa/Kconfig b/net/dsa/Kconfig
index 39bb5b3..9649238 100644
--- a/net/dsa/Kconfig
+++ b/net/dsa/Kconfig
@@ -15,17 +15,6 @@ config NET_DSA
if NET_DSA
-config NET_DSA_HWMON
- bool "Distributed Switch Architecture HWMON support"
- default y
- depends on HWMON && !(NET_DSA=y && HWMON=m)
- ---help---
- Say Y if you want to expose thermal sensor data on switches supported
- by the Distributed Switch Architecture.
-
- Some of those switches contain thermal sensors. This data is available
- via the hwmon sysfs interface and exposes the onboard sensors.
-
# tagging formats
config NET_DSA_TAG_BRCM
bool
diff --git a/net/dsa/Makefile b/net/dsa/Makefile
index 560b674..a3380ed 100644
--- a/net/dsa/Makefile
+++ b/net/dsa/Makefile
@@ -1,7 +1,6 @@
# the core
obj-$(CONFIG_NET_DSA) += dsa_core.o
dsa_core-y += dsa.o slave.o dsa2.o
-dsa_core-$(CONFIG_NET_DSA_HWMON) += hwmon.o
# tagging formats
dsa_core-$(CONFIG_NET_DSA_TAG_BRCM) += tag_brcm.o
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index fd53248..619e57a 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -110,8 +110,9 @@ dsa_switch_probe(struct device *parent, struct device *host_dev, int sw_addr,
/* basic switch operations **************************************************/
int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
- struct device_node *port_dn, int port)
+ struct dsa_port *dport, int port)
{
+ struct device_node *port_dn = dport->dn;
struct phy_device *phydev;
int ret, mode;
@@ -141,15 +142,15 @@ int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
static int dsa_cpu_dsa_setups(struct dsa_switch *ds, struct device *dev)
{
- struct device_node *port_dn;
+ struct dsa_port *dport;
int ret, port;
- for (port = 0; port < DSA_MAX_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
continue;
- port_dn = ds->ports[port].dn;
- ret = dsa_cpu_dsa_setup(ds, dev, port_dn, port);
+ dport = &ds->ports[port];
+ ret = dsa_cpu_dsa_setup(ds, dev, dport, port);
if (ret)
return ret;
}
@@ -217,7 +218,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
/*
* Validate supplied switch configuration.
*/
- for (i = 0; i < DSA_MAX_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
char *name;
name = cd->port_names[i];
@@ -225,12 +226,12 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
continue;
if (!strcmp(name, "cpu")) {
- if (dst->cpu_switch != -1) {
+ if (dst->cpu_switch) {
netdev_err(dst->master_netdev,
"multiple cpu ports?!\n");
return -EINVAL;
}
- dst->cpu_switch = index;
+ dst->cpu_switch = ds;
dst->cpu_port = i;
ds->cpu_port_mask |= 1 << i;
} else if (!strcmp(name, "dsa")) {
@@ -241,7 +242,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
valid_name_found = true;
}
- if (!valid_name_found && i == DSA_MAX_PORTS)
+ if (!valid_name_found && i == ds->num_ports)
return -EINVAL;
/* Make the built-in MII bus mask match the number of ports,
@@ -254,7 +255,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
* tagging protocol to the preferred tagging format of this
* switch.
*/
- if (dst->cpu_switch == index) {
+ if (dst->cpu_switch == ds) {
enum dsa_tag_protocol tag_protocol;
tag_protocol = ops->get_tag_protocol(ds);
@@ -294,7 +295,7 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
/*
* Create network devices for physical switch ports.
*/
- for (i = 0; i < DSA_MAX_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
ds->ports[i].dn = cd->port_dn[i];
if (!(ds->enabled_port_mask & (1 << i)))
@@ -316,8 +317,6 @@ static int dsa_switch_setup_one(struct dsa_switch *ds, struct device *parent)
if (ret)
return ret;
- dsa_hwmon_register(ds);
-
return 0;
}
@@ -348,8 +347,8 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
/*
* Allocate and initialise switch state.
*/
- ds = devm_kzalloc(parent, sizeof(*ds), GFP_KERNEL);
- if (ds == NULL)
+ ds = dsa_switch_alloc(parent, DSA_MAX_PORTS);
+ if (!ds)
return ERR_PTR(-ENOMEM);
ds->dst = dst;
@@ -357,7 +356,6 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
ds->cd = cd;
ds->ops = ops;
ds->priv = priv;
- ds->dev = parent;
ret = dsa_switch_setup_one(ds, parent);
if (ret)
@@ -366,8 +364,10 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
return ds;
}
-void dsa_cpu_dsa_destroy(struct device_node *port_dn)
+void dsa_cpu_dsa_destroy(struct dsa_port *port)
{
+ struct device_node *port_dn = port->dn;
+
if (of_phy_is_fixed_link(port_dn))
of_phy_deregister_fixed_link(port_dn);
}
@@ -376,10 +376,8 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
{
int port;
- dsa_hwmon_unregister(ds);
-
/* Destroy network devices for physical switch ports. */
- for (port = 0; port < DSA_MAX_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (!(ds->enabled_port_mask & (1 << port)))
continue;
@@ -390,10 +388,10 @@ static void dsa_switch_destroy(struct dsa_switch *ds)
}
/* Disable configuration of the CPU and DSA ports */
- for (port = 0; port < DSA_MAX_PORTS; port++) {
+ for (port = 0; port < ds->num_ports; port++) {
if (!(dsa_is_cpu_port(ds, port) || dsa_is_dsa_port(ds, port)))
continue;
- dsa_cpu_dsa_destroy(ds->ports[port].dn);
+ dsa_cpu_dsa_destroy(&ds->ports[port]);
/* Clearing a bit which is not set does no harm */
ds->cpu_port_mask |= ~(1 << port);
@@ -410,7 +408,7 @@ int dsa_switch_suspend(struct dsa_switch *ds)
int i, ret = 0;
/* Suspend slave network devices */
- for (i = 0; i < DSA_MAX_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
if (!dsa_is_port_initialized(ds, i))
continue;
@@ -437,7 +435,7 @@ int dsa_switch_resume(struct dsa_switch *ds)
return ret;
/* Resume slave network devices */
- for (i = 0; i < DSA_MAX_PORTS; i++) {
+ for (i = 0; i < ds->num_ports; i++) {
if (!dsa_is_port_initialized(ds, i))
continue;
@@ -757,7 +755,6 @@ static int dsa_setup_dst(struct dsa_switch_tree *dst, struct net_device *dev,
dst->pd = pd;
dst->master_netdev = dev;
- dst->cpu_switch = -1;
dst->cpu_port = -1;
for (i = 0; i < pd->nr_chips; i++) {
@@ -869,7 +866,7 @@ static void dsa_remove_dst(struct dsa_switch_tree *dst)
dsa_switch_destroy(ds);
}
- dsa_cpu_port_ethtool_restore(dst->ds[0]);
+ dsa_cpu_port_ethtool_restore(dst->cpu_switch);
dev_put(dst->master_netdev);
}
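
Two conversions run through dsa.c: per-port loops are bounded by the new ds->num_ports rather than the DSA_MAX_PORTS compile-time ceiling, and dst->cpu_switch becomes a direct pointer instead of an index compared against ds->index. The tree structure is assumed to change along these lines (other fields elided):

struct dsa_switch_tree {
	/* ... */
	struct dsa_switch *cpu_switch;	/* was: s8 cpu_switch (switch index) */
	s8 cpu_port;
	/* ... */
};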
diff --git a/net/dsa/dsa2.c b/net/dsa/dsa2.c
index bad119c..9f8cc26 100644
--- a/net/dsa/dsa2.c
+++ b/net/dsa/dsa2.c
@@ -57,7 +57,6 @@ static struct dsa_switch_tree *dsa_add_dst(u32 tree)
if (!dst)
return NULL;
dst->tree = tree;
- dst->cpu_switch = -1;
INIT_LIST_HEAD(&dst->list);
list_add_tail(&dsa_switch_trees, &dst->list);
kref_init(&dst->refcount);
@@ -79,47 +78,34 @@ static void dsa_dst_del_ds(struct dsa_switch_tree *dst,
kref_put(&dst->refcount, dsa_free_dst);
}
-static bool dsa_port_is_dsa(struct device_node *port)
+static bool dsa_port_is_valid(struct dsa_port *port)
{
- const char *name;
-
- name = of_get_property(port, "label", NULL);
- if (!name)
- return false;
-
- if (!strcmp(name, "dsa"))
- return true;
-
- return false;
+ return !!port->dn;
}
-static bool dsa_port_is_cpu(struct device_node *port)
+static bool dsa_port_is_dsa(struct dsa_port *port)
{
- const char *name;
-
- name = of_get_property(port, "label", NULL);
- if (!name)
- return false;
-
- if (!strcmp(name, "cpu"))
- return true;
+ return !!of_parse_phandle(port->dn, "link", 0);
+}
- return false;
+static bool dsa_port_is_cpu(struct dsa_port *port)
+{
+ return !!of_parse_phandle(port->dn, "ethernet", 0);
}
-static bool dsa_ds_find_port(struct dsa_switch *ds,
- struct device_node *port)
+static bool dsa_ds_find_port_dn(struct dsa_switch *ds,
+ struct device_node *port)
{
u32 index;
- for (index = 0; index < DSA_MAX_PORTS; index++)
+ for (index = 0; index < ds->num_ports; index++)
if (ds->ports[index].dn == port)
return true;
return false;
}
-static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
- struct device_node *port)
+static struct dsa_switch *dsa_dst_find_port_dn(struct dsa_switch_tree *dst,
+ struct device_node *port)
{
struct dsa_switch *ds;
u32 index;
@@ -129,7 +115,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
if (!ds)
continue;
- if (dsa_ds_find_port(ds, port))
+ if (dsa_ds_find_port_dn(ds, port))
return ds;
}
@@ -138,7 +124,7 @@ static struct dsa_switch *dsa_dst_find_port(struct dsa_switch_tree *dst,
static int dsa_port_complete(struct dsa_switch_tree *dst,
struct dsa_switch *src_ds,
- struct device_node *port,
+ struct dsa_port *port,
u32 src_port)
{
struct device_node *link;
@@ -146,11 +132,11 @@ static int dsa_port_complete(struct dsa_switch_tree *dst,
struct dsa_switch *dst_ds;
for (index = 0;; index++) {
- link = of_parse_phandle(port, "link", index);
+ link = of_parse_phandle(port->dn, "link", index);
if (!link)
break;
- dst_ds = dsa_dst_find_port(dst, link);
+ dst_ds = dsa_dst_find_port_dn(dst, link);
of_node_put(link);
if (!dst_ds)
@@ -169,13 +155,13 @@ static int dsa_port_complete(struct dsa_switch_tree *dst,
*/
static int dsa_ds_complete(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
- struct device_node *port;
+ struct dsa_port *port;
u32 index;
int err;
- for (index = 0; index < DSA_MAX_PORTS; index++) {
- port = ds->ports[index].dn;
- if (!port)
+ for (index = 0; index < ds->num_ports; index++) {
+ port = &ds->ports[index];
+ if (!dsa_port_is_valid(port))
continue;
if (!dsa_port_is_dsa(port))
@@ -215,7 +201,7 @@ static int dsa_dst_complete(struct dsa_switch_tree *dst)
return 0;
}
-static int dsa_dsa_port_apply(struct device_node *port, u32 index,
+static int dsa_dsa_port_apply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
int err;
@@ -230,13 +216,13 @@ static int dsa_dsa_port_apply(struct device_node *port, u32 index,
return 0;
}
-static void dsa_dsa_port_unapply(struct device_node *port, u32 index,
+static void dsa_dsa_port_unapply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
dsa_cpu_dsa_destroy(port);
}
-static int dsa_cpu_port_apply(struct device_node *port, u32 index,
+static int dsa_cpu_port_apply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
int err;
@@ -253,7 +239,7 @@ static int dsa_cpu_port_apply(struct device_node *port, u32 index,
return 0;
}
-static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
+static void dsa_cpu_port_unapply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
dsa_cpu_dsa_destroy(port);
@@ -261,13 +247,15 @@ static void dsa_cpu_port_unapply(struct device_node *port, u32 index,
}
-static int dsa_user_port_apply(struct device_node *port, u32 index,
+static int dsa_user_port_apply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
const char *name;
int err;
- name = of_get_property(port, "label", NULL);
+ name = of_get_property(port->dn, "label", NULL);
+ if (!name)
+ name = "eth%d";
err = dsa_slave_create(ds, ds->dev, index, name);
if (err) {
@@ -279,7 +267,7 @@ static int dsa_user_port_apply(struct device_node *port, u32 index,
return 0;
}
-static void dsa_user_port_unapply(struct device_node *port, u32 index,
+static void dsa_user_port_unapply(struct dsa_port *port, u32 index,
struct dsa_switch *ds)
{
if (ds->ports[index].netdev) {
@@ -291,7 +279,7 @@ static void dsa_user_port_unapply(struct device_node *port, u32 index,
static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
- struct device_node *port;
+ struct dsa_port *port;
u32 index;
int err;
@@ -324,9 +312,9 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
return err;
}
- for (index = 0; index < DSA_MAX_PORTS; index++) {
- port = ds->ports[index].dn;
- if (!port)
+ for (index = 0; index < ds->num_ports; index++) {
+ port = &ds->ports[index];
+ if (!dsa_port_is_valid(port))
continue;
if (dsa_port_is_dsa(port)) {
@@ -353,12 +341,12 @@ static int dsa_ds_apply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
static void dsa_ds_unapply(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
- struct device_node *port;
+ struct dsa_port *port;
u32 index;
- for (index = 0; index < DSA_MAX_PORTS; index++) {
- port = ds->ports[index].dn;
- if (!port)
+ for (index = 0; index < ds->num_ports; index++) {
+ port = &ds->ports[index];
+ if (!dsa_port_is_valid(port))
continue;
if (dsa_port_is_dsa(port)) {
@@ -394,9 +382,11 @@ static int dsa_dst_apply(struct dsa_switch_tree *dst)
return err;
}
- err = dsa_cpu_port_ethtool_setup(dst->ds[0]);
- if (err)
- return err;
+ if (dst->cpu_switch) {
+ err = dsa_cpu_port_ethtool_setup(dst->cpu_switch);
+ if (err)
+ return err;
+ }
/* If we use a tagging format that doesn't have an ethertype
* field, make sure that all packets from this point on get
@@ -433,13 +423,14 @@ static void dsa_dst_unapply(struct dsa_switch_tree *dst)
dsa_ds_unapply(dst, ds);
}
- dsa_cpu_port_ethtool_restore(dst->ds[0]);
+ if (dst->cpu_switch)
+ dsa_cpu_port_ethtool_restore(dst->cpu_switch);
pr_info("DSA: tree %d unapplied\n", dst->tree);
dst->applied = false;
}
-static int dsa_cpu_parse(struct device_node *port, u32 index,
+static int dsa_cpu_parse(struct dsa_port *port, u32 index,
struct dsa_switch_tree *dst,
struct dsa_switch *ds)
{
@@ -447,7 +438,7 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
struct net_device *ethernet_dev;
struct device_node *ethernet;
- ethernet = of_parse_phandle(port, "ethernet", 0);
+ ethernet = of_parse_phandle(port->dn, "ethernet", 0);
if (!ethernet)
return -EINVAL;
@@ -461,8 +452,8 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
if (!dst->master_netdev)
dst->master_netdev = ethernet_dev;
- if (dst->cpu_switch == -1) {
- dst->cpu_switch = ds->index;
+ if (!dst->cpu_switch) {
+ dst->cpu_switch = ds;
dst->cpu_port = index;
}
@@ -480,13 +471,13 @@ static int dsa_cpu_parse(struct device_node *port, u32 index,
static int dsa_ds_parse(struct dsa_switch_tree *dst, struct dsa_switch *ds)
{
- struct device_node *port;
+ struct dsa_port *port;
u32 index;
int err;
- for (index = 0; index < DSA_MAX_PORTS; index++) {
- port = ds->ports[index].dn;
- if (!port)
+ for (index = 0; index < ds->num_ports; index++) {
+ port = &ds->ports[index];
+ if (!dsa_port_is_valid(port))
continue;
if (dsa_port_is_cpu(port)) {
@@ -538,7 +529,7 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
if (err)
return err;
- if (reg >= DSA_MAX_PORTS)
+ if (reg >= ds->num_ports)
return -EINVAL;
ds->ports[reg].dn = port;
@@ -547,14 +538,14 @@ static int dsa_parse_ports_dn(struct device_node *ports, struct dsa_switch *ds)
* to have access to a correct value, just like what
* net/dsa/dsa.c::dsa_switch_setup_one does.
*/
- if (!dsa_port_is_cpu(port))
+ if (!dsa_port_is_cpu(&ds->ports[reg]))
ds->enabled_port_mask |= 1 << reg;
}
return 0;
}
-static int dsa_parse_member(struct device_node *np, u32 *tree, u32 *index)
+static int dsa_parse_member_dn(struct device_node *np, u32 *tree, u32 *index)
{
int err;
@@ -592,17 +583,19 @@ static struct device_node *dsa_get_ports(struct dsa_switch *ds,
return ports;
}
-static int _dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+static int _dsa_register_switch(struct dsa_switch *ds, struct device *dev)
{
- struct device_node *ports = dsa_get_ports(ds, np);
+ struct device_node *np = dev->of_node;
struct dsa_switch_tree *dst;
+ struct device_node *ports;
u32 tree, index;
int i, err;
- err = dsa_parse_member(np, &tree, &index);
+ err = dsa_parse_member_dn(np, &tree, &index);
if (err)
return err;
+ ports = dsa_get_ports(ds, np);
if (IS_ERR(ports))
return PTR_ERR(ports);
@@ -673,12 +666,34 @@ out:
return err;
}
-int dsa_register_switch(struct dsa_switch *ds, struct device_node *np)
+struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n)
+{
+ size_t size = sizeof(struct dsa_switch) + n * sizeof(struct dsa_port);
+ struct dsa_switch *ds;
+ int i;
+
+ ds = devm_kzalloc(dev, size, GFP_KERNEL);
+ if (!ds)
+ return NULL;
+
+ ds->dev = dev;
+ ds->num_ports = n;
+
+ for (i = 0; i < ds->num_ports; ++i) {
+ ds->ports[i].index = i;
+ ds->ports[i].ds = ds;
+ }
+
+ return ds;
+}
+EXPORT_SYMBOL_GPL(dsa_switch_alloc);
+
+int dsa_register_switch(struct dsa_switch *ds, struct device *dev)
{
int err;
mutex_lock(&dsa2_mutex);
- err = _dsa_register_switch(ds, np);
+ err = _dsa_register_switch(ds, dev);
mutex_unlock(&dsa2_mutex);
return err;
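A minimal sketch (not from this patch) of how a switch driver adopts the API added above: dsa_switch_alloc() sizes the flexible ds->ports[] array and pre-fills each port's index and ds back-pointer, and dsa_register_switch() now takes the struct device rather than a device_node. All foo_* names and the port count are hypothetical.

#include <linux/platform_device.h>
#include <net/dsa.h>

static int foo_switch_probe(struct platform_device *pdev)
{
	struct dsa_switch *ds;

	ds = dsa_switch_alloc(&pdev->dev, 12);	/* e.g. a 12-port switch */
	if (!ds)
		return -ENOMEM;

	ds->ops = &foo_switch_ops;		/* hypothetical dsa_switch_ops */
	ds->priv = dev_get_drvdata(&pdev->dev);

	return dsa_register_switch(ds, &pdev->dev);
}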
diff --git a/net/dsa/dsa_priv.h b/net/dsa/dsa_priv.h
index 7e3385e..a5509b7 100644
--- a/net/dsa/dsa_priv.h
+++ b/net/dsa/dsa_priv.h
@@ -25,12 +25,8 @@ struct dsa_slave_priv {
struct sk_buff * (*xmit)(struct sk_buff *skb,
struct net_device *dev);
- /*
- * Which switch this port is a part of, and the port index
- * for this port.
- */
- struct dsa_switch *parent;
- u8 port;
+ /* DSA port data, such as switch, port index, etc. */
+ struct dsa_port *dp;
/*
* The phylib phy_device pointer for the PHY connected
@@ -42,29 +38,22 @@ struct dsa_slave_priv {
int old_pause;
int old_duplex;
- struct net_device *bridge_dev;
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *netpoll;
#endif
+
+ /* TC context */
+ struct list_head mall_tc_list;
};
/* dsa.c */
int dsa_cpu_dsa_setup(struct dsa_switch *ds, struct device *dev,
- struct device_node *port_dn, int port);
-void dsa_cpu_dsa_destroy(struct device_node *port_dn);
+ struct dsa_port *dport, int port);
+void dsa_cpu_dsa_destroy(struct dsa_port *dport);
const struct dsa_device_ops *dsa_resolve_tag_protocol(int tag_protocol);
int dsa_cpu_port_ethtool_setup(struct dsa_switch *ds);
void dsa_cpu_port_ethtool_restore(struct dsa_switch *ds);
-/* hwmon.c */
-#ifdef CONFIG_NET_DSA_HWMON
-void dsa_hwmon_register(struct dsa_switch *ds);
-void dsa_hwmon_unregister(struct dsa_switch *ds);
-#else
-static inline void dsa_hwmon_register(struct dsa_switch *ds) { }
-static inline void dsa_hwmon_unregister(struct dsa_switch *ds) { }
-#endif
-
/* slave.c */
extern const struct dsa_device_ops notag_netdev_ops;
void dsa_slave_mii_bus_init(struct dsa_switch *ds);
diff --git a/net/dsa/hwmon.c b/net/dsa/hwmon.c
deleted file mode 100644
index 08831a8..0000000
--- a/net/dsa/hwmon.c
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * net/dsa/hwmon.c - HWMON subsystem support
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#include <linux/ctype.h>
-#include <linux/hwmon.h>
-#include <net/dsa.h>
-
-#include "dsa_priv.h"
-
-static ssize_t temp1_input_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dsa_switch *ds = dev_get_drvdata(dev);
- int temp, ret;
-
- ret = ds->ops->get_temp(ds, &temp);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", temp * 1000);
-}
-static DEVICE_ATTR_RO(temp1_input);
-
-static ssize_t temp1_max_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dsa_switch *ds = dev_get_drvdata(dev);
- int temp, ret;
-
- ret = ds->ops->get_temp_limit(ds, &temp);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", temp * 1000);
-}
-
-static ssize_t temp1_max_store(struct device *dev,
- struct device_attribute *attr, const char *buf,
- size_t count)
-{
- struct dsa_switch *ds = dev_get_drvdata(dev);
- int temp, ret;
-
- ret = kstrtoint(buf, 0, &temp);
- if (ret < 0)
- return ret;
-
- ret = ds->ops->set_temp_limit(ds, DIV_ROUND_CLOSEST(temp, 1000));
- if (ret < 0)
- return ret;
-
- return count;
-}
-static DEVICE_ATTR_RW(temp1_max);
-
-static ssize_t temp1_max_alarm_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct dsa_switch *ds = dev_get_drvdata(dev);
- bool alarm;
- int ret;
-
- ret = ds->ops->get_temp_alarm(ds, &alarm);
- if (ret < 0)
- return ret;
-
- return sprintf(buf, "%d\n", alarm);
-}
-static DEVICE_ATTR_RO(temp1_max_alarm);
-
-static struct attribute *dsa_hwmon_attrs[] = {
- &dev_attr_temp1_input.attr, /* 0 */
- &dev_attr_temp1_max.attr, /* 1 */
- &dev_attr_temp1_max_alarm.attr, /* 2 */
- NULL
-};
-
-static umode_t dsa_hwmon_attrs_visible(struct kobject *kobj,
- struct attribute *attr, int index)
-{
- struct device *dev = container_of(kobj, struct device, kobj);
- struct dsa_switch *ds = dev_get_drvdata(dev);
- const struct dsa_switch_ops *ops = ds->ops;
- umode_t mode = attr->mode;
-
- if (index == 1) {
- if (!ops->get_temp_limit)
- mode = 0;
- else if (!ops->set_temp_limit)
- mode &= ~S_IWUSR;
- } else if (index == 2 && !ops->get_temp_alarm) {
- mode = 0;
- }
- return mode;
-}
-
-static const struct attribute_group dsa_hwmon_group = {
- .attrs = dsa_hwmon_attrs,
- .is_visible = dsa_hwmon_attrs_visible,
-};
-__ATTRIBUTE_GROUPS(dsa_hwmon);
-
-void dsa_hwmon_register(struct dsa_switch *ds)
-{
- const char *netname = netdev_name(ds->dst->master_netdev);
- char hname[IFNAMSIZ + 1];
- int i, j;
-
- /* If the switch provides temperature accessors, register with hardware
- * monitoring subsystem. Treat registration error as non-fatal.
- */
- if (!ds->ops->get_temp)
- return;
-
- /* Create valid hwmon 'name' attribute */
- for (i = j = 0; i < IFNAMSIZ && netname[i]; i++) {
- if (isalnum(netname[i]))
- hname[j++] = netname[i];
- }
- hname[j] = '\0';
- scnprintf(ds->hwmon_name, sizeof(ds->hwmon_name), "%s_dsa%d", hname,
- ds->index);
- ds->hwmon_dev = hwmon_device_register_with_groups(NULL, ds->hwmon_name,
- ds, dsa_hwmon_groups);
- if (IS_ERR(ds->hwmon_dev)) {
- pr_warn("DSA: failed to register HWMON subsystem for switch %d\n",
- ds->index);
- ds->hwmon_dev = NULL;
- } else {
- pr_info("DSA: registered HWMON subsystem for switch %d\n",
- ds->index);
- }
-}
-
-void dsa_hwmon_unregister(struct dsa_switch *ds)
-{
- if (ds->hwmon_dev) {
- hwmon_device_unregister(ds->hwmon_dev);
- ds->hwmon_dev = NULL;
- }
-}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 5cd5b81..09fc3e9 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -16,12 +16,17 @@
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/mdio.h>
+#include <linux/list.h>
#include <net/rtnetlink.h>
#include <net/switchdev.h>
+#include <net/pkt_cls.h>
+#include <net/tc_act/tc_mirred.h>
#include <linux/if_bridge.h>
#include <linux/netpoll.h>
#include "dsa_priv.h"
+static bool dsa_slave_dev_check(struct net_device *dev);
+
/* slave mii_bus handling ***************************************************/
static int dsa_slave_phy_read(struct mii_bus *bus, int addr, int reg)
{
@@ -61,12 +66,12 @@ static int dsa_slave_get_iflink(const struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- return p->parent->dst->master_netdev->ifindex;
+ return p->dp->ds->dst->master_netdev->ifindex;
}
-static inline bool dsa_port_is_bridged(struct dsa_slave_priv *p)
+static inline bool dsa_port_is_bridged(struct dsa_port *dp)
{
- return !!p->bridge_dev;
+ return !!dp->bridge_dev;
}
static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
@@ -96,9 +101,9 @@ static void dsa_port_set_stp_state(struct dsa_switch *ds, int port, u8 state)
static int dsa_slave_open(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->parent->dst->master_netdev;
- struct dsa_switch *ds = p->parent;
- u8 stp_state = dsa_port_is_bridged(p) ?
+ struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct dsa_switch *ds = p->dp->ds;
+ u8 stp_state = dsa_port_is_bridged(p->dp) ?
BR_STATE_BLOCKING : BR_STATE_FORWARDING;
int err;
@@ -123,12 +128,12 @@ static int dsa_slave_open(struct net_device *dev)
}
if (ds->ops->port_enable) {
- err = ds->ops->port_enable(ds, p->port, p->phy);
+ err = ds->ops->port_enable(ds, p->dp->index, p->phy);
if (err)
goto clear_promisc;
}
- dsa_port_set_stp_state(ds, p->port, stp_state);
+ dsa_port_set_stp_state(ds, p->dp->index, stp_state);
if (p->phy)
phy_start(p->phy);
@@ -151,8 +156,8 @@ out:
static int dsa_slave_close(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->parent->dst->master_netdev;
- struct dsa_switch *ds = p->parent;
+ struct net_device *master = p->dp->ds->dst->master_netdev;
+ struct dsa_switch *ds = p->dp->ds;
if (p->phy)
phy_stop(p->phy);
@@ -168,9 +173,9 @@ static int dsa_slave_close(struct net_device *dev)
dev_uc_del(master, dev->dev_addr);
if (ds->ops->port_disable)
- ds->ops->port_disable(ds, p->port, p->phy);
+ ds->ops->port_disable(ds, p->dp->index, p->phy);
- dsa_port_set_stp_state(ds, p->port, BR_STATE_DISABLED);
+ dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_DISABLED);
return 0;
}
@@ -178,7 +183,7 @@ static int dsa_slave_close(struct net_device *dev)
static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->parent->dst->master_netdev;
+ struct net_device *master = p->dp->ds->dst->master_netdev;
if (change & IFF_ALLMULTI)
dev_set_allmulti(master, dev->flags & IFF_ALLMULTI ? 1 : -1);
@@ -189,7 +194,7 @@ static void dsa_slave_change_rx_flags(struct net_device *dev, int change)
static void dsa_slave_set_rx_mode(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->parent->dst->master_netdev;
+ struct net_device *master = p->dp->ds->dst->master_netdev;
dev_mc_sync(master, dev);
dev_uc_sync(master, dev);
@@ -198,7 +203,7 @@ static void dsa_slave_set_rx_mode(struct net_device *dev)
static int dsa_slave_set_mac_address(struct net_device *dev, void *a)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct net_device *master = p->parent->dst->master_netdev;
+ struct net_device *master = p->dp->ds->dst->master_netdev;
struct sockaddr *addr = a;
int err;
@@ -228,16 +233,17 @@ static int dsa_slave_port_vlan_add(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_port *dp = p->dp;
+ struct dsa_switch *ds = dp->ds;
if (switchdev_trans_ph_prepare(trans)) {
if (!ds->ops->port_vlan_prepare || !ds->ops->port_vlan_add)
return -EOPNOTSUPP;
- return ds->ops->port_vlan_prepare(ds, p->port, vlan, trans);
+ return ds->ops->port_vlan_prepare(ds, dp->index, vlan, trans);
}
- ds->ops->port_vlan_add(ds, p->port, vlan, trans);
+ ds->ops->port_vlan_add(ds, dp->index, vlan, trans);
return 0;
}
@@ -246,12 +252,12 @@ static int dsa_slave_port_vlan_del(struct net_device *dev,
const struct switchdev_obj_port_vlan *vlan)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (!ds->ops->port_vlan_del)
return -EOPNOTSUPP;
- return ds->ops->port_vlan_del(ds, p->port, vlan);
+ return ds->ops->port_vlan_del(ds, p->dp->index, vlan);
}
static int dsa_slave_port_vlan_dump(struct net_device *dev,
@@ -259,10 +265,10 @@ static int dsa_slave_port_vlan_dump(struct net_device *dev,
switchdev_obj_dump_cb_t *cb)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->port_vlan_dump)
- return ds->ops->port_vlan_dump(ds, p->port, vlan, cb);
+ return ds->ops->port_vlan_dump(ds, p->dp->index, vlan, cb);
return -EOPNOTSUPP;
}
@@ -272,16 +278,16 @@ static int dsa_slave_port_fdb_add(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (switchdev_trans_ph_prepare(trans)) {
if (!ds->ops->port_fdb_prepare || !ds->ops->port_fdb_add)
return -EOPNOTSUPP;
- return ds->ops->port_fdb_prepare(ds, p->port, fdb, trans);
+ return ds->ops->port_fdb_prepare(ds, p->dp->index, fdb, trans);
}
- ds->ops->port_fdb_add(ds, p->port, fdb, trans);
+ ds->ops->port_fdb_add(ds, p->dp->index, fdb, trans);
return 0;
}
@@ -290,11 +296,11 @@ static int dsa_slave_port_fdb_del(struct net_device *dev,
const struct switchdev_obj_port_fdb *fdb)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
int ret = -EOPNOTSUPP;
if (ds->ops->port_fdb_del)
- ret = ds->ops->port_fdb_del(ds, p->port, fdb);
+ ret = ds->ops->port_fdb_del(ds, p->dp->index, fdb);
return ret;
}
@@ -304,10 +310,10 @@ static int dsa_slave_port_fdb_dump(struct net_device *dev,
switchdev_obj_dump_cb_t *cb)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->port_fdb_dump)
- return ds->ops->port_fdb_dump(ds, p->port, fdb, cb);
+ return ds->ops->port_fdb_dump(ds, p->dp->index, fdb, cb);
return -EOPNOTSUPP;
}
@@ -317,16 +323,16 @@ static int dsa_slave_port_mdb_add(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (switchdev_trans_ph_prepare(trans)) {
if (!ds->ops->port_mdb_prepare || !ds->ops->port_mdb_add)
return -EOPNOTSUPP;
- return ds->ops->port_mdb_prepare(ds, p->port, mdb, trans);
+ return ds->ops->port_mdb_prepare(ds, p->dp->index, mdb, trans);
}
- ds->ops->port_mdb_add(ds, p->port, mdb, trans);
+ ds->ops->port_mdb_add(ds, p->dp->index, mdb, trans);
return 0;
}
@@ -335,10 +341,10 @@ static int dsa_slave_port_mdb_del(struct net_device *dev,
const struct switchdev_obj_port_mdb *mdb)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->port_mdb_del)
- return ds->ops->port_mdb_del(ds, p->port, mdb);
+ return ds->ops->port_mdb_del(ds, p->dp->index, mdb);
return -EOPNOTSUPP;
}
@@ -348,10 +354,10 @@ static int dsa_slave_port_mdb_dump(struct net_device *dev,
switchdev_obj_dump_cb_t *cb)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->port_mdb_dump)
- return ds->ops->port_mdb_dump(ds, p->port, mdb, cb);
+ return ds->ops->port_mdb_dump(ds, p->dp->index, mdb, cb);
return -EOPNOTSUPP;
}
@@ -371,12 +377,12 @@ static int dsa_slave_stp_state_set(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (switchdev_trans_ph_prepare(trans))
return ds->ops->port_stp_state_set ? 0 : -EOPNOTSUPP;
- dsa_port_set_stp_state(ds, p->port, attr->u.stp_state);
+ dsa_port_set_stp_state(ds, p->dp->index, attr->u.stp_state);
return 0;
}
@@ -386,14 +392,14 @@ static int dsa_slave_vlan_filtering(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
/* bridge skips -EOPNOTSUPP, so skip the prepare phase */
if (switchdev_trans_ph_prepare(trans))
return 0;
if (ds->ops->port_vlan_filtering)
- return ds->ops->port_vlan_filtering(ds, p->port,
+ return ds->ops->port_vlan_filtering(ds, p->dp->index,
attr->u.vlan_filtering);
return 0;
@@ -404,7 +410,7 @@ static int dsa_fastest_ageing_time(struct dsa_switch *ds,
{
int i;
- for (i = 0; i < DSA_MAX_PORTS; ++i) {
+ for (i = 0; i < ds->num_ports; ++i) {
struct dsa_port *dp = &ds->ports[i];
if (dp && dp->ageing_time && dp->ageing_time < ageing_time)
@@ -419,7 +425,7 @@ static int dsa_slave_ageing_time(struct net_device *dev,
struct switchdev_trans *trans)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
unsigned long ageing_jiffies = clock_t_to_jiffies(attr->u.ageing_time);
unsigned int ageing_time = jiffies_to_msecs(ageing_jiffies);
@@ -428,7 +434,7 @@ static int dsa_slave_ageing_time(struct net_device *dev,
return 0;
/* Keep the fastest ageing time in case of multiple bridges */
- ds->ports[p->port].ageing_time = ageing_time;
+ p->dp->ageing_time = ageing_time;
ageing_time = dsa_fastest_ageing_time(ds, ageing_time);
if (ds->ops->set_ageing_time)
@@ -553,39 +559,39 @@ static int dsa_slave_bridge_port_join(struct net_device *dev,
struct net_device *br)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
int ret = -EOPNOTSUPP;
- p->bridge_dev = br;
+ p->dp->bridge_dev = br;
if (ds->ops->port_bridge_join)
- ret = ds->ops->port_bridge_join(ds, p->port, br);
+ ret = ds->ops->port_bridge_join(ds, p->dp->index, br);
return ret == -EOPNOTSUPP ? 0 : ret;
}
-static void dsa_slave_bridge_port_leave(struct net_device *dev)
+static void dsa_slave_bridge_port_leave(struct net_device *dev,
+ struct net_device *br)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
+ p->dp->bridge_dev = NULL;
if (ds->ops->port_bridge_leave)
- ds->ops->port_bridge_leave(ds, p->port);
-
- p->bridge_dev = NULL;
+ ds->ops->port_bridge_leave(ds, p->dp->index, br);
/* Port left the bridge, put in BR_STATE_DISABLED by the bridge layer,
* so allow it to be in BR_STATE_FORWARDING to be kept functional
*/
- dsa_port_set_stp_state(ds, p->port, BR_STATE_FORWARDING);
+ dsa_port_set_stp_state(ds, p->dp->index, BR_STATE_FORWARDING);
}
static int dsa_slave_port_attr_get(struct net_device *dev,
struct switchdev_attr *attr)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
switch (attr->id) {
case SWITCHDEV_ATTR_ID_PORT_PARENT_ID:
@@ -633,7 +639,7 @@ static netdev_tx_t dsa_slave_xmit(struct sk_buff *skb, struct net_device *dev)
/* Queue the SKB for transmission on the parent interface, but
* do not modify its EtherType
*/
- nskb->dev = p->parent->dst->master_netdev;
+ nskb->dev = p->dp->ds->dst->master_netdev;
dev_queue_xmit(nskb);
return NETDEV_TX_OK;
@@ -680,10 +686,10 @@ static void dsa_slave_get_drvinfo(struct net_device *dev,
static int dsa_slave_get_regs_len(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->get_regs_len)
- return ds->ops->get_regs_len(ds, p->port);
+ return ds->ops->get_regs_len(ds, p->dp->index);
return -EOPNOTSUPP;
}
@@ -692,10 +698,10 @@ static void
dsa_slave_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->get_regs)
- ds->ops->get_regs(ds, p->port, regs, _p);
+ ds->ops->get_regs(ds, p->dp->index, regs, _p);
}
static int dsa_slave_nway_reset(struct net_device *dev)
@@ -723,7 +729,7 @@ static u32 dsa_slave_get_link(struct net_device *dev)
static int dsa_slave_get_eeprom_len(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->cd && ds->cd->eeprom_len)
return ds->cd->eeprom_len;
@@ -738,7 +744,7 @@ static int dsa_slave_get_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->get_eeprom)
return ds->ops->get_eeprom(ds, eeprom, data);
@@ -750,7 +756,7 @@ static int dsa_slave_set_eeprom(struct net_device *dev,
struct ethtool_eeprom *eeprom, u8 *data)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->set_eeprom)
return ds->ops->set_eeprom(ds, eeprom, data);
@@ -762,7 +768,7 @@ static void dsa_slave_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (stringset == ETH_SS_STATS) {
int len = ETH_GSTRING_LEN;
@@ -772,7 +778,7 @@ static void dsa_slave_get_strings(struct net_device *dev,
strncpy(data + 2 * len, "rx_packets", len);
strncpy(data + 3 * len, "rx_bytes", len);
if (ds->ops->get_strings)
- ds->ops->get_strings(ds, p->port, data + 4 * len);
+ ds->ops->get_strings(ds, p->dp->index, data + 4 * len);
}
}
@@ -781,7 +787,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
uint64_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->ds[0];
+ struct dsa_switch *ds = dst->cpu_switch;
s8 cpu_port = dst->cpu_port;
int count = 0;
@@ -798,7 +804,7 @@ static void dsa_cpu_port_get_ethtool_stats(struct net_device *dev,
static int dsa_cpu_port_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->ds[0];
+ struct dsa_switch *ds = dst->cpu_switch;
int count = 0;
if (dst->master_ethtool_ops.get_sset_count)
@@ -814,7 +820,7 @@ static void dsa_cpu_port_get_strings(struct net_device *dev,
uint32_t stringset, uint8_t *data)
{
struct dsa_switch_tree *dst = dev->dsa_ptr;
- struct dsa_switch *ds = dst->ds[0];
+ struct dsa_switch *ds = dst->cpu_switch;
s8 cpu_port = dst->cpu_port;
int len = ETH_GSTRING_LEN;
int mcount = 0, count;
@@ -853,20 +859,20 @@ static void dsa_slave_get_ethtool_stats(struct net_device *dev,
uint64_t *data)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
data[0] = dev->stats.tx_packets;
data[1] = dev->stats.tx_bytes;
data[2] = dev->stats.rx_packets;
data[3] = dev->stats.rx_bytes;
if (ds->ops->get_ethtool_stats)
- ds->ops->get_ethtool_stats(ds, p->port, data + 4);
+ ds->ops->get_ethtool_stats(ds, p->dp->index, data + 4);
}
static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (sset == ETH_SS_STATS) {
int count;
@@ -884,20 +890,20 @@ static int dsa_slave_get_sset_count(struct net_device *dev, int sset)
static void dsa_slave_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
if (ds->ops->get_wol)
- ds->ops->get_wol(ds, p->port, w);
+ ds->ops->get_wol(ds, p->dp->index, w);
}
static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
int ret = -EOPNOTSUPP;
if (ds->ops->set_wol)
- ret = ds->ops->set_wol(ds, p->port, w);
+ ret = ds->ops->set_wol(ds, p->dp->index, w);
return ret;
}
@@ -905,13 +911,13 @@ static int dsa_slave_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
int ret;
if (!ds->ops->set_eee)
return -EOPNOTSUPP;
- ret = ds->ops->set_eee(ds, p->port, p->phy, e);
+ ret = ds->ops->set_eee(ds, p->dp->index, p->phy, e);
if (ret)
return ret;
@@ -924,13 +930,13 @@ static int dsa_slave_set_eee(struct net_device *dev, struct ethtool_eee *e)
static int dsa_slave_get_eee(struct net_device *dev, struct ethtool_eee *e)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
int ret;
if (!ds->ops->get_eee)
return -EOPNOTSUPP;
- ret = ds->ops->get_eee(ds, p->port, e);
+ ret = ds->ops->get_eee(ds, p->dp->index, e);
if (ret)
return ret;
@@ -945,7 +951,7 @@ static int dsa_slave_netpoll_setup(struct net_device *dev,
struct netpoll_info *ni)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
struct net_device *master = ds->dst->master_netdev;
struct netpoll *netpoll;
int err = 0;
@@ -983,17 +989,144 @@ static void dsa_slave_poll_controller(struct net_device *dev)
}
#endif
-static int dsa_slave_get_phys_port_id(struct net_device *dev,
- struct netdev_phys_item_id *ppid)
+static int dsa_slave_get_phys_port_name(struct net_device *dev,
+ char *name, size_t len)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- ppid->id_len = sizeof(p->port);
- memcpy(ppid->id, &p->port, ppid->id_len);
+ if (snprintf(name, len, "p%d", p->dp->index) >= len)
+ return -EINVAL;
return 0;
}
+static struct dsa_mall_tc_entry *
+dsa_slave_mall_tc_entry_find(struct dsa_slave_priv *p,
+ unsigned long cookie)
+{
+ struct dsa_mall_tc_entry *mall_tc_entry;
+
+ list_for_each_entry(mall_tc_entry, &p->mall_tc_list, list)
+ if (mall_tc_entry->cookie == cookie)
+ return mall_tc_entry;
+
+ return NULL;
+}
+
+static int dsa_slave_add_cls_matchall(struct net_device *dev,
+ __be16 protocol,
+ struct tc_cls_matchall_offload *cls,
+ bool ingress)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = p->dp->ds;
+ struct net *net = dev_net(dev);
+ struct dsa_slave_priv *to_p;
+ struct net_device *to_dev;
+ const struct tc_action *a;
+ int err = -EOPNOTSUPP;
+ LIST_HEAD(actions);
+ int ifindex;
+
+ if (!ds->ops->port_mirror_add)
+ return err;
+
+ if (!tc_single_action(cls->exts))
+ return err;
+
+ tcf_exts_to_list(cls->exts, &actions);
+ a = list_first_entry(&actions, struct tc_action, list);
+
+ if (is_tcf_mirred_egress_mirror(a) && protocol == htons(ETH_P_ALL)) {
+ struct dsa_mall_mirror_tc_entry *mirror;
+
+ ifindex = tcf_mirred_ifindex(a);
+ to_dev = __dev_get_by_index(net, ifindex);
+ if (!to_dev)
+ return -EINVAL;
+
+ if (!dsa_slave_dev_check(to_dev))
+ return -EOPNOTSUPP;
+
+ mall_tc_entry = kzalloc(sizeof(*mall_tc_entry), GFP_KERNEL);
+ if (!mall_tc_entry)
+ return -ENOMEM;
+
+ mall_tc_entry->cookie = cls->cookie;
+ mall_tc_entry->type = DSA_PORT_MALL_MIRROR;
+ mirror = &mall_tc_entry->mirror;
+
+ to_p = netdev_priv(to_dev);
+
+ mirror->to_local_port = to_p->dp->index;
+ mirror->ingress = ingress;
+
+ err = ds->ops->port_mirror_add(ds, p->dp->index, mirror,
+ ingress);
+ if (err) {
+ kfree(mall_tc_entry);
+ return err;
+ }
+
+ list_add_tail(&mall_tc_entry->list, &p->mall_tc_list);
+ }
+
+ return 0;
+}
+
+static void dsa_slave_del_cls_matchall(struct net_device *dev,
+ struct tc_cls_matchall_offload *cls)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_mall_tc_entry *mall_tc_entry;
+ struct dsa_switch *ds = p->dp->ds;
+
+ if (!ds->ops->port_mirror_del)
+ return;
+
+ mall_tc_entry = dsa_slave_mall_tc_entry_find(p, cls->cookie);
+ if (!mall_tc_entry)
+ return;
+
+ list_del(&mall_tc_entry->list);
+
+ switch (mall_tc_entry->type) {
+ case DSA_PORT_MALL_MIRROR:
+ ds->ops->port_mirror_del(ds, p->dp->index,
+ &mall_tc_entry->mirror);
+ break;
+ default:
+ WARN_ON(1);
+ }
+
+ kfree(mall_tc_entry);
+}
+
+static int dsa_slave_setup_tc(struct net_device *dev, u32 handle,
+ __be16 protocol, struct tc_to_netdev *tc)
+{
+ bool ingress = TC_H_MAJ(handle) == TC_H_MAJ(TC_H_INGRESS);
+ int ret = -EOPNOTSUPP;
+
+ switch (tc->type) {
+ case TC_SETUP_MATCHALL:
+ switch (tc->cls_mall->command) {
+ case TC_CLSMATCHALL_REPLACE:
+ return dsa_slave_add_cls_matchall(dev, protocol,
+ tc->cls_mall,
+ ingress);
+ case TC_CLSMATCHALL_DESTROY:
+ dsa_slave_del_cls_matchall(dev, tc->cls_mall);
+ return 0;
+ }
+ default:
+ break;
+ }
+
+ return ret;
+}
+
void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
{
ops->get_sset_count = dsa_cpu_port_get_sset_count;
@@ -1001,6 +1134,30 @@ void dsa_cpu_port_ethtool_init(struct ethtool_ops *ops)
ops->get_strings = dsa_cpu_port_get_strings;
}
+static int dsa_slave_get_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc, u32 *rule_locs)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+
+ if (!ds->ops->get_rxnfc)
+ return -EOPNOTSUPP;
+
+ return ds->ops->get_rxnfc(ds, p->dp->index, nfc, rule_locs);
+}
+
+static int dsa_slave_set_rxnfc(struct net_device *dev,
+ struct ethtool_rxnfc *nfc)
+{
+ struct dsa_slave_priv *p = netdev_priv(dev);
+ struct dsa_switch *ds = p->dp->ds;
+
+ if (!ds->ops->set_rxnfc)
+ return -EOPNOTSUPP;
+
+ return ds->ops->set_rxnfc(ds, p->dp->index, nfc);
+}
+
static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_drvinfo = dsa_slave_get_drvinfo,
.get_regs_len = dsa_slave_get_regs_len,
@@ -1019,6 +1176,8 @@ static const struct ethtool_ops dsa_slave_ethtool_ops = {
.get_eee = dsa_slave_get_eee,
.get_link_ksettings = dsa_slave_get_link_ksettings,
.set_link_ksettings = dsa_slave_set_link_ksettings,
+ .get_rxnfc = dsa_slave_get_rxnfc,
+ .set_rxnfc = dsa_slave_set_rxnfc,
};
static const struct net_device_ops dsa_slave_netdev_ops = {
@@ -1041,7 +1200,8 @@ static const struct net_device_ops dsa_slave_netdev_ops = {
.ndo_bridge_getlink = switchdev_port_bridge_getlink,
.ndo_bridge_setlink = switchdev_port_bridge_setlink,
.ndo_bridge_dellink = switchdev_port_bridge_dellink,
- .ndo_get_phys_port_id = dsa_slave_get_phys_port_id,
+ .ndo_get_phys_port_name = dsa_slave_get_phys_port_name,
+ .ndo_setup_tc = dsa_slave_setup_tc,
};
static const struct switchdev_ops dsa_slave_switchdev_ops = {
@@ -1059,7 +1219,7 @@ static struct device_type dsa_type = {
static void dsa_slave_adjust_link(struct net_device *dev)
{
struct dsa_slave_priv *p = netdev_priv(dev);
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
unsigned int status_changed = 0;
if (p->old_link != p->phy->link) {
@@ -1078,7 +1238,7 @@ static void dsa_slave_adjust_link(struct net_device *dev)
}
if (ds->ops->adjust_link && status_changed)
- ds->ops->adjust_link(ds, p->port, p->phy);
+ ds->ops->adjust_link(ds, p->dp->index, p->phy);
if (status_changed)
phy_print_status(p->phy);
@@ -1092,9 +1252,9 @@ static int dsa_slave_fixed_link_update(struct net_device *dev,
if (dev) {
p = netdev_priv(dev);
- ds = p->parent;
+ ds = p->dp->ds;
if (ds->ops->fixed_link_update)
- ds->ops->fixed_link_update(ds, p->port, status);
+ ds->ops->fixed_link_update(ds, p->dp->index, status);
}
return 0;
@@ -1105,7 +1265,7 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
struct net_device *slave_dev,
int addr)
{
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
p->phy = mdiobus_get_phy(ds->slave_mii_bus, addr);
if (!p->phy) {
@@ -1116,22 +1276,20 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
/* Use already configured phy mode */
if (p->phy_interface == PHY_INTERFACE_MODE_NA)
p->phy_interface = p->phy->interface;
- phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
- p->phy_interface);
-
- return 0;
+ return phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
+ p->phy_interface);
}
static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
struct net_device *slave_dev)
{
- struct dsa_switch *ds = p->parent;
+ struct dsa_switch *ds = p->dp->ds;
struct device_node *phy_dn, *port_dn;
bool phy_is_fixed = false;
u32 phy_flags = 0;
int mode, ret;
- port_dn = ds->ports[p->port].dn;
+ port_dn = p->dp->dn;
mode = of_get_phy_mode(port_dn);
if (mode < 0)
mode = PHY_INTERFACE_MODE_NA;
@@ -1152,7 +1310,7 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
}
if (ds->ops->get_phy_flags)
- phy_flags = ds->ops->get_phy_flags(ds, p->port);
+ phy_flags = ds->ops->get_phy_flags(ds, p->dp->index);
if (phy_dn) {
int phy_id = of_mdio_parse_addr(&slave_dev->dev, phy_dn);
@@ -1187,9 +1345,10 @@ static int dsa_slave_phy_setup(struct dsa_slave_priv *p,
* MDIO bus instead
*/
if (!p->phy) {
- ret = dsa_slave_phy_connect(p, slave_dev, p->port);
+ ret = dsa_slave_phy_connect(p, slave_dev, p->dp->index);
if (ret) {
- netdev_err(slave_dev, "failed to connect to port %d: %d\n", p->port, ret);
+ netdev_err(slave_dev, "failed to connect to port %d: %d\n",
+ p->dp->index, ret);
if (phy_is_fixed)
of_phy_deregister_fixed_link(port_dn);
return ret;
@@ -1214,6 +1373,8 @@ int dsa_slave_suspend(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
+ netif_device_detach(slave_dev);
+
if (p->phy) {
phy_stop(p->phy);
p->old_pause = -1;
@@ -1257,7 +1418,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
if (slave_dev == NULL)
return -ENOMEM;
- slave_dev->features = master->vlan_features;
+ slave_dev->features = master->vlan_features | NETIF_F_HW_TC;
+ slave_dev->hw_features |= NETIF_F_HW_TC;
slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
eth_hw_addr_inherit(slave_dev, master);
slave_dev->priv_flags |= IFF_NO_QUEUE;
@@ -1275,8 +1437,8 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
slave_dev->vlan_features = master->vlan_features;
p = netdev_priv(slave_dev);
- p->parent = ds;
- p->port = port;
+ p->dp = &ds->ports[port];
+ INIT_LIST_HEAD(&p->mall_tc_list);
p->xmit = dst->tag_ops->xmit;
p->old_pause = -1;
@@ -1309,10 +1471,9 @@ int dsa_slave_create(struct dsa_switch *ds, struct device *parent,
void dsa_slave_destroy(struct net_device *slave_dev)
{
struct dsa_slave_priv *p = netdev_priv(slave_dev);
- struct dsa_switch *ds = p->parent;
struct device_node *port_dn;
- port_dn = ds->ports[p->port].dn;
+ port_dn = p->dp->dn;
netif_carrier_off(slave_dev);
if (p->phy) {
@@ -1343,7 +1504,7 @@ static int dsa_slave_port_upper_event(struct net_device *dev,
if (info->linking)
err = dsa_slave_bridge_port_join(dev, upper);
else
- dsa_slave_bridge_port_leave(dev);
+ dsa_slave_bridge_port_leave(dev, upper);
}
break;
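The matchall offload added above resolves the mirred target to a local switch port and hands the driver a dsa_mall_mirror_tc_entry. A hedged sketch of the driver half, with the signatures inferred from the call sites above and the foo_hw_* hardware accessors purely hypothetical:

static int foo_port_mirror_add(struct dsa_switch *ds, int port,
			       struct dsa_mall_mirror_tc_entry *mirror,
			       bool ingress)
{
	/* copy this port's ingress or egress traffic to the capture
	 * port chosen by the mirred action */
	return foo_hw_set_mirror(ds->priv, port, mirror->to_local_port,
				 ingress);
}

static void foo_port_mirror_del(struct dsa_switch *ds, int port,
				struct dsa_mall_mirror_tc_entry *mirror)
{
	foo_hw_clear_mirror(ds->priv, port, mirror->ingress);
}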
diff --git a/net/dsa/tag_brcm.c b/net/dsa/tag_brcm.c
index 21bffde..5d925b6 100644
--- a/net/dsa/tag_brcm.c
+++ b/net/dsa/tag_brcm.c
@@ -80,9 +80,9 @@ static struct sk_buff *brcm_tag_xmit(struct sk_buff *skb, struct net_device *dev
((skb->priority << BRCM_IG_TC_SHIFT) & BRCM_IG_TC_MASK);
brcm_tag[1] = 0;
brcm_tag[2] = 0;
- if (p->port == 8)
+ if (p->dp->index == 8)
brcm_tag[2] = BRCM_IG_DSTMAP2_MASK;
- brcm_tag[3] = (1 << p->port) & BRCM_IG_DSTMAP1_MASK;
+ brcm_tag[3] = (1 << p->dp->index) & BRCM_IG_DSTMAP1_MASK;
return skb;
@@ -102,7 +102,7 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
if (unlikely(dst == NULL))
goto out_drop;
- ds = dst->ds[0];
+ ds = dst->cpu_switch;
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb == NULL)
@@ -121,13 +121,14 @@ static int brcm_tag_rcv(struct sk_buff *skb, struct net_device *dev,
/* We should never see a reserved reason code without knowing how to
* handle it
*/
- WARN_ON(brcm_tag[2] & BRCM_EG_RC_RSVD);
+ if (unlikely(brcm_tag[2] & BRCM_EG_RC_RSVD))
+ goto out_drop;
/* Locate which port this is coming from */
source_port = brcm_tag[3] & BRCM_EG_PID_MASK;
/* Validate port against switch setup, either the port is totally */
- if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+ if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
goto out_drop;
/* Remove Broadcom tag and update checksum */
diff --git a/net/dsa/tag_dsa.c b/net/dsa/tag_dsa.c
index bce79ffe..72579ce 100644
--- a/net/dsa/tag_dsa.c
+++ b/net/dsa/tag_dsa.c
@@ -33,8 +33,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
* Construct tagged FROM_CPU DSA tag from 802.1q tag.
*/
dsa_header = skb->data + 2 * ETH_ALEN;
- dsa_header[0] = 0x60 | p->parent->index;
- dsa_header[1] = p->port << 3;
+ dsa_header[0] = 0x60 | p->dp->ds->index;
+ dsa_header[1] = p->dp->index << 3;
/*
* Move CFI field from byte 2 to byte 1.
@@ -54,8 +54,8 @@ static struct sk_buff *dsa_xmit(struct sk_buff *skb, struct net_device *dev)
* Construct untagged FROM_CPU DSA tag.
*/
dsa_header = skb->data + 2 * ETH_ALEN;
- dsa_header[0] = 0x40 | p->parent->index;
- dsa_header[1] = p->port << 3;
+ dsa_header[0] = 0x40 | p->dp->ds->index;
+ dsa_header[1] = p->dp->index << 3;
dsa_header[2] = 0x00;
dsa_header[3] = 0x00;
}
@@ -114,7 +114,7 @@ static int dsa_rcv(struct sk_buff *skb, struct net_device *dev,
if (!ds)
goto out_drop;
- if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+ if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
goto out_drop;
/*
diff --git a/net/dsa/tag_edsa.c b/net/dsa/tag_edsa.c
index 6c1720e..648c051 100644
--- a/net/dsa/tag_edsa.c
+++ b/net/dsa/tag_edsa.c
@@ -42,8 +42,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
edsa_header[1] = ETH_P_EDSA & 0xff;
edsa_header[2] = 0x00;
edsa_header[3] = 0x00;
- edsa_header[4] = 0x60 | p->parent->index;
- edsa_header[5] = p->port << 3;
+ edsa_header[4] = 0x60 | p->dp->ds->index;
+ edsa_header[5] = p->dp->index << 3;
/*
* Move CFI field from byte 6 to byte 5.
@@ -67,8 +67,8 @@ static struct sk_buff *edsa_xmit(struct sk_buff *skb, struct net_device *dev)
edsa_header[1] = ETH_P_EDSA & 0xff;
edsa_header[2] = 0x00;
edsa_header[3] = 0x00;
- edsa_header[4] = 0x40 | p->parent->index;
- edsa_header[5] = p->port << 3;
+ edsa_header[4] = 0x40 | p->dp->ds->index;
+ edsa_header[5] = p->dp->index << 3;
edsa_header[6] = 0x00;
edsa_header[7] = 0x00;
}
@@ -127,7 +127,7 @@ static int edsa_rcv(struct sk_buff *skb, struct net_device *dev,
if (!ds)
goto out_drop;
- if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+ if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
goto out_drop;
/*
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index 0c90cac..30240f3 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -54,7 +54,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
/* Set the version field, and set destination port information */
hdr = QCA_HDR_VERSION << QCA_HDR_XMIT_VERSION_S |
QCA_HDR_XMIT_FROM_CPU |
- BIT(p->port);
+ BIT(p->dp->index);
*phdr = htons(hdr);
@@ -104,7 +104,7 @@ static int qca_tag_rcv(struct sk_buff *skb, struct net_device *dev,
/* This protocol doesn't support cascading multiple switches so it's
* safe to assume the switch is first in the tree
*/
- ds = dst->ds[0];
+ ds = dst->cpu_switch;
if (!ds)
goto out_drop;
diff --git a/net/dsa/tag_trailer.c b/net/dsa/tag_trailer.c
index 5e3903e..26f9771 100644
--- a/net/dsa/tag_trailer.c
+++ b/net/dsa/tag_trailer.c
@@ -50,7 +50,7 @@ static struct sk_buff *trailer_xmit(struct sk_buff *skb, struct net_device *dev)
trailer = skb_put(nskb, 4);
trailer[0] = 0x80;
- trailer[1] = 1 << p->port;
+ trailer[1] = 1 << p->dp->index;
trailer[2] = 0x10;
trailer[3] = 0x00;
@@ -67,7 +67,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
if (unlikely(dst == NULL))
goto out_drop;
- ds = dst->ds[0];
+ ds = dst->cpu_switch;
skb = skb_unshare(skb, GFP_ATOMIC);
if (skb == NULL)
@@ -82,7 +82,7 @@ static int trailer_rcv(struct sk_buff *skb, struct net_device *dev,
goto out_drop;
source_port = trailer[1] & 7;
- if (source_port >= DSA_MAX_PORTS || !ds->ports[source_port].netdev)
+ if (source_port >= ds->num_ports || !ds->ports[source_port].netdev)
goto out_drop;
pskb_trim_rcsum(skb, skb->len - 4);
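As a worked example of the trailer format with the dsa_port conversion: on transmit the CPU encodes the destination port as a bitmask, so a frame for port 3 gets the four trailer bytes 0x80, 0x08 (1 << 3), 0x10, 0x00 appended; on receive the switch reports the source port as a plain number in the low three bits of trailer[1], which source_port = trailer[1] & 7 recovers before the tag is trimmed off again.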
diff --git a/net/ethernet/eth.c b/net/ethernet/eth.c
index 8c5a479..efdaaab 100644
--- a/net/ethernet/eth.c
+++ b/net/ethernet/eth.c
@@ -392,6 +392,34 @@ struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
}
EXPORT_SYMBOL(alloc_etherdev_mqs);
+static void devm_free_netdev(struct device *dev, void *res)
+{
+ free_netdev(*(struct net_device **)res);
+}
+
+struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
+ unsigned int txqs, unsigned int rxqs)
+{
+ struct net_device **dr;
+ struct net_device *netdev;
+
+ dr = devres_alloc(devm_free_netdev, sizeof(*dr), GFP_KERNEL);
+ if (!dr)
+ return NULL;
+
+ netdev = alloc_etherdev_mqs(sizeof_priv, txqs, rxqs);
+ if (!netdev) {
+ devres_free(dr);
+ return NULL;
+ }
+
+ *dr = netdev;
+ devres_add(dev, dr);
+
+ return netdev;
+}
+EXPORT_SYMBOL(devm_alloc_etherdev_mqs);
+
ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len)
{
return scnprintf(buf, PAGE_SIZE, "%*phC\n", len, addr);
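A hedged usage sketch for the devres helper added above; foo_* names are hypothetical. The netdev is freed automatically when the device is unbound, so probe error paths and remove() drop their free_netdev() calls, while register_netdev()/unregister_netdev() remain the driver's responsibility:

#include <linux/etherdevice.h>
#include <linux/platform_device.h>

struct foo_priv { void __iomem *base; };	/* hypothetical */

static int foo_probe(struct platform_device *pdev)
{
	struct net_device *ndev;

	ndev = devm_alloc_etherdev_mqs(&pdev->dev, sizeof(struct foo_priv),
				       1, 1);	/* one TX and one RX queue */
	if (!ndev)
		return -ENOMEM;

	/* ... netdev_ops, MAC address, DMA setup ... */

	return register_netdev(ndev);
}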
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index aae410b..685ba53 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -479,7 +479,7 @@ int inet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
snum = ntohs(addr->sin_port);
err = -EACCES;
- if (snum && snum < PROT_SOCK &&
+ if (snum && snum < inet_prot_sock(net) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
goto out;
@@ -570,19 +570,30 @@ static long inet_wait_for_connect(struct sock *sk, long timeo, int writebias)
* TCP 'magic' in here.
*/
int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
- int addr_len, int flags)
+ int addr_len, int flags, int is_sendmsg)
{
struct sock *sk = sock->sk;
int err;
long timeo;
- if (addr_len < sizeof(uaddr->sa_family))
- return -EINVAL;
+ /*
+ * uaddr can be NULL and addr_len can be 0 if:
+ * sk is a TCP fastopen active socket and
+ * TCP_FASTOPEN_CONNECT sockopt is set and
+ * we already have a valid cookie for this socket.
+ * In this case, user can call write() after connect().
+ * write() will invoke tcp_sendmsg_fastopen() which calls
+ * __inet_stream_connect().
+ */
+ if (uaddr) {
+ if (addr_len < sizeof(uaddr->sa_family))
+ return -EINVAL;
- if (uaddr->sa_family == AF_UNSPEC) {
- err = sk->sk_prot->disconnect(sk, flags);
- sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
- goto out;
+ if (uaddr->sa_family == AF_UNSPEC) {
+ err = sk->sk_prot->disconnect(sk, flags);
+ sock->state = err ? SS_DISCONNECTING : SS_UNCONNECTED;
+ goto out;
+ }
}
switch (sock->state) {
@@ -593,7 +604,10 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
err = -EISCONN;
goto out;
case SS_CONNECTING:
- err = -EALREADY;
+ if (inet_sk(sk)->defer_connect)
+ err = is_sendmsg ? -EINPROGRESS : -EISCONN;
+ else
+ err = -EALREADY;
/* Fall out of switch with err, set for this state */
break;
case SS_UNCONNECTED:
@@ -607,6 +621,9 @@ int __inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
sock->state = SS_CONNECTING;
+ if (!err && inet_sk(sk)->defer_connect)
+ goto out;
+
/* Just entered SS_CONNECTING state; the only
* difference is that return value in non-blocking
* case is EINPROGRESS, rather than EALREADY.
@@ -662,7 +679,7 @@ int inet_stream_connect(struct socket *sock, struct sockaddr *uaddr,
int err;
lock_sock(sock->sk);
- err = __inet_stream_connect(sock, uaddr, addr_len, flags);
+ err = __inet_stream_connect(sock, uaddr, addr_len, flags, 0);
release_sock(sock->sk);
return err;
}
@@ -1700,6 +1717,9 @@ static __net_init int inet_init_net(struct net *net)
net->ipv4.sysctl_ip_default_ttl = IPDEFTTL;
net->ipv4.sysctl_ip_dynaddr = 0;
net->ipv4.sysctl_ip_early_demux = 1;
+#ifdef CONFIG_SYSCTL
+ net->ipv4.sysctl_ip_prot_sock = PROT_SOCK;
+#endif
return 0;
}
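The defer_connect handling above implements the TCP_FASTOPEN_CONNECT socket option named in the comment: connect() returns immediately and the actual SYN, carrying data once a fast open cookie is cached for the peer, is sent by the first write(). A hedged userspace sketch, error handling elided:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

static ssize_t fastopen_send(int fd, const struct sockaddr_in *daddr,
			     const void *buf, size_t len)
{
	int one = 1;

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	connect(fd, (const struct sockaddr *)daddr, sizeof(*daddr));
	return write(fd, buf, len);	/* payload may ride in the SYN */
}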
diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
index f2a7102..22377c8 100644
--- a/net/ipv4/ah4.c
+++ b/net/ipv4/ah4.c
@@ -270,6 +270,9 @@ static void ah_input_done(struct crypto_async_request *base, int err)
int ihl = ip_hdrlen(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, ihl);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 20fb25e..b1e2444 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -18,6 +18,8 @@
#include <net/protocol.h>
#include <net/udp.h>
+#include <linux/highmem.h>
+
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
void *tmp;
@@ -92,11 +94,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+{
+ struct esp_output_extra *extra = esp_tmp_extra(tmp);
+ struct crypto_aead *aead = x->data;
+ int extralen = 0;
+ u8 *iv;
+ struct aead_request *req;
+ struct scatterlist *sg;
+
+ if (x->props.flags & XFRM_STATE_ESN)
+ extralen += sizeof(*extra);
+
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
+ req = esp_tmp_req(aead, iv);
+
+ /* Unref skb_frag_pages in the src scatterlist if necessary.
+ * Skip the first sg which comes from skb->data.
+ */
+ if (req->src != req->dst)
+ for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+ put_page(sg_page(sg));
+}
+
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ void *tmp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
- kfree(ESP_SKB_CB(skb)->tmp);
+ tmp = ESP_SKB_CB(skb)->tmp;
+ esp_ssg_unref(x, tmp);
+ kfree(tmp);
xfrm_output_resume(skb, err);
}
@@ -120,6 +151,29 @@ static void esp_output_restore_header(struct sk_buff *skb)
sizeof(__be32));
}
+static struct ip_esp_hdr *esp_output_set_extra(struct sk_buff *skb,
+ struct ip_esp_hdr *esph,
+ struct esp_output_extra *extra)
+{
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+ /* For ESN we move the header forward by 4 bytes to
+ * accommodate the high bits. We will move it back after
+ * encryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ extra->esphoff = (unsigned char *)esph -
+ skb_transport_header(skb);
+ esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
+ extra->seqhi = esph->spi;
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ }
+
+ esph->spi = x->id.spi;
+
+ return esph;
+}
+
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -128,18 +182,36 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
esp_output_done(base, err);
}
+static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+ /* Fill padding... */
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
+ do {
+ int i;
+ for (i = 0; i < plen - 2; i++)
+ tail[i] = i + 1;
+ } while (0);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = proto;
+}
+
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
- int err;
struct esp_output_extra *extra;
+ int err = -ENOMEM;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
- struct scatterlist *sg;
+ struct scatterlist *sg, *dsg;
struct sk_buff *trailer;
+ struct page *page;
void *tmp;
u8 *iv;
u8 *tail;
+ u8 *vaddr;
int blksize;
int clen;
int alen;
@@ -149,7 +221,9 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
int nfrags;
int assoclen;
int extralen;
+ int tailen;
__be64 seqno;
+ __u8 proto = *skb_mac_header(skb);
/* skb is pure payload to encrypt */
@@ -169,12 +243,7 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(skb->len + 2 + tfclen, blksize);
plen = clen - skb->len - tfclen;
-
- err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
- if (err < 0)
- goto error;
- nfrags = err;
-
+ tailen = tfclen + plen + alen;
assoclen = sizeof(*esph);
extralen = 0;
@@ -183,35 +252,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
assoclen += sizeof(__be32);
}
- tmp = esp_alloc_tmp(aead, nfrags, extralen);
- if (!tmp) {
- err = -ENOMEM;
- goto error;
- }
-
- extra = esp_tmp_extra(tmp);
- iv = esp_tmp_iv(aead, tmp, extralen);
- req = esp_tmp_req(aead, iv);
- sg = esp_req_sg(aead, req);
-
- /* Fill padding... */
- tail = skb_tail_pointer(trailer);
- if (tfclen) {
- memset(tail, 0, tfclen);
- tail += tfclen;
- }
- do {
- int i;
- for (i = 0; i < plen - 2; i++)
- tail[i] = i + 1;
- } while (0);
- tail[plen - 2] = plen - 2;
- tail[plen - 1] = *skb_mac_header(skb);
- pskb_put(skb, trailer, clen - skb->len + alen);
-
- skb_push(skb, -skb_network_offset(skb));
- esph = ip_esp_hdr(skb);
*skb_mac_header(skb) = IPPROTO_ESP;
+ esph = ip_esp_hdr(skb);
/* this is non-NULL only with UDP Encapsulation */
if (x->encap) {
@@ -230,7 +272,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
uh = (struct udphdr *)esph;
uh->source = sport;
uh->dest = dport;
- uh->len = htons(skb->len - skb_transport_offset(skb));
+ uh->len = htons(skb->len + tailen
+ - skb_transport_offset(skb));
uh->check = 0;
switch (encap_type) {
@@ -248,31 +291,148 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
*skb_mac_header(skb) = IPPROTO_UDP;
}
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ if (!skb_cloned(skb)) {
+ if (tailen <= skb_availroom(skb)) {
+ nfrags = 1;
+ trailer = skb;
+ tail = skb_tail_pointer(trailer);
- aead_request_set_callback(req, 0, esp_output_done, skb);
+ goto skip_cow;
+ } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
+ && !skb_has_frag_list(skb)) {
+ int allocsize;
+ struct sock *sk = skb->sk;
+ struct page_frag *pfrag = &x->xfrag;
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * encryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- extra->esphoff = (unsigned char *)esph -
- skb_transport_header(skb);
- esph = (struct ip_esp_hdr *)((unsigned char *)esph - 4);
- extra->seqhi = esph->spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
- aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ allocsize = ALIGN(tailen, L1_CACHE_BYTES);
+
+ spin_lock_bh(&x->lock);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ goto cow;
+ }
+
+ page = pfrag->page;
+ get_page(page);
+
+ vaddr = kmap_atomic(page);
+
+ tail = vaddr + pfrag->offset;
+
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ kunmap_atomic(vaddr);
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+ tailen);
+ skb_shinfo(skb)->nr_frags = ++nfrags;
+
+ pfrag->offset = pfrag->offset + allocsize;
+ nfrags++;
+
+ skb->len += tailen;
+ skb->data_len += tailen;
+ skb->truesize += tailen;
+ if (sk)
+ atomic_add(tailen, &sk->sk_wmem_alloc);
+
+ skb_push(skb, -skb_network_offset(skb));
+
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ esph->spi = x->id.spi;
+
+ tmp = esp_alloc_tmp(aead, nfrags + 2, extralen);
+ if (!tmp) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = &sg[nfrags];
+
+ esph = esp_output_set_extra(skb, esph, extra);
+
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ skb_shinfo(skb)->nr_frags = 1;
+
+ page = pfrag->page;
+ get_page(page);
+ /* replace page frags in skb with new page */
+ __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+ pfrag->offset = pfrag->offset + allocsize;
+
+ sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+ skb_to_sgvec(skb, dsg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ spin_unlock_bh(&x->lock);
+
+ goto skip_cow2;
+ }
}
+cow:
+ err = skb_cow_data(skb, tailen, &trailer);
+ if (err < 0)
+ goto error;
+ nfrags = err;
+ tail = skb_tail_pointer(trailer);
+ esph = ip_esp_hdr(skb);
+
+skip_cow:
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ pskb_put(skb, trailer, clen - skb->len + alen);
+ skb_push(skb, -skb_network_offset(skb));
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
esph->spi = x->id.spi;
+ tmp = esp_alloc_tmp(aead, nfrags, extralen);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ extra = esp_tmp_extra(tmp);
+ iv = esp_tmp_iv(aead, tmp, extralen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = sg;
+
+ esph = esp_output_set_extra(skb, esph, extra);
+
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
(unsigned char *)esph - skb->data,
assoclen + ivlen + clen + alen);
- aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
+skip_cow2:
+ if ((x->props.flags & XFRM_STATE_ESN))
+ aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ else
+ aead_request_set_callback(req, 0, esp_output_done, skb);
+
+ aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
@@ -298,6 +458,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
esp_output_restore_header(skb);
}
+ if (sg != dsg)
+ esp_ssg_unref(x, tmp);
kfree(tmp);
error:
@@ -401,6 +563,23 @@ static void esp_input_restore_header(struct sk_buff *skb)
__skb_pull(skb, 4);
}
+static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+
+ /* For ESN we move the header forward by 4 bytes to
+ * accommodate the high bits. We will move it back after
+ * decryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ esph = (void *)skb_push(skb, 4);
+ *seqhi = esph->spi;
+ esph->spi = esph->seq_no;
+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ }
+}
+
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -437,12 +616,6 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
if (elen <= 0)
goto out;
- err = skb_cow_data(skb, 0, &trailer);
- if (err < 0)
- goto out;
-
- nfrags = err;
-
assoclen = sizeof(*esph);
seqhilen = 0;
@@ -451,6 +624,26 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
assoclen += seqhilen;
}
+ if (!skb_cloned(skb)) {
+ if (!skb_is_nonlinear(skb)) {
+ nfrags = 1;
+
+ goto skip_cow;
+ } else if (!skb_has_frag_list(skb)) {
+ nfrags = skb_shinfo(skb)->nr_frags;
+ nfrags++;
+
+ goto skip_cow;
+ }
+ }
+
+ err = skb_cow_data(skb, 0, &trailer);
+ if (err < 0)
+ goto out;
+
+ nfrags = err;
+
+skip_cow:
err = -ENOMEM;
tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
if (!tmp)
@@ -462,26 +655,17 @@ static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
- skb->ip_summed = CHECKSUM_NONE;
+ esp_input_set_header(skb, seqhi);
- esph = (struct ip_esp_hdr *)skb->data;
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
- aead_request_set_callback(req, 0, esp_input_done, skb);
+ skb->ip_summed = CHECKSUM_NONE;
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * decryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)skb_push(skb, 4);
- *seqhi = esph->spi;
- esph->spi = esph->seq_no;
- esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ if ((x->props.flags & XFRM_STATE_ESN))
aead_request_set_callback(req, 0, esp_input_done_esn, skb);
- }
-
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ else
+ aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
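For reference, the trailer that esp_output_fill_trailer() writes in both the in-place and cow paths above follows the RFC 4303 layout: optional TFC padding of zeros, self-describing pad bytes 1, 2, ..., plen - 2, then the pad-length byte and the next-header byte. With tfclen = 0, plen = 4 and a TCP payload, for example, the last four bytes are 01 02 02 06.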
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index eae0332..7db2ad2 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -46,6 +46,7 @@
#include <net/rtnetlink.h>
#include <net/xfrm.h>
#include <net/l3mdev.h>
+#include <net/lwtunnel.h>
#include <trace/events/fib.h>
#ifndef CONFIG_IP_MULTIPLE_TABLES
@@ -677,6 +678,10 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
cfg->fc_mx_len = nla_len(attr);
break;
case RTA_MULTIPATH:
+ err = lwtunnel_valid_encap_type_attr(nla_data(attr),
+ nla_len(attr));
+ if (err < 0)
+ goto errout;
cfg->fc_mp = nla_data(attr);
cfg->fc_mp_len = nla_len(attr);
break;
@@ -691,6 +696,9 @@ static int rtm_to_fib_config(struct net *net, struct sk_buff *skb,
break;
case RTA_ENCAP_TYPE:
cfg->fc_encap_type = nla_get_u16(attr);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ if (err < 0)
+ goto errout;
break;
}
}
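Both hunks reject a route whose lightweight-tunnel encap type has no registered ops before any nexthop state is built. For RTA_MULTIPATH this means walking the nested rtnexthop blocks; a simplified sketch of what lwtunnel_valid_encap_type_attr() has to do (the helper's name comes from the hunk above, the body here is illustrative):

static int validate_mp_encap(struct rtnexthop *rtnh, int remaining)
{
	while (rtnh_ok(rtnh, remaining)) {
		int attrlen = rtnh_attrlen(rtnh);

		if (attrlen > 0) {
			struct nlattr *attrs = rtnh_attrs(rtnh);
			struct nlattr *nla = nla_find(attrs, attrlen,
						      RTA_ENCAP_TYPE);

			if (nla &&
			    lwtunnel_valid_encap_type(nla_get_u16(nla)) < 0)
				return -EOPNOTSUPP;
		}
		rtnh = rtnh_next(rtnh, &remaining);
	}
	return 0;
}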
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index 05c911d..6306a67 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -471,7 +471,6 @@ static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
int remaining, struct fib_config *cfg)
{
- struct net *net = cfg->fc_nlinfo.nl_net;
int ret;
change_nexthops(fi) {
@@ -503,16 +502,14 @@ static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
nla = nla_find(attrs, attrlen, RTA_ENCAP);
if (nla) {
struct lwtunnel_state *lwtstate;
- struct net_device *dev = NULL;
struct nlattr *nla_entype;
nla_entype = nla_find(attrs, attrlen,
RTA_ENCAP_TYPE);
if (!nla_entype)
goto err_inval;
- if (cfg->fc_oif)
- dev = __dev_get_by_index(net, cfg->fc_oif);
- ret = lwtunnel_build_state(dev, nla_get_u16(
+
+ ret = lwtunnel_build_state(nla_get_u16(
nla_entype),
nla, AF_INET, cfg,
&lwtstate);
@@ -597,21 +594,18 @@ static inline void fib_add_weight(struct fib_info *fi,
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
-static int fib_encap_match(struct net *net, u16 encap_type,
+static int fib_encap_match(u16 encap_type,
struct nlattr *encap,
- int oif, const struct fib_nh *nh,
+ const struct fib_nh *nh,
const struct fib_config *cfg)
{
struct lwtunnel_state *lwtstate;
- struct net_device *dev = NULL;
int ret, result = 0;
if (encap_type == LWTUNNEL_ENCAP_NONE)
return 0;
- if (oif)
- dev = __dev_get_by_index(net, oif);
- ret = lwtunnel_build_state(dev, encap_type, encap,
+ ret = lwtunnel_build_state(encap_type, encap,
AF_INET, cfg, &lwtstate);
if (!ret) {
result = lwtunnel_cmp_encap(lwtstate, nh->nh_lwtstate);
@@ -623,7 +617,6 @@ static int fib_encap_match(struct net *net, u16 encap_type,
int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
{
- struct net *net = cfg->fc_nlinfo.nl_net;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
struct rtnexthop *rtnh;
int remaining;
@@ -634,9 +627,8 @@ int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
if (cfg->fc_oif || cfg->fc_gw) {
if (cfg->fc_encap) {
- if (fib_encap_match(net, cfg->fc_encap_type,
- cfg->fc_encap, cfg->fc_oif,
- fi->fib_nh, cfg))
+ if (fib_encap_match(cfg->fc_encap_type,
+ cfg->fc_encap, fi->fib_nh, cfg))
return 1;
}
if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
@@ -1093,13 +1085,10 @@ struct fib_info *fib_create_info(struct fib_config *cfg)
if (cfg->fc_encap) {
struct lwtunnel_state *lwtstate;
- struct net_device *dev = NULL;
if (cfg->fc_encap_type == LWTUNNEL_ENCAP_NONE)
goto err_inval;
- if (cfg->fc_oif)
- dev = __dev_get_by_index(net, cfg->fc_oif);
- err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET, cfg,
&lwtstate);
if (err)
@@ -1279,8 +1268,9 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
goto nla_put_failure;
#endif
- if (fi->fib_nh->nh_lwtstate)
- lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate);
+ if (fi->fib_nh->nh_lwtstate &&
+ lwtunnel_fill_encap(skb, fi->fib_nh->nh_lwtstate) < 0)
+ goto nla_put_failure;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
if (fi->fib_nhs > 1) {
@@ -1316,8 +1306,10 @@ int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
goto nla_put_failure;
#endif
- if (nh->nh_lwtstate)
- lwtunnel_fill_encap(skb, nh->nh_lwtstate);
+ if (nh->nh_lwtstate &&
+ lwtunnel_fill_encap(skb, nh->nh_lwtstate) < 0)
+ goto nla_put_failure;
+
/* length of rtnetlink header + attributes */
rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
} endfor_nexthops(fi);
@@ -1618,8 +1610,13 @@ void fib_select_multipath(struct fib_result *res, int hash)
void fib_select_path(struct net *net, struct fib_result *res,
struct flowi4 *fl4, int mp_hash)
{
+ bool oif_check;
+
+ oif_check = (fl4->flowi4_oif == 0 ||
+ fl4->flowi4_flags & FLOWI_FLAG_SKIP_NH_OIF);
+
#ifdef CONFIG_IP_ROUTE_MULTIPATH
- if (res->fi->fib_nhs > 1 && fl4->flowi4_oif == 0) {
+ if (res->fi->fib_nhs > 1 && oif_check) {
if (mp_hash < 0)
mp_hash = get_hash_from_flowi4(fl4) >> 1;
@@ -1629,7 +1626,7 @@ void fib_select_path(struct net *net, struct fib_result *res,
#endif
if (!res->prefixlen &&
res->table->tb_num_default > 1 &&
- res->type == RTN_UNICAST && !fl4->flowi4_oif)
+ res->type == RTN_UNICAST && oif_check)
fib_select_default(fl4, res);
if (!fl4->saddr)
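Two threads run through this file: lwtunnel_build_state() drops its net_device argument (as the hunks show, none of the callers actually needed the device resolved from fc_oif), and fib_select_path() now treats an oif pinned with FLOWI_FLAG_SKIP_NH_OIF, as VRF/l3mdev lookups do, the same as no oif at all. A hypothetical caller, for illustration only:

	/* output lookup on behalf of an l3mdev: the pinned master oif no
	 * longer bypasses multipath hashing or default-route selection
	 */
	fl4->flowi4_oif = master_ifindex;
	fl4->flowi4_flags |= FLOWI_FLAG_SKIP_NH_OIF;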
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 19ea045..b4d5980 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -31,6 +31,86 @@ const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif
+#if IS_ENABLED(CONFIG_IPV6)
+/* match_wildcard == true: IPV6_ADDR_ANY matches any IPv6 address if the
+ * socket is IPv6-only, and any IPv4 address as well if it is not
+ * match_wildcard == false: addresses must be exactly the same, i.e.
+ * IPV6_ADDR_ANY only matches IPV6_ADDR_ANY,
+ * and 0.0.0.0 only matches 0.0.0.0
+ */
+static int ipv6_rcv_saddr_equal(const struct in6_addr *sk1_rcv_saddr6,
+ const struct in6_addr *sk2_rcv_saddr6,
+ __be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
+ bool sk1_ipv6only, bool sk2_ipv6only,
+ bool match_wildcard)
+{
+ int addr_type = ipv6_addr_type(sk1_rcv_saddr6);
+ int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
+
+ /* if both are mapped, treat as IPv4 */
+ if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
+ if (!sk2_ipv6only) {
+ if (sk1_rcv_saddr == sk2_rcv_saddr)
+ return 1;
+ if (!sk1_rcv_saddr || !sk2_rcv_saddr)
+ return match_wildcard;
+ }
+ return 0;
+ }
+
+ if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
+ return 1;
+
+ if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
+ !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
+ return 1;
+
+ if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
+ !(sk1_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+ return 1;
+
+ if (sk2_rcv_saddr6 &&
+ ipv6_addr_equal(sk1_rcv_saddr6, sk2_rcv_saddr6))
+ return 1;
+
+ return 0;
+}
+#endif
+
+/* match_wildcard == true: 0.0.0.0 matches any IPv4 address
+ * match_wildcard == false: addresses must be exactly the same, i.e.
+ * 0.0.0.0 only matches 0.0.0.0
+ */
+static int ipv4_rcv_saddr_equal(__be32 sk1_rcv_saddr, __be32 sk2_rcv_saddr,
+ bool sk2_ipv6only, bool match_wildcard)
+{
+ if (!sk2_ipv6only) {
+ if (sk1_rcv_saddr == sk2_rcv_saddr)
+ return 1;
+ if (!sk1_rcv_saddr || !sk2_rcv_saddr)
+ return match_wildcard;
+ }
+ return 0;
+}
+
+int inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
+ bool match_wildcard)
+{
+#if IS_ENABLED(CONFIG_IPV6)
+ if (sk->sk_family == AF_INET6)
+ return ipv6_rcv_saddr_equal(&sk->sk_v6_rcv_saddr,
+ inet6_rcv_saddr(sk2),
+ sk->sk_rcv_saddr,
+ sk2->sk_rcv_saddr,
+ ipv6_only_sock(sk),
+ ipv6_only_sock(sk2),
+ match_wildcard);
+#endif
+ return ipv4_rcv_saddr_equal(sk->sk_rcv_saddr, sk2->sk_rcv_saddr,
+ ipv6_only_sock(sk2), match_wildcard);
+}
+EXPORT_SYMBOL(inet_rcv_saddr_equal);
+
void inet_get_local_port_range(struct net *net, int *low, int *high)
{
unsigned int seq;
@@ -44,9 +124,9 @@ void inet_get_local_port_range(struct net *net, int *low, int *high)
}
EXPORT_SYMBOL(inet_get_local_port_range);
-int inet_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax,
- bool reuseport_ok)
+static int inet_csk_bind_conflict(const struct sock *sk,
+ const struct inet_bind_bucket *tb,
+ bool relax, bool reuseport_ok)
{
struct sock *sk2;
bool reuse = sk->sk_reuse;
@@ -62,7 +142,6 @@ int inet_csk_bind_conflict(const struct sock *sk,
sk_for_each_bound(sk2, &tb->owners) {
if (sk != sk2 &&
- !inet_v6_ipv6only(sk2) &&
(!sk->sk_bound_dev_if ||
!sk2->sk_bound_dev_if ||
sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
@@ -72,54 +151,34 @@ int inet_csk_bind_conflict(const struct sock *sk,
rcu_access_pointer(sk->sk_reuseport_cb) ||
(sk2->sk_state != TCP_TIME_WAIT &&
!uid_eq(uid, sock_i_uid(sk2))))) {
-
- if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
- sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+ if (inet_rcv_saddr_equal(sk, sk2, true))
break;
}
if (!relax && reuse && sk2->sk_reuse &&
sk2->sk_state != TCP_LISTEN) {
-
- if (!sk2->sk_rcv_saddr || !sk->sk_rcv_saddr ||
- sk2->sk_rcv_saddr == sk->sk_rcv_saddr)
+ if (inet_rcv_saddr_equal(sk, sk2, true))
break;
}
}
}
return sk2 != NULL;
}
-EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);
-/* Obtain a reference to a local port for the given sock,
- * if snum is zero it means select any available local port.
- * We try to allocate an odd port (and leave even ports for connect())
+/*
+ * Find an open port number for the socket. Returns with the
+ * inet_bind_hashbucket lock held.
*/
-int inet_csk_get_port(struct sock *sk, unsigned short snum)
+static struct inet_bind_hashbucket *
+inet_csk_find_open_port(struct sock *sk, struct inet_bind_bucket **tb_ret, int *port_ret)
{
- bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
- int ret = 1, attempts = 5, port = snum;
- int smallest_size = -1, smallest_port;
+ int port = 0;
struct inet_bind_hashbucket *head;
struct net *net = sock_net(sk);
int i, low, high, attempt_half;
struct inet_bind_bucket *tb;
- kuid_t uid = sock_i_uid(sk);
u32 remaining, offset;
- bool reuseport_ok = !!snum;
- if (port) {
-have_port:
- head = &hinfo->bhash[inet_bhashfn(net, port,
- hinfo->bhash_size)];
- spin_lock_bh(&head->lock);
- inet_bind_bucket_for_each(tb, &head->chain)
- if (net_eq(ib_net(tb), net) && tb->port == port)
- goto tb_found;
-
- goto tb_not_found;
- }
-again:
attempt_half = (sk->sk_reuse == SK_CAN_REUSE) ? 1 : 0;
other_half_scan:
inet_get_local_port_range(net, &low, &high);
@@ -143,8 +202,6 @@ other_half_scan:
* We do the opposite to not pollute connect() users.
*/
offset |= 1U;
- smallest_size = -1;
- smallest_port = low; /* avoid compiler warning */
other_parity_scan:
port = low + offset;
@@ -158,30 +215,17 @@ other_parity_scan:
spin_lock_bh(&head->lock);
inet_bind_bucket_for_each(tb, &head->chain)
if (net_eq(ib_net(tb), net) && tb->port == port) {
- if (((tb->fastreuse > 0 && reuse) ||
- (tb->fastreuseport > 0 &&
- sk->sk_reuseport &&
- !rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(tb->fastuid, uid))) &&
- (tb->num_owners < smallest_size || smallest_size == -1)) {
- smallest_size = tb->num_owners;
- smallest_port = port;
- }
- if (!inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, false,
- reuseport_ok))
- goto tb_found;
+ if (!inet_csk_bind_conflict(sk, tb, false, false))
+ goto success;
goto next_port;
}
- goto tb_not_found;
+ tb = NULL;
+ goto success;
next_port:
spin_unlock_bh(&head->lock);
cond_resched();
}
- if (smallest_size != -1) {
- port = smallest_port;
- goto have_port;
- }
offset--;
if (!(offset & 1))
goto other_parity_scan;
@@ -191,8 +235,74 @@ next_port:
attempt_half = 2;
goto other_half_scan;
}
- return ret;
+ return NULL;
+success:
+ *port_ret = port;
+ *tb_ret = tb;
+ return head;
+}
+static inline int sk_reuseport_match(struct inet_bind_bucket *tb,
+ struct sock *sk)
+{
+ kuid_t uid = sock_i_uid(sk);
+
+ if (tb->fastreuseport <= 0)
+ return 0;
+ if (!sk->sk_reuseport)
+ return 0;
+ if (rcu_access_pointer(sk->sk_reuseport_cb))
+ return 0;
+ if (!uid_eq(tb->fastuid, uid))
+ return 0;
+ /* We only need to check the rcv_saddr if this tb was once marked
+ * without fastreuseport and then was reset, as we can only know that
+ * the fast_*rcv_saddr doesn't have any conflicts with the socks on the
+ * owners list.
+ */
+ if (tb->fastreuseport == FASTREUSEPORT_ANY)
+ return 1;
+#if IS_ENABLED(CONFIG_IPV6)
+ if (tb->fast_sk_family == AF_INET6)
+ return ipv6_rcv_saddr_equal(&tb->fast_v6_rcv_saddr,
+ &sk->sk_v6_rcv_saddr,
+ tb->fast_rcv_saddr,
+ sk->sk_rcv_saddr,
+ tb->fast_ipv6_only,
+ ipv6_only_sock(sk), true);
+#endif
+ return ipv4_rcv_saddr_equal(tb->fast_rcv_saddr, sk->sk_rcv_saddr,
+ ipv6_only_sock(sk), true);
+}
+
+/* Obtain a reference to a local port for the given sock,
+ * if snum is zero it means select any available local port.
+ * We try to allocate an odd port (and leave even ports for connect())
+ */
+int inet_csk_get_port(struct sock *sk, unsigned short snum)
+{
+ bool reuse = sk->sk_reuse && sk->sk_state != TCP_LISTEN;
+ struct inet_hashinfo *hinfo = sk->sk_prot->h.hashinfo;
+ int ret = 1, port = snum;
+ struct inet_bind_hashbucket *head;
+ struct net *net = sock_net(sk);
+ struct inet_bind_bucket *tb = NULL;
+ kuid_t uid = sock_i_uid(sk);
+
+ if (!port) {
+ head = inet_csk_find_open_port(sk, &tb, &port);
+ if (!head)
+ return ret;
+ if (!tb)
+ goto tb_not_found;
+ goto success;
+ }
+ head = &hinfo->bhash[inet_bhashfn(net, port,
+ hinfo->bhash_size)];
+ spin_lock_bh(&head->lock);
+ inet_bind_bucket_for_each(tb, &head->chain)
+ if (net_eq(ib_net(tb), net) && tb->port == port)
+ goto tb_found;
tb_not_found:
tb = inet_bind_bucket_create(hinfo->bind_bucket_cachep,
net, head, port);
@@ -203,39 +313,54 @@ tb_found:
if (sk->sk_reuse == SK_FORCE_REUSE)
goto success;
- if (((tb->fastreuse > 0 && reuse) ||
- (tb->fastreuseport > 0 &&
- !rcu_access_pointer(sk->sk_reuseport_cb) &&
- sk->sk_reuseport && uid_eq(tb->fastuid, uid))) &&
- smallest_size == -1)
+ if ((tb->fastreuse > 0 && reuse) ||
+ sk_reuseport_match(tb, sk))
goto success;
- if (inet_csk(sk)->icsk_af_ops->bind_conflict(sk, tb, true,
- reuseport_ok)) {
- if ((reuse ||
- (tb->fastreuseport > 0 &&
- sk->sk_reuseport &&
- !rcu_access_pointer(sk->sk_reuseport_cb) &&
- uid_eq(tb->fastuid, uid))) &&
- !snum && smallest_size != -1 && --attempts >= 0) {
- spin_unlock_bh(&head->lock);
- goto again;
- }
+ if (inet_csk_bind_conflict(sk, tb, true, true))
goto fail_unlock;
+ }
+success:
+ if (!hlist_empty(&tb->owners)) {
+ tb->fastreuse = reuse;
+ if (sk->sk_reuseport) {
+ tb->fastreuseport = FASTREUSEPORT_ANY;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ } else {
+ tb->fastreuseport = 0;
}
+ } else {
if (!reuse)
tb->fastreuse = 0;
- if (!sk->sk_reuseport || !uid_eq(tb->fastuid, uid))
- tb->fastreuseport = 0;
- } else {
- tb->fastreuse = reuse;
if (sk->sk_reuseport) {
- tb->fastreuseport = 1;
- tb->fastuid = uid;
+ /* We didn't match or we don't have fastreuseport set on
+ * the tb, but we have sk_reuseport set on this socket
+ * and we know that there are no bind conflicts with
+ * this socket in this tb, so reset our tb's reuseport
+ * settings so that any subsequent sockets that match
+ * our current socket will be put on the fast path.
+ *
+ * If we reset we need to set FASTREUSEPORT_STRICT so we
+ * do extra checking for all subsequent sk_reuseport
+ * socks.
+ */
+ if (!sk_reuseport_match(tb, sk)) {
+ tb->fastreuseport = FASTREUSEPORT_STRICT;
+ tb->fastuid = uid;
+ tb->fast_rcv_saddr = sk->sk_rcv_saddr;
+ tb->fast_ipv6_only = ipv6_only_sock(sk);
+#if IS_ENABLED(CONFIG_IPV6)
+ tb->fast_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
+#endif
+ }
} else {
tb->fastreuseport = 0;
}
}
-success:
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, port);
WARN_ON(inet_csk(sk)->icsk_bind_hash != tb);
@@ -711,9 +836,8 @@ void inet_csk_destroy_sock(struct sock *sk)
sk_refcnt_debug_release(sk);
- local_bh_disable();
percpu_counter_dec(sk->sk_prot->orphan_count);
- local_bh_enable();
+
sock_put(sk);
}
EXPORT_SYMBOL(inet_csk_destroy_sock);
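With the bucket now caching FASTREUSEPORT_ANY/FASTREUSEPORT_STRICT state, a group of binds like the following should seed the cache on the first socket and let the rest skip the owner-list walk (same uid, same wildcard address). Minimal userspace sketch, error handling elided:

#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdint.h>
#include <sys/socket.h>

static int bind_reuseport(uint16_t port)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sa = {
		.sin_family      = AF_INET,
		.sin_port        = htons(port),
		.sin_addr.s_addr = htonl(INADDR_ANY),
	};

	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
	bind(fd, (struct sockaddr *)&sa, sizeof(sa));
	return fd;
}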
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 4dea33e..3828b3a 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -215,7 +215,7 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
}
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
r->idiag_timer = 1;
r->idiag_retrans = icsk->icsk_retransmits;
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index ca97835..8bea742 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -73,7 +73,6 @@ struct inet_bind_bucket *inet_bind_bucket_create(struct kmem_cache *cachep,
tb->port = snum;
tb->fastreuse = 0;
tb->fastreuseport = 0;
- tb->num_owners = 0;
INIT_HLIST_HEAD(&tb->owners);
hlist_add_head(&tb->node, &head->chain);
}
@@ -96,7 +95,6 @@ void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
{
inet_sk(sk)->inet_num = snum;
sk_add_bind_node(sk, &tb->owners);
- tb->num_owners++;
inet_csk(sk)->icsk_bind_hash = tb;
}
@@ -114,7 +112,6 @@ static void __inet_put_port(struct sock *sk)
spin_lock(&head->lock);
tb = inet_csk(sk)->icsk_bind_hash;
__sk_del_bind_node(sk);
- tb->num_owners--;
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->inet_num = 0;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
@@ -435,10 +432,7 @@ bool inet_ehash_nolisten(struct sock *sk, struct sock *osk)
EXPORT_SYMBOL_GPL(inet_ehash_nolisten);
static int inet_reuseport_add_sock(struct sock *sk,
- struct inet_listen_hashbucket *ilb,
- int (*saddr_same)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard))
+ struct inet_listen_hashbucket *ilb)
{
struct inet_bind_bucket *tb = inet_csk(sk)->icsk_bind_hash;
struct sock *sk2;
@@ -451,7 +445,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
sk2->sk_bound_dev_if == sk->sk_bound_dev_if &&
inet_csk(sk2)->icsk_bind_hash == tb &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
- saddr_same(sk, sk2, false))
+ inet_rcv_saddr_equal(sk, sk2, false))
return reuseport_add_sock(sk, sk2);
}
@@ -461,10 +455,7 @@ static int inet_reuseport_add_sock(struct sock *sk,
return 0;
}
-int __inet_hash(struct sock *sk, struct sock *osk,
- int (*saddr_same)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard))
+int __inet_hash(struct sock *sk, struct sock *osk)
{
struct inet_hashinfo *hashinfo = sk->sk_prot->h.hashinfo;
struct inet_listen_hashbucket *ilb;
@@ -479,7 +470,7 @@ int __inet_hash(struct sock *sk, struct sock *osk,
spin_lock(&ilb->lock);
if (sk->sk_reuseport) {
- err = inet_reuseport_add_sock(sk, ilb, saddr_same);
+ err = inet_reuseport_add_sock(sk, ilb);
if (err)
goto unlock;
}
@@ -503,7 +494,7 @@ int inet_hash(struct sock *sk)
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- err = __inet_hash(sk, NULL, ipv4_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL);
local_bh_enable();
}
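The match_wildcard flag distinguishes the two callers that now share one helper: bind-conflict checks pass true, so 0.0.0.0/:: collides with everything, while reuseport grouping passes false, so sockets only join a group on an exact address match:

	inet_rcv_saddr_equal(sk, sk2, true);	/* bind conflict check */
	inet_rcv_saddr_equal(sk, sk2, false);	/* reuseport grouping */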
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index fac275c4..b67719f 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1629,6 +1629,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
sk->sk_protocol = ip_hdr(skb)->protocol;
sk->sk_bound_dev_if = arg->bound_dev_if;
sk->sk_sndbuf = sysctl_wmem_default;
+ sk->sk_mark = fl4.flowi4_mark;
err = ip_append_data(sk, &fl4, ip_reply_glue_bits, arg->iov->iov_base,
len, 0, &ipc, &rt, MSG_DONTWAIT);
if (unlikely(err)) {
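Copying flowi4_mark into sk->sk_mark means replies built via ip_send_unicast_reply() (TCP RSTs and TIME_WAIT ACKs) now carry the fwmark of the packet that triggered them, so a mark-based policy route, e.g. a hypothetical "ip rule add fwmark 0x1 table 100", steers the reply through the same table.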
diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c
index 5476110..a31f47c 100644
--- a/net/ipv4/ip_tunnel_core.c
+++ b/net/ipv4/ip_tunnel_core.c
@@ -226,7 +226,7 @@ static const struct nla_policy ip_tun_policy[LWTUNNEL_IP_MAX + 1] = {
[LWTUNNEL_IP_FLAGS] = { .type = NLA_U16 },
};
-static int ip_tun_build_state(struct net_device *dev, struct nlattr *attr,
+static int ip_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -311,6 +311,7 @@ static const struct lwtunnel_encap_ops ip_tun_lwt_ops = {
.fill_encap = ip_tun_fill_encap_info,
.get_encap_size = ip_tun_encap_nlsize,
.cmp_encap = ip_tun_cmp_encap,
+ .owner = THIS_MODULE,
};
static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
@@ -322,7 +323,7 @@ static const struct nla_policy ip6_tun_policy[LWTUNNEL_IP6_MAX + 1] = {
[LWTUNNEL_IP6_FLAGS] = { .type = NLA_U16 },
};
-static int ip6_tun_build_state(struct net_device *dev, struct nlattr *attr,
+static int ip6_tun_build_state(struct nlattr *attr,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -401,6 +402,7 @@ static const struct lwtunnel_encap_ops ip6_tun_lwt_ops = {
.fill_encap = ip6_tun_fill_encap_info,
.get_encap_size = ip6_tun_encap_nlsize,
.cmp_encap = ip_tun_cmp_encap,
+ .owner = THIS_MODULE,
};
void __init ip_tunnel_core_init(void)
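The new .owner field lets the lwtunnel core take a module reference for each state built with these ops, so a module implementing an encap type cannot be unloaded while routes still use it; for built-in code such as these two ops THIS_MODULE is NULL and the reference is a no-op.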
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 824c4fd..beacd02 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -299,10 +299,29 @@ static void __net_exit ipmr_rules_exit(struct net *net)
}
#endif
+static inline int ipmr_hash_cmp(struct rhashtable_compare_arg *arg,
+ const void *ptr)
+{
+ const struct mfc_cache_cmp_arg *cmparg = arg->key;
+ struct mfc_cache *c = (struct mfc_cache *)ptr;
+
+ return cmparg->mfc_mcastgrp != c->mfc_mcastgrp ||
+ cmparg->mfc_origin != c->mfc_origin;
+}
+
+static const struct rhashtable_params ipmr_rht_params = {
+ .head_offset = offsetof(struct mfc_cache, mnode),
+ .key_offset = offsetof(struct mfc_cache, cmparg),
+ .key_len = sizeof(struct mfc_cache_cmp_arg),
+ .nelem_hint = 3,
+ .locks_mul = 1,
+ .obj_cmpfn = ipmr_hash_cmp,
+ .automatic_shrinking = true,
+};
+
static struct mr_table *ipmr_new_table(struct net *net, u32 id)
{
struct mr_table *mrt;
- unsigned int i;
/* "pimreg%u" should not exceed 16 bytes (IFNAMSIZ) */
if (id != RT_TABLE_DEFAULT && id >= 1000000000)
@@ -318,10 +337,8 @@ static struct mr_table *ipmr_new_table(struct net *net, u32 id)
write_pnet(&mrt->net, net);
mrt->id = id;
- /* Forwarding cache */
- for (i = 0; i < MFC_LINES; i++)
- INIT_LIST_HEAD(&mrt->mfc_cache_array[i]);
-
+ rhltable_init(&mrt->mfc_hash, &ipmr_rht_params);
+ INIT_LIST_HEAD(&mrt->mfc_cache_list);
INIT_LIST_HEAD(&mrt->mfc_unres_queue);
setup_timer(&mrt->ipmr_expire_timer, ipmr_expire_process,
@@ -338,6 +355,7 @@ static void ipmr_free_table(struct mr_table *mrt)
{
del_timer_sync(&mrt->ipmr_expire_timer);
mroute_clean_tables(mrt, true);
+ rhltable_destroy(&mrt->mfc_hash);
kfree(mrt);
}
@@ -839,13 +857,17 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
__be32 origin,
__be32 mcastgrp)
{
- int line = MFC_HASH(mcastgrp, origin);
+ struct mfc_cache_cmp_arg arg = {
+ .mfc_mcastgrp = mcastgrp,
+ .mfc_origin = origin
+ };
+ struct rhlist_head *tmp, *list;
struct mfc_cache *c;
- list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list) {
- if (c->mfc_origin == origin && c->mfc_mcastgrp == mcastgrp)
- return c;
- }
+ list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+ rhl_for_each_entry_rcu(c, tmp, list, mnode)
+ return c;
+
return NULL;
}
@@ -853,13 +875,16 @@ static struct mfc_cache *ipmr_cache_find(struct mr_table *mrt,
static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
int vifi)
{
- int line = MFC_HASH(htonl(INADDR_ANY), htonl(INADDR_ANY));
+ struct mfc_cache_cmp_arg arg = {
+ .mfc_mcastgrp = htonl(INADDR_ANY),
+ .mfc_origin = htonl(INADDR_ANY)
+ };
+ struct rhlist_head *tmp, *list;
struct mfc_cache *c;
- list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
- if (c->mfc_origin == htonl(INADDR_ANY) &&
- c->mfc_mcastgrp == htonl(INADDR_ANY) &&
- c->mfc_un.res.ttls[vifi] < 255)
+ list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+ rhl_for_each_entry_rcu(c, tmp, list, mnode)
+ if (c->mfc_un.res.ttls[vifi] < 255)
return c;
return NULL;
@@ -869,29 +894,51 @@ static struct mfc_cache *ipmr_cache_find_any_parent(struct mr_table *mrt,
static struct mfc_cache *ipmr_cache_find_any(struct mr_table *mrt,
__be32 mcastgrp, int vifi)
{
- int line = MFC_HASH(mcastgrp, htonl(INADDR_ANY));
+ struct mfc_cache_cmp_arg arg = {
+ .mfc_mcastgrp = mcastgrp,
+ .mfc_origin = htonl(INADDR_ANY)
+ };
+ struct rhlist_head *tmp, *list;
struct mfc_cache *c, *proxy;
if (mcastgrp == htonl(INADDR_ANY))
goto skip;
- list_for_each_entry_rcu(c, &mrt->mfc_cache_array[line], list)
- if (c->mfc_origin == htonl(INADDR_ANY) &&
- c->mfc_mcastgrp == mcastgrp) {
- if (c->mfc_un.res.ttls[vifi] < 255)
- return c;
-
- /* It's ok if the vifi is part of the static tree */
- proxy = ipmr_cache_find_any_parent(mrt,
- c->mfc_parent);
- if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
- return c;
- }
+ list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+ rhl_for_each_entry_rcu(c, tmp, list, mnode) {
+ if (c->mfc_un.res.ttls[vifi] < 255)
+ return c;
+
+ /* It's ok if the vifi is part of the static tree */
+ proxy = ipmr_cache_find_any_parent(mrt, c->mfc_parent);
+ if (proxy && proxy->mfc_un.res.ttls[vifi] < 255)
+ return c;
+ }
skip:
return ipmr_cache_find_any_parent(mrt, vifi);
}
+/* Look for a (S,G,iif) entry if parent != -1 */
+static struct mfc_cache *ipmr_cache_find_parent(struct mr_table *mrt,
+ __be32 origin, __be32 mcastgrp,
+ int parent)
+{
+ struct mfc_cache_cmp_arg arg = {
+ .mfc_mcastgrp = mcastgrp,
+ .mfc_origin = origin,
+ };
+ struct rhlist_head *tmp, *list;
+ struct mfc_cache *c;
+
+ list = rhltable_lookup(&mrt->mfc_hash, &arg, ipmr_rht_params);
+ rhl_for_each_entry_rcu(c, tmp, list, mnode)
+ if (parent == -1 || parent == c->mfc_parent)
+ return c;
+
+ return NULL;
+}
+
/* Allocate a multicast cache entry */
static struct mfc_cache *ipmr_cache_alloc(void)
{
@@ -1028,10 +1075,10 @@ static int ipmr_cache_report(struct mr_table *mrt,
static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
struct sk_buff *skb)
{
+ const struct iphdr *iph = ip_hdr(skb);
+ struct mfc_cache *c;
bool found = false;
int err;
- struct mfc_cache *c;
- const struct iphdr *iph = ip_hdr(skb);
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(c, &mrt->mfc_unres_queue, list) {
@@ -1095,46 +1142,39 @@ static int ipmr_cache_unresolved(struct mr_table *mrt, vifi_t vifi,
static int ipmr_mfc_delete(struct mr_table *mrt, struct mfcctl *mfc, int parent)
{
- int line;
- struct mfc_cache *c, *next;
+ struct mfc_cache *c;
- line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
+ /* The entries are added/deleted only under RTNL */
+ rcu_read_lock();
+ c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
+ mfc->mfcc_mcastgrp.s_addr, parent);
+ rcu_read_unlock();
+ if (!c)
+ return -ENOENT;
+ rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
+ list_del_rcu(&c->list);
+ mroute_netlink_event(mrt, c, RTM_DELROUTE);
+ ipmr_cache_free(c);
- list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[line], list) {
- if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
- c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
- (parent == -1 || parent == c->mfc_parent)) {
- list_del_rcu(&c->list);
- mroute_netlink_event(mrt, c, RTM_DELROUTE);
- ipmr_cache_free(c);
- return 0;
- }
- }
- return -ENOENT;
+ return 0;
}
static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
struct mfcctl *mfc, int mrtsock, int parent)
{
- bool found = false;
- int line;
struct mfc_cache *uc, *c;
+ bool found;
+ int ret;
if (mfc->mfcc_parent >= MAXVIFS)
return -ENFILE;
- line = MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);
-
- list_for_each_entry(c, &mrt->mfc_cache_array[line], list) {
- if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
- c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr &&
- (parent == -1 || parent == c->mfc_parent)) {
- found = true;
- break;
- }
- }
-
- if (found) {
+ /* The entries are added/deleted only under RTNL */
+ rcu_read_lock();
+ c = ipmr_cache_find_parent(mrt, mfc->mfcc_origin.s_addr,
+ mfc->mfcc_mcastgrp.s_addr, parent);
+ rcu_read_unlock();
+ if (c) {
write_lock_bh(&mrt_lock);
c->mfc_parent = mfc->mfcc_parent;
ipmr_update_thresholds(mrt, c, mfc->mfcc_ttls);
@@ -1160,8 +1200,14 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
if (!mrtsock)
c->mfc_flags |= MFC_STATIC;
- list_add_rcu(&c->list, &mrt->mfc_cache_array[line]);
-
+ ret = rhltable_insert_key(&mrt->mfc_hash, &c->cmparg, &c->mnode,
+ ipmr_rht_params);
+ if (ret) {
+ pr_err("ipmr: rhtable insert error %d\n", ret);
+ ipmr_cache_free(c);
+ return ret;
+ }
+ list_add_tail_rcu(&c->list, &mrt->mfc_cache_list);
/* Check to see if we resolved a queued list. If so we
* need to send on the frames and tidy up.
*/
@@ -1191,9 +1237,9 @@ static int ipmr_mfc_add(struct net *net, struct mr_table *mrt,
/* Close the multicast socket, and clear the vif tables etc */
static void mroute_clean_tables(struct mr_table *mrt, bool all)
{
- int i;
+ struct mfc_cache *c, *tmp;
LIST_HEAD(list);
- struct mfc_cache *c, *next;
+ int i;
/* Shut down all active vif entries */
for (i = 0; i < mrt->maxvif; i++) {
@@ -1204,19 +1250,18 @@ static void mroute_clean_tables(struct mr_table *mrt, bool all)
unregister_netdevice_many(&list);
/* Wipe the cache */
- for (i = 0; i < MFC_LINES; i++) {
- list_for_each_entry_safe(c, next, &mrt->mfc_cache_array[i], list) {
- if (!all && (c->mfc_flags & MFC_STATIC))
- continue;
- list_del_rcu(&c->list);
- mroute_netlink_event(mrt, c, RTM_DELROUTE);
- ipmr_cache_free(c);
- }
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_cache_list, list) {
+ if (!all && (c->mfc_flags & MFC_STATIC))
+ continue;
+ rhltable_remove(&mrt->mfc_hash, &c->mnode, ipmr_rht_params);
+ list_del_rcu(&c->list);
+ mroute_netlink_event(mrt, c, RTM_DELROUTE);
+ ipmr_cache_free(c);
}
if (atomic_read(&mrt->cache_resolve_queue_len) != 0) {
spin_lock_bh(&mfc_unres_lock);
- list_for_each_entry_safe(c, next, &mrt->mfc_unres_queue, list) {
+ list_for_each_entry_safe(c, tmp, &mrt->mfc_unres_queue, list) {
list_del(&c->list);
mroute_netlink_event(mrt, c, RTM_DELROUTE);
ipmr_destroy_unres(mrt, c);
@@ -1791,9 +1836,9 @@ static void ip_mr_forward(struct net *net, struct mr_table *mrt,
struct sk_buff *skb, struct mfc_cache *cache,
int local)
{
+ int true_vifi = ipmr_find_vif(mrt, skb->dev);
int psend = -1;
int vif, ct;
- int true_vifi = ipmr_find_vif(mrt, skb->dev);
vif = cache->mfc_parent;
cache->mfc_un.res.pkt++;
@@ -2293,34 +2338,30 @@ static int ipmr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb)
struct mr_table *mrt;
struct mfc_cache *mfc;
unsigned int t = 0, s_t;
- unsigned int h = 0, s_h;
unsigned int e = 0, s_e;
s_t = cb->args[0];
- s_h = cb->args[1];
- s_e = cb->args[2];
+ s_e = cb->args[1];
rcu_read_lock();
ipmr_for_each_table(mrt, net) {
if (t < s_t)
goto next_table;
- if (t > s_t)
- s_h = 0;
- for (h = s_h; h < MFC_LINES; h++) {
- list_for_each_entry_rcu(mfc, &mrt->mfc_cache_array[h], list) {
- if (e < s_e)
- goto next_entry;
- if (ipmr_fill_mroute(mrt, skb,
- NETLINK_CB(cb->skb).portid,
- cb->nlh->nlmsg_seq,
- mfc, RTM_NEWROUTE,
- NLM_F_MULTI) < 0)
- goto done;
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list) {
+ if (e < s_e)
+ goto next_entry;
+ if (ipmr_fill_mroute(mrt, skb,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq,
+ mfc, RTM_NEWROUTE,
+ NLM_F_MULTI) < 0)
+ goto done;
next_entry:
- e++;
- }
- e = s_e = 0;
+ e++;
}
+ e = 0;
+ s_e = 0;
+
spin_lock_bh(&mfc_unres_lock);
list_for_each_entry(mfc, &mrt->mfc_unres_queue, list) {
if (e < s_e)
@@ -2337,16 +2378,15 @@ next_entry2:
e++;
}
spin_unlock_bh(&mfc_unres_lock);
- e = s_e = 0;
- s_h = 0;
+ e = 0;
+ s_e = 0;
next_table:
t++;
}
done:
rcu_read_unlock();
- cb->args[2] = e;
- cb->args[1] = h;
+ cb->args[1] = e;
cb->args[0] = t;
return skb->len;
@@ -2590,10 +2630,8 @@ struct ipmr_mfc_iter {
struct seq_net_private p;
struct mr_table *mrt;
struct list_head *cache;
- int ct;
};
-
static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct ipmr_mfc_iter *it, loff_t pos)
{
@@ -2601,12 +2639,10 @@ static struct mfc_cache *ipmr_mfc_seq_idx(struct net *net,
struct mfc_cache *mfc;
rcu_read_lock();
- for (it->ct = 0; it->ct < MFC_LINES; it->ct++) {
- it->cache = &mrt->mfc_cache_array[it->ct];
- list_for_each_entry_rcu(mfc, it->cache, list)
- if (pos-- == 0)
- return mfc;
- }
+ it->cache = &mrt->mfc_cache_list;
+ list_for_each_entry_rcu(mfc, &mrt->mfc_cache_list, list)
+ if (pos-- == 0)
+ return mfc;
rcu_read_unlock();
spin_lock_bh(&mfc_unres_lock);
@@ -2633,17 +2669,16 @@ static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
it->mrt = mrt;
it->cache = NULL;
- it->ct = 0;
return *pos ? ipmr_mfc_seq_idx(net, seq->private, *pos - 1)
: SEQ_START_TOKEN;
}
static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- struct mfc_cache *mfc = v;
struct ipmr_mfc_iter *it = seq->private;
struct net *net = seq_file_net(seq);
struct mr_table *mrt = it->mrt;
+ struct mfc_cache *mfc = v;
++*pos;
@@ -2656,19 +2691,9 @@ static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
if (it->cache == &mrt->mfc_unres_queue)
goto end_of_list;
- BUG_ON(it->cache != &mrt->mfc_cache_array[it->ct]);
-
- while (++it->ct < MFC_LINES) {
- it->cache = &mrt->mfc_cache_array[it->ct];
- if (list_empty(it->cache))
- continue;
- return list_first_entry(it->cache, struct mfc_cache, list);
- }
-
/* exhausted cache_array, show unresolved */
rcu_read_unlock();
it->cache = &mrt->mfc_unres_queue;
- it->ct = 0;
spin_lock_bh(&mfc_unres_lock);
if (!list_empty(it->cache))
@@ -2688,7 +2713,7 @@ static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
if (it->cache == &mrt->mfc_unres_queue)
spin_unlock_bh(&mfc_unres_lock);
- else if (it->cache == &mrt->mfc_cache_array[it->ct])
+ else if (it->cache == &mrt->mfc_cache_list)
rcu_read_unlock();
}
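One convention worth noting in the conversion above: rhashtable's obj_cmpfn follows memcmp semantics, returning zero on a match, which is why ipmr_hash_cmp() is written with != and ||. An equivalent, more explicit form (illustrative only; ipmr_hash_cmp_alt is not in the patch):

static int ipmr_hash_cmp_alt(struct rhashtable_compare_arg *arg,
			     const void *ptr)
{
	const struct mfc_cache_cmp_arg *key = arg->key;
	const struct mfc_cache *c = ptr;

	if (key->mfc_mcastgrp == c->mfc_mcastgrp &&
	    key->mfc_origin == c->mfc_origin)
		return 0;	/* match */
	return 1;		/* no match */
}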
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index a6b8c1a..0a783cd 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -144,7 +144,12 @@ clusterip_config_find_get(struct net *net, __be32 clusterip, int entry)
rcu_read_lock_bh();
c = __clusterip_config_find(net, clusterip);
if (c) {
- if (!c->pde || unlikely(!atomic_inc_not_zero(&c->refcount)))
+#ifdef CONFIG_PROC_FS
+ if (!c->pde)
+ c = NULL;
+ else
+#endif
+ if (unlikely(!atomic_inc_not_zero(&c->refcount)))
c = NULL;
else if (entry)
atomic_inc(&c->entries);
diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c
index f273098..37fb955 100644
--- a/net/ipv4/netfilter/ipt_rpfilter.c
+++ b/net/ipv4/netfilter/ipt_rpfilter.c
@@ -63,10 +63,10 @@ static bool rpfilter_lookup_reverse(struct net *net, struct flowi4 *fl4,
return dev_match || flags & XT_RPFILTER_LOOSE;
}
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
- const struct rtable *rt = skb_rtable(skb);
- return rt && (rt->rt_flags & RTCF_LOCAL);
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -79,7 +79,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
info = par->matchinfo;
invert = info->flags & XT_RPFILTER_INVERT;
- if (rpfilter_is_local(skb))
+ if (rpfilter_is_loopback(skb, xt_in(par)))
return true ^ invert;
iph = ip_hdr(skb);
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index fd82202..146d861 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -126,6 +126,8 @@ void nf_send_reset(struct net *net, struct sk_buff *oldskb, int hook)
/* ip_route_me_harder expects skb->dst to be set */
skb_dst_set_noref(nskb, skb_dst(oldskb));
+ nskb->mark = IP4_REPLY_MARK(net, oldskb->mark);
+
skb_reserve(nskb, LL_MAX_HEADER);
niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
ip4_dst_hoplimit(skb_dst(nskb)));
diff --git a/net/ipv4/netfilter/nft_fib_ipv4.c b/net/ipv4/netfilter/nft_fib_ipv4.c
index 965b1a1..29812919 100644
--- a/net/ipv4/netfilter/nft_fib_ipv4.c
+++ b/net/ipv4/netfilter/nft_fib_ipv4.c
@@ -26,13 +26,6 @@ static __be32 get_saddr(__be32 addr)
return addr;
}
-static bool fib4_is_local(const struct sk_buff *skb)
-{
- const struct rtable *rt = skb_rtable(skb);
-
- return rt && (rt->rt_flags & RTCF_LOCAL);
-}
-
#define DSCP_BITS 0xfc
void nft_fib4_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
@@ -95,8 +88,10 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
else
oif = NULL;
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib4_is_local(pkt->skb)) {
- nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+ nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+ nft_fib_store_result(dest, priv->result, pkt,
+ nft_in(pkt)->ifindex);
return;
}
@@ -131,7 +126,7 @@ void nft_fib4_eval(const struct nft_expr *expr, struct nft_regs *regs,
switch (res.type) {
case RTN_UNICAST:
break;
- case RTN_LOCAL: /* should not appear here, see fib4_is_local() above */
+ case RTN_LOCAL: /* Should not see RTN_LOCAL here */
return;
default:
break;
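Both the rpfilter match and the nft fib expression now derive "is this loopback traffic" from the packet and input device themselves, rather than from skb_rtable(), which is not reliably populated at prerouting since early demux may not have attached a route. The shared test is simply:

	skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK

with the nft side wrapped as nft_fib_is_loopback(), as used above.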
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 86cca61..592db6a 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -433,9 +433,9 @@ int ping_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
goto out;
}
- pr_debug("after bind(): num = %d, dif = %d\n",
- (int)isk->inet_num,
- (int)sk->sk_bound_dev_if);
+ pr_debug("after bind(): num = %hu, dif = %d\n",
+ isk->inet_num,
+ sk->sk_bound_dev_if);
err = 0;
if (sk->sk_family == AF_INET && isk->inet_rcv_saddr)
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 0247ca0..a9deeb9 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -57,10 +57,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
unsigned int frag_mem;
int orphans, sockets;
- local_bh_disable();
orphans = percpu_counter_sum_positive(&tcp_orphan_count);
sockets = proto_sockets_allocated_sum_positive(&tcp_prot);
- local_bh_enable();
socket_seq_show(seq);
seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %ld\n",
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 7144288..4b7c231 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2471,7 +2471,7 @@ static int rt_fill_info(struct net *net, __be32 dst, __be32 src, u32 table_id,
r->rtm_dst_len = 32;
r->rtm_src_len = 0;
r->rtm_tos = fl4->flowi4_tos;
- r->rtm_table = table_id;
+ r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table_id))
goto nla_put_failure;
r->rtm_type = rt->rt_type;
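rtm_table in struct rtmsg is only eight bits wide, so table ids of 256 and above cannot be represented in the header; reporting RT_TABLE_COMPAT there keeps old consumers from seeing a truncated id, while the full 32-bit id continues to travel in the RTA_TABLE attribute put just below.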
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 134d8e1..d6880a6 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -35,6 +35,8 @@ static int ip_local_port_range_min[] = { 1, 1 };
static int ip_local_port_range_max[] = { 65535, 65535 };
static int tcp_adv_win_scale_min = -31;
static int tcp_adv_win_scale_max = 31;
+static int ip_privileged_port_min;
+static int ip_privileged_port_max = 65535;
static int ip_ttl_min = 1;
static int ip_ttl_max = 255;
static int tcp_syn_retries_min = 1;
@@ -79,7 +81,12 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
if (write && ret == 0) {
- if (range[1] < range[0])
+ /* Ensure that the upper limit is not smaller than the lower,
+ * and that the lower does not encroach upon the privileged
+ * port limit.
+ */
+ if ((range[1] < range[0]) ||
+ (range[0] < net->ipv4.sysctl_ip_prot_sock))
ret = -EINVAL;
else
set_local_port_range(net, range);
@@ -88,6 +95,40 @@ static int ipv4_local_port_range(struct ctl_table *table, int write,
return ret;
}
+/* Validate changes from /proc interface. */
+static int ipv4_privileged_ports(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ struct net *net = container_of(table->data, struct net,
+ ipv4.sysctl_ip_prot_sock);
+ int ret;
+ int pports;
+ int range[2];
+ struct ctl_table tmp = {
+ .data = &pports,
+ .maxlen = sizeof(pports),
+ .mode = table->mode,
+ .extra1 = &ip_privileged_port_min,
+ .extra2 = &ip_privileged_port_max,
+ };
+
+ pports = net->ipv4.sysctl_ip_prot_sock;
+
+ ret = proc_dointvec_minmax(&tmp, write, buffer, lenp, ppos);
+
+ if (write && ret == 0) {
+ inet_get_local_port_range(net, &range[0], &range[1]);
+ /* Ensure that the local port range doesn't overlap with the
+ * privileged port range.
+ */
+ if (range[0] < pports)
+ ret = -EINVAL;
+ else
+ net->ipv4.sysctl_ip_prot_sock = pports;
+ }
+
+ return ret;
+}
static void inet_get_ping_group_range_table(struct ctl_table *table, kgid_t *low, kgid_t *high)
{
@@ -537,13 +578,6 @@ static struct ctl_table ipv4_table[] = {
.proc_handler = proc_dointvec
},
{
- .procname = "tcp_thin_dupack",
- .data = &sysctl_tcp_thin_dupack,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec
- },
- {
.procname = "tcp_early_retrans",
.data = &sysctl_tcp_early_retrans,
.maxlen = sizeof(int),
@@ -930,7 +964,7 @@ static struct ctl_table ipv4_net_table[] = {
.data = &init_net.ipv4.sysctl_tcp_notsent_lowat,
.maxlen = sizeof(unsigned int),
.mode = 0644,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_douintvec,
},
{
.procname = "tcp_tw_reuse",
@@ -971,6 +1005,24 @@ static struct ctl_table ipv4_net_table[] = {
.extra2 = &one,
},
#endif
+ {
+ .procname = "ip_unprivileged_port_start",
+ .maxlen = sizeof(int),
+ .data = &init_net.ipv4.sysctl_ip_prot_sock,
+ .mode = 0644,
+ .proc_handler = ipv4_privileged_ports,
+ },
+#ifdef CONFIG_NET_L3_MASTER_DEV
+ {
+ .procname = "udp_l3mdev_accept",
+ .data = &init_net.ipv4.sysctl_udp_l3mdev_accept,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &one,
+ },
+#endif
{ }
};
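Taken together, the two handlers above enforce one invariant: the privileged region [0, ip_unprivileged_port_start) may never overlap the ephemeral range. A sketch of the combined check (ports_config_valid() is illustrative, not part of the patch):

static bool ports_config_valid(int prot_sock, int range_lo, int range_hi)
{
	/* range must be ordered and start at or above the first
	 * unprivileged port
	 */
	return range_lo <= range_hi && prot_sock <= range_lo;
}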
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index c8d46c1..b751abc 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -406,7 +406,6 @@ void tcp_init_sock(struct sock *sk)
tp->mss_cache = TCP_MSS_DEFAULT;
tp->reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
- tcp_enable_early_retrans(tp);
tcp_assign_congestion_control(sk);
tp->tsoffset = 0;
@@ -421,9 +420,7 @@ void tcp_init_sock(struct sock *sk)
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
- local_bh_disable();
sk_sockets_allocated_inc(sk);
- local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);
@@ -536,6 +533,12 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
if (tp->urg_data & TCP_URG_VALID)
mask |= POLLPRI;
+ } else if (sk->sk_state == TCP_SYN_SENT && inet_sk(sk)->defer_connect) {
+ /* Active TCP fastopen socket with defer_connect
+ * Return POLLOUT so application can call write()
+ * in order for kernel to generate SYN+data
+ */
+ mask |= POLLOUT | POLLWRNORM;
}
/* This barrier is coupled with smp_wmb() in tcp_reset() */
smp_rmb();
@@ -1074,6 +1077,7 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
int *copied, size_t size)
{
struct tcp_sock *tp = tcp_sk(sk);
+ struct inet_sock *inet = inet_sk(sk);
int err, flags;
if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
@@ -1088,9 +1092,19 @@ static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
tp->fastopen_req->data = msg;
tp->fastopen_req->size = size;
+ if (inet->defer_connect) {
+ err = tcp_connect(sk);
+ /* Same failure procedure as in tcp_v4/6_connect */
+ if (err) {
+ tcp_set_state(sk, TCP_CLOSE);
+ inet->inet_dport = 0;
+ sk->sk_route_caps = 0;
+ }
+ }
flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
- msg->msg_namelen, flags);
+ msg->msg_namelen, flags, 1);
+ inet->defer_connect = 0;
*copied = tp->fastopen_req->copied;
tcp_free_fastopen_req(tp);
return err;
@@ -1110,7 +1124,7 @@ int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size)
lock_sock(sk);
flags = msg->msg_flags;
- if (flags & MSG_FASTOPEN) {
+ if (unlikely(flags & MSG_FASTOPEN || inet_sk(sk)->defer_connect)) {
err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
if (err == -EINPROGRESS && copied_syn > 0)
goto out;
@@ -2475,11 +2489,6 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
case TCP_THIN_DUPACK:
if (val < 0 || val > 1)
err = -EINVAL;
- else {
- tp->thin_dupack = val;
- if (tp->thin_dupack)
- tcp_disable_early_retrans(tp);
- }
break;
case TCP_REPAIR:
@@ -2664,6 +2673,18 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
err = -EINVAL;
}
break;
+ case TCP_FASTOPEN_CONNECT:
+ if (val > 1 || val < 0) {
+ err = -EINVAL;
+ } else if (sysctl_tcp_fastopen & TFO_CLIENT_ENABLE) {
+ if (sk->sk_state == TCP_CLOSE)
+ tp->fastopen_connect = val;
+ else
+ err = -EINVAL;
+ } else {
+ err = -EOPNOTSUPP;
+ }
+ break;
case TCP_TIMESTAMP:
if (!tp->repair)
err = -EPERM;
@@ -2849,7 +2870,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
struct sk_buff *stats;
struct tcp_info info;
- stats = alloc_skb(3 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
+ stats = alloc_skb(5 * nla_total_size_64bit(sizeof(u64)), GFP_ATOMIC);
if (!stats)
return NULL;
@@ -2860,6 +2881,10 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk)
info.tcpi_rwnd_limited, TCP_NLA_PAD);
nla_put_u64_64bit(stats, TCP_NLA_SNDBUF_LIMITED,
info.tcpi_sndbuf_limited, TCP_NLA_PAD);
+ nla_put_u64_64bit(stats, TCP_NLA_DATA_SEGS_OUT,
+ tp->data_segs_out, TCP_NLA_PAD);
+ nla_put_u64_64bit(stats, TCP_NLA_TOTAL_RETRANS,
+ tp->total_retrans, TCP_NLA_PAD);
return stats;
}
@@ -2969,8 +2994,9 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
case TCP_THIN_LINEAR_TIMEOUTS:
val = tp->thin_lto;
break;
+
case TCP_THIN_DUPACK:
- val = tp->thin_dupack;
+ val = 0;
break;
case TCP_REPAIR:
@@ -3023,6 +3049,10 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
val = icsk->icsk_accept_queue.fastopenq.max_qlen;
break;
+ case TCP_FASTOPEN_CONNECT:
+ val = tp->fastopen_connect;
+ break;
+
case TCP_TIMESTAMP:
val = tcp_time_stamp + tp->tsoffset;
break;
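Userspace flow enabled by the new TCP_FASTOPEN_CONNECT option: connect() returns immediately with the SYN deferred, and the first write() sends SYN plus data if a Fast Open cookie is cached (falling back to a regular handshake otherwise). Minimal client sketch, error handling elided; the fallback define assumes the value this series adds to the uapi header:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>
#include <unistd.h>

#ifndef TCP_FASTOPEN_CONNECT
#define TCP_FASTOPEN_CONNECT 30
#endif

static void fastopen_client(const struct sockaddr_in *dst,
			    const void *req, size_t len)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	setsockopt(fd, IPPROTO_TCP, TCP_FASTOPEN_CONNECT, &one, sizeof(one));
	connect(fd, (const struct sockaddr *)dst, sizeof(*dst));
	write(fd, req, len);	/* SYN + data leave here */
	close(fd);
}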
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 4e777a3..8ea4e97 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -113,7 +113,7 @@ static bool tcp_fastopen_cookie_gen(struct request_sock *req,
struct tcp_fastopen_cookie tmp;
if (__tcp_fastopen_cookie_gen(&ip6h->saddr, &tmp)) {
- struct in6_addr *buf = (struct in6_addr *) tmp.val;
+ struct in6_addr *buf = &tmp.addr;
int i;
for (i = 0; i < 4; i++)
@@ -205,6 +205,7 @@ static struct sock *tcp_fastopen_create_child(struct sock *sk,
* scaled. So correct it appropriately.
*/
tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
+ tp->max_window = tp->snd_wnd;
/* Activate the retrans timer so that SYNACK can be retransmitted.
* The request socket is not added to the ehash
@@ -325,3 +326,57 @@ fastopen:
*foc = valid_foc;
return NULL;
}
+
+bool tcp_fastopen_cookie_check(struct sock *sk, u16 *mss,
+ struct tcp_fastopen_cookie *cookie)
+{
+ unsigned long last_syn_loss = 0;
+ int syn_loss = 0;
+
+ tcp_fastopen_cache_get(sk, mss, cookie, &syn_loss, &last_syn_loss);
+
+ /* Recurring FO SYN losses: no cookie or data in SYN */
+ if (syn_loss > 1 &&
+ time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
+ cookie->len = -1;
+ return false;
+ }
+ if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE) {
+ cookie->len = -1;
+ return true;
+ }
+ return cookie->len > 0;
+}
+
+/* This function checks if we want to defer sending SYN until the first
+ * write(). We defer under the following conditions:
+ * 1. fastopen_connect sockopt is set
+ * 2. we have a valid cookie
+ * Return value: return true if we want to defer until application writes data
+ * return false if we want to send out SYN immediately
+ */
+bool tcp_fastopen_defer_connect(struct sock *sk, int *err)
+{
+ struct tcp_fastopen_cookie cookie = { .len = 0 };
+ struct tcp_sock *tp = tcp_sk(sk);
+ u16 mss;
+
+ if (tp->fastopen_connect && !tp->fastopen_req) {
+ if (tcp_fastopen_cookie_check(sk, &mss, &cookie)) {
+ inet_sk(sk)->defer_connect = 1;
+ return true;
+ }
+
+ /* Alloc fastopen_req in order for FO option to be included
+ * in SYN
+ */
+ tp->fastopen_req = kzalloc(sizeof(*tp->fastopen_req),
+ sk->sk_allocation);
+ if (tp->fastopen_req)
+ tp->fastopen_req->cookie = cookie;
+ else
+ *err = -ENOBUFS;
+ }
+ return false;
+}
+EXPORT_SYMBOL(tcp_fastopen_defer_connect);
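Worked example of the backoff in tcp_fastopen_cookie_check(): after a second consecutive SYN loss (syn_loss == 2) the cookie is suppressed while jiffies is before last_syn_loss + (60*HZ << 2), i.e. for up to 240 seconds, and each further loss doubles the window.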
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index ec6d843..27c95ac 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -79,7 +79,7 @@
int sysctl_tcp_timestamps __read_mostly = 1;
int sysctl_tcp_window_scaling __read_mostly = 1;
int sysctl_tcp_sack __read_mostly = 1;
-int sysctl_tcp_fack __read_mostly = 1;
+int sysctl_tcp_fack __read_mostly;
int sysctl_tcp_max_reordering __read_mostly = 300;
int sysctl_tcp_dsack __read_mostly = 1;
int sysctl_tcp_app_win __read_mostly = 31;
@@ -95,9 +95,6 @@ int sysctl_tcp_rfc1337 __read_mostly;
int sysctl_tcp_max_orphans __read_mostly = NR_FILE;
int sysctl_tcp_frto __read_mostly = 2;
int sysctl_tcp_min_rtt_wlen __read_mostly = 300;
-
-int sysctl_tcp_thin_dupack __read_mostly;
-
int sysctl_tcp_moderate_rcvbuf __read_mostly = 1;
int sysctl_tcp_early_retrans __read_mostly = 3;
int sysctl_tcp_invalid_ratelimit __read_mostly = HZ/2;
@@ -904,8 +901,6 @@ static void tcp_update_reordering(struct sock *sk, const int metric,
tcp_disable_fack(tp);
}
- if (metric > 0)
- tcp_disable_early_retrans(tp);
tp->rack.reord = 1;
}
@@ -916,10 +911,6 @@ static void tcp_verify_retransmit_hint(struct tcp_sock *tp, struct sk_buff *skb)
before(TCP_SKB_CB(skb)->seq,
TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
tp->retransmit_skb_hint = skb;
-
- if (!tp->lost_out ||
- after(TCP_SKB_CB(skb)->end_seq, tp->retransmit_high))
- tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
/* Sum the number of packets on the wire we have marked as lost.
@@ -1135,6 +1126,7 @@ struct tcp_sacktag_state {
*/
struct skb_mstamp first_sackt;
struct skb_mstamp last_sackt;
+ struct skb_mstamp ack_time; /* Timestamp when the S/ACK was received */
struct rate_sample *rate;
int flag;
};
@@ -1217,7 +1209,8 @@ static u8 tcp_sacktag_one(struct sock *sk,
return sacked;
if (!(sacked & TCPCB_SACKED_ACKED)) {
- tcp_rack_advance(tp, xmit_time, sacked);
+ tcp_rack_advance(tp, sacked, end_seq,
+ xmit_time, &state->ack_time);
if (sacked & TCPCB_SACKED_RETRANS) {
/* If the segment is not tagged as lost,
@@ -1937,7 +1930,6 @@ void tcp_enter_loss(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct net *net = sock_net(sk);
struct sk_buff *skb;
- bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
bool is_reneg; /* is receiver reneging on SACKs? */
bool mark_lost;
@@ -1982,7 +1974,6 @@ void tcp_enter_loss(struct sock *sk)
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_ACKED;
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
- tp->retransmit_high = TCP_SKB_CB(skb)->end_seq;
}
}
tcp_verify_left_out(tp);
@@ -1998,13 +1989,15 @@ void tcp_enter_loss(struct sock *sk)
tp->high_seq = tp->snd_nxt;
tcp_ecn_queue_cwr(tp);
- /* F-RTO RFC5682 sec 3.1 step 1: retransmit SND.UNA if no previous
- * loss recovery is underway except recurring timeout(s) on
- * the same SND.UNA (sec 3.2). Disable F-RTO on path MTU probing
+ /* F-RTO RFC5682 sec 3.1 step 1 mandates disabling F-RTO
+ * if a previous recovery is underway, since it may otherwise incorrectly
+ * declare a timeout spurious when some previously retransmitted packets
+ * are s/acked (sec 3.2). We do not apply that restriction since
+ * retransmitted skbs are permanently tagged with TCPCB_EVER_RETRANS,
+ * so FLAG_ORIG_SACK_ACKED is always correct. But we do disable F-RTO
+ * on PMTU discovery to avoid sending new data.
*/
- tp->frto = sysctl_tcp_frto &&
- (new_recovery || icsk->icsk_retransmits) &&
- !inet_csk(sk)->icsk_mtup.probe_size;
+ tp->frto = sysctl_tcp_frto && !inet_csk(sk)->icsk_mtup.probe_size;
}
/* If ACK arrived pointing to a remembered SACK, it means that our
@@ -2056,30 +2049,6 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
return tcp_is_fack(tp) ? tp->fackets_out : tp->sacked_out + 1;
}
-static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
-{
- struct tcp_sock *tp = tcp_sk(sk);
- unsigned long delay;
-
- /* Delay early retransmit and entering fast recovery for
- * max(RTT/4, 2msec) unless ack has ECE mark, no RTT samples
- * available, or RTO is scheduled to fire first.
- */
- if (sysctl_tcp_early_retrans < 2 || sysctl_tcp_early_retrans > 3 ||
- (flag & FLAG_ECE) || !tp->srtt_us)
- return false;
-
- delay = max(usecs_to_jiffies(tp->srtt_us >> 5),
- msecs_to_jiffies(2));
-
- if (!time_after(inet_csk(sk)->icsk_timeout, (jiffies + delay)))
- return false;
-
- inet_csk_reset_xmit_timer(sk, ICSK_TIME_EARLY_RETRANS, delay,
- TCP_RTO_MAX);
- return true;
-}
-
/* Linux NewReno/SACK/FACK/ECN state machine.
* --------------------------------------
*
@@ -2127,10 +2096,26 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
* F.e. after RTO, when all the queue is considered as lost,
* lost_out = packets_out and in_flight = retrans_out.
*
- * Essentially, we have now two algorithms counting
+ * Essentially, we have now a few algorithms detecting
* lost packets.
*
- * FACK: It is the simplest heuristics. As soon as we decided
+ * If the receiver supports SACK:
+ *
+ * RFC6675/3517: It is the conventional algorithm. A packet is
+ * considered lost if the number of higher sequence packets
+ * SACKed is greater than or equal to the DUPACK threshold
+ * (reordering). This is implemented in tcp_mark_head_lost and
+ * tcp_update_scoreboard.
+ *
+ * RACK (draft-ietf-tcpm-rack-01): it is a newer algorithm
+ * (2017-) that checks timing instead of counting DUPACKs.
+ * Essentially a packet is considered lost if it's not S/ACKed
+ * after RTT + reordering_window, where both metrics are
+ * dynamically measured and adjusted. This is implemented in
+ * tcp_rack_mark_lost.
+ *
+ * FACK (Disabled by default. Subsumed by RACK):
+ * It is the simplest heuristics. As soon as we decided
* that something is lost, we decide that _all_ not SACKed
* packets until the most forward SACK are lost. I.e.
* lost_out = fackets_out - sacked_out and left_out = fackets_out.
@@ -2139,16 +2124,14 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
* takes place. We use FACK by default until reordering
* is suspected on the path to this destination.
*
- * NewReno: when Recovery is entered, we assume that one segment
+ * If the receiver does not support SACK:
+ *
+ * NewReno (RFC6582): in Recovery we assume that one segment
* is lost (classic Reno). While we are in Recovery and
* a partial ACK arrives, we assume that one more packet
* is lost (NewReno). These heuristics are the same in NewReno
* and SACK.
*
- * Imagine, that's all! Forget about all this shamanism about CWND inflation
- * deflation etc. CWND is real congestion window, never inflated, changes
- * only according to classic VJ rules.
- *
* Really tricky (and requiring careful tuning) part of algorithm
* is hidden in functions tcp_time_to_recover() and tcp_xmit_retransmit_queue().
* The first determines the moment _when_ we should reduce CWND and,
@@ -2176,8 +2159,6 @@ static bool tcp_pause_early_retransmit(struct sock *sk, int flag)
static bool tcp_time_to_recover(struct sock *sk, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
- __u32 packets_out;
- int tcp_reordering = sock_net(sk)->ipv4.sysctl_tcp_reordering;
/* Trick#1: The loss is proven. */
if (tp->lost_out)
@@ -2187,39 +2168,6 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
if (tcp_dupack_heuristics(tp) > tp->reordering)
return true;
- /* Trick#4: It is still not OK... But will it be useful to delay
- * recovery more?
- */
- packets_out = tp->packets_out;
- if (packets_out <= tp->reordering &&
- tp->sacked_out >= max_t(__u32, packets_out/2, tcp_reordering) &&
- !tcp_may_send_now(sk)) {
- /* We have nothing to send. This connection is limited
- * either by receiver window or by application.
- */
- return true;
- }
-
- /* If a thin stream is detected, retransmit after first
- * received dupack. Employ only if SACK is supported in order
- * to avoid possible corner-case series of spurious retransmissions
- * Use only if there are no unsent data.
- */
- if ((tp->thin_dupack || sysctl_tcp_thin_dupack) &&
- tcp_stream_is_thin(tp) && tcp_dupack_heuristics(tp) > 1 &&
- tcp_is_sack(tp) && !tcp_send_head(sk))
- return true;
-
- /* Trick#6: TCP early retransmit, per RFC5827. To avoid spurious
- * retransmissions due to small network reorderings, we implement
- * Mitigation A.3 in the RFC and delay the retransmission for a short
- * interval if appropriate.
- */
- if (tp->do_early_retrans && !tp->retrans_out && tp->sacked_out &&
- (tp->packets_out >= (tp->sacked_out + 1) && tp->packets_out < 4) &&
- !tcp_may_send_now(sk))
- return !tcp_pause_early_retransmit(sk, flag);
-
return false;
}
@@ -2521,8 +2469,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
tcp_ecn_queue_cwr(tp);
}
-static void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked,
- int flag)
+void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
int sndcnt = 0;
@@ -2690,7 +2637,7 @@ void tcp_simple_retransmit(struct sock *sk)
}
EXPORT_SYMBOL(tcp_simple_retransmit);
-static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
+void tcp_enter_recovery(struct sock *sk, bool ece_ack)
{
struct tcp_sock *tp = tcp_sk(sk);
int mib_idx;
@@ -2726,14 +2673,18 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack,
tcp_try_undo_loss(sk, false))
return;
- if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
- /* Step 3.b. A timeout is spurious if not all data are
- * lost, i.e., never-retransmitted data are (s)acked.
- */
- if ((flag & FLAG_ORIG_SACK_ACKED) &&
- tcp_try_undo_loss(sk, true))
- return;
+	/* The ACK (s)acks some never-retransmitted data, meaning not all
+ * the data packets before the timeout were lost. Therefore we
+ * undo the congestion window and state. This is essentially
+ * the operation in F-RTO (RFC5682 section 3.1 step 3.b). Since
+	 * a retransmitted skb is permanently marked, we can apply such an
+ * operation even if F-RTO was not used.
+ */
+ if ((flag & FLAG_ORIG_SACK_ACKED) &&
+ tcp_try_undo_loss(sk, tp->undo_marker))
+ return;
+ if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
if (after(tp->snd_nxt, tp->high_seq)) {
if (flag & FLAG_DATA_SACKED || is_dupack)
tp->frto = 0; /* Step 3.a. loss was real */
@@ -2800,6 +2751,21 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
return false;
}
+static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag,
+ const struct skb_mstamp *ack_time)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ /* Use RACK to detect loss */
+ if (sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
+ u32 prior_retrans = tp->retrans_out;
+
+ tcp_rack_mark_lost(sk, ack_time);
+ if (prior_retrans > tp->retrans_out)
+ *ack_flag |= FLAG_LOST_RETRANS;
+ }
+}
+
/* Process an event, which can update packets-in-flight not trivially.
* Main goal of this function is to calculate new estimate for left_out,
* taking into account both packets sitting in receiver's buffer and
@@ -2813,7 +2779,8 @@ static bool tcp_try_undo_partial(struct sock *sk, const int acked)
* tcp_xmit_retransmit_queue().
*/
static void tcp_fastretrans_alert(struct sock *sk, const int acked,
- bool is_dupack, int *ack_flag, int *rexmit)
+ bool is_dupack, int *ack_flag, int *rexmit,
+ const struct skb_mstamp *ack_time)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
@@ -2864,13 +2831,6 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
}
}
- /* Use RACK to detect loss */
- if (sysctl_tcp_recovery & TCP_RACK_LOST_RETRANS &&
- tcp_rack_mark_lost(sk)) {
- flag |= FLAG_LOST_RETRANS;
- *ack_flag |= FLAG_LOST_RETRANS;
- }
-
/* E. Process state. */
switch (icsk->icsk_ca_state) {
case TCP_CA_Recovery:
@@ -2888,11 +2848,13 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
tcp_try_keep_open(sk);
return;
}
+ tcp_rack_identify_loss(sk, ack_flag, ack_time);
break;
case TCP_CA_Loss:
tcp_process_loss(sk, flag, is_dupack, rexmit);
- if (icsk->icsk_ca_state != TCP_CA_Open &&
- !(flag & FLAG_LOST_RETRANS))
+ tcp_rack_identify_loss(sk, ack_flag, ack_time);
+ if (!(icsk->icsk_ca_state == TCP_CA_Open ||
+ (*ack_flag & FLAG_LOST_RETRANS)))
return;
/* Change state if cwnd is undone or retransmits are lost */
default:
@@ -2906,6 +2868,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const int acked,
if (icsk->icsk_ca_state <= TCP_CA_Disorder)
tcp_try_undo_dsack(sk);
+ tcp_rack_identify_loss(sk, ack_flag, ack_time);
if (!tcp_time_to_recover(sk, flag)) {
tcp_try_to_open(sk, flag);
return;
@@ -3024,7 +2987,7 @@ void tcp_rearm_rto(struct sock *sk)
} else {
u32 rto = inet_csk(sk)->icsk_rto;
/* Offset the time elapsed after installing regular RTO */
- if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ if (icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
struct sk_buff *skb = tcp_write_queue_head(sk);
const u32 rto_time_stamp =
@@ -3041,24 +3004,6 @@ void tcp_rearm_rto(struct sock *sk)
}
}
-/* This function is called when the delayed ER timer fires. TCP enters
- * fast recovery and performs fast-retransmit.
- */
-void tcp_resume_early_retransmit(struct sock *sk)
-{
- struct tcp_sock *tp = tcp_sk(sk);
-
- tcp_rearm_rto(sk);
-
- /* Stop if ER is disabled after the delayed ER timer is scheduled */
- if (!tp->do_early_retrans)
- return;
-
- tcp_enter_recovery(sk, false);
- tcp_update_scoreboard(sk, 1);
- tcp_xmit_retransmit_queue(sk);
-}
-
/* If we get here, the whole TSO packet has not been acked. */
static u32 tcp_tso_acked(struct sock *sk, struct sk_buff *skb)
{
@@ -3101,11 +3046,11 @@ static void tcp_ack_tstamp(struct sock *sk, struct sk_buff *skb,
*/
static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
u32 prior_snd_una, int *acked,
- struct tcp_sacktag_state *sack,
- struct skb_mstamp *now)
+ struct tcp_sacktag_state *sack)
{
const struct inet_connection_sock *icsk = inet_csk(sk);
struct skb_mstamp first_ackt, last_ackt;
+ struct skb_mstamp *now = &sack->ack_time;
struct tcp_sock *tp = tcp_sk(sk);
u32 prior_sacked = tp->sacked_out;
u32 reord = tp->packets_out;
@@ -3165,7 +3110,9 @@ static int tcp_clean_rtx_queue(struct sock *sk, int prior_fackets,
} else if (tcp_is_sack(tp)) {
tp->delivered += acked_pcount;
if (!tcp_skb_spurious_retrans(tp, skb))
- tcp_rack_advance(tp, &skb->skb_mstamp, sacked);
+ tcp_rack_advance(tp, sacked, scb->end_seq,
+ &skb->skb_mstamp,
+ &sack->ack_time);
}
if (sacked & TCPCB_LOST)
tp->lost_out -= acked_pcount;
@@ -3595,7 +3542,6 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
u32 lost = tp->lost;
int acked = 0; /* Number of packets newly acked */
int rexmit = REXMIT_NONE; /* Flag to (re)transmit to recover losses */
- struct skb_mstamp now;
sack_state.first_sackt.v64 = 0;
sack_state.rate = &rs;
@@ -3621,10 +3567,9 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
if (after(ack, tp->snd_nxt))
goto invalid_ack;
- skb_mstamp_get(&now);
+ skb_mstamp_get(&sack_state.ack_time);
- if (icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
+ if (icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
if (after(ack, prior_snd_una)) {
@@ -3689,11 +3634,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
/* See if we can take anything off of the retransmit queue. */
flag |= tcp_clean_rtx_queue(sk, prior_fackets, prior_snd_una, &acked,
- &sack_state, &now);
+ &sack_state);
if (tcp_ack_is_dubious(sk, flag)) {
is_dupack = !(flag & (FLAG_SND_UNA_ADVANCED | FLAG_NOT_DUP));
- tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+ tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+ &sack_state.ack_time);
}
if (tp->tlp_high_seq)
tcp_process_tlp_ack(sk, ack, flag);
@@ -3708,15 +3654,17 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
tcp_schedule_loss_probe(sk);
delivered = tp->delivered - delivered; /* freshly ACKed or SACKed */
lost = tp->lost - lost; /* freshly marked lost */
- tcp_rate_gen(sk, delivered, lost, &now, &rs);
- tcp_cong_control(sk, ack, delivered, flag, &rs);
+ tcp_rate_gen(sk, delivered, lost, &sack_state.ack_time,
+ sack_state.rate);
+ tcp_cong_control(sk, ack, delivered, flag, sack_state.rate);
tcp_xmit_recovery(sk, rexmit);
return 1;
no_queue:
/* If data was DSACKed, see if we can undo a cwnd reduction. */
if (flag & FLAG_DSACKING_ACK)
- tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+ tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+ &sack_state.ack_time);
/* If this ack opens up a zero window, clear backoff. It was
* being used to time the probes, and is probably far higher than
* it needs to be for normal retransmission.
@@ -3737,9 +3685,11 @@ old_ack:
* If data was DSACKed, see if we can undo a cwnd reduction.
*/
if (TCP_SKB_CB(skb)->sacked) {
+ skb_mstamp_get(&sack_state.ack_time);
flag |= tcp_sacktag_write_queue(sk, skb, prior_snd_una,
&sack_state);
- tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit);
+ tcp_fastretrans_alert(sk, acked, is_dupack, &flag, &rexmit,
+ &sack_state.ack_time);
tcp_xmit_recovery(sk, rexmit);
}
@@ -4557,6 +4507,7 @@ add_sack:
end:
if (skb) {
tcp_grow_window(sk, skb);
+ skb_condense(skb);
skb_set_owner_r(skb, sk);
}
}
@@ -5078,7 +5029,7 @@ static void tcp_check_space(struct sock *sk)
if (sock_flag(sk, SOCK_QUEUE_SHRUNK)) {
sock_reset_flag(sk, SOCK_QUEUE_SHRUNK);
/* pairs with tcp_poll() */
- smp_mb__after_atomic();
+ smp_mb();
if (sk->sk_socket &&
test_bit(SOCK_NOSPACE, &sk->sk_socket->flags)) {
tcp_new_space(sk);
@@ -5249,6 +5200,23 @@ static int tcp_copy_to_iovec(struct sock *sk, struct sk_buff *skb, int hlen)
return err;
}
+/* Accept RST for rcv_nxt - 1 after a FIN.
+ * When TCP connections are abruptly terminated from Mac OS X (via ^C), a
+ * FIN is sent followed by a RST packet. The RST is sent with the same
+ * sequence number as the FIN, and thus according to RFC 5961 a challenge
+ * ACK should be sent. However, Mac OS X rate limits replies to challenge
+ * ACKs on the closed socket. In addition, middleboxes can drop either the
+ * challenge ACK or a subsequent RST.
+ */
+static bool tcp_reset_check(const struct sock *sk, const struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ return unlikely(TCP_SKB_CB(skb)->seq == (tp->rcv_nxt - 1) &&
+ (1 << sk->sk_state) & (TCPF_CLOSE_WAIT | TCPF_LAST_ACK |
+ TCPF_CLOSING));
+}
+
/* Does PAWS and seqno based validation of an incoming segment, flags will
* play significant role here.
*/
@@ -5287,20 +5255,25 @@ static bool tcp_validate_incoming(struct sock *sk, struct sk_buff *skb,
LINUX_MIB_TCPACKSKIPPEDSEQ,
&tp->last_oow_ack_time))
tcp_send_dupack(sk, skb);
+ } else if (tcp_reset_check(sk, skb)) {
+ tcp_reset(sk);
}
goto discard;
}
/* Step 2: check RST bit */
if (th->rst) {
- /* RFC 5961 3.2 (extend to match against SACK too if available):
- * If seq num matches RCV.NXT or the right-most SACK block,
+ /* RFC 5961 3.2 (extend to match against (RCV.NXT - 1) after a
+ * FIN and SACK too if available):
+ * If seq num matches RCV.NXT or (RCV.NXT - 1) after a FIN, or
+ * the right-most SACK block,
* then
* RESET the connection
* else
* Send a challenge ACK
*/
- if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt) {
+ if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
+ tcp_reset_check(sk, skb)) {
rst_seq_match = true;
} else if (tcp_is_sack(tp) && tp->rx_opt.num_sacks > 0) {
struct tcp_sack_block *sp = &tp->selective_acks[0];
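The RST handling above extends RFC 5961 section 3.2 with the (RCV.NXT - 1)-after-FIN case. A condensed sketch of the resulting acceptance test follows; rst_acceptable() is a hypothetical wrapper added here for illustration, not a function in this patch, and the right-most-SACK comparison is elided just as the hunk above truncates it:

	/* Sketch of the extended RFC 5961 3.2 acceptance test, built from
	 * the tcp_reset_check() helper added above.
	 */
	static bool rst_acceptable(struct sock *sk, const struct sk_buff *skb)
	{
		struct tcp_sock *tp = tcp_sk(sk);

		/* An exact match on RCV.NXT, or on RCV.NXT - 1 right after a
		 * FIN (CLOSE_WAIT/LAST_ACK/CLOSING), resets the connection.
		 */
		if (TCP_SKB_CB(skb)->seq == tp->rcv_nxt ||
		    tcp_reset_check(sk, skb))
			return true;

		/* Otherwise the sequence may still match the right-most SACK
		 * block (comparison elided above); anything else earns only
		 * a challenge ACK.
		 */
		return false;
	}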
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 56d756e..8c9e9aa 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -232,6 +232,7 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
/* OK, now commit destination to socket. */
sk->sk_gso_type = SKB_GSO_TCPV4;
sk_setup_caps(sk, &rt->dst);
+ rt = NULL;
if (!tp->write_seq && likely(!tp->repair))
tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
@@ -242,9 +243,13 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
inet->inet_id = tp->write_seq ^ jiffies;
+ if (tcp_fastopen_defer_connect(sk, &err))
+ return err;
+ if (err)
+ goto failure;
+
err = tcp_connect(sk);
- rt = NULL;
if (err)
goto failure;
@@ -1556,8 +1561,7 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
* It has been noticed pure SACK packets were sometimes dropped
* (if cooked by drivers without copybreak feature).
*/
- if (!skb->data_len)
- skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
+ skb_condense(skb);
if (unlikely(sk_add_backlog(sk, skb, limit))) {
bh_unlock_sock(sk);
@@ -1817,7 +1821,6 @@ const struct inet_connection_sock_af_ops ipv4_specific = {
.getsockopt = ip_getsockopt,
.addr2sockaddr = inet_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in),
- .bind_conflict = inet_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ip_setsockopt,
.compat_getsockopt = compat_ip_getsockopt,
@@ -1888,9 +1891,7 @@ void tcp_v4_destroy_sock(struct sock *sk)
tcp_free_fastopen_req(tp);
tcp_saved_syn_free(tp);
- local_bh_disable();
sk_sockets_allocated_dec(sk);
- local_bh_enable();
}
EXPORT_SYMBOL(tcp_v4_destroy_sock);
@@ -2229,7 +2230,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
int state;
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
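Two hunks in this series (the out-of-order queue in tcp_input.c and tcp_add_backlog() here) replace an open-coded truesize trim with skb_condense(). Judging strictly from the code being removed, the helper's effect is approximately the following sketch, not the exact net/core implementation:

	/* Approximate effect of skb_condense(), reconstructed from the
	 * open-coded check this patch deletes: if the skb carries no paged
	 * data, lower its truesize estimate to what the linear buffer
	 * actually occupies, so backlog/rcvbuf accounting stays tight.
	 */
	if (!skb->data_len)
		skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));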
diff --git a/net/ipv4/tcp_metrics.c b/net/ipv4/tcp_metrics.c
index d46f4d5..b9ed0d5 100644
--- a/net/ipv4/tcp_metrics.c
+++ b/net/ipv4/tcp_metrics.c
@@ -522,7 +522,6 @@ void tcp_init_metrics(struct sock *sk)
val = tcp_metric_get(tm, TCP_METRIC_REORDERING);
if (val && tp->reordering != val) {
tcp_disable_fack(tp);
- tcp_disable_early_retrans(tp);
tp->reordering = val;
}
@@ -606,7 +605,6 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
return ret;
}
-EXPORT_SYMBOL_GPL(tcp_peer_is_proven);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst)
{
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 06fde26..bdb4434 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -468,7 +468,6 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
newtp->sacked_out = 0;
newtp->fackets_out = 0;
newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
- tcp_enable_early_retrans(newtp);
newtp->tlp_high_seq = 0;
newtp->lsndtime = treq->snt_synack.stamp_jiffies;
newsk->sk_txhash = treq->txhash;
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 1d5331a..7b2d876 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -76,10 +76,8 @@ static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb)
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tp->packets_out += tcp_skb_pcount(skb);
- if (!prior_packets || icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
+ if (!prior_packets || icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)
tcp_rearm_rto(sk);
- }
NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPORIGDATASENT,
tcp_skb_pcount(skb));
@@ -2289,8 +2287,6 @@ bool tcp_schedule_loss_probe(struct sock *sk)
u32 timeout, tlp_time_stamp, rto_time_stamp;
u32 rtt = usecs_to_jiffies(tp->srtt_us >> 3);
- if (WARN_ON(icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS))
- return false;
/* No consecutive loss probes. */
if (WARN_ON(icsk->icsk_pending == ICSK_TIME_LOSS_PROBE)) {
tcp_rearm_rto(sk);
@@ -2309,8 +2305,9 @@ bool tcp_schedule_loss_probe(struct sock *sk)
/* Schedule a loss probe in 2*RTT for SACK capable connections
* in Open state, that are either limited by cwnd or application.
*/
- if (sysctl_tcp_early_retrans < 3 || !tp->packets_out ||
- !tcp_is_sack(tp) || inet_csk(sk)->icsk_ca_state != TCP_CA_Open)
+ if ((sysctl_tcp_early_retrans != 3 && sysctl_tcp_early_retrans != 4) ||
+ !tp->packets_out || !tcp_is_sack(tp) ||
+ icsk->icsk_ca_state != TCP_CA_Open)
return false;
if ((tp->snd_cwnd > tcp_packets_in_flight(tp)) &&
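The guard above narrows TLP scheduling: tcp_early_retrans must now be exactly 3 or 4 (the values that enable the tail loss probe) rather than anything >= 3, since the lower values only configured the now-removed delayed early retransmit. The eligibility test reduces to roughly:

	/* TLP eligibility after this change (sketch of the condition above) */
	bool tlp_ok = (sysctl_tcp_early_retrans == 3 ||
		       sysctl_tcp_early_retrans == 4) &&
		      tp->packets_out && tcp_is_sack(tp) &&
		      icsk->icsk_ca_state == TCP_CA_Open;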
@@ -2774,6 +2771,13 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN)
tcp_ecn_clear_syn(sk, skb);
+ /* Update global and local TCP statistics. */
+ segs = tcp_skb_pcount(skb);
+ TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
+ if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
+ __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+ tp->total_retrans += segs;
+
/* make sure skb->data is aligned on arches that require it
* and check if ack-trimming & collapsing extended the headroom
* beyond what csum_start can cover.
@@ -2791,14 +2795,9 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
}
if (likely(!err)) {
- segs = tcp_skb_pcount(skb);
-
TCP_SKB_CB(skb)->sacked |= TCPCB_EVER_RETRANS;
- /* Update global TCP statistics. */
- TCP_ADD_STATS(sock_net(sk), TCP_MIB_RETRANSSEGS, segs);
- if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)
- __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
- tp->total_retrans += segs;
+ } else if (err != -EBUSY) {
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
return err;
}
@@ -2821,8 +2820,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
if (!tp->retrans_stamp)
tp->retrans_stamp = tcp_skb_timestamp(skb);
- } else if (err != -EBUSY) {
- NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
}
if (tp->undo_retrans < 0)
@@ -2831,36 +2828,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
return err;
}
-/* Check if we forward retransmits are possible in the current
- * window/congestion state.
- */
-static bool tcp_can_forward_retransmit(struct sock *sk)
-{
- const struct inet_connection_sock *icsk = inet_csk(sk);
- const struct tcp_sock *tp = tcp_sk(sk);
-
- /* Forward retransmissions are possible only during Recovery. */
- if (icsk->icsk_ca_state != TCP_CA_Recovery)
- return false;
-
- /* No forward retransmissions in Reno are possible. */
- if (tcp_is_reno(tp))
- return false;
-
- /* Yeah, we have to make difficult choice between forward transmission
- * and retransmission... Both ways have their merits...
- *
- * For now we do not retransmit anything, while we have some new
- * segments to send. In the other cases, follow rule 3 for
- * NextSeg() specified in RFC3517.
- */
-
- if (tcp_may_send_now(sk))
- return false;
-
- return true;
-}
-
/* This gets called after a retransmit timeout, and the initially
* retransmitted data is acknowledged. It tries to continue
* resending the rest of the retransmit queue, until either
@@ -2875,24 +2842,16 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
struct sk_buff *hole = NULL;
- u32 max_segs, last_lost;
+ u32 max_segs;
int mib_idx;
- int fwd_rexmitting = 0;
if (!tp->packets_out)
return;
- if (!tp->lost_out)
- tp->retransmit_high = tp->snd_una;
-
if (tp->retransmit_skb_hint) {
skb = tp->retransmit_skb_hint;
- last_lost = TCP_SKB_CB(skb)->end_seq;
- if (after(last_lost, tp->retransmit_high))
- last_lost = tp->retransmit_high;
} else {
skb = tcp_write_queue_head(sk);
- last_lost = tp->snd_una;
}
max_segs = tcp_tso_segs(sk, tcp_current_mss(sk));
@@ -2915,31 +2874,14 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
*/
segs = min_t(int, segs, max_segs);
- if (fwd_rexmitting) {
-begin_fwd:
- if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp)))
- break;
- mib_idx = LINUX_MIB_TCPFORWARDRETRANS;
-
- } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) {
- tp->retransmit_high = last_lost;
- if (!tcp_can_forward_retransmit(sk))
- break;
- /* Backtrack if necessary to non-L'ed skb */
- if (hole) {
- skb = hole;
- hole = NULL;
- }
- fwd_rexmitting = 1;
- goto begin_fwd;
-
+ if (tp->retrans_out >= tp->lost_out) {
+ break;
} else if (!(sacked & TCPCB_LOST)) {
if (!hole && !(sacked & (TCPCB_SACKED_RETRANS|TCPCB_SACKED_ACKED)))
hole = skb;
continue;
} else {
- last_lost = TCP_SKB_CB(skb)->end_seq;
if (icsk->icsk_ca_state != TCP_CA_Loss)
mib_idx = LINUX_MIB_TCPFASTRETRANS;
else
@@ -2960,7 +2902,8 @@ begin_fwd:
if (tcp_in_cwnd_reduction(sk))
tp->prr_out += tcp_skb_pcount(skb);
- if (skb == tcp_write_queue_head(sk))
+ if (skb == tcp_write_queue_head(sk) &&
+ icsk->icsk_pending != ICSK_TIME_REO_TIMEOUT)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto,
TCP_RTO_MAX);
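With forward retransmissions gone, the queue walk above needs no last_lost/retransmit_high bookkeeping; the gating reduces to a single bound, sketched here:

	/* Simplified gating in tcp_xmit_retransmit_queue(): once
	 * retrans_out has caught up with lost_out there is nothing
	 * lost left to resend, so the walk terminates; segments not
	 * marked TCPCB_LOST are skipped, with the first un-SACKed one
	 * remembered as a hole to resume from.
	 */
	if (tp->retrans_out >= tp->lost_out)
		break;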
@@ -3324,23 +3267,11 @@ static int tcp_send_syn_data(struct sock *sk, struct sk_buff *syn)
{
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_fastopen_request *fo = tp->fastopen_req;
- int syn_loss = 0, space, err = 0;
- unsigned long last_syn_loss = 0;
+ int space, err = 0;
struct sk_buff *syn_data;
tp->rx_opt.mss_clamp = tp->advmss; /* If MSS is not cached */
- tcp_fastopen_cache_get(sk, &tp->rx_opt.mss_clamp, &fo->cookie,
- &syn_loss, &last_syn_loss);
- /* Recurring FO SYN losses: revert to regular handshake temporarily */
- if (syn_loss > 1 &&
- time_before(jiffies, last_syn_loss + (60*HZ << syn_loss))) {
- fo->cookie.len = -1;
- goto fallback;
- }
-
- if (sysctl_tcp_fastopen & TFO_CLIENT_NO_COOKIE)
- fo->cookie.len = -1;
- else if (fo->cookie.len <= 0)
+ if (!tcp_fastopen_cookie_check(sk, &tp->rx_opt.mss_clamp, &fo->cookie))
goto fallback;
/* MSS for SYN-data is based on cached MSS and bounded by PMTU and
diff --git a/net/ipv4/tcp_recovery.c b/net/ipv4/tcp_recovery.c
index e36df4f..4ecb38a 100644
--- a/net/ipv4/tcp_recovery.c
+++ b/net/ipv4/tcp_recovery.c
@@ -1,9 +1,32 @@
#include <linux/tcp.h>
#include <net/tcp.h>
-int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
+int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOSS_DETECTION;
-/* Marks a packet lost, if some packet sent later has been (s)acked.
+static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+
+ tcp_skb_mark_lost_uncond_verify(tp, skb);
+ if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) {
+ /* Account for retransmits that are lost again */
+ TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
+ tp->retrans_out -= tcp_skb_pcount(skb);
+ NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPLOSTRETRANSMIT);
+ }
+}
+
+static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
+ const struct skb_mstamp *t2,
+ u32 seq1, u32 seq2)
+{
+ return skb_mstamp_after(t1, t2) ||
+ (t1->v64 == t2->v64 && after(seq1, seq2));
+}
+
+/* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
+ *
+ * Marks a packet lost, if some packet sent later has been (s)acked.
* The underlying idea is similar to the traditional dupthresh and FACK
* but they look at different metrics:
*
@@ -16,31 +39,26 @@ int sysctl_tcp_recovery __read_mostly = TCP_RACK_LOST_RETRANS;
* is being more resilient to reordering by simply allowing some
* "settling delay", instead of tweaking the dupthresh.
*
- * The current version is only used after recovery starts but can be
- * easily extended to detect the first loss.
+ * When tcp_rack_detect_loss() detects some packets are lost and we
+ * are not already in the CA_Recovery state, either tcp_rack_reo_timeout()
+ * or tcp_time_to_recover()'s "Trick#1: the loss is proven" code path will
+ * make us enter the CA_Recovery state.
*/
-int tcp_rack_mark_lost(struct sock *sk)
+static void tcp_rack_detect_loss(struct sock *sk, const struct skb_mstamp *now,
+ u32 *reo_timeout)
{
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- u32 reo_wnd, prior_retrans = tp->retrans_out;
-
- if (inet_csk(sk)->icsk_ca_state < TCP_CA_Recovery || !tp->rack.advanced)
- return 0;
-
- /* Reset the advanced flag to avoid unnecessary queue scanning */
- tp->rack.advanced = 0;
+ u32 reo_wnd;
+ *reo_timeout = 0;
/* To be more reordering resilient, allow min_rtt/4 settling delay
 * (lower-bounded to 1000us). We use min_rtt instead of the smoothed
* RTT because reordering is often a path property and less related
* to queuing or delayed ACKs.
- *
- * TODO: measure and adapt to the observed reordering delay, and
- * use a timer to retransmit like the delayed early retransmit.
*/
reo_wnd = 1000;
- if (tp->rack.reord && tcp_min_rtt(tp) != ~0U)
+ if ((tp->rack.reord || !tp->lost_out) && tcp_min_rtt(tp) != ~0U)
reo_wnd = max(tcp_min_rtt(tp) >> 2, reo_wnd);
tcp_for_write_queue(skb, sk) {
@@ -54,20 +72,29 @@ int tcp_rack_mark_lost(struct sock *sk)
scb->sacked & TCPCB_SACKED_ACKED)
continue;
- if (skb_mstamp_after(&tp->rack.mstamp, &skb->skb_mstamp)) {
+ if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+ tp->rack.end_seq, scb->end_seq)) {
+ /* Step 3 in draft-cheng-tcpm-rack-00.txt:
+ * A packet is lost if its elapsed time is beyond
+ * the recent RTT plus the reordering window.
+ */
+ u32 elapsed = skb_mstamp_us_delta(now,
+ &skb->skb_mstamp);
+ s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
- if (skb_mstamp_us_delta(&tp->rack.mstamp,
- &skb->skb_mstamp) <= reo_wnd)
+ if (remaining < 0) {
+ tcp_rack_mark_skb_lost(sk, skb);
continue;
-
- /* skb is lost if packet sent later is sacked */
- tcp_skb_mark_lost_uncond_verify(tp, skb);
- if (scb->sacked & TCPCB_SACKED_RETRANS) {
- scb->sacked &= ~TCPCB_SACKED_RETRANS;
- tp->retrans_out -= tcp_skb_pcount(skb);
- NET_INC_STATS(sock_net(sk),
- LINUX_MIB_TCPLOSTRETRANSMIT);
}
+
+ /* Skip ones marked lost but not yet retransmitted */
+ if ((scb->sacked & TCPCB_LOST) &&
+ !(scb->sacked & TCPCB_SACKED_RETRANS))
+ continue;
+
+ /* Record maximum wait time (+1 to avoid 0) */
+ *reo_timeout = max_t(u32, *reo_timeout, 1 + remaining);
+
} else if (!(scb->sacked & TCPCB_RETRANS)) {
/* Original data are sent sequentially so stop early
* b/c the rest are all sent after rack_sent
@@ -75,20 +102,43 @@ int tcp_rack_mark_lost(struct sock *sk)
break;
}
}
- return prior_retrans - tp->retrans_out;
}
-/* Record the most recently (re)sent time among the (s)acked packets */
-void tcp_rack_advance(struct tcp_sock *tp,
- const struct skb_mstamp *xmit_time, u8 sacked)
+void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ u32 timeout;
+
+ if (!tp->rack.advanced)
+ return;
+
+ /* Reset the advanced flag to avoid unnecessary queue scanning */
+ tp->rack.advanced = 0;
+ tcp_rack_detect_loss(sk, now, &timeout);
+ if (timeout) {
+ timeout = usecs_to_jiffies(timeout + TCP_REO_TIMEOUT_MIN);
+ inet_csk_reset_xmit_timer(sk, ICSK_TIME_REO_TIMEOUT,
+ timeout, inet_csk(sk)->icsk_rto);
+ }
+}
+
+/* Record the most recently (re)sent time among the (s)acked packets
+ * This is "Step 3: Advance RACK.xmit_time and update RACK.RTT" from
+ * draft-cheng-tcpm-rack-00.txt
+ */
+void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
+ const struct skb_mstamp *xmit_time,
+ const struct skb_mstamp *ack_time)
{
+ u32 rtt_us;
+
if (tp->rack.mstamp.v64 &&
- !skb_mstamp_after(xmit_time, &tp->rack.mstamp))
+ !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+ end_seq, tp->rack.end_seq))
return;
+ rtt_us = skb_mstamp_us_delta(ack_time, xmit_time);
if (sacked & TCPCB_RETRANS) {
- struct skb_mstamp now;
-
/* If the sacked packet was retransmitted, it's ambiguous
* whether the retransmission or the original (or the prior
* retransmission) was sacked.
@@ -99,11 +149,35 @@ void tcp_rack_advance(struct tcp_sock *tp,
* so it's at least one RTT (i.e., retransmission is at least
* an RTT later).
*/
- skb_mstamp_get(&now);
- if (skb_mstamp_us_delta(&now, xmit_time) < tcp_min_rtt(tp))
+ if (rtt_us < tcp_min_rtt(tp))
return;
}
-
+ tp->rack.rtt_us = rtt_us;
tp->rack.mstamp = *xmit_time;
+ tp->rack.end_seq = end_seq;
tp->rack.advanced = 1;
}
+
+/* We have waited long enough to accommodate reordering. Mark the expired
+ * packets lost and retransmit them.
+ */
+void tcp_rack_reo_timeout(struct sock *sk)
+{
+ struct tcp_sock *tp = tcp_sk(sk);
+ struct skb_mstamp now;
+ u32 timeout, prior_inflight;
+
+ skb_mstamp_get(&now);
+ prior_inflight = tcp_packets_in_flight(tp);
+ tcp_rack_detect_loss(sk, &now, &timeout);
+ if (prior_inflight != tcp_packets_in_flight(tp)) {
+ if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
+ tcp_enter_recovery(sk, false);
+ if (!inet_csk(sk)->icsk_ca_ops->cong_control)
+ tcp_cwnd_reduction(sk, 1, 0);
+ }
+ tcp_xmit_retransmit_queue(sk);
+ }
+ if (inet_csk(sk)->icsk_pending != ICSK_TIME_RETRANS)
+ tcp_rearm_rto(sk);
+}
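Numerically, the per-skb decision in tcp_rack_detect_loss() works out as follows; the values below are illustrative, not taken from the patch:

	/* Illustrative arithmetic for tcp_rack_detect_loss() above.
	 * Assume rack.rtt_us = 40000, reo_wnd = 10000 (min_rtt/4), and an
	 * skb transmitted 55000us before the ACK now being processed:
	 *
	 *   remaining = rtt_us + reo_wnd - elapsed
	 *             = 40000 + 10000 - 55000 = -5000   -> marked lost now
	 *
	 * With elapsed = 45000us instead, remaining = +5000, so the skb is
	 * spared and reo_timeout records remaining + 1; tcp_rack_mark_lost()
	 * then arms ICSK_TIME_REO_TIMEOUT so tcp_rack_reo_timeout() can
	 * re-run the detection once the settling window has passed.
	 */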
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 29a9bd5..40d8935 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -563,8 +563,8 @@ void tcp_write_timer_handler(struct sock *sk)
event = icsk->icsk_pending;
switch (event) {
- case ICSK_TIME_EARLY_RETRANS:
- tcp_resume_early_retransmit(sk);
+ case ICSK_TIME_REO_TIMEOUT:
+ tcp_rack_reo_timeout(sk);
break;
case ICSK_TIME_LOSS_PROBE:
tcp_send_loss_probe(sk);
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 4318d72..cf6ba33 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -134,14 +134,21 @@ EXPORT_SYMBOL(udp_memory_allocated);
#define MAX_UDP_PORTS 65536
#define PORTS_PER_CHAIN (MAX_UDP_PORTS / UDP_HTABLE_SIZE_MIN)
+/* IPCB reference means this cannot be used from early demux */
+static bool udp_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_udp_l3mdev_accept &&
+ skb && ipv4_l3mdev_skb(IPCB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
static int udp_lib_lport_inuse(struct net *net, __u16 num,
const struct udp_hslot *hslot,
unsigned long *bitmap,
- struct sock *sk,
- int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard),
- unsigned int log)
+ struct sock *sk, unsigned int log)
{
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
@@ -153,7 +160,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- saddr_comp(sk, sk2, true)) {
+ inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
@@ -176,10 +183,7 @@ static int udp_lib_lport_inuse(struct net *net, __u16 num,
*/
static int udp_lib_lport_inuse2(struct net *net, __u16 num,
struct udp_hslot *hslot2,
- struct sock *sk,
- int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard))
+ struct sock *sk)
{
struct sock *sk2;
kuid_t uid = sock_i_uid(sk);
@@ -193,7 +197,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
(!sk2->sk_reuse || !sk->sk_reuse) &&
(!sk2->sk_bound_dev_if || !sk->sk_bound_dev_if ||
sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
- saddr_comp(sk, sk2, true)) {
+ inet_rcv_saddr_equal(sk, sk2, true)) {
if (sk2->sk_reuseport && sk->sk_reuseport &&
!rcu_access_pointer(sk->sk_reuseport_cb) &&
uid_eq(uid, sock_i_uid(sk2))) {
@@ -208,10 +212,7 @@ static int udp_lib_lport_inuse2(struct net *net, __u16 num,
return res;
}
-static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
- int (*saddr_same)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard))
+static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot)
{
struct net *net = sock_net(sk);
kuid_t uid = sock_i_uid(sk);
@@ -225,7 +226,7 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
(udp_sk(sk2)->udp_port_hash == udp_sk(sk)->udp_port_hash) &&
(sk2->sk_bound_dev_if == sk->sk_bound_dev_if) &&
sk2->sk_reuseport && uid_eq(uid, sock_i_uid(sk2)) &&
- (*saddr_same)(sk, sk2, false)) {
+ inet_rcv_saddr_equal(sk, sk2, false)) {
return reuseport_add_sock(sk, sk2);
}
}
@@ -241,14 +242,10 @@ static int udp_reuseport_add_sock(struct sock *sk, struct udp_hslot *hslot,
*
* @sk: socket struct in question
* @snum: port number to look up
- * @saddr_comp: AF-dependent comparison of bound local IP addresses
* @hash2_nulladdr: AF-dependent hash value in secondary hash chains,
* with NULL address
*/
int udp_lib_get_port(struct sock *sk, unsigned short snum,
- int (*saddr_comp)(const struct sock *sk1,
- const struct sock *sk2,
- bool match_wildcard),
unsigned int hash2_nulladdr)
{
struct udp_hslot *hslot, *hslot2;
@@ -277,7 +274,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
bitmap_zero(bitmap, PORTS_PER_CHAIN);
spin_lock_bh(&hslot->lock);
udp_lib_lport_inuse(net, snum, hslot, bitmap, sk,
- saddr_comp, udptable->log);
+ udptable->log);
snum = first;
/*
@@ -310,12 +307,11 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
if (hslot->count < hslot2->count)
goto scan_primary_hash;
- exist = udp_lib_lport_inuse2(net, snum, hslot2,
- sk, saddr_comp);
+ exist = udp_lib_lport_inuse2(net, snum, hslot2, sk);
if (!exist && (hash2_nulladdr != slot2)) {
hslot2 = udp_hashslot2(udptable, hash2_nulladdr);
exist = udp_lib_lport_inuse2(net, snum, hslot2,
- sk, saddr_comp);
+ sk);
}
if (exist)
goto fail_unlock;
@@ -323,8 +319,7 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
goto found;
}
scan_primary_hash:
- if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk,
- saddr_comp, 0))
+ if (udp_lib_lport_inuse(net, snum, hslot, NULL, sk, 0))
goto fail_unlock;
}
found:
@@ -333,7 +328,7 @@ found:
udp_sk(sk)->udp_portaddr_hash ^= snum;
if (sk_unhashed(sk)) {
if (sk->sk_reuseport &&
- udp_reuseport_add_sock(sk, hslot, saddr_comp)) {
+ udp_reuseport_add_sock(sk, hslot)) {
inet_sk(sk)->inet_num = 0;
udp_sk(sk)->udp_port_hash = 0;
udp_sk(sk)->udp_portaddr_hash ^= snum;
@@ -365,24 +360,6 @@ fail:
}
EXPORT_SYMBOL(udp_lib_get_port);
-/* match_wildcard == true: 0.0.0.0 equals to any IPv4 addresses
- * match_wildcard == false: addresses must be exactly the same, i.e.
- * 0.0.0.0 only equals to 0.0.0.0
- */
-int ipv4_rcv_saddr_equal(const struct sock *sk1, const struct sock *sk2,
- bool match_wildcard)
-{
- struct inet_sock *inet1 = inet_sk(sk1), *inet2 = inet_sk(sk2);
-
- if (!ipv6_only_sock(sk2)) {
- if (inet1->inet_rcv_saddr == inet2->inet_rcv_saddr)
- return 1;
- if (!inet1->inet_rcv_saddr || !inet2->inet_rcv_saddr)
- return match_wildcard;
- }
- return 0;
-}
-
static u32 udp4_portaddr_hash(const struct net *net, __be32 saddr,
unsigned int port)
{
@@ -398,12 +375,13 @@ int udp_v4_get_port(struct sock *sk, unsigned short snum)
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
- return udp_lib_get_port(sk, snum, ipv4_rcv_saddr_equal, hash2_nulladdr);
+ return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static int compute_score(struct sock *sk, struct net *net,
__be32 saddr, __be16 sport,
- __be32 daddr, unsigned short hnum, int dif)
+ __be32 daddr, unsigned short hnum, int dif,
+ bool exact_dif)
{
int score;
struct inet_sock *inet;
@@ -434,7 +412,7 @@ static int compute_score(struct sock *sk, struct net *net,
score += 4;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score += 4;
@@ -459,7 +437,7 @@ static u32 udp_ehashfn(const struct net *net, const __be32 laddr,
/* called with rcu_read_lock() */
static struct sock *udp4_lib_lookup2(struct net *net,
__be32 saddr, __be16 sport,
- __be32 daddr, unsigned int hnum, int dif,
+ __be32 daddr, unsigned int hnum, int dif, bool exact_dif,
struct udp_hslot *hslot2,
struct sk_buff *skb)
{
@@ -471,7 +449,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
badness = 0;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -506,6 +484,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+ bool exact_dif = udp_lib_exact_dif_match(net, skb);
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
@@ -518,7 +497,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, skb);
+ exact_dif, hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
hash2 = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum);
@@ -533,7 +512,7 @@ struct sock *__udp4_lib_lookup(struct net *net, __be32 saddr,
result = udp4_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, skb);
+ exact_dif, hslot2, skb);
}
return result;
}
@@ -542,7 +521,7 @@ begin:
badness = 0;
sk_for_each_rcu(sk, &hslot->head) {
score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
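The exact_dif plumbing above changes socket scoring for VRF setups: when the packet arrived through an l3mdev and udp_l3mdev_accept is disabled, even an unbound socket must match the ingress ifindex. A condensed sketch of the combined check, using the helpers from this hunk:

	/* udp_lib_exact_dif_match() turns on strict device matching for
	 * l3mdev (VRF) traffic; compute_score() then rejects any socket,
	 * bound or not, whose bound device does not equal the ingress
	 * ifindex (an unbound socket has sk_bound_dev_if == 0 and fails).
	 */
	bool exact_dif = udp_lib_exact_dif_match(net, skb);

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;
		score += 4;
	}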
diff --git a/net/ipv4/xfrm4_state.c b/net/ipv4/xfrm4_state.c
index 542074c..d6660a8 100644
--- a/net/ipv4/xfrm4_state.c
+++ b/net/ipv4/xfrm4_state.c
@@ -90,11 +90,3 @@ void __init xfrm4_state_init(void)
{
xfrm_state_register_afinfo(&xfrm4_state_afinfo);
}
-
-#if 0
-void __exit xfrm4_state_fini(void)
-{
- xfrm_state_unregister_afinfo(&xfrm4_state_afinfo);
-}
-#endif /* 0 */
-
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index ac9bd56..156ed57 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -243,6 +243,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
.seg6_require_hmac = 0,
#endif
.enhanced_dad = 1,
+ .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
};
static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
@@ -294,6 +295,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
.seg6_require_hmac = 0,
#endif
.enhanced_dad = 1,
+ .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
};
/* Check if a valid qdisc is available */
@@ -386,9 +388,9 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
if (ndev->cnf.stable_secret.initialized)
- ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
else
- ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
+ ndev->cnf.addr_gen_mode = ipv6_devconf_dflt.addr_gen_mode;
ndev->cnf.mtu6 = dev->mtu;
ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
@@ -2144,12 +2146,14 @@ static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
case ARPHRD_SIT:
return addrconf_ifid_sit(eui, dev);
case ARPHRD_IPGRE:
+ case ARPHRD_TUNNEL:
return addrconf_ifid_gre(eui, dev);
case ARPHRD_6LOWPAN:
return addrconf_ifid_eui64(eui, dev);
case ARPHRD_IEEE1394:
return addrconf_ifid_ieee1394(eui, dev);
case ARPHRD_TUNNEL6:
+ case ARPHRD_IP6GRE:
return addrconf_ifid_ip6tnl(eui, dev);
}
return -1;
@@ -2387,8 +2391,8 @@ static void manage_tempaddrs(struct inet6_dev *idev,
static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
{
- return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
- idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
+ return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
+ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
}
int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
@@ -3152,7 +3156,7 @@ static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
- switch (idev->addr_gen_mode) {
+ switch (idev->cnf.addr_gen_mode) {
case IN6_ADDR_GEN_MODE_RANDOM:
ipv6_gen_mode_random_init(idev);
/* fallthrough */
@@ -3193,6 +3197,9 @@ static void addrconf_dev_config(struct net_device *dev)
(dev->type != ARPHRD_IEEE1394) &&
(dev->type != ARPHRD_TUNNEL6) &&
(dev->type != ARPHRD_6LOWPAN) &&
+ (dev->type != ARPHRD_IP6GRE) &&
+ (dev->type != ARPHRD_IPGRE) &&
+ (dev->type != ARPHRD_TUNNEL) &&
(dev->type != ARPHRD_NONE)) {
/* Alas, we support only Ethernet autoconfiguration. */
return;
@@ -3204,8 +3211,8 @@ static void addrconf_dev_config(struct net_device *dev)
/* this device type has no EUI support */
if (dev->type == ARPHRD_NONE &&
- idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
- idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
+ idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
+ idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
addrconf_addr_gen(idev, false);
}
@@ -4982,6 +4989,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
#endif
array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
+ array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
}
static inline size_t inet6_ifla6_size(void)
@@ -5093,7 +5101,7 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
if (!nla)
goto nla_put_failure;
- if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
+ if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
goto nla_put_failure;
read_lock_bh(&idev->lock);
@@ -5211,6 +5219,26 @@ static int inet6_validate_link_af(const struct net_device *dev,
return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
}
+static int check_addr_gen_mode(int mode)
+{
+ if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
+ mode != IN6_ADDR_GEN_MODE_NONE &&
+ mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ mode != IN6_ADDR_GEN_MODE_RANDOM)
+ return -EINVAL;
+ return 1;
+}
+
+static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
+ int mode)
+{
+ if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
+ !idev->cnf.stable_secret.initialized &&
+ !net->ipv6.devconf_dflt->stable_secret.initialized)
+ return -EINVAL;
+ return 1;
+}
+
static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
{
int err = -EINVAL;
@@ -5232,18 +5260,11 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
- if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
- mode != IN6_ADDR_GEN_MODE_NONE &&
- mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
- mode != IN6_ADDR_GEN_MODE_RANDOM)
+ if (check_addr_gen_mode(mode) < 0 ||
+ check_stable_privacy(idev, dev_net(dev), mode) < 0)
return -EINVAL;
- if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
- !idev->cnf.stable_secret.initialized &&
- !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
- return -EINVAL;
-
- idev->addr_gen_mode = mode;
+ idev->cnf.addr_gen_mode = mode;
err = 0;
}
@@ -5547,8 +5568,7 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
struct net_device *dev;
struct inet6_dev *idev;
- rcu_read_lock();
- for_each_netdev_rcu(net, dev) {
+ for_each_netdev(net, dev) {
idev = __in6_dev_get(dev);
if (idev) {
int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
@@ -5557,7 +5577,6 @@ static void addrconf_disable_change(struct net *net, __s32 newf)
dev_disable_change(idev);
}
}
- rcu_read_unlock();
}
static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
@@ -5652,6 +5671,47 @@ int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
return ret;
}
+static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret = 0;
+ int new_val;
+ struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
+ struct net *net = (struct net *)ctl->extra2;
+
+ ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+ if (write) {
+ new_val = *((int *)ctl->data);
+
+ if (check_addr_gen_mode(new_val) < 0)
+ return -EINVAL;
+
+ /* request for default */
+ if (&net->ipv6.devconf_dflt->addr_gen_mode == ctl->data) {
+ ipv6_devconf_dflt.addr_gen_mode = new_val;
+
+ /* request for individual net device */
+ } else {
+ if (!idev)
+ return ret;
+
+ if (check_stable_privacy(idev, net, new_val) < 0)
+ return -EINVAL;
+
+ if (idev->cnf.addr_gen_mode != new_val) {
+ idev->cnf.addr_gen_mode = new_val;
+ rtnl_lock();
+ addrconf_dev_config(idev->dev);
+ rtnl_unlock();
+ }
+ }
+ }
+
+ return ret;
+}
+
static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -5702,14 +5762,14 @@ static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
struct inet6_dev *idev = __in6_dev_get(dev);
if (idev) {
- idev->addr_gen_mode =
+ idev->cnf.addr_gen_mode =
IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
}
}
} else {
struct inet6_dev *idev = ctl->extra1;
- idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
+ idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
}
out:
@@ -6097,6 +6157,13 @@ static const struct ctl_table addrconf_sysctl[] = {
.proc_handler = proc_dointvec,
},
{
+ .procname = "addr_gen_mode",
+ .data = &ipv6_devconf.addr_gen_mode,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = addrconf_sysctl_addr_gen_mode,
+ },
+ {
/* sentinel */
}
};
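For reference, check_addr_gen_mode() above accepts the four modes of the uapi in6_addr_gen_mode enum; the numeric values below assume the usual ordering in include/uapi/linux/if_link.h:

	/* Modes accepted by check_addr_gen_mode() (values assumed from
	 * the uapi enum ordering):
	 *   IN6_ADDR_GEN_MODE_EUI64           0  (default, MAC-derived IID)
	 *   IN6_ADDR_GEN_MODE_NONE            1  (no automatic link-local)
	 *   IN6_ADDR_GEN_MODE_STABLE_PRIVACY  2  (needs an initialized
	 *                                         stable secret, enforced
	 *                                         by check_stable_privacy())
	 *   IN6_ADDR_GEN_MODE_RANDOM          3  (random IID)
	 */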
diff --git a/net/ipv6/af_inet6.c b/net/ipv6/af_inet6.c
index aa42123..04db406 100644
--- a/net/ipv6/af_inet6.c
+++ b/net/ipv6/af_inet6.c
@@ -302,7 +302,8 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
return -EINVAL;
snum = ntohs(addr->sin6_port);
- if (snum && snum < PROT_SOCK && !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
+ if (snum && snum < inet_prot_sock(net) &&
+ !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
lock_sock(sk);
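inet6_bind() now compares against inet_prot_sock(net) rather than the fixed PROT_SOCK constant. The helper is defined outside this hunk; presumably it reads a per-namespace threshold, roughly:

	/* Presumed shape of inet_prot_sock() (a sketch; the field name is
	 * assumed): a per-network-namespace privileged-port floor
	 * replacing the compile-time PROT_SOCK constant.
	 */
	static inline int inet_prot_sock(struct net *net)
	{
		return net->ipv4.sysctl_ip_prot_sock;
	}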
diff --git a/net/ipv6/ah6.c b/net/ipv6/ah6.c
index 189eb10..dda6035 100644
--- a/net/ipv6/ah6.c
+++ b/net/ipv6/ah6.c
@@ -474,6 +474,9 @@ static void ah6_input_done(struct crypto_async_request *base, int err)
int hdr_len = skb_network_header_len(skb);
int ah_hlen = (ah->hdrlen + 2) << 2;
+ if (err)
+ goto out;
+
work_iph = AH_SKB_CB(skb)->tmp;
auth_data = ah_tmp_auth(work_iph, hdr_len);
icv = ah_tmp_icv(ahp->ahash, auth_data, ahp->icv_trunc_len);
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index cbcdd5d..ff54faa 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -44,6 +44,8 @@
#include <net/protocol.h>
#include <linux/icmpv6.h>
+#include <linux/highmem.h>
+
struct esp_skb_cb {
struct xfrm_skb_cb xfrm;
void *tmp;
@@ -114,11 +116,40 @@ static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
__alignof__(struct scatterlist));
}
+static void esp_ssg_unref(struct xfrm_state *x, void *tmp)
+{
+ __be32 *seqhi;
+ struct crypto_aead *aead = x->data;
+ int seqhilen = 0;
+ u8 *iv;
+ struct aead_request *req;
+ struct scatterlist *sg;
+
+ if (x->props.flags & XFRM_STATE_ESN)
+ seqhilen += sizeof(__be32);
+
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+
+ /* Unref skb_frag_pages in the src scatterlist if necessary.
+ * Skip the first sg which comes from skb->data.
+ */
+ if (req->src != req->dst)
+ for (sg = sg_next(req->src); sg; sg = sg_next(sg))
+ put_page(sg_page(sg));
+}
+
static void esp_output_done(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
+ void *tmp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x = dst->xfrm;
- kfree(ESP_SKB_CB(skb)->tmp);
+ tmp = ESP_SKB_CB(skb)->tmp;
+ esp_ssg_unref(x, tmp);
+ kfree(tmp);
xfrm_output_resume(skb, err);
}
@@ -138,6 +169,27 @@ static void esp_output_restore_header(struct sk_buff *skb)
esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}
+static struct ip_esp_hdr *esp_output_set_esn(struct sk_buff *skb,
+ struct ip_esp_hdr *esph,
+ __be32 *seqhi)
+{
+ struct xfrm_state *x = skb_dst(skb)->xfrm;
+
+ /* For ESN we move the header forward by 4 bytes to
+	 * accommodate the high bits. We will move it back after
+ * encryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
+ *seqhi = esph->spi;
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
+ }
+
+ esph->spi = x->id.spi;
+
+ return esph;
+}
+
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -146,14 +198,31 @@ static void esp_output_done_esn(struct crypto_async_request *base, int err)
esp_output_done(base, err);
}
+static void esp_output_fill_trailer(u8 *tail, int tfclen, int plen, __u8 proto)
+{
+ /* Fill padding... */
+ if (tfclen) {
+ memset(tail, 0, tfclen);
+ tail += tfclen;
+ }
+ do {
+ int i;
+ for (i = 0; i < plen - 2; i++)
+ tail[i] = i + 1;
+ } while (0);
+ tail[plen - 2] = plen - 2;
+ tail[plen - 1] = proto;
+}
+
static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
{
int err;
struct ip_esp_hdr *esph;
struct crypto_aead *aead;
struct aead_request *req;
- struct scatterlist *sg;
+ struct scatterlist *sg, *dsg;
struct sk_buff *trailer;
+ struct page *page;
void *tmp;
int blksize;
int clen;
@@ -164,10 +233,13 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
int nfrags;
int assoclen;
int seqhilen;
+ int tailen;
u8 *iv;
u8 *tail;
+ u8 *vaddr;
__be32 *seqhi;
__be64 seqno;
+ __u8 proto = *skb_mac_header(skb);
/* skb is pure payload to encrypt */
aead = x->data;
@@ -186,11 +258,7 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
blksize = ALIGN(crypto_aead_blocksize(aead), 4);
clen = ALIGN(skb->len + 2 + tfclen, blksize);
plen = clen - skb->len - tfclen;
-
- err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
- if (err < 0)
- goto error;
- nfrags = err;
+ tailen = tfclen + plen + alen;
assoclen = sizeof(*esph);
seqhilen = 0;
@@ -200,59 +268,152 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
assoclen += seqhilen;
}
- tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
- if (!tmp) {
- err = -ENOMEM;
- goto error;
+ *skb_mac_header(skb) = IPPROTO_ESP;
+ esph = ip_esp_hdr(skb);
+
+ if (!skb_cloned(skb)) {
+ if (tailen <= skb_availroom(skb)) {
+ nfrags = 1;
+ trailer = skb;
+ tail = skb_tail_pointer(trailer);
+
+ goto skip_cow;
+ } else if ((skb_shinfo(skb)->nr_frags < MAX_SKB_FRAGS)
+ && !skb_has_frag_list(skb)) {
+ int allocsize;
+ struct sock *sk = skb->sk;
+ struct page_frag *pfrag = &x->xfrag;
+
+ allocsize = ALIGN(tailen, L1_CACHE_BYTES);
+
+ spin_lock_bh(&x->lock);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ goto cow;
+ }
+
+ page = pfrag->page;
+ get_page(page);
+
+ vaddr = kmap_atomic(page);
+
+ tail = vaddr + pfrag->offset;
+
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ kunmap_atomic(vaddr);
+
+ nfrags = skb_shinfo(skb)->nr_frags;
+
+ __skb_fill_page_desc(skb, nfrags, page, pfrag->offset,
+ tailen);
+ skb_shinfo(skb)->nr_frags = ++nfrags;
+
+ pfrag->offset = pfrag->offset + allocsize;
+ nfrags++;
+
+ skb->len += tailen;
+ skb->data_len += tailen;
+ skb->truesize += tailen;
+ if (sk)
+ atomic_add(tailen, &sk->sk_wmem_alloc);
+
+ skb_push(skb, -skb_network_offset(skb));
+
+ esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ esph->spi = x->id.spi;
+
+ tmp = esp_alloc_tmp(aead, nfrags + 2, seqhilen);
+ if (!tmp) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = &sg[nfrags];
+
+ esph = esp_output_set_esn(skb, esph, seqhi);
+
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ allocsize = ALIGN(skb->data_len, L1_CACHE_BYTES);
+
+ if (unlikely(!skb_page_frag_refill(allocsize, pfrag, GFP_ATOMIC))) {
+ spin_unlock_bh(&x->lock);
+ err = -ENOMEM;
+ goto error;
+ }
+
+ skb_shinfo(skb)->nr_frags = 1;
+
+ page = pfrag->page;
+ get_page(page);
+ /* replace page frags in skb with new page */
+ __skb_fill_page_desc(skb, 0, page, pfrag->offset, skb->data_len);
+ pfrag->offset = pfrag->offset + allocsize;
+
+ sg_init_table(dsg, skb_shinfo(skb)->nr_frags + 1);
+ skb_to_sgvec(skb, dsg,
+ (unsigned char *)esph - skb->data,
+ assoclen + ivlen + clen + alen);
+
+ spin_unlock_bh(&x->lock);
+
+ goto skip_cow2;
+ }
}
- seqhi = esp_tmp_seqhi(tmp);
- iv = esp_tmp_iv(aead, tmp, seqhilen);
- req = esp_tmp_req(aead, iv);
- sg = esp_req_sg(aead, req);
+cow:
+ err = skb_cow_data(skb, tailen, &trailer);
+ if (err < 0)
+ goto error;
+ nfrags = err;
- /* Fill padding... */
tail = skb_tail_pointer(trailer);
- if (tfclen) {
- memset(tail, 0, tfclen);
- tail += tfclen;
- }
- do {
- int i;
- for (i = 0; i < plen - 2; i++)
- tail[i] = i + 1;
- } while (0);
- tail[plen - 2] = plen - 2;
- tail[plen - 1] = *skb_mac_header(skb);
- pskb_put(skb, trailer, clen - skb->len + alen);
+ esph = ip_esp_hdr(skb);
+skip_cow:
+ esp_output_fill_trailer(tail, tfclen, plen, proto);
+
+ pskb_put(skb, trailer, clen - skb->len + alen);
skb_push(skb, -skb_network_offset(skb));
- esph = ip_esp_hdr(skb);
- *skb_mac_header(skb) = IPPROTO_ESP;
esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);
+ esph->spi = x->id.spi;
- aead_request_set_callback(req, 0, esp_output_done, skb);
-
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * encryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
- *seqhi = esph->spi;
- esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
- aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
+ if (!tmp) {
+ err = -ENOMEM;
+ goto error;
}
- esph->spi = x->id.spi;
+ seqhi = esp_tmp_seqhi(tmp);
+ iv = esp_tmp_iv(aead, tmp, seqhilen);
+ req = esp_tmp_req(aead, iv);
+ sg = esp_req_sg(aead, req);
+ dsg = sg;
+
+ esph = esp_output_set_esn(skb, esph, seqhi);
sg_init_table(sg, nfrags);
skb_to_sgvec(skb, sg,
(unsigned char *)esph - skb->data,
assoclen + ivlen + clen + alen);
- aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
+skip_cow2:
+ if ((x->props.flags & XFRM_STATE_ESN))
+ aead_request_set_callback(req, 0, esp_output_done_esn, skb);
+ else
+ aead_request_set_callback(req, 0, esp_output_done, skb);
+
+ aead_request_set_crypt(req, sg, dsg, ivlen + clen, iv);
aead_request_set_ad(req, assoclen);
seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
@@ -278,6 +439,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
esp_output_restore_header(skb);
}
+ if (sg != dsg)
+ esp_ssg_unref(x, tmp);
kfree(tmp);
error:
@@ -343,6 +506,23 @@ static void esp_input_restore_header(struct sk_buff *skb)
__skb_pull(skb, 4);
}
+static void esp_input_set_header(struct sk_buff *skb, __be32 *seqhi)
+{
+ struct xfrm_state *x = xfrm_input_state(skb);
+ struct ip_esp_hdr *esph = (struct ip_esp_hdr *)skb->data;
+
+ /* For ESN we move the header forward by 4 bytes to
+	 * accommodate the high bits. We will move it back after
+ * decryption.
+ */
+ if ((x->props.flags & XFRM_STATE_ESN)) {
+ esph = (void *)skb_push(skb, 4);
+ *seqhi = esph->spi;
+ esph->spi = esph->seq_no;
+ esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ }
+}
+
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
struct sk_buff *skb = base->data;
@@ -378,14 +558,6 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
goto out;
}
- nfrags = skb_cow_data(skb, 0, &trailer);
- if (nfrags < 0) {
- ret = -EINVAL;
- goto out;
- }
-
- ret = -ENOMEM;
-
assoclen = sizeof(*esph);
seqhilen = 0;
@@ -394,6 +566,27 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
assoclen += seqhilen;
}
+ if (!skb_cloned(skb)) {
+ if (!skb_is_nonlinear(skb)) {
+ nfrags = 1;
+
+ goto skip_cow;
+ } else if (!skb_has_frag_list(skb)) {
+ nfrags = skb_shinfo(skb)->nr_frags;
+ nfrags++;
+
+ goto skip_cow;
+ }
+ }
+
+ nfrags = skb_cow_data(skb, 0, &trailer);
+ if (nfrags < 0) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+skip_cow:
+ ret = -ENOMEM;
tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
if (!tmp)
goto out;
@@ -404,26 +597,17 @@ static int esp6_input(struct xfrm_state *x, struct sk_buff *skb)
req = esp_tmp_req(aead, iv);
sg = esp_req_sg(aead, req);
- skb->ip_summed = CHECKSUM_NONE;
+ esp_input_set_header(skb, seqhi);
- esph = (struct ip_esp_hdr *)skb->data;
+ sg_init_table(sg, nfrags);
+ skb_to_sgvec(skb, sg, 0, skb->len);
- aead_request_set_callback(req, 0, esp_input_done, skb);
+ skb->ip_summed = CHECKSUM_NONE;
- /* For ESN we move the header forward by 4 bytes to
- * accomodate the high bits. We will move it back after
- * decryption.
- */
- if ((x->props.flags & XFRM_STATE_ESN)) {
- esph = (void *)skb_push(skb, 4);
- *seqhi = esph->spi;
- esph->spi = esph->seq_no;
- esph->seq_no = XFRM_SKB_CB(skb)->seq.input.hi;
+ if ((x->props.flags & XFRM_STATE_ESN))
aead_request_set_callback(req, 0, esp_input_done_esn, skb);
- }
-
- sg_init_table(sg, nfrags);
- skb_to_sgvec(skb, sg, 0, skb->len);
+ else
+ aead_request_set_callback(req, 0, esp_input_done, skb);
aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
aead_request_set_ad(req, assoclen);
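As a worked example of esp_output_fill_trailer() above: with tfclen = 0, plen = 4, and proto = IPPROTO_TCP (6), the trailer comes out as RFC 4303 requires:

	/* esp_output_fill_trailer() with tfclen = 0, plen = 4, proto = 6:
	 *
	 *   tail[0] = 1, tail[1] = 2    monotonic pad bytes (i + 1)
	 *   tail[2] = 2                 Pad Length  (plen - 2)
	 *   tail[3] = 6                 Next Header (IPPROTO_TCP)
	 *
	 * A nonzero tfclen would first prepend that many zero bytes of
	 * TFC padding ahead of the monotonic pad.
	 */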
diff --git a/net/ipv6/ila/ila_lwt.c b/net/ipv6/ila/ila_lwt.c
index a7bc54a..ce1aae4 100644
--- a/net/ipv6/ila/ila_lwt.c
+++ b/net/ipv6/ila/ila_lwt.c
@@ -115,7 +115,7 @@ static const struct nla_policy ila_nl_policy[ILA_ATTR_MAX + 1] = {
[ILA_ATTR_CSUM_MODE] = { .type = NLA_U8, },
};
-static int ila_build_state(struct net_device *dev, struct nlattr *nla,
+static int ila_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -238,6 +238,7 @@ static const struct lwtunnel_encap_ops ila_encap_ops = {
.fill_encap = ila_fill_encap_info,
.get_encap_size = ila_encap_nlsize,
.cmp_encap = ila_encap_cmp,
+ .owner = THIS_MODULE,
};
int ila_lwt_init(void)
diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c
index 7396e75..9a31d13 100644
--- a/net/ipv6/inet6_connection_sock.c
+++ b/net/ipv6/inet6_connection_sock.c
@@ -28,46 +28,6 @@
#include <net/inet6_connection_sock.h>
#include <net/sock_reuseport.h>
-int inet6_csk_bind_conflict(const struct sock *sk,
- const struct inet_bind_bucket *tb, bool relax,
- bool reuseport_ok)
-{
- const struct sock *sk2;
- bool reuse = !!sk->sk_reuse;
- bool reuseport = !!sk->sk_reuseport && reuseport_ok;
- kuid_t uid = sock_i_uid((struct sock *)sk);
-
- /* We must walk the whole port owner list in this case. -DaveM */
- /*
- * See comment in inet_csk_bind_conflict about sock lookup
- * vs net namespaces issues.
- */
- sk_for_each_bound(sk2, &tb->owners) {
- if (sk != sk2 &&
- (!sk->sk_bound_dev_if ||
- !sk2->sk_bound_dev_if ||
- sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
- if ((!reuse || !sk2->sk_reuse ||
- sk2->sk_state == TCP_LISTEN) &&
- (!reuseport || !sk2->sk_reuseport ||
- rcu_access_pointer(sk->sk_reuseport_cb) ||
- (sk2->sk_state != TCP_TIME_WAIT &&
- !uid_eq(uid,
- sock_i_uid((struct sock *)sk2))))) {
- if (ipv6_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- if (!relax && reuse && sk2->sk_reuse &&
- sk2->sk_state != TCP_LISTEN &&
- ipv6_rcv_saddr_equal(sk, sk2, true))
- break;
- }
- }
-
- return sk2 != NULL;
-}
-EXPORT_SYMBOL_GPL(inet6_csk_bind_conflict);
-
struct dst_entry *inet6_csk_route_req(const struct sock *sk,
struct flowi6 *fl6,
const struct request_sock *req,
@@ -176,7 +136,7 @@ int inet6_csk_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl_unused
/* Restore final destination back after routing done */
fl6.daddr = sk->sk_v6_daddr;
- res = ip6_xmit(sk, skb, &fl6, rcu_dereference(np->opt),
+ res = ip6_xmit(sk, skb, &fl6, sk->sk_mark, rcu_dereference(np->opt),
np->tclass);
rcu_read_unlock();
return res;
diff --git a/net/ipv6/inet6_hashtables.c b/net/ipv6/inet6_hashtables.c
index 02761c9..d090091 100644
--- a/net/ipv6/inet6_hashtables.c
+++ b/net/ipv6/inet6_hashtables.c
@@ -268,54 +268,10 @@ int inet6_hash(struct sock *sk)
if (sk->sk_state != TCP_CLOSE) {
local_bh_disable();
- err = __inet_hash(sk, NULL, ipv6_rcv_saddr_equal);
+ err = __inet_hash(sk, NULL);
local_bh_enable();
}
return err;
}
EXPORT_SYMBOL_GPL(inet6_hash);
-
-/* match_wildcard == true: IPV6_ADDR_ANY equals to any IPv6 addresses if IPv6
- * only, and any IPv4 addresses if not IPv6 only
- * match_wildcard == false: addresses must be exactly the same, i.e.
- * IPV6_ADDR_ANY only equals to IPV6_ADDR_ANY,
- * and 0.0.0.0 equals to 0.0.0.0 only
- */
-int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
- bool match_wildcard)
-{
- const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
- int sk2_ipv6only = inet_v6_ipv6only(sk2);
- int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
- int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
-
- /* if both are mapped, treat as IPv4 */
- if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == IPV6_ADDR_MAPPED) {
- if (!sk2_ipv6only) {
- if (sk->sk_rcv_saddr == sk2->sk_rcv_saddr)
- return 1;
- if (!sk->sk_rcv_saddr || !sk2->sk_rcv_saddr)
- return match_wildcard;
- }
- return 0;
- }
-
- if (addr_type == IPV6_ADDR_ANY && addr_type2 == IPV6_ADDR_ANY)
- return 1;
-
- if (addr_type2 == IPV6_ADDR_ANY && match_wildcard &&
- !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))
- return 1;
-
- if (addr_type == IPV6_ADDR_ANY && match_wildcard &&
- !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
- return 1;
-
- if (sk2_rcv_saddr6 &&
- ipv6_addr_equal(&sk->sk_v6_rcv_saddr, sk2_rcv_saddr6))
- return 1;
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(ipv6_rcv_saddr_equal);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index ef54852..febde6c 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -746,6 +746,9 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
u16 nlflags = NLM_F_EXCL;
int err;
+ if (info->nlh && (info->nlh->nlmsg_flags & NLM_F_APPEND))
+ nlflags |= NLM_F_APPEND;
+
ins = &fn->leaf;
for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 75b6108..51b9835 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -484,11 +484,6 @@ drop:
return 0;
}
-struct ipv6_tel_txoption {
- struct ipv6_txoptions ops;
- __u8 dst_opt[8];
-};
-
static int gre_handle_offloads(struct sk_buff *skb, bool csum)
{
return iptunnel_handle_offloads(skb,
@@ -582,6 +577,9 @@ static inline int ip6gre_xmit_ipv6(struct sk_buff *skb, struct net_device *dev)
return -1;
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
+
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
tel = (struct ipv6_tlv_tnl_enc_lim *)&skb_network_header(skb)[offset];
@@ -998,6 +996,9 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
static int ip6gre_tunnel_init_common(struct net_device *dev)
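
ip6_tnl_parse_tlv_enc_lim() may call pskb_may_pull(), which can reallocate skb->head and leave previously computed header pointers dangling; the hunk above therefore reloads ipv6h after the call. The rule, as a short fragment (a sketch, not complete code):

	struct ipv6hdr *ipv6h = ipv6_hdr(skb);
	__u16 offset;

	offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
	/* the parse may pull, and thus reallocate, skb->head: reload */
	ipv6h = ipv6_hdr(skb);
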
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 89c59e6..fc7b401 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -191,6 +191,7 @@ static struct sk_buff **ipv6_gro_receive(struct sk_buff **head,
ops = rcu_dereference(inet6_offloads[proto]);
if (!ops || !ops->callbacks.gro_receive) {
__pskb_pull(skb, skb_gro_offset(skb));
+ skb_gro_frag0_invalidate(skb);
proto = ipv6_gso_pull_exthdrs(skb, proto);
skb_gro_pull(skb, -skb_transport_offset(skb));
skb_reset_transport_header(skb);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 38122d0..2c0df09 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -172,7 +172,7 @@ int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
* which are using proper atomic operations or spinlocks.
*/
int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
- struct ipv6_txoptions *opt, int tclass)
+ __u32 mark, struct ipv6_txoptions *opt, int tclass)
{
struct net *net = sock_net(sk);
const struct ipv6_pinfo *np = inet6_sk(sk);
@@ -240,7 +240,7 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = mark;
mtu = dst_mtu(dst);
if ((skb->len <= mtu) || skb->ignore_df || skb_is_gso(skb)) {
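
ip6_xmit() now takes the mark explicitly instead of always reading sk->sk_mark, so callers transmitting on behalf of request sockets or per-netns control sockets can supply the mark that actually belongs to the flow. Hedged caller sketches, mirroring the tcp_ipv6.c hunks later in this diff:

	/* Full socket: preserve the old behaviour by passing the socket mark. */
	err = ip6_xmit(sk, skb, &fl6, sk->sk_mark, opt, np->tclass);

	/* Control socket (e.g. RST/ACK replies): use the mark in the flow. */
	ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
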
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 36d2921..ff8ee06 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -400,18 +400,19 @@ ip6_tnl_dev_uninit(struct net_device *dev)
__u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
{
- const struct ipv6hdr *ipv6h = (const struct ipv6hdr *) raw;
- __u8 nexthdr = ipv6h->nexthdr;
- __u16 off = sizeof(*ipv6h);
+ const struct ipv6hdr *ipv6h = (const struct ipv6hdr *)raw;
+ unsigned int nhoff = raw - skb->data;
+ unsigned int off = nhoff + sizeof(*ipv6h);
+ u8 next, nexthdr = ipv6h->nexthdr;
while (ipv6_ext_hdr(nexthdr) && nexthdr != NEXTHDR_NONE) {
- __u16 optlen = 0;
struct ipv6_opt_hdr *hdr;
- if (raw + off + sizeof(*hdr) > skb->data &&
- !pskb_may_pull(skb, raw - skb->data + off + sizeof (*hdr)))
+ u16 optlen;
+
+ if (!pskb_may_pull(skb, off + sizeof(*hdr)))
break;
- hdr = (struct ipv6_opt_hdr *) (raw + off);
+ hdr = (struct ipv6_opt_hdr *)(skb->data + off);
if (nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag_hdr = (struct frag_hdr *) hdr;
if (frag_hdr->frag_off)
@@ -422,20 +423,29 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
} else {
optlen = ipv6_optlen(hdr);
}
+ /* cache hdr->nexthdr, since pskb_may_pull() might
+ * invalidate hdr
+ */
+ next = hdr->nexthdr;
if (nexthdr == NEXTHDR_DEST) {
- __u16 i = off + 2;
+ u16 i = 2;
+
+ /* Remember: hdr is no longer valid at this point. */
+ if (!pskb_may_pull(skb, off + optlen))
+ break;
+
while (1) {
struct ipv6_tlv_tnl_enc_lim *tel;
/* No more room for encapsulation limit */
- if (i + sizeof (*tel) > off + optlen)
+ if (i + sizeof(*tel) > optlen)
break;
- tel = (struct ipv6_tlv_tnl_enc_lim *) &raw[i];
+ tel = (struct ipv6_tlv_tnl_enc_lim *)&skb->data[off + i];
/* return index of option if found and valid */
if (tel->type == IPV6_TLV_TNL_ENCAP_LIMIT &&
tel->length == 1)
- return i;
+ return i + off - nhoff;
/* else jump to next option */
if (tel->type)
i += tel->length + 2;
@@ -443,7 +453,7 @@ __u16 ip6_tnl_parse_tlv_enc_lim(struct sk_buff *skb, __u8 *raw)
i++;
}
}
- nexthdr = hdr->nexthdr;
+ nexthdr = next;
off += optlen;
}
return 0;
@@ -1108,7 +1118,7 @@ route_lookup:
t->parms.name);
goto tx_err_dst_release;
}
- mtu = dst_mtu(dst) - psh_hlen;
+ mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
if (encap_limit >= 0) {
max_headroom += 8;
mtu -= 8;
@@ -1117,7 +1127,7 @@ route_lookup:
mtu = IPV6_MIN_MTU;
if (skb_dst(skb) && !t->parms.collect_md)
skb_dst(skb)->ops->update_pmtu(skb_dst(skb), NULL, skb, mtu);
- if (skb->len > mtu && !skb_is_gso(skb)) {
+ if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb)) {
*pmtu = mtu;
err = -EMSGSIZE;
goto tx_err_dst_release;
@@ -1303,6 +1313,8 @@ ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
fl6.flowlabel = key->label;
} else {
offset = ip6_tnl_parse_tlv_enc_lim(skb, skb_network_header(skb));
+ /* ip6_tnl_parse_tlv_enc_lim() might have reallocated skb->head */
+ ipv6h = ipv6_hdr(skb);
if (offset > 0) {
struct ipv6_tlv_tnl_enc_lim *tel;
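
The two MTU hunks account for the tunnel's own encapsulation header (t->tun_hlen) both when deriving the path MTU from the route and when checking the packet against it; without this, encapsulated packets can exceed the link MTU by the header size. A condensed fragment of the accounting (a sketch of the hunks above, not complete code):

	mtu = dst_mtu(dst) - psh_hlen - t->tun_hlen;
	if (encap_limit >= 0)
		mtu -= 8;		/* room for the encap-limit option */
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;
	if (skb->len - t->tun_hlen > mtu && !skb_is_gso(skb))
		return -EMSGSIZE;	/* report the honest PMTU */
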
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d82042c..c795fee 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -49,6 +49,7 @@
#include <net/xfrm.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
+#include <linux/etherdevice.h>
#define IP6_VTI_HASH_SIZE_SHIFT 5
#define IP6_VTI_HASH_SIZE (1 << IP6_VTI_HASH_SIZE_SHIFT)
@@ -842,6 +843,9 @@ static void vti6_dev_setup(struct net_device *dev)
dev->flags |= IFF_NOARP;
dev->addr_len = sizeof(struct in6_addr);
netif_keep_dst(dev);
+ /* This perm addr will be used as interface identifier by IPv6 */
+ dev->addr_assign_type = NET_ADDR_RANDOM;
+ eth_random_addr(dev->perm_addr);
}
/**
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index e275077..babaf3e 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -2288,7 +2288,7 @@ static int __ip6mr_fill_mroute(struct mr6_table *mrt, struct sk_buff *skb,
}
int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
- int nowait, u32 portid)
+ u32 portid)
{
int err;
struct mr6_table *mrt;
@@ -2315,11 +2315,6 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
struct net_device *dev;
int vif;
- if (nowait) {
- read_unlock(&mrt_lock);
- return -EAGAIN;
- }
-
dev = skb->dev;
if (!dev || (vif = ip6mr_find_vif(mrt, dev)) < 0) {
read_unlock(&mrt_lock);
@@ -2357,7 +2352,7 @@ int ip6mr_get_route(struct net *net, struct sk_buff *skb, struct rtmsg *rtm,
return err;
}
- if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
+ if (rtm->rtm_flags & RTM_F_NOTIFY)
cache->mfc_flags |= MFC_NOTIFY;
err = __ip6mr_fill_mroute(mrt, skb, cache, rtm);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 14a3903..7139fff 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -81,7 +81,7 @@ static void mld_gq_timer_expire(unsigned long data);
static void mld_ifc_timer_expire(unsigned long data);
static void mld_ifc_event(struct inet6_dev *idev);
static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *addr);
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *pmc);
static void mld_clear_delrec(struct inet6_dev *idev);
static bool mld_in_v1_mode(const struct inet6_dev *idev);
static int sf_setstate(struct ifmcaddr6 *pmc);
@@ -692,9 +692,9 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
dev_mc_del(dev, buf);
}
- if (mc->mca_flags & MAF_NOREPORT)
- goto done;
spin_unlock_bh(&mc->mca_lock);
+ if (mc->mca_flags & MAF_NOREPORT)
+ return;
if (!mc->idev->dead)
igmp6_leave_group(mc);
@@ -702,8 +702,6 @@ static void igmp6_group_dropped(struct ifmcaddr6 *mc)
spin_lock_bh(&mc->mca_lock);
if (del_timer(&mc->mca_timer))
atomic_dec(&mc->mca_refcnt);
-done:
- ip6_mc_clear_src(mc);
spin_unlock_bh(&mc->mca_lock);
}
@@ -748,10 +746,11 @@ static void mld_add_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
spin_unlock_bh(&idev->mc_lock);
}
-static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
+static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
{
struct ifmcaddr6 *pmc, *pmc_prev;
- struct ip6_sf_list *psf, *psf_next;
+ struct ip6_sf_list *psf;
+ struct in6_addr *pmca = &im->mca_addr;
spin_lock_bh(&idev->mc_lock);
pmc_prev = NULL;
@@ -768,14 +767,20 @@ static void mld_del_delrec(struct inet6_dev *idev, const struct in6_addr *pmca)
}
spin_unlock_bh(&idev->mc_lock);
+ spin_lock_bh(&im->mca_lock);
if (pmc) {
- for (psf = pmc->mca_tomb; psf; psf = psf_next) {
- psf_next = psf->sf_next;
- kfree(psf);
+ im->idev = pmc->idev;
+ im->mca_crcount = idev->mc_qrv;
+ im->mca_sfmode = pmc->mca_sfmode;
+ if (pmc->mca_sfmode == MCAST_INCLUDE) {
+ im->mca_tomb = pmc->mca_tomb;
+ im->mca_sources = pmc->mca_sources;
+ for (psf = im->mca_sources; psf; psf = psf->sf_next)
+ psf->sf_crcount = im->mca_crcount;
}
in6_dev_put(pmc->idev);
- kfree(pmc);
}
+ spin_unlock_bh(&im->mca_lock);
}
static void mld_clear_delrec(struct inet6_dev *idev)
@@ -904,7 +909,7 @@ int ipv6_dev_mc_inc(struct net_device *dev, const struct in6_addr *addr)
mca_get(mc);
write_unlock_bh(&idev->lock);
- mld_del_delrec(idev, &mc->mca_addr);
+ mld_del_delrec(idev, mc);
igmp6_group_added(mc);
ma_put(mc);
return 0;
@@ -927,6 +932,7 @@ int __ipv6_dev_mc_dec(struct inet6_dev *idev, const struct in6_addr *addr)
write_unlock_bh(&idev->lock);
igmp6_group_dropped(ma);
+ ip6_mc_clear_src(ma);
ma_put(ma);
return 0;
@@ -2501,15 +2507,17 @@ void ipv6_mc_down(struct inet6_dev *idev)
/* Withdraw multicast list */
read_lock_bh(&idev->lock);
- mld_ifc_stop_timer(idev);
- mld_gq_stop_timer(idev);
- mld_dad_stop_timer(idev);
for (i = idev->mc_list; i; i = i->next)
igmp6_group_dropped(i);
- read_unlock_bh(&idev->lock);
- mld_clear_delrec(idev);
+ /* Stop the timers only after the groups have been dropped, or
+ * mld_ifc_event() will start them again.
+ */
+ mld_ifc_stop_timer(idev);
+ mld_gq_stop_timer(idev);
+ mld_dad_stop_timer(idev);
+ read_unlock_bh(&idev->lock);
}
static void ipv6_mc_reset(struct inet6_dev *idev)
@@ -2531,8 +2539,10 @@ void ipv6_mc_up(struct inet6_dev *idev)
read_lock_bh(&idev->lock);
ipv6_mc_reset(idev);
- for (i = idev->mc_list; i; i = i->next)
+ for (i = idev->mc_list; i; i = i->next) {
+ mld_del_delrec(idev, i);
igmp6_group_added(i);
+ }
read_unlock_bh(&idev->lock);
}
@@ -2565,6 +2575,7 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
/* Deactivate timers */
ipv6_mc_down(idev);
+ mld_clear_delrec(idev);
/* Delete all-nodes address. */
/* We cannot call ipv6_dev_mc_dec() directly, our caller in
@@ -2579,11 +2590,9 @@ void ipv6_mc_destroy_dev(struct inet6_dev *idev)
write_lock_bh(&idev->lock);
while ((i = idev->mc_list) != NULL) {
idev->mc_list = i->next;
- write_unlock_bh(&idev->lock);
- igmp6_group_dropped(i);
+ write_unlock_bh(&idev->lock);
ma_put(i);
-
write_lock_bh(&idev->lock);
}
write_unlock_bh(&idev->lock);
diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c
index d5263dc..b12e61b 100644
--- a/net/ipv6/netfilter/ip6t_rpfilter.c
+++ b/net/ipv6/netfilter/ip6t_rpfilter.c
@@ -72,10 +72,10 @@ static bool rpfilter_lookup_reverse6(struct net *net, const struct sk_buff *skb,
return ret;
}
-static bool rpfilter_is_local(const struct sk_buff *skb)
+static bool
+rpfilter_is_loopback(const struct sk_buff *skb, const struct net_device *in)
{
- const struct rt6_info *rt = (const void *) skb_dst(skb);
- return rt && (rt->rt6i_flags & RTF_LOCAL);
+ return skb->pkt_type == PACKET_LOOPBACK || in->flags & IFF_LOOPBACK;
}
static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
@@ -85,7 +85,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)
struct ipv6hdr *iph;
bool invert = info->flags & XT_RPFILTER_INVERT;
- if (rpfilter_is_local(skb))
+ if (rpfilter_is_loopback(skb, xt_in(par)))
return true ^ invert;
iph = ipv6_hdr(skb);
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 1009040..eedee5d 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -157,6 +157,7 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
fl6.fl6_sport = otcph->dest;
fl6.fl6_dport = otcph->source;
fl6.flowi6_oif = l3mdev_master_ifindex(skb_dst(oldskb)->dev);
+ fl6.flowi6_mark = IP6_REPLY_MARK(net, oldskb->mark);
security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
dst = ip6_route_output(net, NULL, &fl6);
if (dst->error) {
@@ -180,6 +181,8 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
skb_dst_set(nskb, dst);
+ nskb->mark = fl6.flowi6_mark;
+
skb_reserve(nskb, hh_len + dst->header_len);
ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
ip6_dst_hoplimit(dst));
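
With fwmark reflection enabled, the generated RST inherits the original packet's mark via IP6_REPLY_MARK(), and the second hunk copies the resolved mark onto the reply skb so that the route lookup and the emitted packet agree. The helper reduces to roughly the following (a hedged sketch of its behaviour, not a verbatim copy of the macro):

	/* Reuse the incoming mark for replies only when the per-netns
	 * fwmark_reflect sysctl is enabled; otherwise replies keep mark 0.
	 */
	static inline u32 ip6_reply_mark(struct net *net, u32 mark)
	{
		return net->ipv6.sysctl.fwmark_reflect ? mark : 0;
	}
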
diff --git a/net/ipv6/netfilter/nft_fib_ipv6.c b/net/ipv6/netfilter/nft_fib_ipv6.c
index c947aad..765facf 100644
--- a/net/ipv6/netfilter/nft_fib_ipv6.c
+++ b/net/ipv6/netfilter/nft_fib_ipv6.c
@@ -18,13 +18,6 @@
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
-static bool fib6_is_local(const struct sk_buff *skb)
-{
- const struct rt6_info *rt = (const void *)skb_dst(skb);
-
- return rt && (rt->rt6i_flags & RTF_LOCAL);
-}
-
static int get_ifindex(const struct net_device *dev)
{
return dev ? dev->ifindex : 0;
@@ -164,8 +157,10 @@ void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
lookup_flags = nft_fib6_flowi_init(&fl6, priv, pkt, oif);
- if (nft_hook(pkt) == NF_INET_PRE_ROUTING && fib6_is_local(pkt->skb)) {
- nft_fib_store_result(dest, priv->result, pkt, LOOPBACK_IFINDEX);
+ if (nft_hook(pkt) == NF_INET_PRE_ROUTING &&
+ nft_fib_is_loopback(pkt->skb, nft_in(pkt))) {
+ nft_fib_store_result(dest, priv->result, pkt,
+ nft_in(pkt)->ifindex);
return;
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 8417c41..2563331 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1464,7 +1464,7 @@ static struct rt6_info *__ip6_route_redirect(struct net *net,
struct fib6_node *fn;
/* Get the "current" route for this destination and
- * check if the redirect has come from approriate router.
+ * check if the redirect has come from appropriate router.
*
* RFC 4861 specifies that redirects should only be
* accepted if they come from the nexthop to the target.
@@ -1897,7 +1897,7 @@ static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg)
if (cfg->fc_encap) {
struct lwtunnel_state *lwtstate;
- err = lwtunnel_build_state(dev, cfg->fc_encap_type,
+ err = lwtunnel_build_state(cfg->fc_encap_type,
cfg->fc_encap, AF_INET6, cfg,
&lwtstate);
if (err)
@@ -2711,13 +2711,16 @@ struct arg_dev_net {
struct net *net;
};
+/* called with write lock held for table with rt */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
const struct arg_dev_net *adn = arg;
const struct net_device *dev = adn->dev;
if ((rt->dst.dev == dev || !dev) &&
- rt != adn->net->ipv6.ip6_null_entry)
+ rt != adn->net->ipv6.ip6_null_entry &&
+ (rt->rt6i_nsiblings == 0 ||
+ !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
return -1;
return 0;
@@ -2768,7 +2771,7 @@ static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
old MTU is the lowest MTU in the path, update the route PMTU
to reflect the increase. In this case if the other nodes' MTU
also have the lowest MTU, TOO BIG MESSAGE will be lead to
- PMTU discouvery.
+ PMTU discovery.
*/
if (rt->dst.dev == arg->dev &&
dst_metric_raw(&rt->dst, RTAX_MTU) &&
@@ -2896,6 +2899,11 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_MULTIPATH]) {
cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
+
+ err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
+ cfg->fc_mp_len);
+ if (err < 0)
+ goto errout;
}
if (tb[RTA_PREF]) {
@@ -2909,9 +2917,14 @@ static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
if (tb[RTA_ENCAP])
cfg->fc_encap = tb[RTA_ENCAP];
- if (tb[RTA_ENCAP_TYPE])
+ if (tb[RTA_ENCAP_TYPE]) {
cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
+ err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
+ if (err < 0)
+ goto errout;
+ }
+
if (tb[RTA_EXPIRES]) {
unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
@@ -3169,7 +3182,7 @@ static int rt6_fill_node(struct net *net,
struct sk_buff *skb, struct rt6_info *rt,
struct in6_addr *dst, struct in6_addr *src,
int iif, int type, u32 portid, u32 seq,
- int prefix, int nowait, unsigned int flags)
+ unsigned int flags)
{
u32 metrics[RTAX_MAX];
struct rtmsg *rtm;
@@ -3177,13 +3190,6 @@ static int rt6_fill_node(struct net *net,
long expires;
u32 table;
- if (prefix) { /* user wants prefix routes only */
- if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
- /* success since this is not a prefix route */
- return 1;
- }
- }
-
nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
if (!nlh)
return -EMSGSIZE;
@@ -3223,7 +3229,7 @@ static int rt6_fill_node(struct net *net,
else
rtm->rtm_type = RTN_UNICAST;
rtm->rtm_flags = 0;
- if (!netif_carrier_ok(rt->dst.dev)) {
+ if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
rtm->rtm_flags |= RTNH_F_LINKDOWN;
if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
rtm->rtm_flags |= RTNH_F_DEAD;
@@ -3261,19 +3267,12 @@ static int rt6_fill_node(struct net *net,
if (iif) {
#ifdef CONFIG_IPV6_MROUTE
if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
- int err = ip6mr_get_route(net, skb, rtm, nowait,
- portid);
-
- if (err <= 0) {
- if (!nowait) {
- if (err == 0)
- return 0;
- goto nla_put_failure;
- } else {
- if (err == -EMSGSIZE)
- goto nla_put_failure;
- }
- }
+ int err = ip6mr_get_route(net, skb, rtm, portid);
+
+ if (err == 0)
+ return 0;
+ if (err < 0)
+ goto nla_put_failure;
} else
#endif
if (nla_put_u32(skb, RTA_IIF, iif))
@@ -3317,7 +3316,8 @@ static int rt6_fill_node(struct net *net,
if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
goto nla_put_failure;
- lwtunnel_fill_encap(skb, rt->dst.lwtstate);
+ if (lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
+ goto nla_put_failure;
nlmsg_end(skb, nlh);
return 0;
@@ -3330,18 +3330,26 @@ nla_put_failure:
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
- int prefix;
+ struct net *net = arg->net;
+
+ if (rt == net->ipv6.ip6_null_entry)
+ return 0;
if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
- prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
- } else
- prefix = 0;
- return rt6_fill_node(arg->net,
+ /* user wants prefix routes only */
+ if (rtm->rtm_flags & RTM_F_PREFIX &&
+ !(rt->rt6i_flags & RTF_PREFIX_RT)) {
+ /* success since this is not a prefix route */
+ return 1;
+ }
+ }
+
+ return rt6_fill_node(net,
arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
- prefix, 0, NLM_F_MULTI);
+ NLM_F_MULTI);
}
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
@@ -3422,17 +3430,11 @@ static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
goto errout;
}
- /* Reserve room for dummy headers, this skb can pass
- through good chunk of routing engine.
- */
- skb_reset_mac_header(skb);
- skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));
-
skb_dst_set(skb, &rt->dst);
err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
- nlh->nlmsg_seq, 0, 0, 0);
+ nlh->nlmsg_seq, 0);
if (err < 0) {
kfree_skb(skb);
goto errout;
@@ -3459,7 +3461,7 @@ void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
goto errout;
err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
- event, info->portid, seq, 0, 0, nlm_flags);
+ event, info->portid, seq, nlm_flags);
if (err < 0) {
/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
WARN_ON(err == -EMSGSIZE);
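
rtm_to_fib6_config() now rejects unknown encap types while parsing the request, both for RTA_ENCAP_TYPE and for types nested inside RTA_MULTIPATH, instead of failing later inside lwtunnel_build_state() after partial state may exist. A consolidated sketch of the two validation points added above:

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp, cfg->fc_mp_len);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
		err = lwtunnel_valid_encap_type(cfg->fc_encap_type);
		if (err < 0)
			goto errout;
	}
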
diff --git a/net/ipv6/seg6.c b/net/ipv6/seg6.c
index b172d85..a855eb3 100644
--- a/net/ipv6/seg6.c
+++ b/net/ipv6/seg6.c
@@ -176,6 +176,8 @@ static int seg6_genl_set_tunsrc(struct sk_buff *skb, struct genl_info *info)
val = nla_data(info->attrs[SEG6_ATTR_DST]);
t_new = kmemdup(val, sizeof(*val), GFP_KERNEL);
+ if (!t_new)
+ return -ENOMEM;
mutex_lock(&sdata->lock);
diff --git a/net/ipv6/seg6_hmac.c b/net/ipv6/seg6_hmac.c
index ef1c8a4..b274f1d 100644
--- a/net/ipv6/seg6_hmac.c
+++ b/net/ipv6/seg6_hmac.c
@@ -45,7 +45,7 @@
#include <net/seg6_hmac.h>
#include <linux/random.h>
-static char * __percpu *hmac_ring;
+static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);
static int seg6_hmac_cmpfn(struct rhashtable_compare_arg *arg, const void *obj)
{
@@ -192,7 +192,7 @@ int seg6_hmac_compute(struct seg6_hmac_info *hinfo, struct ipv6_sr_hdr *hdr,
*/
local_bh_disable();
- ring = *this_cpu_ptr(hmac_ring);
+ ring = this_cpu_ptr(hmac_ring);
off = ring;
/* source address */
@@ -353,27 +353,6 @@ out:
}
EXPORT_SYMBOL(seg6_push_hmac);
-static int seg6_hmac_init_ring(void)
-{
- int i;
-
- hmac_ring = alloc_percpu(char *);
-
- if (!hmac_ring)
- return -ENOMEM;
-
- for_each_possible_cpu(i) {
- char *ring = kzalloc(SEG6_HMAC_RING_SIZE, GFP_KERNEL);
-
- if (!ring)
- return -ENOMEM;
-
- *per_cpu_ptr(hmac_ring, i) = ring;
- }
-
- return 0;
-}
-
static int seg6_hmac_init_algo(void)
{
struct seg6_hmac_algo *algo;
@@ -400,7 +379,7 @@ static int seg6_hmac_init_algo(void)
*p_tfm = tfm;
}
- p_tfm = this_cpu_ptr(algo->tfms);
+ p_tfm = raw_cpu_ptr(algo->tfms);
tfm = *p_tfm;
shsize = sizeof(*shash) + crypto_shash_descsize(tfm);
@@ -410,7 +389,8 @@ static int seg6_hmac_init_algo(void)
return -ENOMEM;
for_each_possible_cpu(cpu) {
- shash = kzalloc(shsize, GFP_KERNEL);
+ shash = kzalloc_node(shsize, GFP_KERNEL,
+ cpu_to_node(cpu));
if (!shash)
return -ENOMEM;
*per_cpu_ptr(algo->shashs, cpu) = shash;
@@ -422,16 +402,7 @@ static int seg6_hmac_init_algo(void)
int __init seg6_hmac_init(void)
{
- int ret;
-
- ret = seg6_hmac_init_ring();
- if (ret < 0)
- goto out;
-
- ret = seg6_hmac_init_algo();
-
-out:
- return ret;
+ return seg6_hmac_init_algo();
}
EXPORT_SYMBOL(seg6_hmac_init);
@@ -450,13 +421,6 @@ void seg6_hmac_exit(void)
struct seg6_hmac_algo *algo = NULL;
int i, alg_count, cpu;
- for_each_possible_cpu(i) {
- char *ring = *per_cpu_ptr(hmac_ring, i);
-
- kfree(ring);
- }
- free_percpu(hmac_ring);
-
alg_count = sizeof(hmac_algos) / sizeof(struct seg6_hmac_algo);
for (i = 0; i < alg_count; i++) {
algo = &hmac_algos[i];
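
The conversion replaces a per-cpu array of separately kzalloc'ed rings with per-cpu storage itself. That removes the init/exit plumbing (the old init path also returned -ENOMEM without unwinding rings it had already allocated) and one level of indirection on every use. Side by side, as a sketch:

	/* Before: per-cpu pointers to separately allocated buffers. */
	static char * __percpu *hmac_ring;	/* alloc_percpu + kzalloc each */
	ring = *this_cpu_ptr(hmac_ring);	/* double dereference */

	/* After: the per-cpu area is the buffer; nothing to allocate or free. */
	static DEFINE_PER_CPU(char [SEG6_HMAC_RING_SIZE], hmac_ring);
	ring = this_cpu_ptr(hmac_ring);		/* direct per-cpu address */
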
diff --git a/net/ipv6/seg6_iptunnel.c b/net/ipv6/seg6_iptunnel.c
index bbfca22..6124e15 100644
--- a/net/ipv6/seg6_iptunnel.c
+++ b/net/ipv6/seg6_iptunnel.c
@@ -265,7 +265,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
slwt = seg6_lwt_lwtunnel(orig_dst->lwtstate);
#ifdef CONFIG_DST_CACHE
+ preempt_disable();
dst = dst_cache_get(&slwt->cache);
+ preempt_enable();
#endif
if (unlikely(!dst)) {
@@ -286,7 +288,9 @@ int seg6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
}
#ifdef CONFIG_DST_CACHE
+ preempt_disable();
dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
+ preempt_enable();
#endif
}
@@ -299,7 +303,7 @@ drop:
return err;
}
-static int seg6_build_state(struct net_device *dev, struct nlattr *nla,
+static int seg6_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -418,6 +422,7 @@ static const struct lwtunnel_encap_ops seg6_iptun_ops = {
.fill_encap = seg6_fill_encap_info,
.get_encap_size = seg6_encap_nlsize,
.cmp_encap = seg6_encap_cmp,
+ .owner = THIS_MODULE,
};
int __init seg6_iptunnel_init(void)
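
dst_cache_get() and dst_cache_set_ip6() touch per-cpu data, so the calling task must not be migrated to another CPU in the middle of an access; seg6_output() runs in a preemptible context, hence the bracketing added above. The pattern, sketched:

	preempt_disable();
	dst = dst_cache_get(&slwt->cache);	/* per-cpu access */
	preempt_enable();

	if (unlikely(!dst)) {
		/* ... perform the route lookup ... */
		preempt_disable();
		dst_cache_set_ip6(&slwt->cache, dst, &fl6.saddr);
		preempt_enable();
	}
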
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 228965d..64834ec 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -287,6 +287,11 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
inet->inet_dport,
&tp->tsoffset);
+ if (tcp_fastopen_defer_connect(sk, &err))
+ return err;
+ if (err)
+ goto late_failure;
+
err = tcp_connect(sk);
if (err)
goto late_failure;
@@ -295,7 +300,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
late_failure:
tcp_set_state(sk, TCP_CLOSE);
- __sk_dst_reset(sk);
failure:
inet->inet_dport = 0;
sk->sk_route_caps = 0;
@@ -470,7 +474,7 @@ static int tcp_v6_send_synack(const struct sock *sk, struct dst_entry *dst,
opt = ireq->ipv6_opt;
if (!opt)
opt = rcu_dereference(np->opt);
- err = ip6_xmit(sk, skb, fl6, opt, np->tclass);
+ err = ip6_xmit(sk, skb, fl6, sk->sk_mark, opt, np->tclass);
rcu_read_unlock();
err = net_xmit_eval(err);
}
@@ -841,7 +845,7 @@ static void tcp_v6_send_response(const struct sock *sk, struct sk_buff *skb, u32
dst = ip6_dst_lookup_flow(ctl_sk, &fl6, NULL);
if (!IS_ERR(dst)) {
skb_dst_set(buff, dst);
- ip6_xmit(ctl_sk, buff, &fl6, NULL, tclass);
+ ip6_xmit(ctl_sk, buff, &fl6, fl6.flowi6_mark, NULL, tclass);
TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
if (rst)
TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
@@ -1621,7 +1625,6 @@ static const struct inet_connection_sock_af_ops ipv6_specific = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
@@ -1652,7 +1655,6 @@ static const struct inet_connection_sock_af_ops ipv6_mapped = {
.getsockopt = ipv6_getsockopt,
.addr2sockaddr = inet6_csk_addr2sockaddr,
.sockaddr_len = sizeof(struct sockaddr_in6),
- .bind_conflict = inet6_csk_bind_conflict,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_ipv6_setsockopt,
.compat_getsockopt = compat_ipv6_getsockopt,
@@ -1745,7 +1747,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
srcp = ntohs(inet->inet_sport);
if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
- icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
+ icsk->icsk_pending == ICSK_TIME_REO_TIMEOUT ||
icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
timer_active = 1;
timer_expires = icsk->icsk_timeout;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 4d5c4ee..b4c6516 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -55,6 +55,16 @@
#include <trace/events/skb.h>
#include "udp_impl.h"
+static bool udp6_lib_exact_dif_match(struct net *net, struct sk_buff *skb)
+{
+#if defined(CONFIG_NET_L3_MASTER_DEV)
+ if (!net->ipv4.sysctl_udp_l3mdev_accept &&
+ skb && ipv6_l3mdev_skb(IP6CB(skb)->flags))
+ return true;
+#endif
+ return false;
+}
+
static u32 udp6_ehashfn(const struct net *net,
const struct in6_addr *laddr,
const u16 lport,
@@ -103,7 +113,7 @@ int udp_v6_get_port(struct sock *sk, unsigned short snum)
/* precompute partial secondary hash */
udp_sk(sk)->udp_portaddr_hash = hash2_partial;
- return udp_lib_get_port(sk, snum, ipv6_rcv_saddr_equal, hash2_nulladdr);
+ return udp_lib_get_port(sk, snum, hash2_nulladdr);
}
static void udp_v6_rehash(struct sock *sk)
@@ -118,7 +128,7 @@ static void udp_v6_rehash(struct sock *sk)
static int compute_score(struct sock *sk, struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned short hnum,
- int dif)
+ int dif, bool exact_dif)
{
int score;
struct inet_sock *inet;
@@ -149,7 +159,7 @@ static int compute_score(struct sock *sk, struct net *net,
score++;
}
- if (sk->sk_bound_dev_if) {
+ if (sk->sk_bound_dev_if || exact_dif) {
if (sk->sk_bound_dev_if != dif)
return -1;
score++;
@@ -165,7 +175,7 @@ static int compute_score(struct sock *sk, struct net *net,
static struct sock *udp6_lib_lookup2(struct net *net,
const struct in6_addr *saddr, __be16 sport,
const struct in6_addr *daddr, unsigned int hnum, int dif,
- struct udp_hslot *hslot2,
+ bool exact_dif, struct udp_hslot *hslot2,
struct sk_buff *skb)
{
struct sock *sk, *result;
@@ -176,7 +186,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
badness = -1;
udp_portaddr_for_each_entry_rcu(sk, &hslot2->head) {
score = compute_score(sk, net, saddr, sport,
- daddr, hnum, dif);
+ daddr, hnum, dif, exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
@@ -212,6 +222,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
unsigned short hnum = ntohs(dport);
unsigned int hash2, slot2, slot = udp_hashfn(net, hnum, udptable->mask);
struct udp_hslot *hslot2, *hslot = &udptable->hash[slot];
+ bool exact_dif = udp6_lib_exact_dif_match(net, skb);
int score, badness, matches = 0, reuseport = 0;
u32 hash = 0;
@@ -223,7 +234,7 @@ struct sock *__udp6_lib_lookup(struct net *net,
goto begin;
result = udp6_lib_lookup2(net, saddr, sport,
- daddr, hnum, dif,
+ daddr, hnum, dif, exact_dif,
hslot2, skb);
if (!result) {
unsigned int old_slot2 = slot2;
@@ -239,7 +250,8 @@ struct sock *__udp6_lib_lookup(struct net *net,
result = udp6_lib_lookup2(net, saddr, sport,
daddr, hnum, dif,
- hslot2, skb);
+ exact_dif, hslot2,
+ skb);
}
return result;
}
@@ -247,7 +259,8 @@ begin:
result = NULL;
badness = -1;
sk_for_each_rcu(sk, &hslot->head) {
- score = compute_score(sk, net, saddr, sport, daddr, hnum, dif);
+ score = compute_score(sk, net, saddr, sport, daddr, hnum, dif,
+ exact_dif);
if (score > badness) {
reuseport = sk->sk_reuseport;
if (reuseport) {
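
With exact_dif set, a packet that arrived through an L3 master device is scored as if the socket were required to be device-bound: an unbound socket no longer matches, which keeps unbound UDP sockets out of VRFs unless udp_l3mdev_accept is enabled. The scoring rule reduces to the fragment below (a sketch of the hunk above):

	if (sk->sk_bound_dev_if || exact_dif) {
		if (sk->sk_bound_dev_if != dif)
			return -1;	/* wrong, or missing, device binding */
		score++;
	}
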
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
index cfb9e5f..13190b3 100644
--- a/net/iucv/af_iucv.c
+++ b/net/iucv/af_iucv.c
@@ -1044,7 +1044,8 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
{
struct sock *sk = sock->sk;
struct iucv_sock *iucv = iucv_sk(sk);
- size_t headroom, linear;
+ size_t headroom = 0;
+ size_t linear;
struct sk_buff *skb;
struct iucv_message txmsg = {0};
struct cmsghdr *cmsg;
@@ -1122,18 +1123,20 @@ static int iucv_sock_sendmsg(struct socket *sock, struct msghdr *msg,
* this is fine for SOCK_SEQPACKET (unless we want to support
* segmented records using the MSG_EOR flag), but
* for SOCK_STREAM we might want to improve it in future */
- headroom = (iucv->transport == AF_IUCV_TRANS_HIPER)
- ? sizeof(struct af_iucv_trans_hdr) + ETH_HLEN : 0;
- if (headroom + len < PAGE_SIZE) {
+ if (iucv->transport == AF_IUCV_TRANS_HIPER) {
+ headroom = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN;
linear = len;
} else {
- /* In nonlinear "classic" iucv skb,
- * reserve space for iucv_array
- */
- if (iucv->transport != AF_IUCV_TRANS_HIPER)
- headroom += sizeof(struct iucv_array) *
- (MAX_SKB_FRAGS + 1);
- linear = PAGE_SIZE - headroom;
+ if (len < PAGE_SIZE) {
+ linear = len;
+ } else {
+ /* In nonlinear "classic" iucv skb,
+ * reserve space for iucv_array
+ */
+ headroom = sizeof(struct iucv_array) *
+ (MAX_SKB_FRAGS + 1);
+ linear = PAGE_SIZE - headroom;
+ }
}
skb = sock_alloc_send_pskb(sk, headroom + linear, len - linear,
noblock, &err, 0);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e91e503..a0be2f6 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3563,6 +3563,17 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
}
EXPORT_SYMBOL(ieee80211_nan_func_match);
+static int ieee80211_set_multicast_to_unicast(struct wiphy *wiphy,
+ struct net_device *dev,
+ const bool enabled)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+
+ sdata->u.ap.multicast_to_unicast = enabled;
+
+ return 0;
+}
+
const struct cfg80211_ops mac80211_config_ops = {
.add_virtual_intf = ieee80211_add_iface,
.del_virtual_intf = ieee80211_del_iface,
@@ -3653,4 +3664,5 @@ const struct cfg80211_ops mac80211_config_ops = {
.nan_change_conf = ieee80211_nan_change_conf,
.add_nan_func = ieee80211_add_nan_func,
.del_nan_func = ieee80211_del_nan_func,
+ .set_multicast_to_unicast = ieee80211_set_multicast_to_unicast,
};
diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c
index e75cbf6..89178b4 100644
--- a/net/mac80211/chan.c
+++ b/net/mac80211/chan.c
@@ -231,9 +231,6 @@ ieee80211_get_max_required_bw(struct ieee80211_sub_if_data *sdata)
!(sta->sdata->bss && sta->sdata->bss == sdata->bss))
continue;
- if (!sta->uploaded || !test_sta_flag(sta, WLAN_STA_ASSOC))
- continue;
-
max_bw = max(max_bw, ieee80211_get_sta_bw(&sta->sta));
}
rcu_read_unlock();
@@ -1270,7 +1267,7 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
struct ieee80211_sub_if_data *sdata, *sdata_tmp;
struct ieee80211_chanctx *ctx, *ctx_tmp, *old_ctx;
struct ieee80211_chanctx *new_ctx = NULL;
- int i, err, n_assigned, n_reserved, n_ready;
+ int err, n_assigned, n_reserved, n_ready;
int n_ctx = 0, n_vifs_switch = 0, n_vifs_assign = 0, n_vifs_ctxless = 0;
lockdep_assert_held(&local->mtx);
@@ -1391,8 +1388,6 @@ static int ieee80211_vif_use_reserved_switch(struct ieee80211_local *local)
* Update all structures, values and pointers to point to new channel
* context(s).
*/
-
- i = 0;
list_for_each_entry(ctx, &local->chanctx_list, list) {
if (ctx->replace_state != IEEE80211_CHANCTX_REPLACES_OTHER)
continue;
diff --git a/net/mac80211/debugfs.c b/net/mac80211/debugfs.c
index e02ba42..f62cd0e 100644
--- a/net/mac80211/debugfs.c
+++ b/net/mac80211/debugfs.c
@@ -243,6 +243,31 @@ static ssize_t hwflags_read(struct file *file, char __user *user_buf,
return rv;
}
+static ssize_t misc_read(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct ieee80211_local *local = file->private_data;
+ /* Max len of each line is 16 characters, plus 9 for 'pending:\n' */
+ size_t bufsz = IEEE80211_MAX_QUEUES * 16 + 9;
+ char *buf = kzalloc(bufsz, GFP_KERNEL);
+ char *pos, *end;
+ ssize_t rv;
+ int i;
+ int ln;
+
+ if (!buf)
+ return -ENOMEM;
+ pos = buf;
+ end = buf + bufsz - 1;
+
+ pos += scnprintf(pos, end - pos, "pending:\n");
+
+ for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
+ ln = skb_queue_len(&local->pending[i]);
+ pos += scnprintf(pos, end - pos, "[%i] %d\n",
+ i, ln);
+ }
+
+ rv = simple_read_from_buffer(user_buf, count, ppos, buf, strlen(buf));
+ kfree(buf);
+ return rv;
+}
+
static ssize_t queues_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
@@ -263,6 +288,7 @@ static ssize_t queues_read(struct file *file, char __user *user_buf,
DEBUGFS_READONLY_FILE_OPS(hwflags);
DEBUGFS_READONLY_FILE_OPS(queues);
+DEBUGFS_READONLY_FILE_OPS(misc);
/* statistics stuff */
@@ -331,6 +357,7 @@ void debugfs_hw_add(struct ieee80211_local *local)
DEBUGFS_ADD(total_ps_buffered);
DEBUGFS_ADD(wep_iv);
DEBUGFS_ADD(queues);
+ DEBUGFS_ADD(misc);
#ifdef CONFIG_PM
DEBUGFS_ADD_MODE(reset, 0200);
#endif
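
misc_read() follows the stock debugfs idiom: size the buffer for the worst case, build it with scnprintf() so writes can never overrun, and hand the result to simple_read_from_buffer(), which handles short reads and *ppos. A self-contained skeleton of the same idiom; the buffer size and the printed value are illustrative only.

	static ssize_t example_read(struct file *file, char __user *user_buf,
				    size_t count, loff_t *ppos)
	{
		size_t bufsz = 4096;		/* worst-case estimate */
		char *buf, *pos, *end;
		ssize_t rv;

		buf = kzalloc(bufsz, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;
		pos = buf;
		end = buf + bufsz - 1;

		pos += scnprintf(pos, end - pos, "example: %d\n", 42);

		rv = simple_read_from_buffer(user_buf, count, ppos, buf, pos - buf);
		kfree(buf);
		return rv;
	}
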
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 1a05f85..8f5fff8 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -519,6 +519,8 @@ static ssize_t ieee80211_if_fmt_aqm(
}
IEEE80211_IF_FILE_R(aqm);
+IEEE80211_IF_FILE(multicast_to_unicast, u.ap.multicast_to_unicast, HEX);
+
/* IBSS attributes */
static ssize_t ieee80211_if_fmt_tsf(
const struct ieee80211_sub_if_data *sdata, char *buf, int buflen)
@@ -683,6 +685,7 @@ static void add_ap_files(struct ieee80211_sub_if_data *sdata)
DEBUGFS_ADD(dtim_count);
DEBUGFS_ADD(num_buffered_multicast);
DEBUGFS_ADD_MODE(tkip_mic_test, 0200);
+ DEBUGFS_ADD_MODE(multicast_to_unicast, 0600);
}
static void add_vlan_files(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index b2069fb..159a1a7 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -297,6 +297,7 @@ struct ieee80211_if_ap {
driver_smps_mode; /* smps mode request */
struct work_struct request_smps_work;
+ bool multicast_to_unicast;
};
struct ieee80211_if_wds {
@@ -624,8 +625,8 @@ struct ieee80211_mesh_sync_ops {
struct ieee80211_rx_status *rx_status);
/* should be called with beacon_data under RCU read lock */
- void (*adjust_tbtt)(struct ieee80211_sub_if_data *sdata,
- struct beacon_data *beacon);
+ void (*adjust_tsf)(struct ieee80211_sub_if_data *sdata,
+ struct beacon_data *beacon);
/* add other framework functions here */
};
@@ -688,7 +689,6 @@ struct ieee80211_if_mesh {
const struct ieee80211_mesh_sync_ops *sync_ops;
s64 sync_offset_clockdrift_max;
spinlock_t sync_offset_lock;
- bool adjusting_tbtt;
/* mesh power save */
enum nl80211_mesh_power_mode nonpeer_pm;
int ps_peers_light_sleep;
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index 77e8a42..40813dd 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -6,6 +6,7 @@
* Copyright (c) 2006 Jiri Benc <jbenc@suse.cz>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
+ * Copyright (c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1293,6 +1294,26 @@ static void ieee80211_iface_work(struct work_struct *work)
} else if (ieee80211_is_action(mgmt->frame_control) &&
mgmt->u.action.category == WLAN_CATEGORY_VHT) {
switch (mgmt->u.action.u.vht_group_notif.action_code) {
+ case WLAN_VHT_ACTION_OPMODE_NOTIF: {
+ struct ieee80211_rx_status *status;
+ enum nl80211_band band;
+ u8 opmode;
+
+ status = IEEE80211_SKB_RXCB(skb);
+ band = status->band;
+ opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
+
+ mutex_lock(&local->sta_mtx);
+ sta = sta_info_get_bss(sdata, mgmt->sa);
+
+ if (sta)
+ ieee80211_vht_handle_opmode(sdata, sta,
+ opmode,
+ band);
+
+ mutex_unlock(&local->sta_mtx);
+ break;
+ }
case WLAN_VHT_ACTION_GROUPID_MGMT:
ieee80211_process_mu_groups(sdata, mgmt);
break;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 1822c77..56fb479 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -913,12 +913,17 @@ int ieee80211_register_hw(struct ieee80211_hw *hw)
supp_ht = supp_ht || sband->ht_cap.ht_supported;
supp_vht = supp_vht || sband->vht_cap.vht_supported;
- if (sband->ht_cap.ht_supported)
- local->rx_chains =
- max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
- local->rx_chains);
+ if (!sband->ht_cap.ht_supported)
+ continue;
/* TODO: consider VHT for RX chains, hopefully it's the same */
+ local->rx_chains =
+ max(ieee80211_mcs_to_chains(&sband->ht_cap.mcs),
+ local->rx_chains);
+
+ /* no need to mask, SM_PS_DISABLED has all bits set */
+ sband->ht_cap.cap |= WLAN_HT_CAP_SM_PS_DISABLED <<
+ IEEE80211_HT_CAP_SM_PS_SHIFT;
}
/* if low-level driver supports AP, we also support VLAN */
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 42120d9..9c23172 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -279,10 +279,6 @@ int mesh_add_meshconf_ie(struct ieee80211_sub_if_data *sdata,
/* Mesh PS mode. See IEEE802.11-2012 8.4.2.100.8 */
*pos |= ifmsh->ps_peers_deep_sleep ?
IEEE80211_MESHCONF_CAPAB_POWER_SAVE_LEVEL : 0x00;
- *pos++ |= ifmsh->adjusting_tbtt ?
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING : 0x00;
- *pos++ = 0x00;
-
return 0;
}
@@ -850,7 +846,6 @@ int ieee80211_start_mesh(struct ieee80211_sub_if_data *sdata)
ifmsh->mesh_cc_id = 0; /* Disabled */
/* register sync ops from extensible synchronization framework */
ifmsh->sync_ops = ieee80211_mesh_sync_ops_get(ifmsh->mesh_sp_id);
- ifmsh->adjusting_tbtt = false;
ifmsh->sync_offset_clockdrift_max = 0;
set_bit(MESH_WORK_HOUSEKEEPING, &ifmsh->wrkq_flags);
ieee80211_mesh_root_setup(ifmsh);
@@ -1349,7 +1344,7 @@ void ieee80211_mesh_work(struct ieee80211_sub_if_data *sdata)
ieee80211_mesh_rootpath(sdata);
if (test_and_clear_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags))
- mesh_sync_adjust_tbtt(sdata);
+ mesh_sync_adjust_tsf(sdata);
if (test_and_clear_bit(MESH_WORK_MBSS_CHANGED, &ifmsh->wrkq_flags))
mesh_bss_info_changed(sdata);
diff --git a/net/mac80211/mesh.h b/net/mac80211/mesh.h
index 26b9ccb..7e5f271 100644
--- a/net/mac80211/mesh.h
+++ b/net/mac80211/mesh.h
@@ -341,7 +341,7 @@ static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
}
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata);
-void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata);
+void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata);
void ieee80211s_stop(void);
#else
static inline bool mesh_path_sel_is_hwmp(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 7fcdcf6..fcba70e5 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -505,12 +505,14 @@ mesh_sta_info_alloc(struct ieee80211_sub_if_data *sdata, u8 *addr,
/* Userspace handles station allocation */
if (sdata->u.mesh.user_mpm ||
- sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED)
- cfg80211_notify_new_peer_candidate(sdata->dev, addr,
- elems->ie_start,
- elems->total_len,
- GFP_KERNEL);
- else
+ sdata->u.mesh.security & IEEE80211_MESH_SEC_AUTHED) {
+ if (mesh_peer_accepts_plinks(elems) &&
+ mesh_plink_availables(sdata))
+ cfg80211_notify_new_peer_candidate(sdata->dev, addr,
+ elems->ie_start,
+ elems->total_len,
+ GFP_KERNEL);
+ } else
sta = __mesh_sta_info_alloc(sdata, addr);
return sta;
diff --git a/net/mac80211/mesh_sync.c b/net/mac80211/mesh_sync.c
index faca22c..a435f09 100644
--- a/net/mac80211/mesh_sync.c
+++ b/net/mac80211/mesh_sync.c
@@ -12,7 +12,7 @@
#include "mesh.h"
#include "driver-ops.h"
-/* This is not in the standard. It represents a tolerable tbtt drift below
+/* This is not in the standard. It represents a tolerable tsf drift below
* which we do no TSF adjustment.
*/
#define TOFFSET_MINIMUM_ADJUSTMENT 10
@@ -46,7 +46,7 @@ static bool mesh_peer_tbtt_adjusting(struct ieee802_11_elems *ie)
IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING) != 0;
}
-void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
+void mesh_sync_adjust_tsf(struct ieee80211_sub_if_data *sdata)
{
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
@@ -57,12 +57,12 @@ void mesh_sync_adjust_tbtt(struct ieee80211_sub_if_data *sdata)
spin_lock_bh(&ifmsh->sync_offset_lock);
if (ifmsh->sync_offset_clockdrift_max < beacon_int_fraction) {
- msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting\n",
+ msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting\n",
(long long) ifmsh->sync_offset_clockdrift_max);
tsfdelta = -ifmsh->sync_offset_clockdrift_max;
ifmsh->sync_offset_clockdrift_max = 0;
} else {
- msync_dbg(sdata, "TBTT : max clockdrift=%lld; adjusting by %llu\n",
+ msync_dbg(sdata, "TSF : max clockdrift=%lld; adjusting by %llu\n",
(long long) ifmsh->sync_offset_clockdrift_max,
(unsigned long long) beacon_int_fraction);
tsfdelta = -beacon_int_fraction;
@@ -123,7 +123,6 @@ static void mesh_sync_offset_rx_bcn_presp(struct ieee80211_sub_if_data *sdata,
*/
if (elems->mesh_config && mesh_peer_tbtt_adjusting(elems)) {
- clear_sta_flag(sta, WLAN_STA_TOFFSET_KNOWN);
msync_dbg(sdata, "STA %pM : is adjusting TBTT\n",
sta->sta.addr);
goto no_sync;
@@ -168,15 +167,13 @@ no_sync:
rcu_read_unlock();
}
-static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
+static void mesh_sync_offset_adjust_tsf(struct ieee80211_sub_if_data *sdata,
struct beacon_data *beacon)
{
struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
- u8 cap;
WARN_ON(ifmsh->mesh_sp_id != IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET);
WARN_ON(!rcu_read_lock_held());
- cap = beacon->meshconf->meshconf_cap;
spin_lock_bh(&ifmsh->sync_offset_lock);
@@ -187,24 +184,16 @@ static void mesh_sync_offset_adjust_tbtt(struct ieee80211_sub_if_data *sdata,
* the tsf adjustment to the mesh tasklet
*/
msync_dbg(sdata,
- "TBTT : kicking off TBTT adjustment with clockdrift_max=%lld\n",
+ "TSF : kicking off TSF adjustment with clockdrift_max=%lld\n",
ifmsh->sync_offset_clockdrift_max);
set_bit(MESH_WORK_DRIFT_ADJUST, &ifmsh->wrkq_flags);
-
- ifmsh->adjusting_tbtt = true;
} else {
msync_dbg(sdata,
- "TBTT : max clockdrift=%lld; too small to adjust\n",
+ "TSF : max clockdrift=%lld; too small to adjust\n",
(long long)ifmsh->sync_offset_clockdrift_max);
ifmsh->sync_offset_clockdrift_max = 0;
-
- ifmsh->adjusting_tbtt = false;
}
spin_unlock_bh(&ifmsh->sync_offset_lock);
-
- beacon->meshconf->meshconf_cap = ifmsh->adjusting_tbtt ?
- IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING | cap :
- ~IEEE80211_MESHCONF_CAPAB_TBTT_ADJUSTING & cap;
}
static const struct sync_method sync_methods[] = {
@@ -212,7 +201,7 @@ static const struct sync_method sync_methods[] = {
.method = IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET,
.ops = {
.rx_bcn_presp = &mesh_sync_offset_rx_bcn_presp,
- .adjust_tbtt = &mesh_sync_offset_adjust_tbtt,
+ .adjust_tsf = &mesh_sync_offset_adjust_tsf,
}
},
};
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 098ce9b..8a63445 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1486,10 +1486,6 @@ void ieee80211_recalc_ps(struct ieee80211_local *local)
if (count == 1 && ieee80211_powersave_allowed(found)) {
u8 dtimper = found->u.mgd.dtim_period;
- s32 beaconint_us;
-
- beaconint_us = ieee80211_tu_to_usec(
- found->vif.bss_conf.beacon_int);
timeout = local->dynamic_ps_forced_timeout;
if (timeout < 0)
diff --git a/net/mac80211/rc80211_minstrel.c b/net/mac80211/rc80211_minstrel.c
index 14c5ba3..3ebe440 100644
--- a/net/mac80211/rc80211_minstrel.c
+++ b/net/mac80211/rc80211_minstrel.c
@@ -159,21 +159,23 @@ minstrel_update_rates(struct minstrel_priv *mp, struct minstrel_sta_info *mi)
void
minstrel_calc_rate_stats(struct minstrel_rate_stats *mrs)
{
+ unsigned int cur_prob;
+
if (unlikely(mrs->attempts > 0)) {
mrs->sample_skipped = 0;
- mrs->cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
+ cur_prob = MINSTREL_FRAC(mrs->success, mrs->attempts);
if (unlikely(!mrs->att_hist)) {
- mrs->prob_ewma = mrs->cur_prob;
+ mrs->prob_ewma = cur_prob;
} else {
/* update exponential weighted moving variance */
- mrs->prob_ewmsd = minstrel_ewmsd(mrs->prob_ewmsd,
- mrs->cur_prob,
- mrs->prob_ewma,
- EWMA_LEVEL);
+ mrs->prob_ewmv = minstrel_ewmv(mrs->prob_ewmv,
+ cur_prob,
+ mrs->prob_ewma,
+ EWMA_LEVEL);
/* update exponential weighted moving average */
mrs->prob_ewma = minstrel_ewma(mrs->prob_ewma,
- mrs->cur_prob,
+ cur_prob,
EWMA_LEVEL);
}
mrs->att_hist += mrs->attempts;
@@ -365,6 +367,11 @@ minstrel_get_rate(void *priv, struct ieee80211_sta *sta,
return;
#endif
+ /* Don't use EAPOL frames for sampling on non-mrr hw */
+ if (mp->hw->max_rates == 1 &&
+ (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO))
+ return;
+
delta = (mi->total_packets * sampling_ratio / 100) -
(mi->sample_packets + mi->sample_deferred / 2);
diff --git a/net/mac80211/rc80211_minstrel.h b/net/mac80211/rc80211_minstrel.h
index c230bbe..be6c3f3 100644
--- a/net/mac80211/rc80211_minstrel.h
+++ b/net/mac80211/rc80211_minstrel.h
@@ -14,7 +14,7 @@
#define SAMPLE_COLUMNS 10 /* number of columns in sample table */
/* scaled fraction values */
-#define MINSTREL_SCALE 16
+#define MINSTREL_SCALE 12
#define MINSTREL_FRAC(val, div) (((val) << MINSTREL_SCALE) / div)
#define MINSTREL_TRUNC(val) ((val) >> MINSTREL_SCALE)
@@ -36,21 +36,16 @@ minstrel_ewma(int old, int new, int weight)
}
/*
- * Perform EWMSD (Exponentially Weighted Moving Standard Deviation) calculation
+ * Perform EWMV (Exponentially Weighted Moving Variance) calculation
*/
static inline int
-minstrel_ewmsd(int old_ewmsd, int cur_prob, int prob_ewma, int weight)
+minstrel_ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight)
{
- int diff, incr, tmp_var;
+ int diff, incr;
- /* calculate exponential weighted moving variance */
- diff = MINSTREL_TRUNC((cur_prob - prob_ewma) * 1000000);
+ diff = cur_prob - prob_ewma;
incr = (EWMA_DIV - weight) * diff / EWMA_DIV;
- tmp_var = old_ewmsd * old_ewmsd;
- tmp_var = weight * (tmp_var + diff * incr / 1000000) / EWMA_DIV;
-
- /* return standard deviation */
- return (u16) int_sqrt(tmp_var);
+ return weight * (old_ewmv + MINSTREL_TRUNC(diff * incr)) / EWMA_DIV;
}
struct minstrel_rate_stats {
@@ -59,15 +54,13 @@ struct minstrel_rate_stats {
u16 success, last_success;
/* total attempts/success counters */
- u64 att_hist, succ_hist;
+ u32 att_hist, succ_hist;
/* statistics of packet delivery probability
- * cur_prob - current prob within last update intervall
* prob_ewma - exponential weighted moving average of prob
* prob_ewmsd - exp. weighted moving standard deviation of prob */
- unsigned int cur_prob;
- unsigned int prob_ewma;
- u16 prob_ewmsd;
+ u16 prob_ewma;
+ u16 prob_ewmv;
/* maximum retry counts */
u8 retry_count;
@@ -153,6 +146,14 @@ struct minstrel_debugfs_info {
char buf[];
};
+/* Get EWMSD (Exponentially Weighted Moving Standard Deviation) * 10 */
+static inline int
+minstrel_get_ewmsd10(struct minstrel_rate_stats *mrs)
+{
+ unsigned int ewmv = mrs->prob_ewmv;
+ return int_sqrt(MINSTREL_TRUNC(ewmv * 1000 * 1000));
+}
+
extern const struct rate_control_ops mac80211_minstrel;
void minstrel_add_sta_debugfs(void *priv, void *priv_sta, struct dentry *dir);
void minstrel_remove_sta_debugfs(void *priv, void *priv_sta);
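
With MINSTREL_SCALE reduced to 12, probabilities live in [0, 4096], the variance recurrence stays in that fixed-point domain, and the standard deviation is only materialized on demand, scaled by 10 for display. The arithmetic, as a self-contained userspace sketch: the macros and EWMA weight mirror the kernel definitions but are re-declared here as assumptions, and sqrt() stands in for int_sqrt().

	#include <stdio.h>
	#include <math.h>

	#define SCALE		12	/* MINSTREL_SCALE after this patch */
	#define FRAC(v, d)	(((v) << SCALE) / (d))
	#define TRUNC(v)	((v) >> SCALE)
	#define EWMA_LEVEL	96	/* assumed weight, out of EWMA_DIV */
	#define EWMA_DIV	128

	static int ewma(int old, int cur, int weight)
	{
		return old + (EWMA_DIV - weight) * (cur - old) / EWMA_DIV;
	}

	/* Variance recurrence matching minstrel_ewmv() above. */
	static int ewmv(int old_ewmv, int cur_prob, int prob_ewma, int weight)
	{
		int diff = cur_prob - prob_ewma;
		int incr = (EWMA_DIV - weight) * diff / EWMA_DIV;

		return weight * (old_ewmv + TRUNC(diff * incr)) / EWMA_DIV;
	}

	int main(void)
	{
		int prob_ewma = FRAC(50, 100);	/* running estimate: 50% */
		int prob_ewmv = 0;
		int cur = FRAC(80, 100);	/* one 80% sample arrives */

		prob_ewmv = ewmv(prob_ewmv, cur, prob_ewma, EWMA_LEVEL);
		prob_ewma = ewma(prob_ewma, cur, EWMA_LEVEL);

		/* EWMSD * 10, as minstrel_get_ewmsd10() derives it */
		printf("ewmsd10 = %d\n",
		       (int)sqrt((double)TRUNC(prob_ewmv * 1000 * 1000)));
		return 0;
	}
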
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index 820b0ab..36fc971 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -75,7 +75,7 @@ minstrel_stats_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
struct minstrel_debugfs_info *ms;
- unsigned int i, tp_max, tp_avg, prob, eprob;
+ unsigned int i, tp_max, tp_avg, eprob;
char *p;
ms = kmalloc(2048, GFP_KERNEL);
@@ -86,13 +86,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
p = ms->buf;
p += sprintf(p, "\n");
p += sprintf(p,
- "best __________rate_________ ________statistics________ ________last_______ ______sum-of________\n");
+ "best __________rate_________ ________statistics________ ____last_____ ______sum-of________\n");
p += sprintf(p,
- "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n");
+ "rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+ unsigned int prob_ewmsd;
*(p++) = (i == mi->max_tp_rate[0]) ? 'A' : ' ';
*(p++) = (i == mi->max_tp_rate[1]) ? 'B' : ' ';
@@ -107,17 +108,16 @@ minstrel_stats_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
- " %3u.%1u %3u %3u %-3u "
+ " %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
- prob / 10, prob % 10,
+ prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -148,7 +148,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
{
struct minstrel_sta_info *mi = inode->i_private;
struct minstrel_debugfs_info *ms;
- unsigned int i, tp_max, tp_avg, prob, eprob;
+ unsigned int i, tp_max, tp_avg, eprob;
char *p;
ms = kmalloc(2048, GFP_KERNEL);
@@ -161,6 +161,7 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
for (i = 0; i < mi->n_rates; i++) {
struct minstrel_rate *mr = &mi->r[i];
struct minstrel_rate_stats *mrs = &mi->r[i].stats;
+ unsigned int prob_ewmsd;
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[0]) ? "A" : ""));
p += sprintf(p, "%s" ,((i == mi->max_tp_rate[1]) ? "B" : ""));
@@ -175,16 +176,15 @@ minstrel_stats_csv_open(struct inode *inode, struct file *file)
tp_max = minstrel_get_tp_avg(mr, MINSTREL_FRAC(100,100));
tp_avg = minstrel_get_tp_avg(mr, mrs->prob_ewma);
- prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,%u,"
"%llu,%llu,%d,%d\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
- prob / 10, prob % 10,
+ prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index 30fbabf..8e783e1 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -14,6 +14,7 @@
#include <linux/ieee80211.h>
#include <net/mac80211.h>
#include "rate.h"
+#include "sta_info.h"
#include "rc80211_minstrel.h"
#include "rc80211_minstrel_ht.h"
@@ -154,67 +155,47 @@ MODULE_PARM_DESC(minstrel_vht_only,
const struct mcs_group minstrel_mcs_groups[] = {
MCS_GROUP(1, 0, BW_20),
MCS_GROUP(2, 0, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
MCS_GROUP(3, 0, BW_20),
-#endif
MCS_GROUP(1, 1, BW_20),
MCS_GROUP(2, 1, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
MCS_GROUP(3, 1, BW_20),
-#endif
MCS_GROUP(1, 0, BW_40),
MCS_GROUP(2, 0, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
MCS_GROUP(3, 0, BW_40),
-#endif
MCS_GROUP(1, 1, BW_40),
MCS_GROUP(2, 1, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
MCS_GROUP(3, 1, BW_40),
-#endif
CCK_GROUP,
#ifdef CONFIG_MAC80211_RC_MINSTREL_VHT
VHT_GROUP(1, 0, BW_20),
VHT_GROUP(2, 0, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 0, BW_20),
-#endif
VHT_GROUP(1, 1, BW_20),
VHT_GROUP(2, 1, BW_20),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 1, BW_20),
-#endif
VHT_GROUP(1, 0, BW_40),
VHT_GROUP(2, 0, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 0, BW_40),
-#endif
VHT_GROUP(1, 1, BW_40),
VHT_GROUP(2, 1, BW_40),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 1, BW_40),
-#endif
VHT_GROUP(1, 0, BW_80),
VHT_GROUP(2, 0, BW_80),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 0, BW_80),
-#endif
VHT_GROUP(1, 1, BW_80),
VHT_GROUP(2, 1, BW_80),
-#if MINSTREL_MAX_STREAMS >= 3
VHT_GROUP(3, 1, BW_80),
#endif
-#endif
};
static u8 sample_table[SAMPLE_COLUMNS][MCS_GROUP_RATES] __read_mostly;
@@ -301,7 +282,7 @@ minstrel_ht_get_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
break;
/* short preamble */
- if (!(mi->groups[group].supported & BIT(idx)))
+ if (!(mi->supported[group] & BIT(idx)))
idx += 4;
}
return &mi->groups[group].rates[idx];
@@ -486,7 +467,7 @@ minstrel_ht_prob_rate_reduce_streams(struct minstrel_ht_sta *mi)
MCS_GROUP_RATES].streams;
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
mg = &mi->groups[group];
- if (!mg->supported || group == MINSTREL_CCK_GROUP)
+ if (!mi->supported[group] || group == MINSTREL_CCK_GROUP)
continue;
tmp_idx = mg->max_group_prob_rate % MCS_GROUP_RATES;
@@ -540,7 +521,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
for (group = 0; group < ARRAY_SIZE(minstrel_mcs_groups); group++) {
mg = &mi->groups[group];
- if (!mg->supported)
+ if (!mi->supported[group])
continue;
mi->sample_count++;
@@ -550,7 +531,7 @@ minstrel_ht_update_stats(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
tmp_group_tp_rate[j] = group;
for (i = 0; i < MCS_GROUP_RATES; i++) {
- if (!(mg->supported & BIT(i)))
+ if (!(mi->supported[group] & BIT(i)))
continue;
index = MCS_GROUP_RATES * group + i;
@@ -636,7 +617,7 @@ minstrel_set_next_sample_idx(struct minstrel_ht_sta *mi)
mi->sample_group %= ARRAY_SIZE(minstrel_mcs_groups);
mg = &mi->groups[mi->sample_group];
- if (!mg->supported)
+ if (!mi->supported[mi->sample_group])
continue;
if (++mg->index >= MCS_GROUP_RATES) {
@@ -657,7 +638,7 @@ minstrel_downgrade_rate(struct minstrel_ht_sta *mi, u16 *idx, bool primary)
while (group > 0) {
group--;
- if (!mi->groups[group].supported)
+ if (!mi->supported[group])
continue;
if (minstrel_mcs_groups[group].streams >
@@ -994,7 +975,7 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
sample_idx = sample_table[mg->column][mg->index];
minstrel_set_next_sample_idx(mi);
- if (!(mg->supported & BIT(sample_idx)))
+ if (!(mi->supported[sample_group] & BIT(sample_idx)))
return -1;
mrs = &mg->rates[sample_idx];
@@ -1049,22 +1030,6 @@ minstrel_get_sample_rate(struct minstrel_priv *mp, struct minstrel_ht_sta *mi)
}
static void
-minstrel_ht_check_cck_shortpreamble(struct minstrel_priv *mp,
- struct minstrel_ht_sta *mi, bool val)
-{
- u8 supported = mi->groups[MINSTREL_CCK_GROUP].supported;
-
- if (!supported || !mi->cck_supported_short)
- return;
-
- if (supported & (mi->cck_supported_short << (val * 4)))
- return;
-
- supported ^= mi->cck_supported_short | (mi->cck_supported_short << 4);
- mi->groups[MINSTREL_CCK_GROUP].supported = supported;
-}
-
-static void
minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
struct ieee80211_tx_rate_control *txrc)
{
@@ -1087,7 +1052,6 @@ minstrel_ht_get_rate(void *priv, struct ieee80211_sta *sta, void *priv_sta,
minstrel_aggr_check(sta, txrc->skb);
info->flags |= mi->tx_flags;
- minstrel_ht_check_cck_shortpreamble(mp, mi, txrc->short_preamble);
#ifdef CONFIG_MAC80211_DEBUGFS
if (mp->fixed_rate_idx != -1)
@@ -1154,7 +1118,7 @@ minstrel_ht_update_cck(struct minstrel_priv *mp, struct minstrel_ht_sta *mi,
mi->cck_supported_short |= BIT(i);
}
- mi->groups[MINSTREL_CCK_GROUP].supported = mi->cck_supported;
+ mi->supported[MINSTREL_CCK_GROUP] = mi->cck_supported;
}
static void
@@ -1168,6 +1132,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_mcs_info *mcs = &sta->ht_cap.mcs;
u16 sta_cap = sta->ht_cap.cap;
struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
+ struct sta_info *sinfo = container_of(sta, struct sta_info, sta);
int use_vht;
int n_supported = 0;
int ack_dur;
@@ -1224,7 +1189,7 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
u32 gflags = minstrel_mcs_groups[i].flags;
int bw, nss;
- mi->groups[i].supported = 0;
+ mi->supported[i] = 0;
if (i == MINSTREL_CCK_GROUP) {
minstrel_ht_update_cck(mp, mi, sband, sta);
continue;
@@ -1256,8 +1221,8 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
if (use_vht && minstrel_vht_only)
continue;
#endif
- mi->groups[i].supported = mcs->rx_mask[nss - 1];
- if (mi->groups[i].supported)
+ mi->supported[i] = mcs->rx_mask[nss - 1];
+ if (mi->supported[i])
n_supported++;
continue;
}
@@ -1283,16 +1248,19 @@ minstrel_ht_update_caps(void *priv, struct ieee80211_supported_band *sband,
else
bw = BW_20;
- mi->groups[i].supported = minstrel_get_valid_vht_rates(bw, nss,
+ mi->supported[i] = minstrel_get_valid_vht_rates(bw, nss,
vht_cap->vht_mcs.tx_mcs_map);
- if (mi->groups[i].supported)
+ if (mi->supported[i])
n_supported++;
}
if (!n_supported)
goto use_legacy;
+ if (test_sta_flag(sinfo, WLAN_STA_SHORT_PREAMBLE))
+ mi->cck_supported_short |= mi->cck_supported_short << 4;
+
/* create an initial rate table with the lowest supported rates */
minstrel_ht_update_stats(mp, mi);
minstrel_ht_update_rates(mp, mi);
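With the supported bitfields hoisted out of struct minstrel_mcs_group_data into the flat mi->supported[] array (see the header change below), rate scans no longer need to touch the per-group structures at all. A hypothetical helper showing the resulting access pattern:

static int
minstrel_ht_count_supported(struct minstrel_ht_sta *mi)
{
	int group, i, n = 0;

	for (group = 0; group < MINSTREL_GROUPS_NB; group++) {
		if (!mi->supported[group])	/* whole group disabled */
			continue;
		for (i = 0; i < MCS_GROUP_RATES; i++)
			if (mi->supported[group] & BIT(i))
				n++;
	}
	return n;
}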
diff --git a/net/mac80211/rc80211_minstrel_ht.h b/net/mac80211/rc80211_minstrel_ht.h
index e8b52a9..de1646c 100644
--- a/net/mac80211/rc80211_minstrel_ht.h
+++ b/net/mac80211/rc80211_minstrel_ht.h
@@ -52,9 +52,6 @@ struct minstrel_mcs_group_data {
u8 index;
u8 column;
- /* bitfield of supported MCS rates of this group */
- u16 supported;
-
/* sorted rate set within an MCS group */
u16 max_group_tp_rate[MAX_THR_RATES];
u16 max_group_prob_rate;
@@ -101,6 +98,9 @@ struct minstrel_ht_sta {
u8 cck_supported;
u8 cck_supported_short;
+ /* Bitfield of supported MCS rates of all groups */
+ u16 supported[MINSTREL_GROUPS_NB];
+
/* MCS rate group info and statistics */
struct minstrel_mcs_group_data groups[MINSTREL_GROUPS_NB];
};
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index 5320e35..7d969e3 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -19,12 +19,12 @@ static char *
minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
const struct mcs_group *mg;
- unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
+ unsigned int j, tp_max, tp_avg, eprob, tx_time;
char htmode = '2';
char gimode = 'L';
u32 gflags;
- if (!mi->groups[i].supported)
+ if (!mi->supported[i])
return p;
mg = &minstrel_mcs_groups[i];
@@ -41,8 +41,9 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
+ unsigned int prob_ewmsd;
- if (!(mi->groups[i].supported & BIT(j)))
+ if (!(mi->supported[i] & BIT(j)))
continue;
if (gflags & IEEE80211_TX_RC_MCS) {
@@ -83,17 +84,16 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ prob_ewmsd = minstrel_get_ewmsd10(mrs);
p += sprintf(p, "%4u.%1u %4u.%1u %3u.%1u %3u.%1u"
- " %3u.%1u %3u %3u %-3u "
+ " %3u %3u %-3u "
"%9llu %-9llu\n",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
- prob / 10, prob % 10,
+ prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
@@ -130,9 +130,9 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
p += sprintf(p, "\n");
p += sprintf(p,
- " best ____________rate__________ ________statistics________ ________last_______ ______sum-of________\n");
+ " best ____________rate__________ ________statistics________ _____last____ ______sum-of________\n");
p += sprintf(p,
- "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [prob.|retry|suc|att] [#success | #attempts]\n");
+ "mode guard # rate [name idx airtime max_tp] [avg(tp) avg(prob) sd(prob)] [retry|suc|att] [#success | #attempts]\n");
p = minstrel_ht_stats_dump(mi, MINSTREL_CCK_GROUP, p);
for (i = 0; i < MINSTREL_CCK_GROUP; i++)
@@ -165,12 +165,12 @@ static char *
minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
{
const struct mcs_group *mg;
- unsigned int j, tp_max, tp_avg, prob, eprob, tx_time;
+ unsigned int j, tp_max, tp_avg, eprob, tx_time;
char htmode = '2';
char gimode = 'L';
u32 gflags;
- if (!mi->groups[i].supported)
+ if (!mi->supported[i])
return p;
mg = &minstrel_mcs_groups[i];
@@ -187,8 +187,9 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
struct minstrel_rate_stats *mrs = &mi->groups[i].rates[j];
static const int bitrates[4] = { 10, 20, 55, 110 };
int idx = i * MCS_GROUP_RATES + j;
+ unsigned int prob_ewmsd;
- if (!(mi->groups[i].supported & BIT(j)))
+ if (!(mi->supported[i] & BIT(j)))
continue;
if (gflags & IEEE80211_TX_RC_MCS) {
@@ -226,16 +227,15 @@ minstrel_ht_stats_csv_dump(struct minstrel_ht_sta *mi, int i, char *p)
tp_max = minstrel_ht_get_tp_avg(mi, i, j, MINSTREL_FRAC(100, 100));
tp_avg = minstrel_ht_get_tp_avg(mi, i, j, mrs->prob_ewma);
- prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
eprob = MINSTREL_TRUNC(mrs->prob_ewma * 1000);
+ prob_ewmsd = minstrel_get_ewmsd10(mrs);
- p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
+ p += sprintf(p, "%u.%u,%u.%u,%u.%u,%u.%u,%u,%u,"
"%u,%llu,%llu,",
tp_max / 10, tp_max % 10,
tp_avg / 10, tp_avg % 10,
eprob / 10, eprob % 10,
- mrs->prob_ewmsd / 10, mrs->prob_ewmsd % 10,
- prob / 10, prob % 10,
+ prob_ewmsd / 10, prob_ewmsd % 10,
mrs->retry_count,
mrs->last_success,
mrs->last_attempts,
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 3e289a6..b791c41 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1908,7 +1908,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
unsigned int frag, seq;
struct ieee80211_fragment_entry *entry;
struct sk_buff *skb;
- struct ieee80211_rx_status *status;
hdr = (struct ieee80211_hdr *)rx->skb->data;
fc = hdr->frame_control;
@@ -2034,9 +2033,6 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
dev_kfree_skb(skb);
}
- /* Complete frame has been reassembled - process it now */
- status = IEEE80211_SKB_RXCB(rx->skb);
-
out:
ieee80211_led_rx(rx->local);
out_no_led:
@@ -2472,7 +2468,8 @@ ieee80211_rx_h_mesh_fwding(struct ieee80211_rx_data *rx)
if (!ifmsh->mshcfg.dot11MeshForwarding)
goto out;
- fwd_skb = skb_copy_expand(skb, local->tx_headroom, 0, GFP_ATOMIC);
+ fwd_skb = skb_copy_expand(skb, local->tx_headroom +
+ sdata->encrypt_headroom, 0, GFP_ATOMIC);
if (!fwd_skb) {
net_info_ratelimited("%s: failed to clone mesh frame\n",
sdata->name);
@@ -2880,17 +2877,10 @@ ieee80211_rx_h_action(struct ieee80211_rx_data *rx)
switch (mgmt->u.action.u.vht_opmode_notif.action_code) {
case WLAN_VHT_ACTION_OPMODE_NOTIF: {
- u8 opmode;
-
/* verify opmode is present */
if (len < IEEE80211_MIN_ACTION_SIZE + 2)
goto invalid;
-
- opmode = mgmt->u.action.u.vht_opmode_notif.operating_mode;
-
- ieee80211_vht_handle_opmode(rx->sdata, rx->sta,
- opmode, status->band);
- goto handled;
+ goto queue;
}
case WLAN_VHT_ACTION_GROUPID_MGMT: {
if (len < IEEE80211_MIN_ACTION_SIZE + 25)
@@ -3942,21 +3932,31 @@ static bool ieee80211_invoke_fast_rx(struct ieee80211_rx_data *rx,
u64_stats_update_end(&stats->syncp);
if (fast_rx->internal_forward) {
- struct sta_info *dsta = sta_info_get(rx->sdata, skb->data);
+ struct sk_buff *xmit_skb = NULL;
+ bool multicast = is_multicast_ether_addr(skb->data);
- if (dsta) {
+ if (multicast) {
+ xmit_skb = skb_copy(skb, GFP_ATOMIC);
+ } else if (sta_info_get(rx->sdata, skb->data)) {
+ xmit_skb = skb;
+ skb = NULL;
+ }
+
+ if (xmit_skb) {
/*
* Send to wireless media and increase priority by 256
* to keep the received priority instead of
* reclassifying the frame (see cfg80211_classify8021d).
*/
- skb->priority += 256;
- skb->protocol = htons(ETH_P_802_3);
- skb_reset_network_header(skb);
- skb_reset_mac_header(skb);
- dev_queue_xmit(skb);
- return true;
+ xmit_skb->priority += 256;
+ xmit_skb->protocol = htons(ETH_P_802_3);
+ skb_reset_network_header(xmit_skb);
+ skb_reset_mac_header(xmit_skb);
+ dev_queue_xmit(xmit_skb);
}
+
+ if (!skb)
+ return true;
}
/* deliver to local stack */
diff --git a/net/mac80211/scan.c b/net/mac80211/scan.c
index 23d8ac8..faab3c4 100644
--- a/net/mac80211/scan.c
+++ b/net/mac80211/scan.c
@@ -1120,7 +1120,6 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
u32 rate_masks[NUM_NL80211_BANDS] = {};
u8 bands_used = 0;
u8 *ie;
- size_t len;
iebufsz = local->scan_ies_len + req->ie_len;
@@ -1145,10 +1144,9 @@ int __ieee80211_request_sched_scan_start(struct ieee80211_sub_if_data *sdata,
ieee80211_prepare_scan_chandef(&chandef, req->scan_width);
- len = ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
- &sched_scan_ies, req->ie,
- req->ie_len, bands_used,
- rate_masks, &chandef);
+ ieee80211_build_preq_ies(local, ie, num_bands * iebufsz,
+ &sched_scan_ies, req->ie,
+ req->ie_len, bands_used, rate_masks, &chandef);
ret = drv_sched_scan_start(local, sdata, req, &sched_scan_ies);
if (ret == 0) {
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index b6cfcf0..4774e66 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -513,23 +513,23 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
{
struct ieee80211_local *local = sta->local;
struct ieee80211_sub_if_data *sdata = sta->sdata;
- struct station_info *sinfo;
+ struct station_info *sinfo = NULL;
int err = 0;
lockdep_assert_held(&local->sta_mtx);
- sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
- if (!sinfo) {
- err = -ENOMEM;
- goto out_err;
- }
-
/* check if STA exists already */
if (sta_info_get_bss(sdata, sta->sta.addr)) {
err = -EEXIST;
goto out_err;
}
+ sinfo = kzalloc(sizeof(struct station_info), GFP_KERNEL);
+ if (!sinfo) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
local->num_sta++;
local->sta_generation++;
smp_mb();
@@ -1501,8 +1501,8 @@ ieee80211_sta_ps_deliver_response(struct sta_info *sta,
/* This will evaluate to 1, 3, 5 or 7. */
for (ac = IEEE80211_AC_VO; ac < IEEE80211_NUM_ACS; ac++)
- if (ignored_acs & BIT(ac))
- continue;
+ if (!(ignored_acs & ieee80211_ac_to_qos_mask[ac]))
+ break;
tid = 7 - 2 * ac;
ieee80211_send_null_response(sta, tid, reason, true, false);
@@ -2051,16 +2051,12 @@ void sta_set_sinfo(struct sta_info *sta, struct station_info *sinfo)
{
struct ieee80211_sub_if_data *sdata = sta->sdata;
struct ieee80211_local *local = sdata->local;
- struct rate_control_ref *ref = NULL;
u32 thr = 0;
int i, ac, cpu;
struct ieee80211_sta_rx_stats *last_rxstats;
last_rxstats = sta_get_last_rx_stats(sta);
- if (test_sta_flag(sta, WLAN_STA_RATE_CONTROL))
- ref = local->rate_ctrl;
-
sinfo->generation = sdata->local->sta_generation;
/* do before driver, so beacon filtering drivers have a
diff --git a/net/mac80211/status.c b/net/mac80211/status.c
index ddf71c6..d6a1bfa 100644
--- a/net/mac80211/status.c
+++ b/net/mac80211/status.c
@@ -541,6 +541,11 @@ static void ieee80211_report_used_skb(struct ieee80211_local *local,
} else if (info->ack_frame_id) {
ieee80211_report_ack_skb(local, info, acked, dropped);
}
+
+ if (!dropped && skb->destructor) {
+ skb->wifi_acked_valid = 1;
+ skb->wifi_acked = acked;
+ }
}
/*
@@ -633,10 +638,9 @@ void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
struct ieee80211_local *local = hw_to_local(hw);
struct ieee80211_supported_band *sband;
int retry_count;
- int rates_idx;
bool acked, noack_success;
- rates_idx = ieee80211_tx_get_rates(hw, info, &retry_count);
+ ieee80211_tx_get_rates(hw, info, &retry_count);
sband = hw->wiphy->bands[info->band];
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 0d8b716..986de09 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -16,6 +16,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
+#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/bitmap.h>
#include <linux/rcupdate.h>
@@ -63,6 +64,10 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
struct ieee80211_chanctx_conf *chanctx_conf;
u32 rate_flags = 0;
+ /* assume HW handles this */
+ if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
+ return 0;
+
rcu_read_lock();
chanctx_conf = rcu_dereference(tx->sdata->vif.chanctx_conf);
if (chanctx_conf) {
@@ -71,10 +76,6 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
}
rcu_read_unlock();
- /* assume HW handles this */
- if (tx->rate.flags & (IEEE80211_TX_RC_MCS | IEEE80211_TX_RC_VHT_MCS))
- return 0;
-
/* uh huh? */
if (WARN_ON_ONCE(tx->rate.idx < 0))
return 0;
@@ -1243,7 +1244,7 @@ ieee80211_tx_prepare(struct ieee80211_sub_if_data *sdata,
static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
struct ieee80211_vif *vif,
- struct ieee80211_sta *pubsta,
+ struct sta_info *sta,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
@@ -1257,10 +1258,13 @@ static struct txq_info *ieee80211_get_txq(struct ieee80211_local *local,
if (!ieee80211_is_data(hdr->frame_control))
return NULL;
- if (pubsta) {
+ if (sta) {
u8 tid = skb->priority & IEEE80211_QOS_CTL_TID_MASK;
- txq = pubsta->txq[tid];
+ if (!sta->uploaded)
+ return NULL;
+
+ txq = sta->sta.txq[tid];
} else if (vif) {
txq = vif->txq;
}
@@ -1503,23 +1507,17 @@ static bool ieee80211_queue_skb(struct ieee80211_local *local,
struct fq *fq = &local->fq;
struct ieee80211_vif *vif;
struct txq_info *txqi;
- struct ieee80211_sta *pubsta;
if (!local->ops->wake_tx_queue ||
sdata->vif.type == NL80211_IFTYPE_MONITOR)
return false;
- if (sta && sta->uploaded)
- pubsta = &sta->sta;
- else
- pubsta = NULL;
-
if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN)
sdata = container_of(sdata->bss,
struct ieee80211_sub_if_data, u.ap);
vif = &sdata->vif;
- txqi = ieee80211_get_txq(local, vif, pubsta, skb);
+ txqi = ieee80211_get_txq(local, vif, sta, skb);
if (!txqi)
return false;
@@ -3574,6 +3572,115 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
rcu_read_unlock();
}
+static int ieee80211_change_da(struct sk_buff *skb, struct sta_info *sta)
+{
+ struct ethhdr *eth;
+ int err;
+
+ err = skb_ensure_writable(skb, ETH_HLEN);
+ if (unlikely(err))
+ return err;
+
+ eth = (void *)skb->data;
+ ether_addr_copy(eth->h_dest, sta->sta.addr);
+
+ return 0;
+}
+
+static bool ieee80211_multicast_to_unicast(struct sk_buff *skb,
+ struct net_device *dev)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ const struct ethhdr *eth = (void *)skb->data;
+ const struct vlan_ethhdr *ethvlan = (void *)skb->data;
+ __be16 ethertype;
+
+ if (likely(!is_multicast_ether_addr(eth->h_dest)))
+ return false;
+
+ switch (sdata->vif.type) {
+ case NL80211_IFTYPE_AP_VLAN:
+ if (sdata->u.vlan.sta)
+ return false;
+ if (sdata->wdev.use_4addr)
+ return false;
+ /* fall through */
+ case NL80211_IFTYPE_AP:
+ /* check runtime toggle for this bss */
+ if (!sdata->bss->multicast_to_unicast)
+ return false;
+ break;
+ default:
+ return false;
+ }
+
+ /* multicast to unicast conversion only for some payload types */
+ ethertype = eth->h_proto;
+ if (ethertype == htons(ETH_P_8021Q) && skb->len >= VLAN_ETH_HLEN)
+ ethertype = ethvlan->h_vlan_encapsulated_proto;
+ switch (ethertype) {
+ case htons(ETH_P_ARP):
+ case htons(ETH_P_IP):
+ case htons(ETH_P_IPV6):
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static void
+ieee80211_convert_to_unicast(struct sk_buff *skb, struct net_device *dev,
+ struct sk_buff_head *queue)
+{
+ struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev);
+ struct ieee80211_local *local = sdata->local;
+ const struct ethhdr *eth = (struct ethhdr *)skb->data;
+ struct sta_info *sta, *first = NULL;
+ struct sk_buff *cloned_skb;
+
+ rcu_read_lock();
+
+ list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ if (sdata != sta->sdata)
+ /* AP-VLAN mismatch */
+ continue;
+ if (unlikely(ether_addr_equal(eth->h_source, sta->sta.addr)))
+ /* do not send back to source */
+ continue;
+ if (!first) {
+ first = sta;
+ continue;
+ }
+ cloned_skb = skb_clone(skb, GFP_ATOMIC);
+ if (!cloned_skb)
+ goto multicast;
+ if (unlikely(ieee80211_change_da(cloned_skb, sta))) {
+ dev_kfree_skb(cloned_skb);
+ goto multicast;
+ }
+ __skb_queue_tail(queue, cloned_skb);
+ }
+
+ if (likely(first)) {
+ if (unlikely(ieee80211_change_da(skb, first)))
+ goto multicast;
+ __skb_queue_tail(queue, skb);
+ } else {
+ /* no STA connected, drop */
+ kfree_skb(skb);
+ skb = NULL;
+ }
+
+ goto out;
+multicast:
+ __skb_queue_purge(queue);
+ __skb_queue_tail(queue, skb);
+out:
+ rcu_read_unlock();
+}
+
/**
* ieee80211_subif_start_xmit - netif start_xmit function for 802.3 vifs
* @skb: packet to be sent
@@ -3584,7 +3691,17 @@ void __ieee80211_subif_start_xmit(struct sk_buff *skb,
netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
struct net_device *dev)
{
- __ieee80211_subif_start_xmit(skb, dev, 0);
+ if (unlikely(ieee80211_multicast_to_unicast(skb, dev))) {
+ struct sk_buff_head queue;
+
+ __skb_queue_head_init(&queue);
+ ieee80211_convert_to_unicast(skb, dev, &queue);
+ while ((skb = __skb_dequeue(&queue)))
+ __ieee80211_subif_start_xmit(skb, dev, 0);
+ } else {
+ __ieee80211_subif_start_xmit(skb, dev, 0);
+ }
+
return NETDEV_TX_OK;
}
@@ -4077,7 +4194,7 @@ __ieee80211_beacon_get(struct ieee80211_hw *hw,
}
if (ifmsh->sync_ops)
- ifmsh->sync_ops->adjust_tbtt(sdata, beacon);
+ ifmsh->sync_ops->adjust_tsf(sdata, beacon);
skb = dev_alloc_skb(local->tx_headroom +
beacon->head_len +
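The conversion added above is gated twice: ieee80211_multicast_to_unicast() checks the interface type, the per-bss runtime toggle and the payload ethertype, and ieee80211_convert_to_unicast() then queues one re-addressed copy per associated station, falling back to the original multicast frame if any clone fails. The ethertype gate in isolation — only ARP, IPv4 and IPv6 payloads (optionally behind an 802.1Q tag) are converted; the helper name is hypothetical:

static bool ethertype_convertible(__be16 proto)
{
	switch (proto) {
	case htons(ETH_P_ARP):
	case htons(ETH_P_IP):
	case htons(ETH_P_IPV6):
		return true;	/* safe to replicate per station */
	default:
		return false;	/* deliver as true multicast */
	}
}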
diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c
index 6832bf6..19ec218 100644
--- a/net/mac80211/vht.c
+++ b/net/mac80211/vht.c
@@ -436,14 +436,10 @@ u32 __ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
struct sta_info *sta, u8 opmode,
enum nl80211_band band)
{
- struct ieee80211_local *local = sdata->local;
- struct ieee80211_supported_band *sband;
enum ieee80211_sta_rx_bandwidth new_bw;
u32 changed = 0;
u8 nss;
- sband = local->hw.wiphy->bands[band];
-
/* ignore - no support for BF yet */
if (opmode & IEEE80211_OPMODE_NOTIF_RX_NSS_TYPE_BF)
return 0;
@@ -527,8 +523,10 @@ void ieee80211_vht_handle_opmode(struct ieee80211_sub_if_data *sdata,
u32 changed = __ieee80211_vht_handle_opmode(sdata, sta, opmode, band);
- if (changed > 0)
+ if (changed > 0) {
+ ieee80211_recalc_min_chandef(sdata);
rate_control_rate_update(local, sband, sta, changed);
+ }
}
void ieee80211_get_vht_mask_from_cap(__le16 vht_cap,
diff --git a/net/mac80211/wep.c b/net/mac80211/wep.c
index efa3f48..73e8f34 100644
--- a/net/mac80211/wep.c
+++ b/net/mac80211/wep.c
@@ -293,7 +293,8 @@ ieee80211_crypto_wep_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
ieee80211_wep_remove_iv(rx->local, rx->skb, rx->key);
/* remove ICV */
- if (pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
+ if (!(status->flag & RX_FLAG_ICV_STRIPPED) &&
+ pskb_trim(rx->skb, rx->skb->len - IEEE80211_WEP_ICV_LEN))
return RX_DROP_UNUSABLE;
}
diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c
index 8af6dd3..c1ef22d 100644
--- a/net/mac80211/wpa.c
+++ b/net/mac80211/wpa.c
@@ -294,7 +294,8 @@ ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx)
return RX_DROP_UNUSABLE;
/* Trim ICV */
- skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
+ if (!(status->flag & RX_FLAG_ICV_STRIPPED))
+ skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
/* Remove IV */
memmove(skb->data + IEEE80211_TKIP_IV_LEN, skb->data, hdrlen);
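Both the WEP and TKIP receive paths now skip trimming the ICV when the driver reports it was already removed. On the driver side this would amount to setting the new rx status flag before handing the frame up; a sketch, with a hypothetical completion function:

static void driver_rx_done(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);

	/* hardware already stripped the 4-byte ICV */
	status->flag |= RX_FLAG_ICV_STRIPPED;
	ieee80211_rx_irqsafe(hw, skb);
}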
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 15fe976..64d3bf2 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -8,6 +8,7 @@
#include <linux/ipv6.h>
#include <linux/mpls.h>
#include <linux/vmalloc.h>
+#include <linux/percpu.h>
#include <net/ip.h>
#include <net/dst.h>
#include <net/sock.h>
@@ -17,8 +18,8 @@
#include <net/netns/generic.h>
#if IS_ENABLED(CONFIG_IPV6)
#include <net/ipv6.h>
-#include <net/addrconf.h>
#endif
+#include <net/addrconf.h>
#include <net/nexthop.h>
#include "internal.h"
@@ -48,11 +49,6 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
return rt;
}
-static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
-{
- return rcu_dereference_rtnl(dev->mpls_ptr);
-}
-
bool mpls_output_possible(const struct net_device *dev)
{
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
@@ -98,18 +94,44 @@ bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu)
}
EXPORT_SYMBOL_GPL(mpls_pkt_too_big);
-static u32 mpls_multipath_hash(struct mpls_route *rt,
- struct sk_buff *skb, bool bos)
+void mpls_stats_inc_outucastpkts(struct net_device *dev,
+ const struct sk_buff *skb)
+{
+ struct mpls_dev *mdev;
+
+ if (skb->protocol == htons(ETH_P_MPLS_UC)) {
+ mdev = mpls_dev_get(dev);
+ if (mdev)
+ MPLS_INC_STATS_LEN(mdev, skb->len,
+ tx_packets,
+ tx_bytes);
+ } else if (skb->protocol == htons(ETH_P_IP)) {
+ IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
+#if IS_ENABLED(CONFIG_IPV6)
+ } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ struct inet6_dev *in6dev = __in6_dev_get(dev);
+
+ if (in6dev)
+ IP6_UPD_PO_STATS(dev_net(dev), in6dev,
+ IPSTATS_MIB_OUT, skb->len);
+#endif
+ }
+}
+EXPORT_SYMBOL_GPL(mpls_stats_inc_outucastpkts);
+
+static u32 mpls_multipath_hash(struct mpls_route *rt, struct sk_buff *skb)
{
struct mpls_entry_decoded dec;
+ unsigned int mpls_hdr_len = 0;
struct mpls_shim_hdr *hdr;
bool eli_seen = false;
int label_index;
u32 hash = 0;
- for (label_index = 0; label_index < MAX_MP_SELECT_LABELS && !bos;
+ for (label_index = 0; label_index < MAX_MP_SELECT_LABELS;
label_index++) {
- if (!pskb_may_pull(skb, sizeof(*hdr) * label_index))
+ mpls_hdr_len += sizeof(*hdr);
+ if (!pskb_may_pull(skb, mpls_hdr_len))
break;
/* Read and decode the current label */
@@ -134,37 +156,38 @@ static u32 mpls_multipath_hash(struct mpls_route *rt,
eli_seen = true;
}
- bos = dec.bos;
- if (bos && pskb_may_pull(skb, sizeof(*hdr) * label_index +
- sizeof(struct iphdr))) {
+ if (!dec.bos)
+ continue;
+
+ /* found bottom label; does skb have room for a header? */
+ if (pskb_may_pull(skb, mpls_hdr_len + sizeof(struct iphdr))) {
const struct iphdr *v4hdr;
- v4hdr = (const struct iphdr *)(mpls_hdr(skb) +
- label_index);
+ v4hdr = (const struct iphdr *)(hdr + 1);
if (v4hdr->version == 4) {
hash = jhash_3words(ntohl(v4hdr->saddr),
ntohl(v4hdr->daddr),
v4hdr->protocol, hash);
} else if (v4hdr->version == 6 &&
- pskb_may_pull(skb, sizeof(*hdr) * label_index +
- sizeof(struct ipv6hdr))) {
+ pskb_may_pull(skb, mpls_hdr_len +
+ sizeof(struct ipv6hdr))) {
const struct ipv6hdr *v6hdr;
- v6hdr = (const struct ipv6hdr *)(mpls_hdr(skb) +
- label_index);
-
+ v6hdr = (const struct ipv6hdr *)(hdr + 1);
hash = __ipv6_addr_jhash(&v6hdr->saddr, hash);
hash = __ipv6_addr_jhash(&v6hdr->daddr, hash);
hash = jhash_1word(v6hdr->nexthdr, hash);
}
}
+
+ break;
}
return hash;
}
static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
- struct sk_buff *skb, bool bos)
+ struct sk_buff *skb)
{
int alive = ACCESS_ONCE(rt->rt_nhn_alive);
u32 hash = 0;
@@ -180,7 +203,7 @@ static struct mpls_nh *mpls_select_multipath(struct mpls_route *rt,
if (alive <= 0)
return NULL;
- hash = mpls_multipath_hash(rt, skb, bos);
+ hash = mpls_multipath_hash(rt, skb);
nh_index = hash % alive;
if (alive == rt->rt_nhn)
goto out;
@@ -253,6 +276,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct mpls_nh *nh;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
+ struct mpls_dev *out_mdev;
struct mpls_dev *mdev;
unsigned int hh_len;
unsigned int new_header_size;
@@ -262,56 +286,66 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
/* Careful: this entire function runs inside an RCU critical section */
mdev = mpls_dev_get(dev);
- if (!mdev || !mdev->input_enabled)
+ if (!mdev)
goto drop;
- if (skb->pkt_type != PACKET_HOST)
+ MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets,
+ rx_bytes);
+
+ if (!mdev->input_enabled) {
+ MPLS_INC_STATS(mdev, rx_dropped);
goto drop;
+ }
+
+ if (skb->pkt_type != PACKET_HOST)
+ goto err;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
- goto drop;
+ goto err;
if (!pskb_may_pull(skb, sizeof(*hdr)))
- goto drop;
+ goto err;
/* Read and decode the label */
hdr = mpls_hdr(skb);
dec = mpls_entry_decode(hdr);
- /* Pop the label */
- skb_pull(skb, sizeof(*hdr));
- skb_reset_network_header(skb);
-
- skb_orphan(skb);
-
rt = mpls_route_input_rcu(net, dec.label);
- if (!rt)
+ if (!rt) {
+ MPLS_INC_STATS(mdev, rx_noroute);
goto drop;
+ }
- nh = mpls_select_multipath(rt, skb, dec.bos);
+ nh = mpls_select_multipath(rt, skb);
if (!nh)
- goto drop;
+ goto err;
- /* Find the output device */
- out_dev = rcu_dereference(nh->nh_dev);
- if (!mpls_output_possible(out_dev))
- goto drop;
+ /* Pop the label */
+ skb_pull(skb, sizeof(*hdr));
+ skb_reset_network_header(skb);
+
+ skb_orphan(skb);
if (skb_warn_if_lro(skb))
- goto drop;
+ goto err;
skb_forward_csum(skb);
/* Verify ttl is valid */
if (dec.ttl <= 1)
- goto drop;
+ goto err;
dec.ttl -= 1;
+ /* Find the output device */
+ out_dev = rcu_dereference(nh->nh_dev);
+ if (!mpls_output_possible(out_dev))
+ goto tx_err;
+
/* Verify the destination can hold the packet */
new_header_size = mpls_nh_header_size(nh);
mtu = mpls_dev_mtu(out_dev);
if (mpls_pkt_too_big(skb, mtu - new_header_size))
- goto drop;
+ goto tx_err;
hh_len = LL_RESERVED_SPACE(out_dev);
if (!out_dev->header_ops)
@@ -319,7 +353,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
/* Ensure there is enough space for the headers in the skb */
if (skb_cow(skb, hh_len + new_header_size))
- goto drop;
+ goto tx_err;
skb->dev = out_dev;
skb->protocol = htons(ETH_P_MPLS_UC);
@@ -327,7 +361,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
if (unlikely(!new_header_size && dec.bos)) {
/* Penultimate hop popping */
if (!mpls_egress(rt, skb, dec))
- goto drop;
+ goto err;
} else {
bool bos;
int i;
@@ -343,6 +377,8 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
}
}
+ mpls_stats_inc_outucastpkts(out_dev, skb);
+
/* If via wasn't specified then send out using device address */
if (nh->nh_via_table == MPLS_NEIGH_TABLE_UNSPEC)
err = neigh_xmit(NEIGH_LINK_TABLE, out_dev,
@@ -355,6 +391,13 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
__func__, err);
return 0;
+tx_err:
+ out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
+ if (out_mdev)
+ MPLS_INC_STATS(out_mdev, tx_errors);
+ goto drop;
+err:
+ MPLS_INC_STATS(mdev, rx_errors);
drop:
kfree_skb(skb);
return NET_RX_DROP;
@@ -853,6 +896,70 @@ errout:
return err;
}
+static void mpls_get_stats(struct mpls_dev *mdev,
+ struct mpls_link_stats *stats)
+{
+ struct mpls_pcpu_stats *p;
+ int i;
+
+ memset(stats, 0, sizeof(*stats));
+
+ for_each_possible_cpu(i) {
+ struct mpls_link_stats local;
+ unsigned int start;
+
+ p = per_cpu_ptr(mdev->stats, i);
+ do {
+ start = u64_stats_fetch_begin(&p->syncp);
+ local = p->stats;
+ } while (u64_stats_fetch_retry(&p->syncp, start));
+
+ stats->rx_packets += local.rx_packets;
+ stats->rx_bytes += local.rx_bytes;
+ stats->tx_packets += local.tx_packets;
+ stats->tx_bytes += local.tx_bytes;
+ stats->rx_errors += local.rx_errors;
+ stats->tx_errors += local.tx_errors;
+ stats->rx_dropped += local.rx_dropped;
+ stats->tx_dropped += local.tx_dropped;
+ stats->rx_noroute += local.rx_noroute;
+ }
+}
+
+static int mpls_fill_stats_af(struct sk_buff *skb,
+ const struct net_device *dev)
+{
+ struct mpls_link_stats *stats;
+ struct mpls_dev *mdev;
+ struct nlattr *nla;
+
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ return -ENODATA;
+
+ nla = nla_reserve_64bit(skb, MPLS_STATS_LINK,
+ sizeof(struct mpls_link_stats),
+ MPLS_STATS_UNSPEC);
+ if (!nla)
+ return -EMSGSIZE;
+
+ stats = nla_data(nla);
+ mpls_get_stats(mdev, stats);
+
+ return 0;
+}
+
+static size_t mpls_get_stats_af_size(const struct net_device *dev)
+{
+ struct mpls_dev *mdev;
+
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ return 0;
+
+ return nla_total_size_64bit(sizeof(struct mpls_link_stats));
+}
+
#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
(&((struct mpls_dev *)0)->field)
@@ -911,6 +1018,7 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
{
struct mpls_dev *mdev;
int err = -ENOMEM;
+ int i;
ASSERT_RTNL();
@@ -918,6 +1026,17 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
if (!mdev)
return ERR_PTR(err);
+ mdev->stats = alloc_percpu(struct mpls_pcpu_stats);
+ if (!mdev->stats)
+ goto free;
+
+ for_each_possible_cpu(i) {
+ struct mpls_pcpu_stats *mpls_stats;
+
+ mpls_stats = per_cpu_ptr(mdev->stats, i);
+ u64_stats_init(&mpls_stats->syncp);
+ }
+
err = mpls_dev_sysctl_register(dev, mdev);
if (err)
goto free;
@@ -927,10 +1046,19 @@ static struct mpls_dev *mpls_add_dev(struct net_device *dev)
return mdev;
free:
+ free_percpu(mdev->stats);
kfree(mdev);
return ERR_PTR(err);
}
+static void mpls_dev_destroy_rcu(struct rcu_head *head)
+{
+ struct mpls_dev *mdev = container_of(head, struct mpls_dev, rcu);
+
+ free_percpu(mdev->stats);
+ kfree(mdev);
+}
+
static void mpls_ifdown(struct net_device *dev, int event)
{
struct mpls_route __rcu **platform_label;
@@ -1045,7 +1173,7 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
if (mdev) {
mpls_dev_sysctl_unregister(mdev);
RCU_INIT_POINTER(dev->mpls_ptr, NULL);
- kfree_rcu(mdev, rcu);
+ call_rcu(&mdev->rcu, mpls_dev_destroy_rcu);
}
break;
case NETDEV_CHANGENAME:
@@ -1706,6 +1834,12 @@ static struct pernet_operations mpls_net_ops = {
.exit = mpls_net_exit,
};
+static struct rtnl_af_ops mpls_af_ops __read_mostly = {
+ .family = AF_MPLS,
+ .fill_stats_af = mpls_fill_stats_af,
+ .get_stats_af_size = mpls_get_stats_af_size,
+};
+
static int __init mpls_init(void)
{
int err;
@@ -1722,6 +1856,8 @@ static int __init mpls_init(void)
dev_add_pack(&mpls_packet_type);
+ rtnl_af_register(&mpls_af_ops);
+
rtnl_register(PF_MPLS, RTM_NEWROUTE, mpls_rtm_newroute, NULL, NULL);
rtnl_register(PF_MPLS, RTM_DELROUTE, mpls_rtm_delroute, NULL, NULL);
rtnl_register(PF_MPLS, RTM_GETROUTE, NULL, mpls_dump_routes, NULL);
@@ -1738,6 +1874,7 @@ module_init(mpls_init);
static void __exit mpls_exit(void)
{
rtnl_unregister_all(PF_MPLS);
+ rtnl_af_unregister(&mpls_af_ops);
dev_remove_pack(&mpls_packet_type);
unregister_netdevice_notifier(&mpls_dev_notifier);
unregister_pernet_subsys(&mpls_net_ops);
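Besides adding stats, the multipath-hash rework above fixes the pskb_may_pull() arithmetic: the old code pulled sizeof(*hdr) * label_index bytes before reading the label at index label_index, one header short. The corrected walk pattern — grow the pulled length before each read — in isolation, with a hypothetical helper name:

static bool walk_labels(struct sk_buff *skb, unsigned int max_labels)
{
	unsigned int mpls_hdr_len = 0;
	unsigned int i;

	for (i = 0; i < max_labels; i++) {
		mpls_hdr_len += sizeof(struct mpls_shim_hdr);
		if (!pskb_may_pull(skb, mpls_hdr_len))
			return false;	/* header i is not in linear data */
		/* ... decode mpls_hdr(skb)[i] here ... */
	}
	return true;
}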
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index bdfef6c..d972430 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -9,13 +9,58 @@ struct mpls_entry_decoded {
u8 bos;
};
+struct mpls_pcpu_stats {
+ struct mpls_link_stats stats;
+ struct u64_stats_sync syncp;
+};
+
struct mpls_dev {
- int input_enabled;
+ int input_enabled;
- struct ctl_table_header *sysctl;
- struct rcu_head rcu;
+ struct mpls_pcpu_stats __percpu *stats;
+
+ struct ctl_table_header *sysctl;
+ struct rcu_head rcu;
};
+#if BITS_PER_LONG == 32
+
+#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
+ do { \
+ __typeof__(*(mdev)->stats) *ptr = \
+ raw_cpu_ptr((mdev)->stats); \
+ local_bh_disable(); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->stats.pkts_field++; \
+ ptr->stats.bytes_field += (len); \
+ u64_stats_update_end(&ptr->syncp); \
+ local_bh_enable(); \
+ } while (0)
+
+#define MPLS_INC_STATS(mdev, field) \
+ do { \
+ __typeof__(*(mdev)->stats) *ptr = \
+ raw_cpu_ptr((mdev)->stats); \
+ local_bh_disable(); \
+ u64_stats_update_begin(&ptr->syncp); \
+ ptr->stats.field++; \
+ u64_stats_update_end(&ptr->syncp); \
+ local_bh_enable(); \
+ } while (0)
+
+#else
+
+#define MPLS_INC_STATS_LEN(mdev, len, pkts_field, bytes_field) \
+ do { \
+ this_cpu_inc((mdev)->stats->stats.pkts_field); \
+ this_cpu_add((mdev)->stats->stats.bytes_field, (len)); \
+ } while (0)
+
+#define MPLS_INC_STATS(mdev, field) \
+ this_cpu_inc((mdev)->stats->stats.field)
+
+#endif
+
struct sk_buff;
#define LABEL_NOT_SPECIFIED (1 << 20)
@@ -114,6 +159,11 @@ static inline struct mpls_entry_decoded mpls_entry_decode(struct mpls_shim_hdr *
return result;
}
+static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->mpls_ptr);
+}
+
int nla_put_labels(struct sk_buff *skb, int attrtype, u8 labels,
const u32 label[]);
int nla_get_labels(const struct nlattr *nla, u32 max_labels, u8 *labels,
@@ -123,5 +173,7 @@ int nla_get_via(const struct nlattr *nla, u8 *via_alen, u8 *via_table,
bool mpls_output_possible(const struct net_device *dev);
unsigned int mpls_dev_mtu(const struct net_device *dev);
bool mpls_pkt_too_big(const struct sk_buff *skb, unsigned int mtu);
+void mpls_stats_inc_outucastpkts(struct net_device *dev,
+ const struct sk_buff *skb);
#endif /* MPLS_INTERNAL_H */
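Two macro variants are needed because on 32-bit a 64-bit counter update must be bracketed by the u64_stats_sync seqcount (with bottom halves disabled) to keep readers from seeing torn values, while on 64-bit plain this_cpu ops are already sufficient. A hypothetical caller, mirroring how mpls_forward() uses the macros:

static void mpls_count_rx(struct mpls_dev *mdev, const struct sk_buff *skb,
			  bool dropped)
{
	if (dropped)
		MPLS_INC_STATS(mdev, rx_dropped);
	else
		MPLS_INC_STATS_LEN(mdev, skb->len, rx_packets, rx_bytes);
}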
diff --git a/net/mpls/mpls_iptunnel.c b/net/mpls/mpls_iptunnel.c
index 2f7ccd9..e4e4424 100644
--- a/net/mpls/mpls_iptunnel.c
+++ b/net/mpls/mpls_iptunnel.c
@@ -48,11 +48,15 @@ static int mpls_xmit(struct sk_buff *skb)
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = NULL;
struct rt6_info *rt6 = NULL;
+ struct mpls_dev *out_mdev;
int err = 0;
bool bos;
int i;
unsigned int ttl;
+ /* Find the output device */
+ out_dev = dst->dev;
+
/* Obtain the ttl */
if (dst->ops->family == AF_INET) {
ttl = ip_hdr(skb)->ttl;
@@ -66,8 +70,6 @@ static int mpls_xmit(struct sk_buff *skb)
skb_orphan(skb);
- /* Find the output device */
- out_dev = dst->dev;
if (!mpls_output_possible(out_dev) ||
!dst->lwtstate || skb_warn_if_lro(skb))
goto drop;
@@ -109,6 +111,8 @@ static int mpls_xmit(struct sk_buff *skb)
bos = false;
}
+ mpls_stats_inc_outucastpkts(out_dev, skb);
+
if (rt)
err = neigh_xmit(NEIGH_ARP_TABLE, out_dev, &rt->rt_gateway,
skb);
@@ -122,11 +126,14 @@ static int mpls_xmit(struct sk_buff *skb)
return LWTUNNEL_XMIT_DONE;
drop:
+ out_mdev = out_dev ? mpls_dev_get(out_dev) : NULL;
+ if (out_mdev)
+ MPLS_INC_STATS(out_mdev, tx_errors);
kfree_skb(skb);
return -EINVAL;
}
-static int mpls_build_state(struct net_device *dev, struct nlattr *nla,
+static int mpls_build_state(struct nlattr *nla,
unsigned int family, const void *cfg,
struct lwtunnel_state **ts)
{
@@ -215,6 +222,7 @@ static const struct lwtunnel_encap_ops mpls_iptun_ops = {
.fill_encap = mpls_fill_encap_info,
.get_encap_size = mpls_encap_nlsize,
.cmp_encap = mpls_encap_cmp,
+ .owner = THIS_MODULE,
};
static int __init mpls_iptunnel_init(void)
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 63729b4..bbc45f8 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -494,7 +494,7 @@ config NFT_CT
depends on NF_CONNTRACK
tristate "Netfilter nf_tables conntrack module"
help
- This option adds the "meta" expression that you can use to match
+ This option adds the "ct" expression that you can use to match
connection tracking information such as the flow state.
config NFT_SET_RBTREE
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 55e0169..8b7416f 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -426,10 +426,9 @@ ip_vs_service_find(struct netns_ipvs *ipvs, int af, __u32 fwmark, __u16 protocol
*/
svc = __ip_vs_service_find(ipvs, af, protocol, vaddr, vport);
- if (svc == NULL
- && protocol == IPPROTO_TCP
- && atomic_read(&ipvs->ftpsvc_counter)
- && (vport == FTPDATA || ntohs(vport) >= PROT_SOCK)) {
+ if (!svc && protocol == IPPROTO_TCP &&
+ atomic_read(&ipvs->ftpsvc_counter) &&
+ (vport == FTPDATA || ntohs(vport) >= inet_prot_sock(ipvs->net))) {
/*
* Check if an FTP service entry exists; the packet
* might belong to FTP data connections.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 3a073cd..4e8083c 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -85,11 +85,11 @@ static __read_mostly DEFINE_SPINLOCK(nf_conntrack_locks_all_lock);
static __read_mostly bool nf_conntrack_locks_all;
/* every gc cycle scans at most 1/GC_MAX_BUCKETS_DIV part of table */
-#define GC_MAX_BUCKETS_DIV 64u
-/* upper bound of scan intervals */
-#define GC_INTERVAL_MAX (2 * HZ)
-/* maximum conntracks to evict per gc run */
-#define GC_MAX_EVICTS 256u
+#define GC_MAX_BUCKETS_DIV 128u
+/* upper bound of full table scan */
+#define GC_MAX_SCAN_JIFFIES (16u * HZ)
+/* desired ratio of entries found to be expired */
+#define GC_EVICT_RATIO 50u
static struct conntrack_gc_work conntrack_gc_work;
@@ -938,6 +938,7 @@ static noinline int early_drop(struct net *net, unsigned int _hash)
static void gc_worker(struct work_struct *work)
{
+ unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
unsigned int i, goal, buckets = 0, expired_count = 0;
struct conntrack_gc_work *gc_work;
unsigned int ratio, scanned = 0;
@@ -979,8 +980,7 @@ static void gc_worker(struct work_struct *work)
*/
rcu_read_unlock();
cond_resched_rcu_qs();
- } while (++buckets < goal &&
- expired_count < GC_MAX_EVICTS);
+ } while (++buckets < goal);
if (gc_work->exiting)
return;
@@ -997,27 +997,25 @@ static void gc_worker(struct work_struct *work)
* 1. Minimize time until we notice a stale entry
* 2. Maximize scan intervals to not waste cycles
*
- * Normally, expired_count will be 0, this increases the next_run time
- * to priorize 2) above.
+ * Normally, the expire ratio will be close to 0.
*
- * As soon as a timed-out entry is found, move towards 1) and increase
- * the scan frequency.
- * In case we have lots of evictions next scan is done immediately.
+ * As soon as a sizeable fraction of the entries have expired,
+ * increase scan frequency.
*/
ratio = scanned ? expired_count * 100 / scanned : 0;
- if (ratio >= 90 || expired_count == GC_MAX_EVICTS) {
- gc_work->next_gc_run = 0;
- next_run = 0;
- } else if (expired_count) {
- gc_work->next_gc_run /= 2U;
- next_run = msecs_to_jiffies(1);
+ if (ratio > GC_EVICT_RATIO) {
+ gc_work->next_gc_run = min_interval;
} else {
- if (gc_work->next_gc_run < GC_INTERVAL_MAX)
- gc_work->next_gc_run += msecs_to_jiffies(1);
+ unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
- next_run = gc_work->next_gc_run;
+ BUILD_BUG_ON((GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV) == 0);
+
+ gc_work->next_gc_run += min_interval;
+ if (gc_work->next_gc_run > max)
+ gc_work->next_gc_run = max;
}
+ next_run = gc_work->next_gc_run;
gc_work->last_bucket = i;
queue_delayed_work(system_long_wq, &gc_work->dwork, next_run);
}
@@ -1025,7 +1023,7 @@ static void gc_worker(struct work_struct *work)
static void conntrack_gc_work_init(struct conntrack_gc_work *gc_work)
{
INIT_DELAYED_WORK(&gc_work->dwork, gc_worker);
- gc_work->next_gc_run = GC_INTERVAL_MAX;
+ gc_work->next_gc_run = HZ;
gc_work->exiting = false;
}
@@ -1917,7 +1915,7 @@ int nf_conntrack_init_start(void)
nf_ct_untracked_status_or(IPS_CONFIRMED | IPS_UNTRACKED);
conntrack_gc_work_init(&conntrack_gc_work);
- queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, GC_INTERVAL_MAX);
+ queue_delayed_work(system_long_wq, &conntrack_gc_work.dwork, HZ);
return 0;
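The effect of the new scheduling is easiest to see with numbers. A standalone sketch of the back-off logic, assuming HZ=250 (any HZ works; the constants are copied from the patch):

#include <stdio.h>

#define HZ			250u	/* assumed for this demo */
#define GC_MAX_BUCKETS_DIV	128u
#define GC_MAX_SCAN_JIFFIES	(16u * HZ)
#define GC_EVICT_RATIO		50u

int main(void)
{
	unsigned int min_interval = HZ / GC_MAX_BUCKETS_DIV;
	unsigned int max = GC_MAX_SCAN_JIFFIES / GC_MAX_BUCKETS_DIV;
	unsigned int ratio = 0;		/* idle table: nothing expiring */
	unsigned int next_gc_run;
	int run;

	if (!min_interval)
		min_interval = 1;
	next_gc_run = min_interval;

	for (run = 0; run < 40; run++) {
		if (ratio > GC_EVICT_RATIO) {
			next_gc_run = min_interval;	/* heavy expiry: rescan fast */
		} else {
			next_gc_run += min_interval;	/* idle: back off ... */
			if (next_gc_run > max)
				next_gc_run = max;	/* ... up to the cap */
		}
		printf("run %2d: next chunk in %2u jiffies\n", run, next_gc_run);
	}
	/* at the cap, GC_MAX_BUCKETS_DIV chunks cover the whole table,
	 * so a full pass takes roughly GC_MAX_SCAN_JIFFIES (16s) */
	return 0;
}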
diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
index 3dca90d..ffb9e8a 100644
--- a/net/netfilter/nf_log.c
+++ b/net/netfilter/nf_log.c
@@ -13,7 +13,6 @@
/* Internal logging interface, which relies on the real
LOG target modules */
-#define NF_LOG_PREFIXLEN 128
#define NFLOGGER_NAME_LEN 64
static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 0db5f97..1b91376 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -928,7 +928,8 @@ static struct nft_chain *nf_tables_chain_lookup(const struct nft_table *table,
}
static const struct nla_policy nft_chain_policy[NFTA_CHAIN_MAX + 1] = {
- [NFTA_CHAIN_TABLE] = { .type = NLA_STRING },
+ [NFTA_CHAIN_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_CHAIN_HANDLE] = { .type = NLA_U64 },
[NFTA_CHAIN_NAME] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
@@ -1854,7 +1855,8 @@ static struct nft_rule *nf_tables_rule_lookup(const struct nft_chain *chain,
}
static const struct nla_policy nft_rule_policy[NFTA_RULE_MAX + 1] = {
- [NFTA_RULE_TABLE] = { .type = NLA_STRING },
+ [NFTA_RULE_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_RULE_CHAIN] = { .type = NLA_STRING,
.len = NFT_CHAIN_MAXNAMELEN - 1 },
[NFTA_RULE_HANDLE] = { .type = NLA_U64 },
@@ -2443,7 +2445,8 @@ nft_select_set_ops(const struct nlattr * const nla[],
}
static const struct nla_policy nft_set_policy[NFTA_SET_MAX + 1] = {
- [NFTA_SET_TABLE] = { .type = NLA_STRING },
+ [NFTA_SET_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
[NFTA_SET_NAME] = { .type = NLA_STRING,
.len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_FLAGS] = { .type = NLA_U32 },
@@ -3084,9 +3087,9 @@ static int nf_tables_delset(struct net *net, struct sock *nlsk,
}
static int nf_tables_bind_check_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
enum nft_registers dreg;
@@ -3192,8 +3195,10 @@ static const struct nla_policy nft_set_elem_policy[NFTA_SET_ELEM_MAX + 1] = {
};
static const struct nla_policy nft_set_elem_list_policy[NFTA_SET_ELEM_LIST_MAX + 1] = {
- [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING },
- [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING },
+ [NFTA_SET_ELEM_LIST_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
+ [NFTA_SET_ELEM_LIST_SET] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_SET_ELEM_LIST_ELEMENTS] = { .type = NLA_NESTED },
[NFTA_SET_ELEM_LIST_SET_ID] = { .type = NLA_U32 },
};
@@ -3303,9 +3308,9 @@ struct nft_set_dump_args {
};
static int nf_tables_dump_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
struct nft_set_dump_args *args;
@@ -3317,7 +3322,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
{
struct net *net = sock_net(skb->sk);
u8 genmask = nft_genmask_cur(net);
- const struct nft_set *set;
+ struct nft_set *set;
struct nft_set_dump_args args;
struct nft_ctx ctx;
struct nlattr *nla[NFTA_SET_ELEM_LIST_MAX + 1];
@@ -3740,10 +3745,18 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set,
goto err5;
}
+ if (set->size &&
+ !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact)) {
+ err = -ENFILE;
+ goto err6;
+ }
+
nft_trans_elem(trans) = elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
+err6:
+ set->ops->remove(set, &elem);
err5:
kfree(trans);
err4:
@@ -3790,15 +3803,9 @@ static int nf_tables_newsetelem(struct net *net, struct sock *nlsk,
return -EBUSY;
nla_for_each_nested(attr, nla[NFTA_SET_ELEM_LIST_ELEMENTS], rem) {
- if (set->size &&
- !atomic_add_unless(&set->nelems, 1, set->size + set->ndeact))
- return -ENFILE;
-
err = nft_add_set_elem(&ctx, set, attr, nlh->nlmsg_flags);
- if (err < 0) {
- atomic_dec(&set->nelems);
+ if (err < 0)
break;
- }
}
return err;
}
@@ -3883,9 +3890,9 @@ err1:
}
static int nft_flush_set(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
struct nft_trans *trans;
int err;
@@ -3899,9 +3906,10 @@ static int nft_flush_set(const struct nft_ctx *ctx,
err = -ENOENT;
goto err1;
}
+ set->ndeact++;
- nft_trans_elem_set(trans) = (struct nft_set *)set;
- nft_trans_elem(trans) = *((struct nft_set_elem *)elem);
+ nft_trans_elem_set(trans) = set;
+ nft_trans_elem(trans) = *elem;
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
@@ -4032,8 +4040,10 @@ struct nft_object *nf_tables_obj_lookup(const struct nft_table *table,
EXPORT_SYMBOL_GPL(nf_tables_obj_lookup);
static const struct nla_policy nft_obj_policy[NFTA_OBJ_MAX + 1] = {
- [NFTA_OBJ_TABLE] = { .type = NLA_STRING },
- [NFTA_OBJ_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJ_TABLE] = { .type = NLA_STRING,
+ .len = NFT_TABLE_MAXNAMELEN - 1 },
+ [NFTA_OBJ_NAME] = { .type = NLA_STRING,
+ .len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJ_TYPE] = { .type = NLA_U32 },
[NFTA_OBJ_DATA] = { .type = NLA_NESTED },
};
@@ -4262,10 +4272,11 @@ static int nf_tables_dump_obj(struct sk_buff *skb, struct netlink_callback *cb)
if (idx > s_idx)
memset(&cb->args[1], 0,
sizeof(cb->args) - sizeof(cb->args[0]));
- if (filter->table[0] &&
+ if (filter && filter->table[0] &&
strcmp(filter->table, table->name))
goto cont;
- if (filter->type != NFT_OBJECT_UNSPEC &&
+ if (filter &&
+ filter->type != NFT_OBJECT_UNSPEC &&
obj->type->type != filter->type)
goto cont;
@@ -5009,9 +5020,9 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
const struct nft_chain *chain);
static int nf_tables_loop_check_setelem(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
const struct nft_set_iter *iter,
- const struct nft_set_elem *elem)
+ struct nft_set_elem *elem)
{
const struct nft_set_ext *ext = nft_set_elem_ext(set, elem->priv);
const struct nft_data *data;
@@ -5035,7 +5046,7 @@ static int nf_tables_check_loops(const struct nft_ctx *ctx,
{
const struct nft_rule *rule;
const struct nft_expr *expr, *last;
- const struct nft_set *set;
+ struct nft_set *set;
struct nft_set_binding *binding;
struct nft_set_iter iter;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 7de2f46..049ad2d 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -98,7 +98,8 @@ out:
}
static const struct nla_policy nft_dynset_policy[NFTA_DYNSET_MAX + 1] = {
- [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_DYNSET_SET_NAME] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_DYNSET_SET_ID] = { .type = NLA_U32 },
[NFTA_DYNSET_OP] = { .type = NLA_U32 },
[NFTA_DYNSET_SREG_KEY] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 6271e40..6f6e644 100644
--- a/net/netfilter/nft_log.c
+++ b/net/netfilter/nft_log.c
@@ -39,7 +39,8 @@ static void nft_log_eval(const struct nft_expr *expr,
static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
[NFTA_LOG_GROUP] = { .type = NLA_U16 },
- [NFTA_LOG_PREFIX] = { .type = NLA_STRING },
+ [NFTA_LOG_PREFIX] = { .type = NLA_STRING,
+ .len = NF_LOG_PREFIXLEN - 1 },
[NFTA_LOG_SNAPLEN] = { .type = NLA_U32 },
[NFTA_LOG_QTHRESHOLD] = { .type = NLA_U16 },
[NFTA_LOG_LEVEL] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index d4f97fa..e21aea7 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -49,7 +49,8 @@ static void nft_lookup_eval(const struct nft_expr *expr,
}
static const struct nla_policy nft_lookup_policy[NFTA_LOOKUP_MAX + 1] = {
- [NFTA_LOOKUP_SET] = { .type = NLA_STRING },
+ [NFTA_LOOKUP_SET] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_LOOKUP_SET_ID] = { .type = NLA_U32 },
[NFTA_LOOKUP_SREG] = { .type = NLA_U32 },
[NFTA_LOOKUP_DREG] = { .type = NLA_U32 },
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index 415a65b..1ae8c49 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -193,10 +193,12 @@ nft_objref_select_ops(const struct nft_ctx *ctx,
}
static const struct nla_policy nft_objref_policy[NFTA_OBJREF_MAX + 1] = {
- [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJREF_IMM_NAME] = { .type = NLA_STRING,
+ .len = NFT_OBJ_MAXNAMELEN - 1 },
[NFTA_OBJREF_IMM_TYPE] = { .type = NLA_U32 },
[NFTA_OBJREF_SET_SREG] = { .type = NLA_U32 },
- [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING },
+ [NFTA_OBJREF_SET_NAME] = { .type = NLA_STRING,
+ .len = NFT_SET_MAXNAMELEN - 1 },
[NFTA_OBJREF_SET_ID] = { .type = NLA_U32 },
};
diff --git a/net/netfilter/nft_set_hash.c b/net/netfilter/nft_set_hash.c
index 1e20e2b..e36069f 100644
--- a/net/netfilter/nft_set_hash.c
+++ b/net/netfilter/nft_set_hash.c
@@ -212,7 +212,7 @@ static void nft_hash_remove(const struct nft_set *set,
rhashtable_remove_fast(&priv->ht, &he->node, nft_hash_params);
}
-static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
+static void nft_hash_walk(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_iter *iter)
{
struct nft_hash *priv = nft_set_priv(set);
diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
index 08376e5..f06f55e 100644
--- a/net/netfilter/nft_set_rbtree.c
+++ b/net/netfilter/nft_set_rbtree.c
@@ -221,7 +221,7 @@ static void *nft_rbtree_deactivate(const struct net *net,
}
static void nft_rbtree_walk(const struct nft_ctx *ctx,
- const struct nft_set *set,
+ struct nft_set *set,
struct nft_set_iter *iter)
{
const struct nft_rbtree *priv = nft_set_priv(set);
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 161b628..7b73c7c 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1210,9 +1210,9 @@ static struct sk_buff *netlink_trim(struct sk_buff *skb, gfp_t allocation)
skb = nskb;
}
- if (!pskb_expand_head(skb, 0, -delta, allocation))
- skb->truesize -= delta;
-
+ pskb_expand_head(skb, 0, -delta,
+ (allocation & ~__GFP_DIRECT_RECLAIM) |
+ __GFP_NOWARN | __GFP_NORETRY);
return skb;
}
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 514f7bc..efa9a88 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -1141,12 +1141,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
struct sw_flow_key *key,
const struct nlattr *attr, int len)
{
- /* Every output action needs a separate clone of 'skb', but the common
- * case is just a single output action, so that doing a clone and
- * then freeing the original skbuff is wasteful. So the following code
- * is slightly obscure just to avoid that.
- */
- int prev_port = -1;
const struct nlattr *a;
int rem;
@@ -1154,20 +1148,28 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
a = nla_next(a, &rem)) {
int err = 0;
- if (unlikely(prev_port != -1)) {
- struct sk_buff *out_skb = skb_clone(skb, GFP_ATOMIC);
-
- if (out_skb)
- do_output(dp, out_skb, prev_port, key);
+ switch (nla_type(a)) {
+ case OVS_ACTION_ATTR_OUTPUT: {
+ int port = nla_get_u32(a);
+ struct sk_buff *clone;
+
+ /* Every output action needs a separate clone
+ * of 'skb'. In case the output action is the
+ * last action, cloning can be avoided.
+ */
+ if (nla_is_last(a, rem)) {
+ do_output(dp, skb, port, key);
+ /* 'skb' has been used for output. */
+ return 0;
+ }
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone)
+ do_output(dp, clone, port, key);
OVS_CB(skb)->cutlen = 0;
- prev_port = -1;
- }
-
- switch (nla_type(a)) {
- case OVS_ACTION_ATTR_OUTPUT:
- prev_port = nla_get_u32(a);
break;
+ }
case OVS_ACTION_ATTR_TRUNC: {
struct ovs_action_trunc *trunc = nla_data(a);
@@ -1257,11 +1259,7 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
}
}
- if (prev_port != -1)
- do_output(dp, skb, prev_port, key);
- else
- consume_skb(skb);
-
+ consume_skb(skb);
return 0;
}
diff --git a/net/openvswitch/conntrack.c b/net/openvswitch/conntrack.c
index 6b78bab..54253ea 100644
--- a/net/openvswitch/conntrack.c
+++ b/net/openvswitch/conntrack.c
@@ -514,7 +514,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
int hooknum, nh_off, err = NF_ACCEPT;
nh_off = skb_network_offset(skb);
- skb_pull(skb, nh_off);
+ skb_pull_rcsum(skb, nh_off);
/* See HOOK2MANIP(). */
if (maniptype == NF_NAT_MANIP_SRC)
@@ -579,6 +579,7 @@ static int ovs_ct_nat_execute(struct sk_buff *skb, struct nf_conn *ct,
err = nf_nat_packet(ct, ctinfo, hooknum, skb);
push:
skb_push(skb, nh_off);
+ skb_postpush_rcsum(skb, skb->data, nh_off);
return err;
}
@@ -886,7 +887,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
/* The conntrack module expects to be working at L3. */
nh_ofs = skb_network_offset(skb);
- skb_pull(skb, nh_ofs);
+ skb_pull_rcsum(skb, nh_ofs);
if (key->ip.frag != OVS_FRAG_TYPE_NONE) {
err = handle_fragments(net, key, info->zone.id, skb);
@@ -900,6 +901,7 @@ int ovs_ct_execute(struct net *net, struct sk_buff *skb,
err = ovs_ct_lookup(net, key, info, skb);
skb_push(skb, nh_ofs);
+ skb_postpush_rcsum(skb, skb->data, nh_ofs);
if (err)
kfree_skb(skb);
return err;
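Both hunks pair skb_pull_rcsum() with skb_postpush_rcsum() around the L3 processing; on CHECKSUM_COMPLETE packets a plain skb_pull()/skb_push() would leave skb->csum covering the wrong byte range. The pairing in isolation, with a hypothetical processing function:

static int process_at_l3(struct sk_buff *skb)
{
	unsigned int nh_ofs = skb_network_offset(skb);
	int err = 0;

	skb_pull_rcsum(skb, nh_ofs);	/* drop L2 header, adjust skb->csum */

	/* ... L3 work (conntrack lookup, NAT) would go here ... */

	skb_push(skb, nh_ofs);				/* restore L2 header */
	skb_postpush_rcsum(skb, skb->data, nh_ofs);	/* fold it back in */
	return err;
}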
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index ddbda25..9854baa 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1984,7 +1984,7 @@ static int packet_rcv_vnet(struct msghdr *msg, const struct sk_buff *skb,
return -EINVAL;
*len -= sizeof(vnet_hdr);
- if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le()))
+ if (virtio_net_hdr_from_skb(skb, &vnet_hdr, vio_le(), true))
return -EINVAL;
return memcpy_to_msg(msg, (void *)&vnet_hdr, sizeof(vnet_hdr));
@@ -2245,7 +2245,7 @@ static int tpacket_rcv(struct sk_buff *skb, struct net_device *dev,
if (po->has_vnet_hdr) {
if (virtio_net_hdr_from_skb(skb, h.raw + macoff -
sizeof(struct virtio_net_hdr),
- vio_le())) {
+ vio_le(), true)) {
spin_lock(&sk->sk_receive_queue.lock);
goto drop_n_account;
}
diff --git a/net/psample/Kconfig b/net/psample/Kconfig
new file mode 100644
index 0000000..d850246
--- /dev/null
+++ b/net/psample/Kconfig
@@ -0,0 +1,15 @@
+#
+# psample packet sampling configuration
+#
+
+menuconfig PSAMPLE
+ depends on NET
+ tristate "Packet-sampling netlink channel"
+ default n
+ help
+ Say Y here to add support for the packet-sampling netlink channel.
+ This netlink channel allows transferring packets alongside some
+ metadata to userspace.
+
+ To compile this support as a module, choose M here: the module will
+ be called psample.
diff --git a/net/psample/Makefile b/net/psample/Makefile
new file mode 100644
index 0000000..609b0a7
--- /dev/null
+++ b/net/psample/Makefile
@@ -0,0 +1,5 @@
+#
+# Makefile for the psample netlink channel
+#
+
+obj-$(CONFIG_PSAMPLE) += psample.o
diff --git a/net/psample/psample.c b/net/psample/psample.c
new file mode 100644
index 0000000..8aa58a9
--- /dev/null
+++ b/net/psample/psample.c
@@ -0,0 +1,301 @@
+/*
+ * net/psample/psample.c - Netlink channel for packet sampling
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/skbuff.h>
+#include <linux/module.h>
+#include <net/net_namespace.h>
+#include <net/sock.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <net/psample.h>
+#include <linux/spinlock.h>
+
+#define PSAMPLE_MAX_PACKET_SIZE 0xffff
+
+static LIST_HEAD(psample_groups_list);
+static DEFINE_SPINLOCK(psample_groups_lock);
+
+/* multicast groups */
+enum psample_nl_multicast_groups {
+ PSAMPLE_NL_MCGRP_CONFIG,
+ PSAMPLE_NL_MCGRP_SAMPLE,
+};
+
+static const struct genl_multicast_group psample_nl_mcgrps[] = {
+ [PSAMPLE_NL_MCGRP_CONFIG] = { .name = PSAMPLE_NL_MCGRP_CONFIG_NAME },
+ [PSAMPLE_NL_MCGRP_SAMPLE] = { .name = PSAMPLE_NL_MCGRP_SAMPLE_NAME },
+};
+
+static struct genl_family psample_nl_family __ro_after_init;
+
+static int psample_group_nl_fill(struct sk_buff *msg,
+ struct psample_group *group,
+ enum psample_command cmd, u32 portid, u32 seq,
+ int flags)
+{
+ void *hdr;
+ int ret;
+
+ hdr = genlmsg_put(msg, portid, seq, &psample_nl_family, flags, cmd);
+ if (!hdr)
+ return -EMSGSIZE;
+
+ ret = nla_put_u32(msg, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
+ if (ret < 0)
+ goto error;
+
+ ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_REFCOUNT, group->refcount);
+ if (ret < 0)
+ goto error;
+
+ ret = nla_put_u32(msg, PSAMPLE_ATTR_GROUP_SEQ, group->seq);
+ if (ret < 0)
+ goto error;
+
+ genlmsg_end(msg, hdr);
+ return 0;
+
+error:
+ genlmsg_cancel(msg, hdr);
+ return -EMSGSIZE;
+}
+
+static int psample_nl_cmd_get_group_dumpit(struct sk_buff *msg,
+ struct netlink_callback *cb)
+{
+ struct psample_group *group;
+ int start = cb->args[0];
+ int idx = 0;
+ int err;
+
+ spin_lock(&psample_groups_lock);
+ list_for_each_entry(group, &psample_groups_list, list) {
+ if (!net_eq(group->net, sock_net(msg->sk)))
+ continue;
+ if (idx < start) {
+ idx++;
+ continue;
+ }
+ err = psample_group_nl_fill(msg, group, PSAMPLE_CMD_NEW_GROUP,
+ NETLINK_CB(cb->skb).portid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI);
+ if (err)
+ break;
+ idx++;
+ }
+
+ spin_unlock(&psample_groups_lock);
+ cb->args[0] = idx;
+ return msg->len;
+}
+
+static const struct genl_ops psample_nl_ops[] = {
+ {
+ .cmd = PSAMPLE_CMD_GET_GROUP,
+ .dumpit = psample_nl_cmd_get_group_dumpit,
+ /* can be retrieved by unprivileged users */
+ }
+};
+
+static struct genl_family psample_nl_family __ro_after_init = {
+ .name = PSAMPLE_GENL_NAME,
+ .version = PSAMPLE_GENL_VERSION,
+ .maxattr = PSAMPLE_ATTR_MAX,
+ .netnsok = true,
+ .module = THIS_MODULE,
+ .mcgrps = psample_nl_mcgrps,
+ .ops = psample_nl_ops,
+ .n_ops = ARRAY_SIZE(psample_nl_ops),
+ .n_mcgrps = ARRAY_SIZE(psample_nl_mcgrps),
+};
+
+static void psample_group_notify(struct psample_group *group,
+ enum psample_command cmd)
+{
+ struct sk_buff *msg;
+ int err;
+
+ msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
+ if (!msg)
+ return;
+
+ err = psample_group_nl_fill(msg, group, cmd, 0, 0, NLM_F_MULTI);
+ if (!err)
+ genlmsg_multicast_netns(&psample_nl_family, group->net, msg, 0,
+ PSAMPLE_NL_MCGRP_CONFIG, GFP_ATOMIC);
+ else
+ nlmsg_free(msg);
+}
+
+static struct psample_group *psample_group_create(struct net *net,
+ u32 group_num)
+{
+ struct psample_group *group;
+
+ group = kzalloc(sizeof(*group), GFP_ATOMIC);
+ if (!group)
+ return NULL;
+
+ group->net = net;
+ group->group_num = group_num;
+ list_add_tail(&group->list, &psample_groups_list);
+
+ psample_group_notify(group, PSAMPLE_CMD_NEW_GROUP);
+ return group;
+}
+
+static void psample_group_destroy(struct psample_group *group)
+{
+ psample_group_notify(group, PSAMPLE_CMD_DEL_GROUP);
+ list_del(&group->list);
+ kfree(group);
+}
+
+static struct psample_group *
+psample_group_lookup(struct net *net, u32 group_num)
+{
+ struct psample_group *group;
+
+ list_for_each_entry(group, &psample_groups_list, list)
+ if ((group->group_num == group_num) && (group->net == net))
+ return group;
+ return NULL;
+}
+
+struct psample_group *psample_group_get(struct net *net, u32 group_num)
+{
+ struct psample_group *group;
+
+ spin_lock(&psample_groups_lock);
+
+ group = psample_group_lookup(net, group_num);
+ if (!group) {
+ group = psample_group_create(net, group_num);
+ if (!group)
+ goto out;
+ }
+ group->refcount++;
+
+out:
+ spin_unlock(&psample_groups_lock);
+ return group;
+}
+EXPORT_SYMBOL_GPL(psample_group_get);
+
+void psample_group_put(struct psample_group *group)
+{
+ spin_lock(&psample_groups_lock);
+
+ if (--group->refcount == 0)
+ psample_group_destroy(group);
+
+ spin_unlock(&psample_groups_lock);
+}
+EXPORT_SYMBOL_GPL(psample_group_put);
+
+void psample_sample_packet(struct psample_group *group, struct sk_buff *skb,
+ u32 trunc_size, int in_ifindex, int out_ifindex,
+ u32 sample_rate)
+{
+ struct sk_buff *nl_skb;
+ int data_len;
+ int meta_len;
+ void *data;
+ int ret;
+
+ meta_len = (in_ifindex ? nla_total_size(sizeof(u16)) : 0) +
+ (out_ifindex ? nla_total_size(sizeof(u16)) : 0) +
+ nla_total_size(sizeof(u32)) + /* sample_rate */
+ nla_total_size(sizeof(u32)) + /* orig_size */
+ nla_total_size(sizeof(u32)) + /* group_num */
+ nla_total_size(sizeof(u32)); /* seq */
+
+ data_len = min(skb->len, trunc_size);
+ if (meta_len + nla_total_size(data_len) > PSAMPLE_MAX_PACKET_SIZE)
+ data_len = PSAMPLE_MAX_PACKET_SIZE - meta_len - NLA_HDRLEN
+ - NLA_ALIGNTO;
+
+ nl_skb = genlmsg_new(meta_len + data_len, GFP_ATOMIC);
+ if (unlikely(!nl_skb))
+ return;
+
+ data = genlmsg_put(nl_skb, 0, 0, &psample_nl_family, 0,
+ PSAMPLE_CMD_SAMPLE);
+ if (unlikely(!data))
+ goto error;
+
+ if (in_ifindex) {
+ ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_IIFINDEX, in_ifindex);
+ if (unlikely(ret < 0))
+ goto error;
+ }
+
+ if (out_ifindex) {
+ ret = nla_put_u16(nl_skb, PSAMPLE_ATTR_OIFINDEX, out_ifindex);
+ if (unlikely(ret < 0))
+ goto error;
+ }
+
+ ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_RATE, sample_rate);
+ if (unlikely(ret < 0))
+ goto error;
+
+ ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_ORIGSIZE, skb->len);
+ if (unlikely(ret < 0))
+ goto error;
+
+ ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_SAMPLE_GROUP, group->group_num);
+ if (unlikely(ret < 0))
+ goto error;
+
+ ret = nla_put_u32(nl_skb, PSAMPLE_ATTR_GROUP_SEQ, group->seq++);
+ if (unlikely(ret < 0))
+ goto error;
+
+ if (data_len) {
+ int nla_len = nla_total_size(data_len);
+ struct nlattr *nla;
+
+ nla = (struct nlattr *)skb_put(nl_skb, nla_len);
+ nla->nla_type = PSAMPLE_ATTR_DATA;
+ nla->nla_len = nla_attr_size(data_len);
+
+ if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
+ goto error;
+ }
+
+ genlmsg_end(nl_skb, data);
+ genlmsg_multicast_netns(&psample_nl_family, group->net, nl_skb, 0,
+ PSAMPLE_NL_MCGRP_SAMPLE, GFP_ATOMIC);
+
+ return;
+error:
+ pr_err_ratelimited("Could not create psample log message\n");
+ nlmsg_free(nl_skb);
+}
+EXPORT_SYMBOL_GPL(psample_sample_packet);
+
+static int __init psample_module_init(void)
+{
+ return genl_register_family(&psample_nl_family);
+}
+
+static void __exit psample_module_exit(void)
+{
+ genl_unregister_family(&psample_nl_family);
+}
+
+module_init(psample_module_init);
+module_exit(psample_module_exit);
+
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("netlink channel for packet sampling");
+MODULE_LICENSE("GPL v2");
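
Taken together, the exports above form the whole producer API: resolve a group once with psample_group_get(), report chosen packets with psample_sample_packet(), and drop the reference with psample_group_put(). A minimal sketch of a hypothetical in-kernel user — everything except the psample calls themselves is invented for illustration:

    #include <linux/module.h>
    #include <linux/errno.h>
    #include <linux/skbuff.h>
    #include <net/net_namespace.h>
    #include <net/psample.h>

    static struct psample_group *demo_group;

    static int __init psample_demo_init(void)
    {
        demo_group = psample_group_get(&init_net, 1); /* group 1: arbitrary */
        return demo_group ? 0 : -ENOMEM;
    }

    /* Call from a datapath hook for each packet picked at 1-in-rate. */
    void psample_demo_report(struct sk_buff *skb, u32 rate)
    {
        /* copy at most 128 bytes; out_ifindex of 0 means "not known" */
        psample_sample_packet(demo_group, skb, 128, skb->skb_iif, 0, rate);
    }

    static void __exit psample_demo_exit(void)
    {
        psample_group_put(demo_group);
    }

    module_init(psample_demo_init);
    module_exit(psample_demo_exit);
    MODULE_LICENSE("GPL v2");
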
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index c985ecb..ae5ac17 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -252,7 +252,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
const int pkt_len = 20;
struct qrtr_hdr *hdr;
struct sk_buff *skb;
- u32 *buf;
+ __le32 *buf;
skb = alloc_skb(QRTR_HDR_SIZE + pkt_len, GFP_KERNEL);
if (!skb)
@@ -269,7 +269,7 @@ static struct sk_buff *qrtr_alloc_resume_tx(u32 src_node,
hdr->dst_node_id = cpu_to_le32(dst_node);
hdr->dst_port_id = cpu_to_le32(QRTR_PORT_CTRL);
- buf = (u32 *)skb_put(skb, pkt_len);
+ buf = (__le32 *)skb_put(skb, pkt_len);
memset(buf, 0, pkt_len);
buf[0] = cpu_to_le32(QRTR_TYPE_RESUME_TX);
buf[1] = cpu_to_le32(src_node);
diff --git a/net/rfkill/core.c b/net/rfkill/core.c
index 884027f..2064c3a 100644
--- a/net/rfkill/core.c
+++ b/net/rfkill/core.c
@@ -176,6 +176,50 @@ static void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
led_trigger_unregister(&rfkill->led_trigger);
}
+
+static struct led_trigger rfkill_any_led_trigger;
+static struct work_struct rfkill_any_work;
+
+static void rfkill_any_led_trigger_worker(struct work_struct *work)
+{
+ enum led_brightness brightness = LED_OFF;
+ struct rfkill *rfkill;
+
+ mutex_lock(&rfkill_global_mutex);
+ list_for_each_entry(rfkill, &rfkill_list, node) {
+ if (!(rfkill->state & RFKILL_BLOCK_ANY)) {
+ brightness = LED_FULL;
+ break;
+ }
+ }
+ mutex_unlock(&rfkill_global_mutex);
+
+ led_trigger_event(&rfkill_any_led_trigger, brightness);
+}
+
+static void rfkill_any_led_trigger_event(void)
+{
+ schedule_work(&rfkill_any_work);
+}
+
+static void rfkill_any_led_trigger_activate(struct led_classdev *led_cdev)
+{
+ rfkill_any_led_trigger_event();
+}
+
+static int rfkill_any_led_trigger_register(void)
+{
+ INIT_WORK(&rfkill_any_work, rfkill_any_led_trigger_worker);
+ rfkill_any_led_trigger.name = "rfkill-any";
+ rfkill_any_led_trigger.activate = rfkill_any_led_trigger_activate;
+ return led_trigger_register(&rfkill_any_led_trigger);
+}
+
+static void rfkill_any_led_trigger_unregister(void)
+{
+ led_trigger_unregister(&rfkill_any_led_trigger);
+ cancel_work_sync(&rfkill_any_work);
+}
#else
static void rfkill_led_trigger_event(struct rfkill *rfkill)
{
@@ -189,6 +233,19 @@ static inline int rfkill_led_trigger_register(struct rfkill *rfkill)
static inline void rfkill_led_trigger_unregister(struct rfkill *rfkill)
{
}
+
+static void rfkill_any_led_trigger_event(void)
+{
+}
+
+static int rfkill_any_led_trigger_register(void)
+{
+ return 0;
+}
+
+static void rfkill_any_led_trigger_unregister(void)
+{
+}
#endif /* CONFIG_RFKILL_LEDS */
static void rfkill_fill_event(struct rfkill_event *ev, struct rfkill *rfkill,
@@ -297,6 +354,7 @@ static void rfkill_set_block(struct rfkill *rfkill, bool blocked)
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
+ rfkill_any_led_trigger_event();
if (prev != curr)
rfkill_event(rfkill);
@@ -477,11 +535,9 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked)
spin_unlock_irqrestore(&rfkill->lock, flags);
rfkill_led_trigger_event(rfkill);
+ rfkill_any_led_trigger_event();
- if (!rfkill->registered)
- return ret;
-
- if (prev != blocked)
+ if (rfkill->registered && prev != blocked)
schedule_work(&rfkill->uevent_work);
return ret;
@@ -523,6 +579,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
+ rfkill_any_led_trigger_event();
return blocked;
}
@@ -572,6 +629,7 @@ void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw)
schedule_work(&rfkill->uevent_work);
rfkill_led_trigger_event(rfkill);
+ rfkill_any_led_trigger_event();
}
}
EXPORT_SYMBOL(rfkill_set_states);
@@ -988,6 +1046,7 @@ int __must_check rfkill_register(struct rfkill *rfkill)
#endif
}
+ rfkill_any_led_trigger_event();
rfkill_send_events(rfkill, RFKILL_OP_ADD);
mutex_unlock(&rfkill_global_mutex);
@@ -1020,6 +1079,7 @@ void rfkill_unregister(struct rfkill *rfkill)
mutex_lock(&rfkill_global_mutex);
rfkill_send_events(rfkill, RFKILL_OP_DEL);
list_del_init(&rfkill->node);
+ rfkill_any_led_trigger_event();
mutex_unlock(&rfkill_global_mutex);
rfkill_led_trigger_unregister(rfkill);
@@ -1266,24 +1326,33 @@ static int __init rfkill_init(void)
error = class_register(&rfkill_class);
if (error)
- goto out;
+ goto error_class;
error = misc_register(&rfkill_miscdev);
- if (error) {
- class_unregister(&rfkill_class);
- goto out;
- }
+ if (error)
+ goto error_misc;
+
+ error = rfkill_any_led_trigger_register();
+ if (error)
+ goto error_led_trigger;
#ifdef CONFIG_RFKILL_INPUT
error = rfkill_handler_init();
- if (error) {
- misc_deregister(&rfkill_miscdev);
- class_unregister(&rfkill_class);
- goto out;
- }
+ if (error)
+ goto error_input;
#endif
- out:
+ return 0;
+
+#ifdef CONFIG_RFKILL_INPUT
+error_input:
+ rfkill_any_led_trigger_unregister();
+#endif
+error_led_trigger:
+ misc_deregister(&rfkill_miscdev);
+error_misc:
+ class_unregister(&rfkill_class);
+error_class:
return error;
}
subsys_initcall(rfkill_init);
@@ -1293,6 +1362,7 @@ static void __exit rfkill_exit(void)
#ifdef CONFIG_RFKILL_INPUT
rfkill_handler_exit();
#endif
+ rfkill_any_led_trigger_unregister();
misc_deregister(&rfkill_miscdev);
class_unregister(&rfkill_class);
}
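
The new "rfkill-any" trigger lights an LED at full brightness while at least one radio is unblocked and turns it off only once every rfkill switch is blocked. Binding an LED from userspace is a single sysfs write; a sketch with a hypothetical LED name:

    #include <stdio.h>

    int main(void)
    {
        /* "example:wlan" is a placeholder; any led_classdev device works */
        FILE *f = fopen("/sys/class/leds/example:wlan/trigger", "w");

        if (!f)
            return 1;
        fputs("rfkill-any", f); /* name set in rfkill_any_led_trigger_register() */
        return fclose(f) ? 1 : 0;
    }
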
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index a9aa38d..72cfa3a 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -650,6 +650,18 @@ config NET_ACT_MIRRED
To compile this code as a module, choose M here: the
module will be called act_mirred.
+config NET_ACT_SAMPLE
+ tristate "Traffic Sampling"
+ depends on NET_CLS_ACT
+ select PSAMPLE
+ ---help---
+ Say Y here to allow the packet sampling tc action. The sample
+ action statistically chooses packets and samples them using
+ the psample module.
+
+ To compile this code as a module, choose M here: the
+ module will be called act_sample.
+
config NET_ACT_IPT
tristate "IPtables targets"
depends on NET_CLS_ACT && NETFILTER && IP_NF_IPTABLES
diff --git a/net/sched/Makefile b/net/sched/Makefile
index 4bdda36..7b915d2 100644
--- a/net/sched/Makefile
+++ b/net/sched/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_NET_CLS_ACT) += act_api.o
obj-$(CONFIG_NET_ACT_POLICE) += act_police.o
obj-$(CONFIG_NET_ACT_GACT) += act_gact.o
obj-$(CONFIG_NET_ACT_MIRRED) += act_mirred.o
+obj-$(CONFIG_NET_ACT_SAMPLE) += act_sample.o
obj-$(CONFIG_NET_ACT_IPT) += act_ipt.o
obj-$(CONFIG_NET_ACT_NAT) += act_nat.o
obj-$(CONFIG_NET_ACT_PEDIT) += act_pedit.o
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index f04715a..3c5e29b 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -24,6 +24,7 @@
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/sch_generic.h>
+#include <net/pkt_cls.h>
#include <net/act_api.h>
#include <net/netlink.h>
@@ -33,6 +34,12 @@ static void free_tcf(struct rcu_head *head)
free_percpu(p->cpu_bstats);
free_percpu(p->cpu_qstats);
+
+ if (p->act_cookie) {
+ kfree(p->act_cookie->data);
+ kfree(p->act_cookie);
+ }
+
kfree(p);
}
@@ -475,6 +482,12 @@ tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int bind, int ref)
goto nla_put_failure;
if (tcf_action_copy_stats(skb, a, 0))
goto nla_put_failure;
+ if (a->act_cookie) {
+ if (nla_put(skb, TCA_ACT_COOKIE, a->act_cookie->len,
+ a->act_cookie->data))
+ goto nla_put_failure;
+ }
+
nest = nla_nest_start(skb, TCA_OPTIONS);
if (nest == NULL)
goto nla_put_failure;
@@ -516,6 +529,22 @@ errout:
return err;
}
+int nla_memdup_cookie(struct tc_action *a, struct nlattr **tb)
+{
+ a->act_cookie = kzalloc(sizeof(*a->act_cookie), GFP_KERNEL);
+ if (!a->act_cookie)
+ return -ENOMEM;
+
+ a->act_cookie->data = nla_memdup(tb[TCA_ACT_COOKIE], GFP_KERNEL);
+ if (!a->act_cookie->data) {
+ kfree(a->act_cookie);
+ return -ENOMEM;
+ }
+ a->act_cookie->len = nla_len(tb[TCA_ACT_COOKIE]);
+
+ return 0;
+}
+
struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
struct nlattr *est, char *name, int ovr,
int bind)
@@ -575,6 +604,22 @@ struct tc_action *tcf_action_init_1(struct net *net, struct nlattr *nla,
if (err < 0)
goto err_mod;
+ if (tb[TCA_ACT_COOKIE]) {
+ int cklen = nla_len(tb[TCA_ACT_COOKIE]);
+
+ if (cklen > TC_COOKIE_MAX_SIZE) {
+ err = -EINVAL;
+ tcf_hash_release(a, bind);
+ goto err_mod;
+ }
+
+ err = nla_memdup_cookie(a, tb);
+ if (err < 0) {
+ tcf_hash_release(a, bind);
+ goto err_mod;
+ }
+ }
+
/* module count goes up only when brand new policy is created
* if it exists and is only bound to in a_o->init() then
* ACT_P_CREATED is not returned (a zero is).
@@ -897,8 +942,6 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
goto err;
}
act->order = i;
- if (event == RTM_GETACTION)
- act->tcfa_refcnt++;
list_add_tail(&act->list, &actions);
}
@@ -911,7 +954,8 @@ tca_action_gd(struct net *net, struct nlattr *nla, struct nlmsghdr *n,
return ret;
}
err:
- tcf_action_destroy(&actions, 0);
+ if (event != RTM_GETACTION)
+ tcf_action_destroy(&actions, 0);
return ret;
}
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1c60317..520baa41 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -123,12 +123,11 @@ static int tcf_bpf_dump_ebpf_info(const struct tcf_bpf *prog,
nla_put_string(skb, TCA_ACT_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
- nla = nla_reserve(skb, TCA_ACT_BPF_DIGEST,
- sizeof(prog->filter->digest));
+ nla = nla_reserve(skb, TCA_ACT_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
- memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+ memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}
diff --git a/net/sched/act_sample.c b/net/sched/act_sample.c
new file mode 100644
index 0000000..3922975
--- /dev/null
+++ b/net/sched/act_sample.c
@@ -0,0 +1,274 @@
+/*
+ * net/sched/act_sample.c - Packet sampling tc action
+ * Copyright (c) 2017 Yotam Gigi <yotamg@mellanox.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/skbuff.h>
+#include <linux/rtnetlink.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/gfp.h>
+#include <net/net_namespace.h>
+#include <net/netlink.h>
+#include <net/pkt_sched.h>
+#include <linux/tc_act/tc_sample.h>
+#include <net/tc_act/tc_sample.h>
+#include <net/psample.h>
+
+#include <linux/if_arp.h>
+
+#define SAMPLE_TAB_MASK 7
+static unsigned int sample_net_id;
+static struct tc_action_ops act_sample_ops;
+
+static const struct nla_policy sample_policy[TCA_SAMPLE_MAX + 1] = {
+ [TCA_SAMPLE_PARMS] = { .len = sizeof(struct tc_sample) },
+ [TCA_SAMPLE_RATE] = { .type = NLA_U32 },
+ [TCA_SAMPLE_TRUNC_SIZE] = { .type = NLA_U32 },
+ [TCA_SAMPLE_PSAMPLE_GROUP] = { .type = NLA_U32 },
+};
+
+static int tcf_sample_init(struct net *net, struct nlattr *nla,
+ struct nlattr *est, struct tc_action **a, int ovr,
+ int bind)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+ struct nlattr *tb[TCA_SAMPLE_MAX + 1];
+ struct psample_group *psample_group;
+ struct tc_sample *parm;
+ struct tcf_sample *s;
+ bool exists = false;
+ int ret;
+
+ if (!nla)
+ return -EINVAL;
+ ret = nla_parse_nested(tb, TCA_SAMPLE_MAX, nla, sample_policy);
+ if (ret < 0)
+ return ret;
+ if (!tb[TCA_SAMPLE_PARMS] || !tb[TCA_SAMPLE_RATE] ||
+ !tb[TCA_SAMPLE_PSAMPLE_GROUP])
+ return -EINVAL;
+
+ parm = nla_data(tb[TCA_SAMPLE_PARMS]);
+
+ exists = tcf_hash_check(tn, parm->index, a, bind);
+ if (exists && bind)
+ return 0;
+
+ if (!exists) {
+ ret = tcf_hash_create(tn, parm->index, est, a,
+ &act_sample_ops, bind, false);
+ if (ret)
+ return ret;
+ ret = ACT_P_CREATED;
+ } else {
+ tcf_hash_release(*a, bind);
+ if (!ovr)
+ return -EEXIST;
+ }
+ s = to_sample(*a);
+
+ ASSERT_RTNL();
+ s->tcf_action = parm->action;
+ s->rate = nla_get_u32(tb[TCA_SAMPLE_RATE]);
+ s->psample_group_num = nla_get_u32(tb[TCA_SAMPLE_PSAMPLE_GROUP]);
+ psample_group = psample_group_get(net, s->psample_group_num);
+ if (!psample_group)
+ return -ENOMEM;
+ RCU_INIT_POINTER(s->psample_group, psample_group);
+
+ if (tb[TCA_SAMPLE_TRUNC_SIZE]) {
+ s->truncate = true;
+ s->trunc_size = nla_get_u32(tb[TCA_SAMPLE_TRUNC_SIZE]);
+ }
+
+ if (ret == ACT_P_CREATED)
+ tcf_hash_insert(tn, *a);
+ return ret;
+}
+
+static void tcf_sample_cleanup_rcu(struct rcu_head *rcu)
+{
+ struct tcf_sample *s = container_of(rcu, struct tcf_sample, rcu);
+ struct psample_group *psample_group;
+
+ psample_group = rcu_dereference_protected(s->psample_group, 1);
+ RCU_INIT_POINTER(s->psample_group, NULL);
+ psample_group_put(psample_group);
+}
+
+static void tcf_sample_cleanup(struct tc_action *a, int bind)
+{
+ struct tcf_sample *s = to_sample(a);
+
+ call_rcu(&s->rcu, tcf_sample_cleanup_rcu);
+}
+
+static bool tcf_sample_dev_ok_push(struct net_device *dev)
+{
+ switch (dev->type) {
+ case ARPHRD_TUNNEL:
+ case ARPHRD_TUNNEL6:
+ case ARPHRD_SIT:
+ case ARPHRD_IPGRE:
+ case ARPHRD_VOID:
+ case ARPHRD_NONE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static int tcf_sample_act(struct sk_buff *skb, const struct tc_action *a,
+ struct tcf_result *res)
+{
+ struct tcf_sample *s = to_sample(a);
+ struct psample_group *psample_group;
+ int retval;
+ int size;
+ int iif;
+ int oif;
+
+ tcf_lastuse_update(&s->tcf_tm);
+ bstats_cpu_update(this_cpu_ptr(s->common.cpu_bstats), skb);
+ retval = READ_ONCE(s->tcf_action);
+
+ rcu_read_lock();
+ psample_group = rcu_dereference(s->psample_group);
+
+ /* randomly sample packets according to rate */
+ if (psample_group && (prandom_u32() % s->rate == 0)) {
+ if (!skb_at_tc_ingress(skb)) {
+ iif = skb->skb_iif;
+ oif = skb->dev->ifindex;
+ } else {
+ iif = skb->dev->ifindex;
+ oif = 0;
+ }
+
+ /* on ingress, the mac header gets popped, so push it back */
+ if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
+ skb_push(skb, skb->mac_len);
+
+ size = s->truncate ? s->trunc_size : skb->len;
+ psample_sample_packet(psample_group, skb, size, iif, oif,
+ s->rate);
+
+ if (skb_at_tc_ingress(skb) && tcf_sample_dev_ok_push(skb->dev))
+ skb_pull(skb, skb->mac_len);
+ }
+
+ rcu_read_unlock();
+ return retval;
+}
+
+static int tcf_sample_dump(struct sk_buff *skb, struct tc_action *a,
+ int bind, int ref)
+{
+ unsigned char *b = skb_tail_pointer(skb);
+ struct tcf_sample *s = to_sample(a);
+ struct tc_sample opt = {
+ .index = s->tcf_index,
+ .action = s->tcf_action,
+ .refcnt = s->tcf_refcnt - ref,
+ .bindcnt = s->tcf_bindcnt - bind,
+ };
+ struct tcf_t t;
+
+ if (nla_put(skb, TCA_SAMPLE_PARMS, sizeof(opt), &opt))
+ goto nla_put_failure;
+
+ tcf_tm_dump(&t, &s->tcf_tm);
+ if (nla_put_64bit(skb, TCA_SAMPLE_TM, sizeof(t), &t, TCA_SAMPLE_PAD))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_SAMPLE_RATE, s->rate))
+ goto nla_put_failure;
+
+ if (s->truncate)
+ if (nla_put_u32(skb, TCA_SAMPLE_TRUNC_SIZE, s->trunc_size))
+ goto nla_put_failure;
+
+ if (nla_put_u32(skb, TCA_SAMPLE_PSAMPLE_GROUP, s->psample_group_num))
+ goto nla_put_failure;
+ return skb->len;
+
+nla_put_failure:
+ nlmsg_trim(skb, b);
+ return -1;
+}
+
+static int tcf_sample_walker(struct net *net, struct sk_buff *skb,
+ struct netlink_callback *cb, int type,
+ const struct tc_action_ops *ops)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+ return tcf_generic_walker(tn, skb, cb, type, ops);
+}
+
+static int tcf_sample_search(struct net *net, struct tc_action **a, u32 index)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+ return tcf_hash_search(tn, a, index);
+}
+
+static struct tc_action_ops act_sample_ops = {
+ .kind = "sample",
+ .type = TCA_ACT_SAMPLE,
+ .owner = THIS_MODULE,
+ .act = tcf_sample_act,
+ .dump = tcf_sample_dump,
+ .init = tcf_sample_init,
+ .cleanup = tcf_sample_cleanup,
+ .walk = tcf_sample_walker,
+ .lookup = tcf_sample_search,
+ .size = sizeof(struct tcf_sample),
+};
+
+static __net_init int sample_init_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+ return tc_action_net_init(tn, &act_sample_ops, SAMPLE_TAB_MASK);
+}
+
+static void __net_exit sample_exit_net(struct net *net)
+{
+ struct tc_action_net *tn = net_generic(net, sample_net_id);
+
+ tc_action_net_exit(tn);
+}
+
+static struct pernet_operations sample_net_ops = {
+ .init = sample_init_net,
+ .exit = sample_exit_net,
+ .id = &sample_net_id,
+ .size = sizeof(struct tc_action_net),
+};
+
+static int __init sample_init_module(void)
+{
+ return tcf_register_action(&act_sample_ops, &sample_net_ops);
+}
+
+static void __exit sample_cleanup_module(void)
+{
+ tcf_unregister_action(&act_sample_ops, &sample_net_ops);
+}
+
+module_init(sample_init_module);
+module_exit(sample_cleanup_module);
+
+MODULE_AUTHOR("Yotam Gigi <yotamg@mellanox.com>");
+MODULE_DESCRIPTION("Packet sampling action");
+MODULE_LICENSE("GPL v2");
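
tcf_sample_act() above samples each packet independently with probability 1/rate ('prandom_u32() % s->rate == 0'), so n packets yield roughly n/rate samples. A quick userspace sketch of that decision, with rand() standing in for prandom_u32():

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    int main(void)
    {
        unsigned rate = 100, npkts = 1000000, sampled = 0;

        srand((unsigned)time(NULL));
        for (unsigned i = 0; i < npkts; i++)
            if ((unsigned)rand() % rate == 0) /* mirrors tcf_sample_act() */
                sampled++;

        /* expect roughly npkts / rate = 10000 samples */
        printf("sampled %u of %u (expected ~%u)\n",
               sampled, npkts, npkts / rate);
        return 0;
    }
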
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index adc7760..d9c9701 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -555,11 +555,11 @@ static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
return -EMSGSIZE;
- nla = nla_reserve(skb, TCA_BPF_DIGEST, sizeof(prog->filter->digest));
+ nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
if (nla == NULL)
return -EMSGSIZE;
- memcpy(nla_data(nla), prog->filter->digest, nla_len(nla));
+ memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));
return 0;
}
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 970db7a..9e74b0f 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -40,6 +40,7 @@ struct fl_flow_key {
};
struct flow_dissector_key_ports tp;
struct flow_dissector_key_icmp icmp;
+ struct flow_dissector_key_arp arp;
struct flow_dissector_key_keyid enc_key_id;
union {
struct flow_dissector_key_ipv4_addrs enc_ipv4;
@@ -133,6 +134,14 @@ static void fl_clear_masked_range(struct fl_flow_key *key,
memset(fl_key_get_start(key, mask), 0, fl_mask_range(mask));
}
+static struct cls_fl_filter *fl_lookup(struct cls_fl_head *head,
+ struct fl_flow_key *mkey)
+{
+ return rhashtable_lookup_fast(&head->ht,
+ fl_key_get_start(mkey, &head->mask),
+ head->ht_params);
+}
+
static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct tcf_result *res)
{
@@ -180,9 +189,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
fl_set_masked_key(&skb_mkey, &skb_key, &head->mask);
- f = rhashtable_lookup_fast(&head->ht,
- fl_key_get_start(&skb_mkey, &head->mask),
- head->ht_params);
+ f = fl_lookup(head, &skb_mkey);
if (f && !tc_skip_sw(f->flags)) {
*res = f->res;
return tcf_exts_exec(skb, &f->exts, res);
@@ -401,6 +408,16 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
[TCA_FLOWER_KEY_ICMPV6_TYPE_MASK] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV6_CODE] = { .type = NLA_U8 },
[TCA_FLOWER_KEY_ICMPV6_CODE_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ARP_SIP] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ARP_SIP_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ARP_TIP] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ARP_TIP_MASK] = { .type = NLA_U32 },
+ [TCA_FLOWER_KEY_ARP_OP] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ARP_OP_MASK] = { .type = NLA_U8 },
+ [TCA_FLOWER_KEY_ARP_SHA] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ARP_SHA_MASK] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ARP_THA] = { .len = ETH_ALEN },
+ [TCA_FLOWER_KEY_ARP_THA_MASK] = { .len = ETH_ALEN },
};
static void fl_set_key_val(struct nlattr **tb,
@@ -572,6 +589,23 @@ static int fl_set_key(struct net *net, struct nlattr **tb,
&mask->icmp.code,
TCA_FLOWER_KEY_ICMPV4_CODE_MASK,
sizeof(key->icmp.code));
+ } else if (key->basic.n_proto == htons(ETH_P_ARP) ||
+ key->basic.n_proto == htons(ETH_P_RARP)) {
+ fl_set_key_val(tb, &key->arp.sip, TCA_FLOWER_KEY_ARP_SIP,
+ &mask->arp.sip, TCA_FLOWER_KEY_ARP_SIP_MASK,
+ sizeof(key->arp.sip));
+ fl_set_key_val(tb, &key->arp.tip, TCA_FLOWER_KEY_ARP_TIP,
+ &mask->arp.tip, TCA_FLOWER_KEY_ARP_TIP_MASK,
+ sizeof(key->arp.tip));
+ fl_set_key_val(tb, &key->arp.op, TCA_FLOWER_KEY_ARP_OP,
+ &mask->arp.op, TCA_FLOWER_KEY_ARP_OP_MASK,
+ sizeof(key->arp.op));
+ fl_set_key_val(tb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
+ mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
+ sizeof(key->arp.sha));
+ fl_set_key_val(tb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
+ mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
+ sizeof(key->arp.tha));
}
if (tb[TCA_FLOWER_KEY_ENC_IPV4_SRC] ||
@@ -689,6 +723,8 @@ static void fl_init_dissector(struct cls_fl_head *head,
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ICMP, icmp);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
+ FLOW_DISSECTOR_KEY_ARP, arp);
+ FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_VLAN, vlan);
FL_KEY_SET_IF_MASKED(&mask->key, keys, cnt,
FLOW_DISSECTOR_KEY_ENC_KEYID, enc_key_id);
@@ -796,23 +832,31 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
struct cls_fl_head *head = rtnl_dereference(tp->root);
struct cls_fl_filter *fold = (struct cls_fl_filter *) *arg;
struct cls_fl_filter *fnew;
- struct nlattr *tb[TCA_FLOWER_MAX + 1];
+ struct nlattr **tb;
struct fl_flow_mask mask = {};
int err;
if (!tca[TCA_OPTIONS])
return -EINVAL;
+ tb = kcalloc(TCA_FLOWER_MAX + 1, sizeof(struct nlattr *), GFP_KERNEL);
+ if (!tb)
+ return -ENOBUFS;
+
err = nla_parse_nested(tb, TCA_FLOWER_MAX, tca[TCA_OPTIONS], fl_policy);
if (err < 0)
- return err;
+ goto errout_tb;
- if (fold && handle && fold->handle != handle)
- return -EINVAL;
+ if (fold && handle && fold->handle != handle) {
+ err = -EINVAL;
+ goto errout_tb;
+ }
fnew = kzalloc(sizeof(*fnew), GFP_KERNEL);
- if (!fnew)
- return -ENOBUFS;
+ if (!fnew) {
+ err = -ENOBUFS;
+ goto errout_tb;
+ }
err = tcf_exts_init(&fnew->exts, TCA_FLOWER_ACT, 0);
if (err < 0)
@@ -845,6 +889,11 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
goto errout;
if (!tc_skip_sw(fnew->flags)) {
+ if (!fold && fl_lookup(head, &fnew->mkey)) {
+ err = -EEXIST;
+ goto errout;
+ }
+
err = rhashtable_insert_fast(&head->ht, &fnew->ht_node,
head->ht_params);
if (err)
@@ -878,11 +927,14 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
list_add_tail_rcu(&fnew->list, &head->filters);
}
+ kfree(tb);
return 0;
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
+errout_tb:
+ kfree(tb);
return err;
}
@@ -1112,6 +1164,27 @@ static int fl_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
TCA_FLOWER_KEY_ICMPV6_CODE_MASK,
sizeof(key->icmp.code))))
goto nla_put_failure;
+ else if ((key->basic.n_proto == htons(ETH_P_ARP) ||
+ key->basic.n_proto == htons(ETH_P_RARP)) &&
+ (fl_dump_key_val(skb, &key->arp.sip,
+ TCA_FLOWER_KEY_ARP_SIP, &mask->arp.sip,
+ TCA_FLOWER_KEY_ARP_SIP_MASK,
+ sizeof(key->arp.sip)) ||
+ fl_dump_key_val(skb, &key->arp.tip,
+ TCA_FLOWER_KEY_ARP_TIP, &mask->arp.tip,
+ TCA_FLOWER_KEY_ARP_TIP_MASK,
+ sizeof(key->arp.tip)) ||
+ fl_dump_key_val(skb, &key->arp.op,
+ TCA_FLOWER_KEY_ARP_OP, &mask->arp.op,
+ TCA_FLOWER_KEY_ARP_OP_MASK,
+ sizeof(key->arp.op)) ||
+ fl_dump_key_val(skb, key->arp.sha, TCA_FLOWER_KEY_ARP_SHA,
+ mask->arp.sha, TCA_FLOWER_KEY_ARP_SHA_MASK,
+ sizeof(key->arp.sha)) ||
+ fl_dump_key_val(skb, key->arp.tha, TCA_FLOWER_KEY_ARP_THA,
+ mask->arp.tha, TCA_FLOWER_KEY_ARP_THA_MASK,
+ sizeof(key->arp.tha))))
+ goto nla_put_failure;
if (key->enc_control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS &&
(fl_dump_key_val(skb, &key->enc_ipv4.src,
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index a5ea0e9..2f50e4c 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -57,7 +57,6 @@ struct fq_codel_sched_data {
struct fq_codel_flow *flows; /* Flows table [flows_cnt] */
u32 *backlogs; /* backlog table [flows_cnt] */
u32 flows_cnt; /* number of flows */
- u32 perturbation; /* hash perturbation */
u32 quantum; /* psched_mtu(qdisc_dev(sch)); */
u32 drop_batch_size;
u32 memory_limit;
@@ -75,9 +74,7 @@ struct fq_codel_sched_data {
static unsigned int fq_codel_hash(const struct fq_codel_sched_data *q,
struct sk_buff *skb)
{
- u32 hash = skb_get_hash_perturb(skb, q->perturbation);
-
- return reciprocal_scale(hash, q->flows_cnt);
+ return reciprocal_scale(skb_get_hash(skb), q->flows_cnt);
}
static unsigned int fq_codel_classify(struct sk_buff *skb, struct Qdisc *sch,
@@ -482,7 +479,6 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
q->memory_limit = 32 << 20; /* 32 MBytes */
q->drop_batch_size = 64;
q->quantum = psched_mtu(qdisc_dev(sch));
- q->perturbation = prandom_u32();
INIT_LIST_HEAD(&q->new_flows);
INIT_LIST_HEAD(&q->old_flows);
codel_params_init(&q->cparams);
diff --git a/net/sctp/associola.c b/net/sctp/associola.c
index 36294f7..e50dc6d 100644
--- a/net/sctp/associola.c
+++ b/net/sctp/associola.c
@@ -207,6 +207,7 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
* association to the same value as the initial TSN.
*/
asoc->addip_serial = asoc->c.initial_tsn;
+ asoc->strreset_outseq = asoc->c.initial_tsn;
INIT_LIST_HEAD(&asoc->addip_chunk_list);
INIT_LIST_HEAD(&asoc->asconf_ack_list);
@@ -269,6 +270,8 @@ static struct sctp_association *sctp_association_init(struct sctp_association *a
asoc->active_key_id = ep->active_key_id;
asoc->prsctp_enable = ep->prsctp_enable;
+ asoc->reconf_enable = ep->reconf_enable;
+ asoc->strreset_enable = ep->strreset_enable;
/* Save the hmacs and chunks list into this association */
if (ep->auth_hmacs_list)
@@ -361,6 +364,9 @@ void sctp_association_free(struct sctp_association *asoc)
/* Free stream information. */
sctp_stream_free(asoc->stream);
+ if (asoc->strreset_chunk)
+ sctp_chunk_free(asoc->strreset_chunk);
+
/* Clean up the bound address list. */
sctp_bind_addr_free(&asoc->base.bind_addr);
@@ -519,6 +525,12 @@ void sctp_assoc_rm_peer(struct sctp_association *asoc,
if (asoc->peer.last_data_from == peer)
asoc->peer.last_data_from = transport;
+ if (asoc->strreset_chunk &&
+ asoc->strreset_chunk->transport == peer) {
+ asoc->strreset_chunk->transport = transport;
+ sctp_transport_reset_reconf_timer(transport);
+ }
+
/* If we remove the transport an INIT was last sent to, set it to
* NULL. Combined with the update of the retran path above, this
* will cause the next INIT to be sent to the next available
diff --git a/net/sctp/debug.c b/net/sctp/debug.c
index 95d7b15..2e47eb2 100644
--- a/net/sctp/debug.c
+++ b/net/sctp/debug.c
@@ -159,6 +159,7 @@ static const char *const sctp_timer_tbl[] = {
"TIMEOUT_T4_RTO",
"TIMEOUT_T5_SHUTDOWN_GUARD",
"TIMEOUT_HEARTBEAT",
+ "TIMEOUT_RECONF",
"TIMEOUT_SACK",
"TIMEOUT_AUTOCLOSE",
};
@@ -166,7 +167,9 @@ static const char *const sctp_timer_tbl[] = {
/* Lookup timer debug name. */
const char *sctp_tname(const sctp_subtype_t id)
{
- if (id.timeout <= SCTP_EVENT_TIMEOUT_MAX)
+ BUILD_BUG_ON(SCTP_EVENT_TIMEOUT_MAX + 1 != ARRAY_SIZE(sctp_timer_tbl));
+
+ if (id.timeout < ARRAY_SIZE(sctp_timer_tbl))
return sctp_timer_tbl[id.timeout];
return "unknown_timer";
}
diff --git a/net/sctp/endpointola.c b/net/sctp/endpointola.c
index 410ddc1..8c58923 100644
--- a/net/sctp/endpointola.c
+++ b/net/sctp/endpointola.c
@@ -164,6 +164,7 @@ static struct sctp_endpoint *sctp_endpoint_init(struct sctp_endpoint *ep,
ep->auth_hmacs_list = auth_hmacs;
ep->auth_chunk_list = auth_chunks;
ep->prsctp_enable = net->sctp.prsctp_enable;
+ ep->reconf_enable = net->sctp.reconf_enable;
return ep;
diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c
index 6619367..063baac 100644
--- a/net/sctp/ipv6.c
+++ b/net/sctp/ipv6.c
@@ -222,7 +222,8 @@ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport)
SCTP_INC_STATS(sock_net(sk), SCTP_MIB_OUTSCTPPACKS);
rcu_read_lock();
- res = ip6_xmit(sk, skb, fl6, rcu_dereference(np->opt), np->tclass);
+ res = ip6_xmit(sk, skb, fl6, sk->sk_mark, rcu_dereference(np->opt),
+ np->tclass);
rcu_read_unlock();
return res;
}
diff --git a/net/sctp/offload.c b/net/sctp/offload.c
index 7e869d0..4f5a2b5 100644
--- a/net/sctp/offload.c
+++ b/net/sctp/offload.c
@@ -68,7 +68,7 @@ static struct sk_buff *sctp_gso_segment(struct sk_buff *skb,
goto out;
}
- segs = skb_segment(skb, features | NETIF_F_HW_CSUM);
+ segs = skb_segment(skb, features | NETIF_F_HW_CSUM | NETIF_F_SG);
if (IS_ERR(segs))
goto out;
diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
index e540826..65abe22 100644
--- a/net/sctp/outqueue.c
+++ b/net/sctp/outqueue.c
@@ -915,22 +915,28 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
case SCTP_CID_ECN_ECNE:
case SCTP_CID_ASCONF:
case SCTP_CID_FWD_TSN:
+ case SCTP_CID_RECONF:
status = sctp_packet_transmit_chunk(packet, chunk,
one_packet, gfp);
if (status != SCTP_XMIT_OK) {
/* put the chunk back */
list_add(&chunk->list, &q->control_chunk_list);
- } else {
- asoc->stats.octrlchunks++;
- /* PR-SCTP C5) If a FORWARD TSN is sent, the
- * sender MUST assure that at least one T3-rtx
- * timer is running.
- */
- if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
- sctp_transport_reset_t3_rtx(transport);
- transport->last_time_sent = jiffies;
- }
+ break;
+ }
+
+ asoc->stats.octrlchunks++;
+ /* PR-SCTP C5) If a FORWARD TSN is sent, the
+ * sender MUST assure that at least one T3-rtx
+ * timer is running.
+ */
+ if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
+ sctp_transport_reset_t3_rtx(transport);
+ transport->last_time_sent = jiffies;
}
+
+ if (chunk == asoc->strreset_chunk)
+ sctp_transport_reset_reconf_timer(transport);
+
break;
default:
@@ -1016,6 +1022,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
/* Finally, transmit new packets. */
while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
+ __u32 sid = ntohs(chunk->subh.data_hdr->stream);
+
/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
* stream identifier.
*/
@@ -1038,6 +1046,11 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
continue;
}
+ if (asoc->stream->out[sid].state == SCTP_STREAM_CLOSED) {
+ sctp_outq_head_data(q, chunk);
+ goto sctp_flush_out;
+ }
+
/* If there is a specified transport, use it.
* Otherwise, we want to use the active path.
*/
@@ -1048,7 +1061,7 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
(new_transport->state == SCTP_PF)))
new_transport = asoc->peer.active_path;
if (new_transport->state == SCTP_UNCONFIRMED) {
- WARN_ONCE(1, "Atempt to send packet on unconfirmed path.");
+ WARN_ONCE(1, "Attempt to send packet on unconfirmed path.");
sctp_chunk_fail(chunk, 0);
sctp_chunk_free(chunk);
continue;
diff --git a/net/sctp/primitive.c b/net/sctp/primitive.c
index ab8d9f9..f0553a0 100644
--- a/net/sctp/primitive.c
+++ b/net/sctp/primitive.c
@@ -211,3 +211,6 @@ DECLARE_PRIMITIVE(REQUESTHEARTBEAT);
*/
DECLARE_PRIMITIVE(ASCONF);
+
+/* RE-CONFIG 5.1 */
+DECLARE_PRIMITIVE(RECONF);
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index f9c3c37..8227bbb 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -1258,6 +1258,9 @@ static int __net_init sctp_defaults_init(struct net *net)
/* Enable PR-SCTP by default. */
net->sctp.prsctp_enable = 1;
+ /* Disable RECONF by default. */
+ net->sctp.reconf_enable = 0;
+
/* Disable AUTH by default. */
net->sctp.auth_enable = 0;
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index a15d824..ad3445b 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -270,6 +270,11 @@ struct sctp_chunk *sctp_make_init(const struct sctp_association *asoc,
num_ext += 2;
}
+ if (asoc->reconf_enable) {
+ extensions[num_ext] = SCTP_CID_RECONF;
+ num_ext += 1;
+ }
+
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
@@ -434,6 +439,11 @@ struct sctp_chunk *sctp_make_init_ack(const struct sctp_association *asoc,
num_ext += 2;
}
+ if (asoc->peer.reconf_capable) {
+ extensions[num_ext] = SCTP_CID_RECONF;
+ num_ext += 1;
+ }
+
if (sp->adaptation_ind)
chunksize += sizeof(aiparam);
@@ -1844,6 +1854,7 @@ no_hmac:
retval->next_tsn = retval->c.initial_tsn;
retval->ctsn_ack_point = retval->next_tsn - 1;
retval->addip_serial = retval->c.initial_tsn;
+ retval->strreset_outseq = retval->c.initial_tsn;
retval->adv_peer_ack_point = retval->ctsn_ack_point;
retval->peer.prsctp_capable = retval->c.prsctp_capable;
retval->peer.adaptation_ind = retval->c.adaptation_ind;
@@ -2011,6 +2022,11 @@ static void sctp_process_ext_param(struct sctp_association *asoc,
for (i = 0; i < num_ext; i++) {
switch (param.ext->chunks[i]) {
+ case SCTP_CID_RECONF:
+ if (asoc->reconf_enable &&
+ !asoc->peer.reconf_capable)
+ asoc->peer.reconf_capable = 1;
+ break;
case SCTP_CID_FWD_TSN:
if (asoc->prsctp_enable && !asoc->peer.prsctp_capable)
asoc->peer.prsctp_capable = 1;
@@ -2387,6 +2403,8 @@ int sctp_process_init(struct sctp_association *asoc, struct sctp_chunk *chunk,
asoc->peer.i.initial_tsn =
ntohl(peer_init->init_hdr.initial_tsn);
+ asoc->strreset_inseq = asoc->peer.i.initial_tsn;
+
/* Apply the upper bounds for output streams based on peer's
* number of inbound streams.
*/
@@ -3210,7 +3228,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
union sctp_params param;
sctp_addiphdr_t *hdr;
union sctp_addr_param *addr_param;
- sctp_addip_param_t *asconf_param;
struct sctp_chunk *asconf_ack;
__be16 err_code;
int length = 0;
@@ -3230,7 +3247,6 @@ struct sctp_chunk *sctp_process_asconf(struct sctp_association *asoc,
* asconf parameter.
*/
length = ntohs(addr_param->p.length);
- asconf_param = (void *)addr_param + length;
chunk_len -= length;
/* create an ASCONF_ACK chunk.
@@ -3526,3 +3542,121 @@ struct sctp_chunk *sctp_make_fwdtsn(const struct sctp_association *asoc,
return retval;
}
+
+/* RE-CONFIG 3.1 (RE-CONFIG chunk)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Type = 130 | Chunk Flags | Chunk Length |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Re-configuration Parameter /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * \ \
+ * / Re-configuration Parameter (optional) /
+ * \ \
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+static struct sctp_chunk *sctp_make_reconf(
+ const struct sctp_association *asoc,
+ int length)
+{
+ struct sctp_reconf_chunk *reconf;
+ struct sctp_chunk *retval;
+
+ retval = sctp_make_control(asoc, SCTP_CID_RECONF, 0, length,
+ GFP_ATOMIC);
+ if (!retval)
+ return NULL;
+
+ reconf = (struct sctp_reconf_chunk *)retval->chunk_hdr;
+ retval->param_hdr.v = reconf->params;
+
+ return retval;
+}
+
+/* RE-CONFIG 4.1 (STREAM OUT RESET)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 13 | Parameter Length = 16 + 2 * N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Response Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Sender's Last Assigned TSN |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number 1 (optional) | Stream Number 2 (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / ...... /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number N-1 (optional) | Stream Number N (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * RE-CONFIG 4.2 (STREAM IN RESET)
+ * 0 1 2 3
+ * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Parameter Type = 14 | Parameter Length = 8 + 2 * N |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Re-configuration Request Sequence Number |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number 1 (optional) | Stream Number 2 (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * / ...... /
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | Stream Number N-1 (optional) | Stream Number N (optional) |
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ */
+struct sctp_chunk *sctp_make_strreset_req(
+ const struct sctp_association *asoc,
+ __u16 stream_num, __u16 *stream_list,
+ bool out, bool in)
+{
+ struct sctp_strreset_outreq outreq;
+ __u16 stream_len = stream_num * 2;
+ struct sctp_strreset_inreq inreq;
+ struct sctp_chunk *retval;
+ __u16 outlen, inlen, i;
+
+ outlen = (sizeof(outreq) + stream_len) * out;
+ inlen = (sizeof(inreq) + stream_len) * in;
+
+ retval = sctp_make_reconf(asoc, outlen + inlen);
+ if (!retval)
+ return NULL;
+
+ for (i = 0; i < stream_num; i++)
+ stream_list[i] = htons(stream_list[i]);
+
+ if (outlen) {
+ outreq.param_hdr.type = SCTP_PARAM_RESET_OUT_REQUEST;
+ outreq.param_hdr.length = htons(outlen);
+ outreq.request_seq = htonl(asoc->strreset_outseq);
+ outreq.response_seq = htonl(asoc->strreset_inseq - 1);
+ outreq.send_reset_at_tsn = htonl(asoc->next_tsn - 1);
+
+ sctp_addto_chunk(retval, sizeof(outreq), &outreq);
+
+ if (stream_len)
+ sctp_addto_chunk(retval, stream_len, stream_list);
+ }
+
+ if (inlen) {
+ inreq.param_hdr.type = SCTP_PARAM_RESET_IN_REQUEST;
+ inreq.param_hdr.length = htons(inlen);
+ inreq.request_seq = htonl(asoc->strreset_outseq + out);
+
+ sctp_addto_chunk(retval, sizeof(inreq), &inreq);
+
+ if (stream_len)
+ sctp_addto_chunk(retval, stream_len, stream_list);
+ }
+
+ for (i = 0; i < stream_num; i++)
+ stream_list[i] = ntohs(stream_list[i]);
+
+ return retval;
+}
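
As a quick size check against the diagrams above: the out-request header (4-byte parameter header plus three 32-bit fields) is 16 bytes and the in-request header is 8, so the outlen/inlen computed in sctp_make_strreset_req() reproduce the advertised lengths 16 + 2*N and 8 + 2*N. A trivial sketch of the arithmetic, with a hypothetical stream count:

    #include <stdio.h>

    int main(void)
    {
        unsigned n = 3;                /* hypothetical number of streams */
        unsigned outlen = 16 + 2 * n;  /* STREAM OUT RESET: 16 + 2*N = 22 */
        unsigned inlen  =  8 + 2 * n;  /* STREAM IN RESET:   8 + 2*N = 14 */

        printf("out=%u in=%u total reconf payload=%u\n",
               outlen, inlen, outlen + inlen);
        return 0;
    }
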
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c
index c345bf1..a455271 100644
--- a/net/sctp/sm_sideeffect.c
+++ b/net/sctp/sm_sideeffect.c
@@ -436,6 +436,37 @@ out_unlock:
sctp_association_put(asoc);
}
+ /* Handle the timeout of the RE-CONFIG timer. */
+void sctp_generate_reconf_event(unsigned long data)
+{
+ struct sctp_transport *transport = (struct sctp_transport *)data;
+ struct sctp_association *asoc = transport->asoc;
+ struct sock *sk = asoc->base.sk;
+ struct net *net = sock_net(sk);
+ int error = 0;
+
+ bh_lock_sock(sk);
+ if (sock_owned_by_user(sk)) {
+ pr_debug("%s: sock is busy\n", __func__);
+
+ /* Try again later. */
+ if (!mod_timer(&transport->reconf_timer, jiffies + (HZ / 20)))
+ sctp_transport_hold(transport);
+ goto out_unlock;
+ }
+
+ error = sctp_do_sm(net, SCTP_EVENT_T_TIMEOUT,
+ SCTP_ST_TIMEOUT(SCTP_EVENT_TIMEOUT_RECONF),
+ asoc->state, asoc->ep, asoc,
+ transport, GFP_ATOMIC);
+
+ if (error)
+ sk->sk_err = -error;
+
+out_unlock:
+ bh_unlock_sock(sk);
+ sctp_transport_put(transport);
+}
/* Inject a SACK Timeout event into the state machine. */
static void sctp_generate_sack_event(unsigned long data)
@@ -453,6 +484,7 @@ sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES] = {
sctp_generate_t4_rto_event,
sctp_generate_t5_shutdown_guard_event,
NULL,
+ NULL,
sctp_generate_sack_event,
sctp_generate_autoclose_event,
};
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 0ceded3..782e579 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -1021,6 +1021,34 @@ sctp_disposition_t sctp_sf_sendbeat_8_3(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+/* resend asoc strreset_chunk. */
+sctp_disposition_t sctp_sf_send_reconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type, void *arg,
+ sctp_cmd_seq_t *commands)
+{
+ struct sctp_transport *transport = arg;
+
+ if (asoc->overall_error_count >= asoc->max_retrans) {
+ sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR,
+ SCTP_ERROR(ETIMEDOUT));
+ /* CMD_ASSOC_FAILED calls CMD_DELETE_TCB. */
+ sctp_add_cmd_sf(commands, SCTP_CMD_ASSOC_FAILED,
+ SCTP_PERR(SCTP_ERROR_NO_ERROR));
+ SCTP_INC_STATS(net, SCTP_MIB_ABORTEDS);
+ SCTP_DEC_STATS(net, SCTP_MIB_CURRESTAB);
+ return SCTP_DISPOSITION_DELETE_TCB;
+ }
+
+ sctp_chunk_hold(asoc->strreset_chunk);
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY,
+ SCTP_CHUNK(asoc->strreset_chunk));
+ sctp_add_cmd_sf(commands, SCTP_CMD_STRIKE, SCTP_TRANSPORT(transport));
+
+ return SCTP_DISPOSITION_CONSUME;
+}
+
/*
* Process a heartbeat request.
*
@@ -5157,6 +5185,19 @@ sctp_disposition_t sctp_sf_do_prm_asconf(struct net *net,
return SCTP_DISPOSITION_CONSUME;
}
+/* RE-CONFIG Section 5.1 RECONF Chunk Procedures */
+sctp_disposition_t sctp_sf_do_prm_reconf(struct net *net,
+ const struct sctp_endpoint *ep,
+ const struct sctp_association *asoc,
+ const sctp_subtype_t type,
+ void *arg, sctp_cmd_seq_t *commands)
+{
+ struct sctp_chunk *chunk = arg;
+
+ sctp_add_cmd_sf(commands, SCTP_CMD_REPLY, SCTP_CHUNK(chunk));
+ return SCTP_DISPOSITION_CONSUME;
+}
+
/*
* Ignore the primitive event
*
diff --git a/net/sctp/sm_statetable.c b/net/sctp/sm_statetable.c
index a987d54..b5438b4 100644
--- a/net/sctp/sm_statetable.c
+++ b/net/sctp/sm_statetable.c
@@ -643,6 +643,25 @@ chunk_event_table_unknown[SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
} /* TYPE_SCTP_PRIMITIVE_ASCONF */
+#define TYPE_SCTP_PRIMITIVE_RECONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_closed), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_do_prm_reconf), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_error_shutdown), \
+} /* TYPE_SCTP_PRIMITIVE_RECONF */
+
/* The primary index for this table is the primitive type.
* The secondary index for this table is the state.
*/
@@ -653,6 +672,7 @@ static const sctp_sm_table_entry_t primitive_event_table[SCTP_NUM_PRIMITIVE_TYPE
TYPE_SCTP_PRIMITIVE_SEND,
TYPE_SCTP_PRIMITIVE_REQUESTHEARTBEAT,
TYPE_SCTP_PRIMITIVE_ASCONF,
+ TYPE_SCTP_PRIMITIVE_RECONF,
};
#define TYPE_SCTP_OTHER_NO_PENDING_TSN { \
@@ -888,6 +908,25 @@ static const sctp_sm_table_entry_t other_event_table[SCTP_NUM_OTHER_TYPES][SCTP_
TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
}
+#define TYPE_SCTP_EVENT_TIMEOUT_RECONF { \
+ /* SCTP_STATE_CLOSED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_WAIT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_COOKIE_ECHOED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_ESTABLISHED */ \
+ TYPE_SCTP_FUNC(sctp_sf_send_reconf), \
+ /* SCTP_STATE_SHUTDOWN_PENDING */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_RECEIVED */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+ /* SCTP_STATE_SHUTDOWN_ACK_SENT */ \
+ TYPE_SCTP_FUNC(sctp_sf_timer_ignore), \
+}
+
static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][SCTP_STATE_NUM_STATES] = {
TYPE_SCTP_EVENT_TIMEOUT_NONE,
TYPE_SCTP_EVENT_TIMEOUT_T1_COOKIE,
@@ -897,6 +936,7 @@ static const sctp_sm_table_entry_t timeout_event_table[SCTP_NUM_TIMEOUT_TYPES][S
TYPE_SCTP_EVENT_TIMEOUT_T4_RTO,
TYPE_SCTP_EVENT_TIMEOUT_T5_SHUTDOWN_GUARD,
TYPE_SCTP_EVENT_TIMEOUT_HEARTBEAT,
+ TYPE_SCTP_EVENT_TIMEOUT_RECONF,
TYPE_SCTP_EVENT_TIMEOUT_SACK,
TYPE_SCTP_EVENT_TIMEOUT_AUTOCLOSE,
};
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 318c678..5fc7122 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -235,8 +235,12 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
sctp_assoc_t id)
{
struct sctp_association *addr_asoc = NULL, *id_asoc = NULL;
- struct sctp_transport *transport;
+ struct sctp_af *af = sctp_get_af_specific(addr->ss_family);
union sctp_addr *laddr = (union sctp_addr *)addr;
+ struct sctp_transport *transport;
+
+ if (sctp_verify_addr(sk, laddr, af->sockaddr_len))
+ return NULL;
addr_asoc = sctp_endpoint_lookup_assoc(sctp_sk(sk)->ep,
laddr,
@@ -360,7 +364,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
}
}
- if (snum && snum < PROT_SOCK &&
+ if (snum && snum < inet_prot_sock(net) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
return -EACCES;
@@ -1152,8 +1156,10 @@ static int __sctp_connect(struct sock *sk,
* accept new associations, but it SHOULD NOT
* be permitted to open new associations.
*/
- if (ep->base.bind_addr.port < PROT_SOCK &&
- !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
+ if (ep->base.bind_addr.port <
+ inet_prot_sock(net) &&
+ !ns_capable(net->user_ns,
+ CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_free;
}
@@ -1818,7 +1824,7 @@ static int sctp_sendmsg(struct sock *sk, struct msghdr *msg, size_t msg_len)
* but it SHOULD NOT be permitted to open new
* associations.
*/
- if (ep->base.bind_addr.port < PROT_SOCK &&
+ if (ep->base.bind_addr.port < inet_prot_sock(net) &&
!ns_capable(net->user_ns, CAP_NET_BIND_SERVICE)) {
err = -EACCES;
goto out_unlock;
@@ -2430,7 +2436,6 @@ static int sctp_apply_peer_addr_params(struct sctp_paddrparams *params,
sctp_assoc_sync_pmtu(sctp_opt2sk(sp), asoc);
} else if (asoc) {
asoc->pathmtu = params->spp_pathmtu;
- sctp_frag_point(asoc, params->spp_pathmtu);
} else {
sp->pathmtu = params->spp_pathmtu;
}
@@ -3751,6 +3756,68 @@ out:
return retval;
}
+static int sctp_setsockopt_enable_strreset(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen != sizeof(params))
+ goto out;
+
+ if (copy_from_user(&params, optval, optlen)) {
+ retval = -EFAULT;
+ goto out;
+ }
+
+ if (params.assoc_value & (~SCTP_ENABLE_STRRESET_MASK))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ asoc->strreset_enable = params.assoc_value;
+ } else if (!params.assoc_id) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ sp->ep->strreset_enable = params.assoc_value;
+ } else {
+ goto out;
+ }
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
+static int sctp_setsockopt_reset_streams(struct sock *sk,
+ char __user *optval,
+ unsigned int optlen)
+{
+ struct sctp_reset_streams *params;
+ struct sctp_association *asoc;
+ int retval = -EINVAL;
+
+ if (optlen < sizeof(struct sctp_reset_streams))
+ return -EINVAL;
+
+ params = memdup_user(optval, optlen);
+ if (IS_ERR(params))
+ return PTR_ERR(params);
+
+ asoc = sctp_id2assoc(sk, params->srs_assoc_id);
+ if (!asoc)
+ goto out;
+
+ retval = sctp_send_reset_streams(asoc, params);
+
+out:
+ kfree(params);
+ return retval;
+}
+
/* API 6.2 setsockopt(), getsockopt()
*
* Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3917,6 +3984,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
case SCTP_DEFAULT_PRINFO:
retval = sctp_setsockopt_default_prinfo(sk, optval, optlen);
break;
+ case SCTP_ENABLE_STREAM_RESET:
+ retval = sctp_setsockopt_enable_strreset(sk, optval, optlen);
+ break;
+ case SCTP_RESET_STREAMS:
+ retval = sctp_setsockopt_reset_streams(sk, optval, optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
@@ -6401,6 +6474,47 @@ out:
return retval;
}
+static int sctp_getsockopt_enable_strreset(struct sock *sk, int len,
+ char __user *optval,
+ int __user *optlen)
+{
+ struct sctp_assoc_value params;
+ struct sctp_association *asoc;
+ int retval = -EFAULT;
+
+ if (len < sizeof(params)) {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ len = sizeof(params);
+ if (copy_from_user(&params, optval, len))
+ goto out;
+
+ asoc = sctp_id2assoc(sk, params.assoc_id);
+ if (asoc) {
+ params.assoc_value = asoc->strreset_enable;
+ } else if (!params.assoc_id) {
+ struct sctp_sock *sp = sctp_sk(sk);
+
+ params.assoc_value = sp->ep->strreset_enable;
+ } else {
+ retval = -EINVAL;
+ goto out;
+ }
+
+ if (put_user(len, optlen))
+ goto out;
+
+ if (copy_to_user(optval, &params, len))
+ goto out;
+
+ retval = 0;
+
+out:
+ return retval;
+}
+
static int sctp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
@@ -6568,6 +6682,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
retval = sctp_getsockopt_pr_assocstatus(sk, len, optval,
optlen);
break;
+ case SCTP_ENABLE_STREAM_RESET:
+ retval = sctp_getsockopt_enable_strreset(sk, len, optval,
+ optlen);
+ break;
default:
retval = -ENOPROTOOPT;
break;
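
The matching getsockopt() reads the mask back; a hedged sketch for the endpoint-wide value (assoc_id 0), again assuming the uapi definitions from this series:

#include <sys/socket.h>
#include <netinet/sctp.h>

/* sketch: read the endpoint's stream-reset enable mask */
static int get_strreset_mask(int fd, unsigned int *mask)
{
        struct sctp_assoc_value av = { .assoc_id = 0 };
        socklen_t len = sizeof(av);
        int rc;

        rc = getsockopt(fd, IPPROTO_SCTP, SCTP_ENABLE_STREAM_RESET,
                        &av, &len);
        if (!rc)
                *mask = av.assoc_value;
        return rc;
}
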
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index f86de43..13d5e07 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -33,6 +33,7 @@
*/
#include <net/sctp/sctp.h>
+#include <net/sctp/sm.h>
struct sctp_stream *sctp_stream_new(__u16 incnt, __u16 outcnt, gfp_t gfp)
{
@@ -83,3 +84,81 @@ void sctp_stream_clear(struct sctp_stream *stream)
for (i = 0; i < stream->incnt; i++)
stream->in[i].ssn = 0;
}
+
+static int sctp_send_reconf(struct sctp_association *asoc,
+ struct sctp_chunk *chunk)
+{
+ struct net *net = sock_net(asoc->base.sk);
+ int retval = 0;
+
+ retval = sctp_primitive_RECONF(net, asoc, chunk);
+ if (retval)
+ sctp_chunk_free(chunk);
+
+ return retval;
+}
+
+int sctp_send_reset_streams(struct sctp_association *asoc,
+ struct sctp_reset_streams *params)
+{
+ struct sctp_stream *stream = asoc->stream;
+ __u16 i, str_nums, *str_list;
+ struct sctp_chunk *chunk;
+ int retval = -EINVAL;
+ bool out, in;
+
+ if (!asoc->peer.reconf_capable ||
+ !(asoc->strreset_enable & SCTP_ENABLE_RESET_STREAM_REQ)) {
+ retval = -ENOPROTOOPT;
+ goto out;
+ }
+
+ if (asoc->strreset_outstanding) {
+ retval = -EINPROGRESS;
+ goto out;
+ }
+
+ out = params->srs_flags & SCTP_STREAM_RESET_OUTGOING;
+ in = params->srs_flags & SCTP_STREAM_RESET_INCOMING;
+ if (!out && !in)
+ goto out;
+
+ str_nums = params->srs_number_streams;
+ str_list = params->srs_stream_list;
+ if (out && str_nums)
+ for (i = 0; i < str_nums; i++)
+ if (str_list[i] >= stream->outcnt)
+ goto out;
+
+ if (in && str_nums)
+ for (i = 0; i < str_nums; i++)
+ if (str_list[i] >= stream->incnt)
+ goto out;
+
+ chunk = sctp_make_strreset_req(asoc, str_nums, str_list, out, in);
+ if (!chunk)
+ goto out;
+
+ if (out) {
+ if (str_nums)
+ for (i = 0; i < str_nums; i++)
+ stream->out[str_list[i]].state =
+ SCTP_STREAM_CLOSED;
+ else
+ for (i = 0; i < stream->outcnt; i++)
+ stream->out[i].state = SCTP_STREAM_CLOSED;
+ }
+
+ asoc->strreset_outstanding = out + in;
+ asoc->strreset_chunk = chunk;
+ sctp_chunk_hold(asoc->strreset_chunk);
+
+ retval = sctp_send_reconf(asoc, chunk);
+ if (retval) {
+ sctp_chunk_put(asoc->strreset_chunk);
+ asoc->strreset_chunk = NULL;
+ }
+
+out:
+ return retval;
+}
diff --git a/net/sctp/transport.c b/net/sctp/transport.c
index a1652ab..baa1ac0 100644
--- a/net/sctp/transport.c
+++ b/net/sctp/transport.c
@@ -88,9 +88,11 @@ static struct sctp_transport *sctp_transport_init(struct net *net,
INIT_LIST_HEAD(&peer->transports);
setup_timer(&peer->T3_rtx_timer, sctp_generate_t3_rtx_event,
- (unsigned long)peer);
+ (unsigned long)peer);
setup_timer(&peer->hb_timer, sctp_generate_heartbeat_event,
- (unsigned long)peer);
+ (unsigned long)peer);
+ setup_timer(&peer->reconf_timer, sctp_generate_reconf_event,
+ (unsigned long)peer);
setup_timer(&peer->proto_unreach_timer,
sctp_generate_proto_unreach_event, (unsigned long)peer);
@@ -144,6 +146,9 @@ void sctp_transport_free(struct sctp_transport *transport)
if (del_timer(&transport->T3_rtx_timer))
sctp_transport_put(transport);
+ if (del_timer(&transport->reconf_timer))
+ sctp_transport_put(transport);
+
/* Delete the ICMP proto unreachable timer if it's active. */
if (del_timer(&transport->proto_unreach_timer))
sctp_association_put(transport->asoc);
@@ -211,6 +216,14 @@ void sctp_transport_reset_hb_timer(struct sctp_transport *transport)
sctp_transport_hold(transport);
}
+void sctp_transport_reset_reconf_timer(struct sctp_transport *transport)
+{
+ if (!timer_pending(&transport->reconf_timer))
+ if (!mod_timer(&transport->reconf_timer,
+ jiffies + transport->rto))
+ sctp_transport_hold(transport);
+}
+
/* This transport has been assigned to an association.
* Initialize fields from the association or from the sock itself.
* Register the reference count in the association.
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index e1e684c..cc6b6f8 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -10,6 +10,7 @@
*/
#include <linux/in.h>
+#include <linux/if_ether.h>
#include <net/sock.h>
#include <net/tcp.h>
@@ -151,8 +152,7 @@ int smc_clc_send_proposal(struct smc_sock *smc,
pclc.hdr.version = SMC_CLC_V1; /* SMC version */
memcpy(pclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
memcpy(&pclc.lcl.gid, &smcibdev->gid[ibport - 1], SMC_GID_SIZE);
- memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1],
- sizeof(smcibdev->mac[ibport - 1]));
+ memcpy(&pclc.lcl.mac, &smcibdev->mac[ibport - 1], ETH_ALEN);
/* determine subnet and mask from internal TCP socket */
rc = smc_netinfo_by_tcpsk(smc->clcsock, &pclc.outgoing_subnet,
@@ -199,8 +199,7 @@ int smc_clc_send_confirm(struct smc_sock *smc)
memcpy(cclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
memcpy(&cclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
SMC_GID_SIZE);
- memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1],
- sizeof(link->smcibdev->mac));
+ memcpy(&cclc.lcl.mac, &link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
hton24(cclc.qpn, link->roce_qp->qp_num);
cclc.rmb_rkey =
htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
@@ -252,8 +251,7 @@ int smc_clc_send_accept(struct smc_sock *new_smc, int srv_first_contact)
memcpy(aclc.lcl.id_for_peer, local_systemid, sizeof(local_systemid));
memcpy(&aclc.lcl.gid, &link->smcibdev->gid[link->ibport - 1],
SMC_GID_SIZE);
- memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1],
- sizeof(link->smcibdev->mac[link->ibport - 1]));
+ memcpy(&aclc.lcl.mac, link->smcibdev->mac[link->ibport - 1], ETH_ALEN);
hton24(aclc.qpn, link->roce_qp->qp_num);
aclc.rmb_rkey =
htonl(conn->rmb_desc->mr_rx[SMC_SINGLE_LINK]->rkey);
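
The three smc_clc.c hunks address the same pitfall: using sizeof() on an array expression when copying a MAC address. Two call sites were already copying the intended six bytes (sizeof of a single row) and are merely normalized to ETH_ALEN, but smc_clc_send_confirm() took sizeof(link->smcibdev->mac) -- the whole two-dimensional array -- and so over-copied into a six-byte field. A standalone sketch of the difference:

#include <stdio.h>

int main(void)
{
        char mac[2][6]; /* mirrors char mac[SMC_MAX_PORTS][ETH_ALEN] */

        /* whole array: 12 bytes -- what sizeof(smcibdev->mac) yields */
        printf("%zu\n", sizeof(mac));
        /* one row: 6 bytes -- what a single port's copy needs */
        printf("%zu\n", sizeof(mac[0]));
        return 0;
}
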
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 8b1d343..0eac633 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -532,6 +532,7 @@ int smc_sndbuf_create(struct smc_sock *smc)
__GFP_NORETRY);
if (!sndbuf_desc->cpu_addr) {
kfree(sndbuf_desc);
+ sndbuf_desc = NULL;
/* if send buffer allocation has failed,
* try a smaller one
*/
@@ -543,6 +544,7 @@ int smc_sndbuf_create(struct smc_sock *smc)
if (rc) {
kfree(sndbuf_desc->cpu_addr);
kfree(sndbuf_desc);
+ sndbuf_desc = NULL;
continue; /* if mapping failed, try smaller one */
}
sndbuf_desc->used = 1;
@@ -596,6 +598,7 @@ int smc_rmb_create(struct smc_sock *smc)
__GFP_NORETRY);
if (!rmb_desc->cpu_addr) {
kfree(rmb_desc);
+ rmb_desc = NULL;
/* if RMB allocation has failed,
* try a smaller one
*/
@@ -607,6 +610,7 @@ int smc_rmb_create(struct smc_sock *smc)
if (rc) {
kfree(rmb_desc->cpu_addr);
kfree(rmb_desc);
+ rmb_desc = NULL;
continue; /* if mapping failed, try smaller one */
}
rc = smc_ib_get_memory_region(lgr->lnk[SMC_SINGLE_LINK].roce_pd,
@@ -619,6 +623,7 @@ int smc_rmb_create(struct smc_sock *smc)
DMA_FROM_DEVICE);
kfree(rmb_desc->cpu_addr);
kfree(rmb_desc);
+ rmb_desc = NULL;
continue;
}
rmb_desc->used = 1;
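
The smc_core.c additions all null the descriptor pointer immediately after kfree() inside the shrink-and-retry loops; without that, a later iteration or the success check after the loop can act on a dangling pointer. A minimal sketch of the idiom, assuming kernel context (the struct and sizes here are hypothetical stand-ins):

#include <linux/slab.h>
#include <linux/sizes.h>

struct buf_desc {               /* hypothetical, stands in for smc_buf_desc */
        void *cpu_addr;
};

static struct buf_desc *alloc_shrinking(void)
{
        struct buf_desc *desc = NULL;
        int size;

        for (size = SZ_64K; size >= SZ_4K; size >>= 1) {
                desc = kzalloc(sizeof(*desc), GFP_KERNEL);
                if (!desc)
                        break;
                desc->cpu_addr = kzalloc(size, GFP_KERNEL | __GFP_NORETRY);
                if (!desc->cpu_addr) {
                        kfree(desc);
                        desc = NULL;    /* keep 'desc' from dangling */
                        continue;       /* retry with a smaller buffer */
                }
                break;                  /* success */
        }
        return desc;    /* NULL on failure, never a freed pointer */
}
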
diff --git a/net/smc/smc_ib.h b/net/smc/smc_ib.h
index 3fe2d55..a95f74b 100644
--- a/net/smc/smc_ib.h
+++ b/net/smc/smc_ib.h
@@ -11,6 +11,7 @@
#ifndef _SMC_IB_H
#define _SMC_IB_H
+#include <linux/if_ether.h>
#include <rdma/ib_verbs.h>
#define SMC_MAX_PORTS 2 /* Max # of ports */
@@ -34,7 +35,8 @@ struct smc_ib_device { /* ib-device infos for smc */
struct ib_cq *roce_cq_recv; /* recv completion queue */
struct tasklet_struct send_tasklet; /* called by send cq handler */
struct tasklet_struct recv_tasklet; /* called by recv cq handler */
- char mac[SMC_MAX_PORTS][6]; /* mac address per port*/
+ char mac[SMC_MAX_PORTS][ETH_ALEN];
+ /* mac address per port */

union ib_gid gid[SMC_MAX_PORTS]; /* gid per port */
u8 initialized : 1; /* ib dev CQ, evthdl done */
struct work_struct port_event_work;
diff --git a/net/socket.c b/net/socket.c
index 3ef02e9..b7a63d5 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -531,7 +531,7 @@ static ssize_t sockfs_listxattr(struct dentry *dentry, char *buffer,
return used;
}
-int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
+static int sockfs_setattr(struct dentry *dentry, struct iattr *iattr)
{
int err = simple_setattr(dentry, iattr);
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 886e9d38..1530825 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -1489,7 +1489,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
case RPC_GSS_PROC_DESTROY:
if (gss_write_verf(rqstp, rsci->mechctx, gc->gc_seq))
goto auth_err;
- rsci->h.expiry_time = get_seconds();
+ rsci->h.expiry_time = seconds_since_boot();
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
if (resv->iov_len + 4 > PAGE_SIZE)
goto drop;
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index 3bc1d61..9c9db55 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -799,6 +799,8 @@ static int svc_handle_xprt(struct svc_rqst *rqstp, struct svc_xprt *xprt)
if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) {
dprintk("svc_recv: found XPT_CLOSE\n");
+ if (test_and_clear_bit(XPT_KILL_TEMP, &xprt->xpt_flags))
+ xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
svc_delete_xprt(xprt);
/* Leave XPT_BUSY set on the dead xprt: */
goto out;
@@ -1020,9 +1022,11 @@ void svc_age_temp_xprts_now(struct svc_serv *serv, struct sockaddr *server_addr)
le = to_be_closed.next;
list_del_init(le);
xprt = list_entry(le, struct svc_xprt, xpt_list);
- dprintk("svc_age_temp_xprts_now: closing %p\n", xprt);
- xprt->xpt_ops->xpo_kill_temp_xprt(xprt);
- svc_close_xprt(xprt);
+ set_bit(XPT_CLOSE, &xprt->xpt_flags);
+ set_bit(XPT_KILL_TEMP, &xprt->xpt_flags);
+ dprintk("svc_age_temp_xprts_now: queuing xprt %p for closing\n",
+ xprt);
+ svc_xprt_enqueue(xprt);
}
}
EXPORT_SYMBOL_GPL(svc_age_temp_xprts_now);
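
Instead of tearing the transport down inline, the aging path now only marks it -- XPT_CLOSE plus the new XPT_KILL_TEMP -- and enqueues it, so svc_handle_xprt() runs xpo_kill_temp_xprt() and the delete in the thread that owns the transport. A hedged sketch of the general mark-and-defer shape (all names hypothetical):

#include <linux/bitops.h>

enum { MY_CLOSE, MY_KILL_EXTRA };       /* hypothetical flag bits */

struct my_xprt {
        unsigned long flags;
};

static void my_kill_extra(struct my_xprt *x)
{
        /* per-type teardown; must run exactly once */
}

/* producer: request teardown without doing it inline */
static void my_request_close(struct my_xprt *x)
{
        set_bit(MY_CLOSE, &x->flags);
        set_bit(MY_KILL_EXTRA, &x->flags);
        /* ...enqueue x so its owning thread calls my_handle()... */
}

/* consumer: runs in the thread that owns 'x' */
static void my_handle(struct my_xprt *x)
{
        if (!test_bit(MY_CLOSE, &x->flags))
                return;
        if (test_and_clear_bit(MY_KILL_EXTRA, &x->flags))
                my_kill_extra(x);       /* the flag makes this one-shot */
        /* ...final delete of x... */
}
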
diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
index 57d35fb..172b537 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
@@ -347,8 +347,6 @@ int rdma_read_chunk_frmr(struct svcxprt_rdma *xprt,
atomic_inc(&rdma_stat_read);
return ret;
err:
- ib_dma_unmap_sg(xprt->sc_cm_id->device,
- frmr->sg, frmr->sg_nents, frmr->direction);
svc_rdma_put_context(ctxt, 0);
svc_rdma_put_frmr(xprt, frmr);
return ret;
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index c35fad3..7d99029 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -1,7 +1,7 @@
/*
* net/tipc/bcast.c: TIPC broadcast code
*
- * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
+ * Copyright (c) 2004-2006, 2014-2016, Ericsson AB
* Copyright (c) 2004, Intel Corporation.
* Copyright (c) 2005, 2010-2011, Wind River Systems
* All rights reserved.
@@ -39,9 +39,8 @@
#include "socket.h"
#include "msg.h"
#include "bcast.h"
-#include "name_distr.h"
#include "link.h"
-#include "node.h"
+#include "name_table.h"
#define BCLINK_WIN_DEFAULT 50 /* bcast link window size (default) */
#define BCLINK_WIN_MIN 32 /* bcast minimum link window size */
@@ -54,12 +53,20 @@ const char tipc_bclink_name[] = "broadcast-link";
* @inputq: data input queue; will only carry SOCK_WAKEUP messages
* @dest: array keeping number of reachable destinations per bearer
* @primary_bearer: a bearer having links to all broadcast destinations, if any
+ * @bcast_support: indicates if primary bearer, if any, supports broadcast
+ * @rcast_support: indicates if all peer nodes support replicast
+ * @rc_ratio: dest count as percentage of cluster size where send method changes
+ * @bc_threshold: calculated from rc_ratio; if dests > threshold use broadcast
*/
struct tipc_bc_base {
struct tipc_link *link;
struct sk_buff_head inputq;
int dests[MAX_BEARERS];
int primary_bearer;
+ bool bcast_support;
+ bool rcast_support;
+ int rc_ratio;
+ int bc_threshold;
};
static struct tipc_bc_base *tipc_bc_base(struct net *net)
@@ -69,7 +76,20 @@ static struct tipc_bc_base *tipc_bc_base(struct net *net)
int tipc_bcast_get_mtu(struct net *net)
{
- return tipc_link_mtu(tipc_bc_sndlink(net));
+ return tipc_link_mtu(tipc_bc_sndlink(net)) - INT_H_SIZE;
+}
+
+void tipc_bcast_disable_rcast(struct net *net)
+{
+ tipc_bc_base(net)->rcast_support = false;
+}
+
+static void tipc_bcbase_calc_bc_threshold(struct net *net)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ int cluster_size = tipc_link_bc_peers(tipc_bc_sndlink(net));
+
+ bb->bc_threshold = 1 + (cluster_size * bb->rc_ratio / 100);
}
/* tipc_bcbase_select_primary(): find a bearer with links to all destinations,
@@ -79,9 +99,10 @@ static void tipc_bcbase_select_primary(struct net *net)
{
struct tipc_bc_base *bb = tipc_bc_base(net);
int all_dests = tipc_link_bc_peers(bb->link);
- int i, mtu;
+ int i, mtu, prim;
bb->primary_bearer = INVALID_BEARER_ID;
+ bb->bcast_support = true;
if (!all_dests)
return;
@@ -93,7 +114,7 @@ static void tipc_bcbase_select_primary(struct net *net)
mtu = tipc_bearer_mtu(net, i);
if (mtu < tipc_link_mtu(bb->link))
tipc_link_set_mtu(bb->link, mtu);
-
+ bb->bcast_support &= tipc_bearer_bcast_support(net, i);
if (bb->dests[i] < all_dests)
continue;
@@ -103,6 +124,9 @@ static void tipc_bcbase_select_primary(struct net *net)
if ((i ^ tipc_own_addr(net)) & 1)
break;
}
+ prim = bb->primary_bearer;
+ if (prim != INVALID_BEARER_ID)
+ bb->bcast_support = tipc_bearer_bcast_support(net, prim);
}
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id)
@@ -170,42 +194,128 @@ static void tipc_bcbase_xmit(struct net *net, struct sk_buff_head *xmitq)
__skb_queue_purge(&_xmitq);
}
-/* tipc_bcast_xmit - deliver buffer chain to all nodes in cluster
- * and to identified node local sockets
+static void tipc_bcast_select_xmit_method(struct net *net, int dests,
+ struct tipc_mc_method *method)
+{
+ struct tipc_bc_base *bb = tipc_bc_base(net);
+ unsigned long exp = method->expires;
+
+ /* Broadcast supported by used bearer/bearers? */
+ if (!bb->bcast_support) {
+ method->rcast = true;
+ return;
+ }
+ /* Any destinations which don't support replicast? */
+ if (!bb->rcast_support) {
+ method->rcast = false;
+ return;
+ }
+ /* Can current method be changed? */
+ method->expires = jiffies + TIPC_METHOD_EXPIRE;
+ if (method->mandatory || time_before(jiffies, exp))
+ return;
+
+ /* Determine method to use now */
+ method->rcast = dests <= bb->bc_threshold;
+}
+
+/* tipc_bcast_xmit - broadcast the buffer chain to all external nodes
* @net: the applicable net namespace
- * @list: chain of buffers containing message
+ * @pkts: chain of buffers containing message
+ * @cong_link_cnt: set to 1 if broadcast link is congested, otherwise 0
* Consumes the buffer chain.
- * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
+ * Returns 0 if success, otherwise errno: -EHOSTUNREACH,-EMSGSIZE
*/
-int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list)
+static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ u16 *cong_link_cnt)
{
struct tipc_link *l = tipc_bc_sndlink(net);
- struct sk_buff_head xmitq, inputq, rcvq;
+ struct sk_buff_head xmitq;
int rc = 0;
- __skb_queue_head_init(&rcvq);
__skb_queue_head_init(&xmitq);
- skb_queue_head_init(&inputq);
-
- /* Prepare message clone for local node */
- if (unlikely(!tipc_msg_reassemble(list, &rcvq)))
- return -EHOSTUNREACH;
-
tipc_bcast_lock(net);
if (tipc_link_bc_peers(l))
- rc = tipc_link_xmit(l, list, &xmitq);
+ rc = tipc_link_xmit(l, pkts, &xmitq);
tipc_bcast_unlock(net);
+ tipc_bcbase_xmit(net, &xmitq);
+ __skb_queue_purge(pkts);
+ if (rc == -ELINKCONG) {
+ *cong_link_cnt = 1;
+ rc = 0;
+ }
+ return rc;
+}
- /* Don't send to local node if adding to link failed */
- if (unlikely(rc && (rc != -ELINKCONG))) {
- __skb_queue_purge(&rcvq);
- return rc;
+/* tipc_rcast_xmit - replicate and send a message to given destination nodes
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @dests: list of destination nodes
+ * @cong_link_cnt: returns number of congested links
+ * @cong_links: returns identities of congested links
+ * Returns 0 if success, otherwise errno
+ */
+static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_nlist *dests, u16 *cong_link_cnt)
+{
+ struct sk_buff_head _pkts;
+ struct u32_item *n, *tmp;
+ u32 dst, selector;
+
+ selector = msg_link_selector(buf_msg(skb_peek(pkts)));
+ __skb_queue_head_init(&_pkts);
+
+ list_for_each_entry_safe(n, tmp, &dests->list, list) {
+ dst = n->value;
+ if (!tipc_msg_pskb_copy(dst, pkts, &_pkts))
+ return -ENOMEM;
+
+ /* Any other return value than -ELINKCONG is ignored */
+ if (tipc_node_xmit(net, &_pkts, dst, selector) == -ELINKCONG)
+ (*cong_link_cnt)++;
}
+ return 0;
+}
- /* Broadcast to all nodes, inluding local node */
- tipc_bcbase_xmit(net, &xmitq);
- tipc_sk_mcast_rcv(net, &rcvq, &inputq);
- __skb_queue_purge(list);
+/* tipc_mcast_xmit - deliver message to indicated destination nodes
+ * and to identified node local sockets
+ * @net: the applicable net namespace
+ * @pkts: chain of buffers containing message
+ * @method: send method to be used
+ * @dests: destination nodes for message.
+ * @cong_link_cnt: returns number of encountered congested destination links
+ * Consumes buffer chain.
+ * Returns 0 if success, otherwise errno
+ */
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_mc_method *method, struct tipc_nlist *dests,
+ u16 *cong_link_cnt)
+{
+ struct sk_buff_head inputq, localq;
+ int rc = 0;
+
+ skb_queue_head_init(&inputq);
+ skb_queue_head_init(&localq);
+
+ /* Clone packets before they are consumed by next call */
+ if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {
+ rc = -ENOMEM;
+ goto exit;
+ }
+ /* Send according to determined transmit method */
+ if (dests->remote) {
+ tipc_bcast_select_xmit_method(net, dests->remote, method);
+ if (method->rcast)
+ rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);
+ else
+ rc = tipc_bcast_xmit(net, pkts, cong_link_cnt);
+ }
+
+ if (dests->local)
+ tipc_sk_mcast_rcv(net, &localq, &inputq);
+exit:
+ /* This queue should normally be empty by now */
+ __skb_queue_purge(pkts);
return rc;
}
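
tipc_bcbase_calc_bc_threshold() and tipc_bcast_select_xmit_method() together implement the broadcast/replicast decision: with the default rc_ratio of 25 (set in tipc_bcast_init() further down), a cluster of 12 peers gives bc_threshold = 1 + 12 * 25 / 100 = 4, so a multicast resolving to 4 or fewer destination nodes is replicated as unicasts and a larger one goes out as true broadcast -- unless one side lacks support or the user pinned the method. A small standalone sketch of the arithmetic:

#include <stdio.h>

/* mirrors tipc_bcbase_calc_bc_threshold(); rc_ratio defaults to 25 */
static int bc_threshold(int cluster_size, int rc_ratio)
{
        return 1 + (cluster_size * rc_ratio / 100);
}

int main(void)
{
        int t = bc_threshold(12, 25);   /* -> 4 */

        printf("threshold=%d: 3 dests -> replicast, 5 -> broadcast\n", t);
        return 0;
}
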
@@ -313,6 +423,7 @@ void tipc_bcast_add_peer(struct net *net, struct tipc_link *uc_l,
tipc_bcast_lock(net);
tipc_link_add_bc_peer(snd_l, uc_l, xmitq);
tipc_bcbase_select_primary(net);
+ tipc_bcbase_calc_bc_threshold(net);
tipc_bcast_unlock(net);
}
@@ -331,6 +442,7 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_l)
tipc_bcast_lock(net);
tipc_link_remove_bc_peer(snd_l, rcv_l, &xmitq);
tipc_bcbase_select_primary(net);
+ tipc_bcbase_calc_bc_threshold(net);
tipc_bcast_unlock(net);
tipc_bcbase_xmit(net, &xmitq);
@@ -413,6 +525,8 @@ int tipc_bcast_init(struct net *net)
goto enomem;
bb->link = l;
tn->bcl = l;
+ bb->rc_ratio = 25;
+ bb->rcast_support = true;
return 0;
enomem:
kfree(bb);
@@ -428,3 +542,33 @@ void tipc_bcast_stop(struct net *net)
kfree(tn->bcbase);
kfree(tn->bcl);
}
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self)
+{
+ memset(nl, 0, sizeof(*nl));
+ INIT_LIST_HEAD(&nl->list);
+ nl->self = self;
+}
+
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node)
+{
+ if (node == nl->self)
+ nl->local = true;
+ else if (u32_push(&nl->list, node))
+ nl->remote++;
+}
+
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node)
+{
+ if (node == nl->self)
+ nl->local = false;
+ else if (u32_del(&nl->list, node))
+ nl->remote--;
+}
+
+void tipc_nlist_purge(struct tipc_nlist *nl)
+{
+ u32_list_purge(&nl->list);
+ nl->remote = 0;
+ nl->local = 0;
+}
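
The new tipc_nlist helpers keep a destination set split into a local flag plus a counted list of remote node addresses; tipc_sendmcast() (further down, in socket.c) initializes one with the own address, fills it from the name table and purges it after transmit. A minimal sketch of the intended call sequence, in kernel context with an illustrative peer address:

/* sketch: building and consuming a destination set for one multicast */
static int mcast_dests_example(struct net *net, u32 peer_addr)
{
        struct tipc_nlist dsts;

        tipc_nlist_init(&dsts, tipc_own_addr(net));
        tipc_nlist_add(&dsts, tipc_own_addr(net)); /* own node: local = true */
        tipc_nlist_add(&dsts, peer_addr);          /* counted in dsts.remote */
        if (!dsts.local && !dsts.remote)
                return -EHOSTUNREACH;
        /* ...hand &dsts to tipc_mcast_xmit(), then... */
        tipc_nlist_purge(&dsts);
        return 0;
}
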
diff --git a/net/tipc/bcast.h b/net/tipc/bcast.h
index 855d53c..751530a 100644
--- a/net/tipc/bcast.h
+++ b/net/tipc/bcast.h
@@ -42,9 +42,35 @@
struct tipc_node;
struct tipc_msg;
struct tipc_nl_msg;
-struct tipc_node_map;
+struct tipc_nlist;
+struct tipc_nitem;
extern const char tipc_bclink_name[];
+#define TIPC_METHOD_EXPIRE msecs_to_jiffies(5000)
+
+struct tipc_nlist {
+ struct list_head list;
+ u32 self;
+ u16 remote;
+ bool local;
+};
+
+void tipc_nlist_init(struct tipc_nlist *nl, u32 self);
+void tipc_nlist_purge(struct tipc_nlist *nl);
+void tipc_nlist_add(struct tipc_nlist *nl, u32 node);
+void tipc_nlist_del(struct tipc_nlist *nl, u32 node);
+
+/* Cookie to be used between socket and broadcast layer
+ * @rcast: replicast (instead of broadcast) was used at previous xmit
+ * @mandatory: broadcast/replicast indication was set by user
+ * @expires: re-evaluate non-mandatory transmit method if we are past this
+ */
+struct tipc_mc_method {
+ bool rcast;
+ bool mandatory;
+ unsigned long expires;
+};
+
int tipc_bcast_init(struct net *net);
void tipc_bcast_stop(struct net *net);
void tipc_bcast_add_peer(struct net *net, struct tipc_link *l,
@@ -53,7 +79,10 @@ void tipc_bcast_remove_peer(struct net *net, struct tipc_link *rcv_bcl);
void tipc_bcast_inc_bearer_dst_cnt(struct net *net, int bearer_id);
void tipc_bcast_dec_bearer_dst_cnt(struct net *net, int bearer_id);
int tipc_bcast_get_mtu(struct net *net);
-int tipc_bcast_xmit(struct net *net, struct sk_buff_head *list);
+void tipc_bcast_disable_rcast(struct net *net);
+int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,
+ struct tipc_mc_method *method, struct tipc_nlist *dests,
+ u16 *cong_link_cnt);
int tipc_bcast_rcv(struct net *net, struct tipc_link *l, struct sk_buff *skb);
void tipc_bcast_ack_rcv(struct net *net, struct tipc_link *l,
struct tipc_msg *hdr);
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 52d7476..33a5bdf 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -431,7 +431,7 @@ int tipc_enable_l2_media(struct net *net, struct tipc_bearer *b,
memset(&b->bcast_addr, 0, sizeof(b->bcast_addr));
memcpy(b->bcast_addr.value, dev->broadcast, b->media->hwaddr_len);
b->bcast_addr.media_id = b->media->type_id;
- b->bcast_addr.broadcast = 1;
+ b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
b->mtu = dev->mtu;
b->media->raw2addr(b, &b->addr, (char *)dev->dev_addr);
rcu_assign_pointer(dev->tipc_ptr, b);
@@ -482,6 +482,19 @@ int tipc_l2_send_msg(struct net *net, struct sk_buff *skb,
return 0;
}
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id)
+{
+ bool supp = false;
+ struct tipc_bearer *b;
+
+ rcu_read_lock();
+ b = bearer_get(net, bearer_id);
+ if (b)
+ supp = (b->bcast_addr.broadcast == TIPC_BROADCAST_SUPPORT);
+ rcu_read_unlock();
+ return supp;
+}
+
int tipc_bearer_mtu(struct net *net, u32 bearer_id)
{
int mtu = 0;
diff --git a/net/tipc/bearer.h b/net/tipc/bearer.h
index 278ff7f..635c908 100644
--- a/net/tipc/bearer.h
+++ b/net/tipc/bearer.h
@@ -60,9 +60,14 @@
#define TIPC_MEDIA_TYPE_IB 2
#define TIPC_MEDIA_TYPE_UDP 3
-/* minimum bearer MTU */
+/* Minimum bearer MTU */
#define TIPC_MIN_BEARER_MTU (MAX_H_SIZE + INT_H_SIZE)
+/* Identifiers for distinguishing between broadcast/multicast and replicast
+ */
+#define TIPC_BROADCAST_SUPPORT 1
+#define TIPC_REPLICAST_SUPPORT 2
+
/**
* struct tipc_media_addr - destination address used by TIPC bearers
* @value: address info (format defined by media)
@@ -210,6 +215,7 @@ int tipc_bearer_setup(void);
void tipc_bearer_cleanup(void);
void tipc_bearer_stop(struct net *net);
int tipc_bearer_mtu(struct net *net, u32 bearer_id);
+bool tipc_bearer_bcast_support(struct net *net, u32 bearer_id);
void tipc_bearer_xmit_skb(struct net *net, u32 bearer_id,
struct sk_buff *skb,
struct tipc_media_addr *dest);
diff --git a/net/tipc/discover.c b/net/tipc/discover.c
index 6b109a8..02462d6 100644
--- a/net/tipc/discover.c
+++ b/net/tipc/discover.c
@@ -169,7 +169,7 @@ void tipc_disc_rcv(struct net *net, struct sk_buff *skb,
/* Send response, if necessary */
if (respond && (mtyp == DSC_REQ_MSG)) {
- rskb = tipc_buf_acquire(MAX_H_SIZE);
+ rskb = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!rskb)
return;
tipc_disc_init_msg(net, rskb, DSC_RESP_MSG, bearer);
@@ -278,7 +278,7 @@ int tipc_disc_create(struct net *net, struct tipc_bearer *b,
req = kmalloc(sizeof(*req), GFP_ATOMIC);
if (!req)
return -ENOMEM;
- req->buf = tipc_buf_acquire(MAX_H_SIZE);
+ req->buf = tipc_buf_acquire(MAX_H_SIZE, GFP_ATOMIC);
if (!req->buf) {
kfree(req);
return -ENOMEM;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index b758ca8..ddd2dd6f 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -515,6 +515,10 @@ bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
if (link_is_bc_sndlink(l))
l->state = LINK_ESTABLISHED;
+ /* Disable replicast if even a single peer doesn't support it */
+ if (link_is_bc_rcvlink(l) && !(peer_caps & TIPC_BCAST_RCAST))
+ tipc_bcast_disable_rcast(net);
+
return true;
}
@@ -1032,11 +1036,17 @@ int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
struct sk_buff_head *inputq)
{
- switch (msg_user(buf_msg(skb))) {
+ struct tipc_msg *hdr = buf_msg(skb);
+
+ switch (msg_user(hdr)) {
case TIPC_LOW_IMPORTANCE:
case TIPC_MEDIUM_IMPORTANCE:
case TIPC_HIGH_IMPORTANCE:
case TIPC_CRITICAL_IMPORTANCE:
+ if (unlikely(msg_type(hdr) == TIPC_MCAST_MSG)) {
+ skb_queue_tail(l->bc_rcvlink->inputq, skb);
+ return true;
+ }
case CONN_MANAGER:
skb_queue_tail(inputq, skb);
return true;
@@ -1384,7 +1394,7 @@ tnl:
msg_set_seqno(hdr, seqno++);
pktlen = msg_size(hdr);
msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
- tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
+ tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE, GFP_ATOMIC);
if (!tnlskb) {
pr_warn("%sunable to send packet\n", link_co_err);
return;
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index a22be50..312ef7d 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -58,12 +58,12 @@ static unsigned int align(unsigned int i)
* NOTE: Headroom is reserved to allow prepending of a data link header.
* There may also be unrequested tailroom present at the buffer's end.
*/
-struct sk_buff *tipc_buf_acquire(u32 size)
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp)
{
struct sk_buff *skb;
unsigned int buf_size = (BUF_HEADROOM + size + 3) & ~3u;
- skb = alloc_skb_fclone(buf_size, GFP_ATOMIC);
+ skb = alloc_skb_fclone(buf_size, gfp);
if (skb) {
skb_reserve(skb, BUF_HEADROOM);
skb_put(skb, size);
@@ -95,7 +95,7 @@ struct sk_buff *tipc_msg_create(uint user, uint type,
struct tipc_msg *msg;
struct sk_buff *buf;
- buf = tipc_buf_acquire(hdr_sz + data_sz);
+ buf = tipc_buf_acquire(hdr_sz + data_sz, GFP_ATOMIC);
if (unlikely(!buf))
return NULL;
@@ -261,7 +261,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
/* No fragmentation needed? */
if (likely(msz <= pktmax)) {
- skb = tipc_buf_acquire(msz);
+ skb = tipc_buf_acquire(msz, GFP_KERNEL);
if (unlikely(!skb))
return -ENOMEM;
skb_orphan(skb);
@@ -282,7 +282,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
msg_set_importance(&pkthdr, msg_importance(mhdr));
/* Prepare first fragment */
- skb = tipc_buf_acquire(pktmax);
+ skb = tipc_buf_acquire(pktmax, GFP_KERNEL);
if (!skb)
return -ENOMEM;
skb_orphan(skb);
@@ -313,7 +313,7 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
pktsz = drem + INT_H_SIZE;
else
pktsz = pktmax;
- skb = tipc_buf_acquire(pktsz);
+ skb = tipc_buf_acquire(pktsz, GFP_KERNEL);
if (!skb) {
rc = -ENOMEM;
goto error;
@@ -448,7 +448,7 @@ bool tipc_msg_make_bundle(struct sk_buff **skb, struct tipc_msg *msg,
if (msz > (max / 2))
return false;
- _skb = tipc_buf_acquire(max);
+ _skb = tipc_buf_acquire(max, GFP_ATOMIC);
if (!_skb)
return false;
@@ -496,7 +496,7 @@ bool tipc_msg_reverse(u32 own_node, struct sk_buff **skb, int err)
/* Never return SHORT header; expand by replacing buffer if necessary */
if (msg_short(hdr)) {
- *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen);
+ *skb = tipc_buf_acquire(BASIC_H_SIZE + dlen, GFP_ATOMIC);
if (!*skb)
goto exit;
memcpy((*skb)->data + BASIC_H_SIZE, msg_data(hdr), dlen);
@@ -607,6 +607,23 @@ error:
return false;
}
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+ struct sk_buff_head *cpy)
+{
+ struct sk_buff *skb, *_skb;
+
+ skb_queue_walk(msg, skb) {
+ _skb = pskb_copy(skb, GFP_ATOMIC);
+ if (!_skb) {
+ __skb_queue_purge(cpy);
+ return false;
+ }
+ msg_set_destnode(buf_msg(_skb), dst);
+ __skb_queue_tail(cpy, _skb);
+ }
+ return true;
+}
+
/* tipc_skb_queue_sorted(); sort pkt into list according to sequence number
* @list: list to be appended to
* @seqno: sequence number of buffer to add
diff --git a/net/tipc/msg.h b/net/tipc/msg.h
index 850ae0e..c843fd2 100644
--- a/net/tipc/msg.h
+++ b/net/tipc/msg.h
@@ -631,14 +631,11 @@ static inline void msg_set_bc_netid(struct tipc_msg *m, u32 id)
static inline u32 msg_link_selector(struct tipc_msg *m)
{
+ if (msg_user(m) == MSG_FRAGMENTER)
+ m = (void *)msg_data(m);
return msg_bits(m, 4, 0, 1);
}
-static inline void msg_set_link_selector(struct tipc_msg *m, u32 n)
-{
- msg_set_bits(m, 4, 0, 1, n);
-}
-
/*
* Word 5
*/
@@ -818,7 +815,7 @@ static inline bool msg_is_reset(struct tipc_msg *hdr)
return (msg_user(hdr) == LINK_PROTOCOL) && (msg_type(hdr) == RESET_MSG);
}
-struct sk_buff *tipc_buf_acquire(u32 size);
+struct sk_buff *tipc_buf_acquire(u32 size, gfp_t gfp);
bool tipc_msg_validate(struct sk_buff *skb);
bool tipc_msg_reverse(u32 own_addr, struct sk_buff **skb, int err);
void tipc_msg_init(u32 own_addr, struct tipc_msg *m, u32 user, u32 type,
@@ -835,6 +832,8 @@ int tipc_msg_build(struct tipc_msg *mhdr, struct msghdr *m,
int offset, int dsz, int mtu, struct sk_buff_head *list);
bool tipc_msg_lookup_dest(struct net *net, struct sk_buff *skb, int *err);
bool tipc_msg_reassemble(struct sk_buff_head *list, struct sk_buff_head *rcvq);
+bool tipc_msg_pskb_copy(u32 dst, struct sk_buff_head *msg,
+ struct sk_buff_head *cpy);
void __tipc_skb_queue_sorted(struct sk_buff_head *list, u16 seqno,
struct sk_buff *skb);
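
tipc_buf_acquire() no longer hard-codes GFP_ATOMIC: the new gfp_t parameter lets tipc_msg_build(), which runs in process context on the send path, allocate with GFP_KERNEL and ride out memory pressure via reclaim, while timer, discovery and tunnel callers keep GFP_ATOMIC. A sketch of the caller-side rule, with a hypothetical context flag:

/* sketch: picking the allocation class for tipc_buf_acquire() */
static struct sk_buff *acquire_for_ctx(u32 size, bool may_sleep)
{
        /* process context (e.g. tipc_msg_build()) may reclaim and sleep;
         * atomic context (timers, rx path, discovery) must not
         */
        return tipc_buf_acquire(size, may_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
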
diff --git a/net/tipc/name_distr.c b/net/tipc/name_distr.c
index c1cfd92..23f8899 100644
--- a/net/tipc/name_distr.c
+++ b/net/tipc/name_distr.c
@@ -69,7 +69,7 @@ static struct sk_buff *named_prepare_buf(struct net *net, u32 type, u32 size,
u32 dest)
{
struct tipc_net *tn = net_generic(net, tipc_net_id);
- struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size);
+ struct sk_buff *buf = tipc_buf_acquire(INT_H_SIZE + size, GFP_ATOMIC);
struct tipc_msg *msg;
if (buf != NULL) {
diff --git a/net/tipc/name_table.c b/net/tipc/name_table.c
index 5a86df1..9be6592 100644
--- a/net/tipc/name_table.c
+++ b/net/tipc/name_table.c
@@ -645,6 +645,39 @@ exit:
return res;
}
+/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
+ * - Creates list of nodes that overlap the given multicast address
+ * - Determines if any node local ports overlap
+ */
+void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
+ u32 upper, u32 domain,
+ struct tipc_nlist *nodes)
+{
+ struct sub_seq *sseq, *stop;
+ struct publication *publ;
+ struct name_info *info;
+ struct name_seq *seq;
+
+ rcu_read_lock();
+ seq = nametbl_find_seq(net, type);
+ if (!seq)
+ goto exit;
+
+ spin_lock_bh(&seq->lock);
+ sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
+ stop = seq->sseqs + seq->first_free;
+ for (; sseq->lower <= upper && sseq != stop; sseq++) {
+ info = sseq->info;
+ list_for_each_entry(publ, &info->zone_list, zone_list) {
+ if (tipc_in_scope(domain, publ->node))
+ tipc_nlist_add(nodes, publ->node);
+ }
+ }
+ spin_unlock_bh(&seq->lock);
+exit:
+ rcu_read_unlock();
+}
+
/*
* tipc_nametbl_publish - add name publication to network name tables
*/
@@ -1022,11 +1055,6 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
return skb->len;
}
-struct u32_item {
- struct list_head list;
- u32 value;
-};
-
bool u32_find(struct list_head *l, u32 value)
{
struct u32_item *item;
diff --git a/net/tipc/name_table.h b/net/tipc/name_table.h
index c89bb3f..6ebdeb1 100644
--- a/net/tipc/name_table.h
+++ b/net/tipc/name_table.h
@@ -39,6 +39,7 @@
struct tipc_subscription;
struct tipc_plist;
+struct tipc_nlist;
/*
* TIPC name types reserved for internal TIPC use (both current and planned)
@@ -100,6 +101,9 @@ int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb);
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance, u32 *node);
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
u32 limit, struct list_head *dports);
+void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
+ u32 upper, u32 domain,
+ struct tipc_nlist *nodes);
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
u32 upper, u32 scope, u32 port_ref,
u32 key);
@@ -116,6 +120,11 @@ void tipc_nametbl_unsubscribe(struct tipc_subscription *s);
int tipc_nametbl_init(struct net *net);
void tipc_nametbl_stop(struct net *net);
+struct u32_item {
+ struct list_head list;
+ u32 value;
+};
+
bool u32_push(struct list_head *l, u32 value);
u32 u32_pop(struct list_head *l);
bool u32_find(struct list_head *l, u32 value);
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 2883f6a..e9295fa 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -263,6 +263,11 @@ static void tipc_node_write_lock(struct tipc_node *n)
write_lock_bh(&n->lock);
}
+static void tipc_node_write_unlock_fast(struct tipc_node *n)
+{
+ write_unlock_bh(&n->lock);
+}
+
static void tipc_node_write_unlock(struct tipc_node *n)
{
struct net *net = n->net;
@@ -417,7 +422,7 @@ void tipc_node_subscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_add_tail(subscr, &n->publ_list);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
@@ -435,7 +440,7 @@ void tipc_node_unsubscribe(struct net *net, struct list_head *subscr, u32 addr)
}
tipc_node_write_lock(n);
list_del_init(subscr);
- tipc_node_write_unlock(n);
+ tipc_node_write_unlock_fast(n);
tipc_node_put(n);
}
@@ -1257,6 +1262,19 @@ void tipc_node_broadcast(struct net *net, struct sk_buff *skb)
kfree_skb(skb);
}
+static void tipc_node_mcast_rcv(struct tipc_node *n)
+{
+ struct tipc_bclink_entry *be = &n->bc_entry;
+
+ /* 'arrvq' is under inputq2's lock protection */
+ spin_lock_bh(&be->inputq2.lock);
+ spin_lock_bh(&be->inputq1.lock);
+ skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
+ spin_unlock_bh(&be->inputq1.lock);
+ spin_unlock_bh(&be->inputq2.lock);
+ tipc_sk_mcast_rcv(n->net, &be->arrvq, &be->inputq2);
+}
+
static void tipc_node_bc_sync_rcv(struct tipc_node *n, struct tipc_msg *hdr,
int bearer_id, struct sk_buff_head *xmitq)
{
@@ -1330,15 +1348,8 @@ static void tipc_node_bc_rcv(struct net *net, struct sk_buff *skb, int bearer_id
if (!skb_queue_empty(&xmitq))
tipc_bearer_xmit(net, bearer_id, &xmitq, &le->maddr);
- /* Deliver. 'arrvq' is under inputq2's lock protection */
- if (!skb_queue_empty(&be->inputq1)) {
- spin_lock_bh(&be->inputq2.lock);
- spin_lock_bh(&be->inputq1.lock);
- skb_queue_splice_tail_init(&be->inputq1, &be->arrvq);
- spin_unlock_bh(&be->inputq1.lock);
- spin_unlock_bh(&be->inputq2.lock);
- tipc_sk_mcast_rcv(net, &be->arrvq, &be->inputq2);
- }
+ if (!skb_queue_empty(&be->inputq1))
+ tipc_node_mcast_rcv(n);
if (rc & TIPC_LINK_DOWN_EVT) {
/* Reception reassembly failure => reset all links to peer */
@@ -1565,6 +1576,9 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b)
if (unlikely(!skb_queue_empty(&n->bc_entry.namedq)))
tipc_named_rcv(net, &n->bc_entry.namedq);
+ if (unlikely(!skb_queue_empty(&n->bc_entry.inputq1)))
+ tipc_node_mcast_rcv(n);
+
if (!skb_queue_empty(&le->inputq))
tipc_sk_rcv(net, &le->inputq);
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 39ef54c..898c229 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -47,11 +47,13 @@
enum {
TIPC_BCAST_SYNCH = (1 << 1),
TIPC_BCAST_STATE_NACK = (1 << 2),
- TIPC_BLOCK_FLOWCTL = (1 << 3)
+ TIPC_BLOCK_FLOWCTL = (1 << 3),
+ TIPC_BCAST_RCAST = (1 << 4)
};
#define TIPC_NODE_CAPABILITIES (TIPC_BCAST_SYNCH | \
TIPC_BCAST_STATE_NACK | \
+ TIPC_BCAST_RCAST | \
TIPC_BLOCK_FLOWCTL)
#define INVALID_BEARER_ID -1
diff --git a/net/tipc/server.c b/net/tipc/server.c
index 215849c..3cd6402 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -86,12 +86,12 @@ struct outqueue_entry {
static void tipc_recv_work(struct work_struct *work);
static void tipc_send_work(struct work_struct *work);
static void tipc_clean_outqueues(struct tipc_conn *con);
-static void tipc_sock_release(struct tipc_conn *con);
static void tipc_conn_kref_release(struct kref *kref)
{
struct tipc_conn *con = container_of(kref, struct tipc_conn, kref);
- struct sockaddr_tipc *saddr = con->server->saddr;
+ struct tipc_server *s = con->server;
+ struct sockaddr_tipc *saddr = s->saddr;
struct socket *sock = con->sock;
struct sock *sk;
@@ -103,9 +103,13 @@ static void tipc_conn_kref_release(struct kref *kref)
}
saddr->scope = -TIPC_NODE_SCOPE;
kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
- tipc_sock_release(con);
sock_release(sock);
con->sock = NULL;
+
+ spin_lock_bh(&s->idr_lock);
+ idr_remove(&s->conn_idr, con->conid);
+ s->idr_in_use--;
+ spin_unlock_bh(&s->idr_lock);
}
tipc_clean_outqueues(con);
@@ -128,8 +132,10 @@ static struct tipc_conn *tipc_conn_lookup(struct tipc_server *s, int conid)
spin_lock_bh(&s->idr_lock);
con = idr_find(&s->conn_idr, conid);
- if (con)
+ if (con && test_bit(CF_CONNECTED, &con->flags))
conn_get(con);
+ else
+ con = NULL;
spin_unlock_bh(&s->idr_lock);
return con;
}
@@ -186,26 +192,15 @@ static void tipc_unregister_callbacks(struct tipc_conn *con)
write_unlock_bh(&sk->sk_callback_lock);
}
-static void tipc_sock_release(struct tipc_conn *con)
-{
- struct tipc_server *s = con->server;
-
- if (con->conid)
- s->tipc_conn_release(con->conid, con->usr_data);
-
- tipc_unregister_callbacks(con);
-}
-
static void tipc_close_conn(struct tipc_conn *con)
{
struct tipc_server *s = con->server;
if (test_and_clear_bit(CF_CONNECTED, &con->flags)) {
+ tipc_unregister_callbacks(con);
- spin_lock_bh(&s->idr_lock);
- idr_remove(&s->conn_idr, con->conid);
- s->idr_in_use--;
- spin_unlock_bh(&s->idr_lock);
+ if (con->conid)
+ s->tipc_conn_release(con->conid, con->usr_data);
/* We shouldn't flush pending works as we may be in the
* thread. In fact the races with pending rx/tx work structs
@@ -458,6 +453,11 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
if (!con)
return -EINVAL;
+ if (!test_bit(CF_CONNECTED, &con->flags)) {
+ conn_put(con);
+ return 0;
+ }
+
e = tipc_alloc_entry(data, len);
if (!e) {
conn_put(con);
@@ -471,12 +471,8 @@ int tipc_conn_sendmsg(struct tipc_server *s, int conid,
list_add_tail(&e->list, &con->outqueue);
spin_unlock_bh(&con->outqueue_lock);
- if (test_bit(CF_CONNECTED, &con->flags)) {
- if (!queue_work(s->send_wq, &con->swork))
- conn_put(con);
- } else {
+ if (!queue_work(s->send_wq, &con->swork))
conn_put(con);
- }
return 0;
}
@@ -500,7 +496,7 @@ static void tipc_send_to_sock(struct tipc_conn *con)
int ret;
spin_lock_bh(&con->outqueue_lock);
- while (1) {
+ while (test_bit(CF_CONNECTED, &con->flags)) {
e = list_entry(con->outqueue.next, struct outqueue_entry,
list);
if ((struct list_head *) e == &con->outqueue)
@@ -623,14 +619,12 @@ int tipc_server_start(struct tipc_server *s)
void tipc_server_stop(struct tipc_server *s)
{
struct tipc_conn *con;
- int total = 0;
int id;
spin_lock_bh(&s->idr_lock);
- for (id = 0; total < s->idr_in_use; id++) {
+ for (id = 0; s->idr_in_use; id++) {
con = idr_find(&s->conn_idr, id);
if (con) {
- total++;
spin_unlock_bh(&s->idr_lock);
tipc_close_conn(con);
spin_lock_bh(&s->idr_lock);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index d2f3539..103d1fd 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -79,6 +79,7 @@ enum {
* @rcv_unacked: # messages read by user, but not yet acked back to peer
* @peer: 'connected' peer for dgram/rdm
* @node: hash table node
+ * @mc_method: cookie for use between socket and broadcast layer
* @rcu: rcu struct for tipc_sock
*/
struct tipc_sock {
@@ -103,6 +104,7 @@ struct tipc_sock {
u16 rcv_win;
struct sockaddr_tipc peer;
struct rhash_head node;
+ struct tipc_mc_method mc_method;
struct rcu_head rcu;
};
@@ -740,32 +742,44 @@ static int tipc_sendmcast(struct socket *sock, struct tipc_name_seq *seq,
struct tipc_msg *hdr = &tsk->phdr;
struct net *net = sock_net(sk);
int mtu = tipc_bcast_get_mtu(net);
+ struct tipc_mc_method *method = &tsk->mc_method;
+ u32 domain = addr_domain(net, TIPC_CLUSTER_SCOPE);
struct sk_buff_head pkts;
+ struct tipc_nlist dsts;
int rc;
+ /* Block or return if any destination link is congested */
rc = tipc_wait_for_cond(sock, &timeout, !tsk->cong_link_cnt);
if (unlikely(rc))
return rc;
+ /* Lookup destination nodes */
+ tipc_nlist_init(&dsts, tipc_own_addr(net));
+ tipc_nametbl_lookup_dst_nodes(net, seq->type, seq->lower,
+ seq->upper, domain, &dsts);
+ if (!dsts.local && !dsts.remote)
+ return -EHOSTUNREACH;
+
+ /* Build message header */
msg_set_type(hdr, TIPC_MCAST_MSG);
+ msg_set_hdr_sz(hdr, MCAST_H_SIZE);
msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
msg_set_destport(hdr, 0);
msg_set_destnode(hdr, 0);
msg_set_nametype(hdr, seq->type);
msg_set_namelower(hdr, seq->lower);
msg_set_nameupper(hdr, seq->upper);
- msg_set_hdr_sz(hdr, MCAST_H_SIZE);
+ /* Build message as chain of buffers */
skb_queue_head_init(&pkts);
rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);
- if (unlikely(rc != dlen))
- return rc;
- rc = tipc_bcast_xmit(net, &pkts);
- if (unlikely(rc == -ELINKCONG)) {
- tsk->cong_link_cnt = 1;
- rc = 0;
- }
+ /* Send message if build was successful */
+ if (unlikely(rc == dlen))
+ rc = tipc_mcast_xmit(net, &pkts, method, &dsts,
+ &tsk->cong_link_cnt);
+
+ tipc_nlist_purge(&dsts);
return rc ? rc : dlen;
}
@@ -2333,18 +2347,29 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
{
struct sock *sk = sock->sk;
struct tipc_sock *tsk = tipc_sk(sk);
- u32 value;
- int res;
+ u32 value = 0;
+ int res = 0;
if ((lvl == IPPROTO_TCP) && (sock->type == SOCK_STREAM))
return 0;
if (lvl != SOL_TIPC)
return -ENOPROTOOPT;
- if (ol < sizeof(value))
- return -EINVAL;
- res = get_user(value, (u32 __user *)ov);
- if (res)
- return res;
+
+ switch (opt) {
+ case TIPC_IMPORTANCE:
+ case TIPC_SRC_DROPPABLE:
+ case TIPC_DEST_DROPPABLE:
+ case TIPC_CONN_TIMEOUT:
+ if (ol < sizeof(value))
+ return -EINVAL;
+ res = get_user(value, (u32 __user *)ov);
+ if (res)
+ return res;
+ break;
+ default:
+ if (ov || ol)
+ return -EINVAL;
+ }
lock_sock(sk);
@@ -2363,7 +2388,14 @@ static int tipc_setsockopt(struct socket *sock, int lvl, int opt,
break;
case TIPC_CONN_TIMEOUT:
tipc_sk(sk)->conn_timeout = value;
- /* no need to set "res", since already 0 at this point */
+ break;
+ case TIPC_MCAST_BROADCAST:
+ tsk->mc_method.rcast = false;
+ tsk->mc_method.mandatory = true;
+ break;
+ case TIPC_MCAST_REPLICAST:
+ tsk->mc_method.rcast = true;
+ tsk->mc_method.mandatory = true;
break;
default:
res = -EINVAL;
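
Note that the two new options are value-less: the reworked option parsing accepts a NULL buffer of length zero for them and rejects anything else. From userspace, pinning one socket's multicasts would look roughly like this (a sketch, assuming the TIPC_MCAST_* constants from this series' uapi header):

#include <sys/socket.h>
#include <linux/tipc.h>

/* sketch: force all multicasts on this socket to replicast */
static int force_replicast(int fd)
{
        /* value-less option: NULL buffer, zero length */
        return setsockopt(fd, SOL_TIPC, TIPC_MCAST_REPLICAST, NULL, 0);
}
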
diff --git a/net/tipc/subscr.c b/net/tipc/subscr.c
index 0dd0224..9d94e65 100644
--- a/net/tipc/subscr.c
+++ b/net/tipc/subscr.c
@@ -54,6 +54,8 @@ struct tipc_subscriber {
static void tipc_subscrp_delete(struct tipc_subscription *sub);
static void tipc_subscrb_put(struct tipc_subscriber *subscriber);
+static void tipc_subscrp_put(struct tipc_subscription *subscription);
+static void tipc_subscrp_get(struct tipc_subscription *subscription);
/**
* htohl - convert value to endianness used by destination
@@ -123,6 +125,7 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
{
struct tipc_name_seq seq;
+ tipc_subscrp_get(sub);
tipc_subscrp_convert_seq(&sub->evt.s.seq, sub->swap, &seq);
if (!tipc_subscrp_check_overlap(&seq, found_lower, found_upper))
return;
@@ -132,30 +135,23 @@ void tipc_subscrp_report_overlap(struct tipc_subscription *sub, u32 found_lower,
tipc_subscrp_send_event(sub, found_lower, found_upper, event, port_ref,
node);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_timeout(unsigned long data)
{
struct tipc_subscription *sub = (struct tipc_subscription *)data;
- struct tipc_subscriber *subscriber = sub->subscriber;
/* Notify subscriber of timeout */
tipc_subscrp_send_event(sub, sub->evt.s.seq.lower, sub->evt.s.seq.upper,
TIPC_SUBSCR_TIMEOUT, 0, 0);
- spin_lock_bh(&subscriber->lock);
- tipc_subscrp_delete(sub);
- spin_unlock_bh(&subscriber->lock);
-
- tipc_subscrb_put(subscriber);
+ tipc_subscrp_put(sub);
}
static void tipc_subscrb_kref_release(struct kref *kref)
{
- struct tipc_subscriber *subcriber = container_of(kref,
- struct tipc_subscriber, kref);
-
- kfree(subcriber);
+ kfree(container_of(kref, struct tipc_subscriber, kref));
}
static void tipc_subscrb_put(struct tipc_subscriber *subscriber)
@@ -168,6 +164,59 @@ static void tipc_subscrb_get(struct tipc_subscriber *subscriber)
kref_get(&subscriber->kref);
}
+static void tipc_subscrp_kref_release(struct kref *kref)
+{
+ struct tipc_subscription *sub = container_of(kref,
+ struct tipc_subscription,
+ kref);
+ struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ struct tipc_subscriber *subscriber = sub->subscriber;
+
+ spin_lock_bh(&subscriber->lock);
+ tipc_nametbl_unsubscribe(sub);
+ list_del(&sub->subscrp_list);
+ atomic_dec(&tn->subscription_count);
+ spin_unlock_bh(&subscriber->lock);
+ kfree(sub);
+ tipc_subscrb_put(subscriber);
+}
+
+static void tipc_subscrp_put(struct tipc_subscription *subscription)
+{
+ kref_put(&subscription->kref, tipc_subscrp_kref_release);
+}
+
+static void tipc_subscrp_get(struct tipc_subscription *subscription)
+{
+ kref_get(&subscription->kref);
+}
+
+/* tipc_subscrb_subscrp_delete - delete a specific subscription or all
+ * subscriptions for a given subscriber.
+ */
+static void tipc_subscrb_subscrp_delete(struct tipc_subscriber *subscriber,
+ struct tipc_subscr *s)
+{
+ struct list_head *subscription_list = &subscriber->subscrp_list;
+ struct tipc_subscription *sub, *temp;
+
+ spin_lock_bh(&subscriber->lock);
+ list_for_each_entry_safe(sub, temp, subscription_list, subscrp_list) {
+ if (s && memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr)))
+ continue;
+
+ tipc_subscrp_get(sub);
+ spin_unlock_bh(&subscriber->lock);
+ tipc_subscrp_delete(sub);
+ tipc_subscrp_put(sub);
+ spin_lock_bh(&subscriber->lock);
+
+ if (s)
+ break;
+ }
+ spin_unlock_bh(&subscriber->lock);
+}
+
static struct tipc_subscriber *tipc_subscrb_create(int conid)
{
struct tipc_subscriber *subscriber;
@@ -177,8 +226,8 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
pr_warn("Subscriber rejected, no memory\n");
return NULL;
}
- kref_init(&subscriber->kref);
INIT_LIST_HEAD(&subscriber->subscrp_list);
+ kref_init(&subscriber->kref);
subscriber->conid = conid;
spin_lock_init(&subscriber->lock);
@@ -187,55 +236,22 @@ static struct tipc_subscriber *tipc_subscrb_create(int conid)
static void tipc_subscrb_delete(struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Destroy any existing subscriptions for subscriber */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) || del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- }
- spin_unlock_bh(&subscriber->lock);
-
+ tipc_subscrb_subscrp_delete(subscriber, NULL);
tipc_subscrb_put(subscriber);
}
static void tipc_subscrp_delete(struct tipc_subscription *sub)
{
- struct tipc_net *tn = net_generic(sub->net, tipc_net_id);
+ u32 timeout = htohl(sub->evt.s.timeout, sub->swap);
- tipc_nametbl_unsubscribe(sub);
- list_del(&sub->subscrp_list);
- kfree(sub);
- atomic_dec(&tn->subscription_count);
+ if (timeout == TIPC_WAIT_FOREVER || del_timer(&sub->timer))
+ tipc_subscrp_put(sub);
}
static void tipc_subscrp_cancel(struct tipc_subscr *s,
struct tipc_subscriber *subscriber)
{
- struct tipc_subscription *sub, *temp;
- u32 timeout;
-
- spin_lock_bh(&subscriber->lock);
- /* Find first matching subscription, exit if not found */
- list_for_each_entry_safe(sub, temp, &subscriber->subscrp_list,
- subscrp_list) {
- if (!memcmp(s, &sub->evt.s, sizeof(struct tipc_subscr))) {
- timeout = htohl(sub->evt.s.timeout, sub->swap);
- if ((timeout == TIPC_WAIT_FOREVER) ||
- del_timer(&sub->timer)) {
- tipc_subscrp_delete(sub);
- tipc_subscrb_put(subscriber);
- }
- break;
- }
- }
- spin_unlock_bh(&subscriber->lock);
+ tipc_subscrb_subscrp_delete(subscriber, s);
}
static struct tipc_subscription *tipc_subscrp_create(struct net *net,
@@ -272,6 +288,7 @@ static struct tipc_subscription *tipc_subscrp_create(struct net *net,
sub->swap = swap;
memcpy(&sub->evt.s, s, sizeof(*s));
atomic_inc(&tn->subscription_count);
+ kref_init(&sub->kref);
return sub;
}
@@ -288,17 +305,16 @@ static void tipc_subscrp_subscribe(struct net *net, struct tipc_subscr *s,
spin_lock_bh(&subscriber->lock);
list_add(&sub->subscrp_list, &subscriber->subscrp_list);
- tipc_subscrb_get(subscriber);
sub->subscriber = subscriber;
tipc_nametbl_subscribe(sub);
+ tipc_subscrb_get(subscriber);
spin_unlock_bh(&subscriber->lock);
+ setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
timeout = htohl(sub->evt.s.timeout, swap);
- if (timeout == TIPC_WAIT_FOREVER)
- return;
- setup_timer(&sub->timer, tipc_subscrp_timeout, (unsigned long)sub);
- mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
+ if (timeout != TIPC_WAIT_FOREVER)
+ mod_timer(&sub->timer, jiffies + msecs_to_jiffies(timeout));
}
/* Handle one termination request for the subscriber */
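
The subscr.c rework converts subscription lifetime to a kref: creation does kref_init(), every user (overlap reporting, the timeout handler, the delete paths) brackets its access with tipc_subscrp_get()/tipc_subscrp_put(), and the release callback performs the unlink and kfree(). A minimal sketch of that shape, with hypothetical names:

#include <linux/kref.h>
#include <linux/slab.h>

struct my_sub {
        struct kref kref;
        /* ...payload... */
};

static void my_sub_release(struct kref *kref)
{
        struct my_sub *s = container_of(kref, struct my_sub, kref);

        /* unlink from any lists here, then free */
        kfree(s);
}

static void my_sub_put(struct my_sub *s)
{
        kref_put(&s->kref, my_sub_release);
}

/* every code path that dereferences 's' holds its own reference */
static void my_sub_use(struct my_sub *s)
{
        kref_get(&s->kref);
        /* ...safe to touch *s here... */
        my_sub_put(s);
}
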
diff --git a/net/tipc/subscr.h b/net/tipc/subscr.h
index be60103..ffdc214 100644
--- a/net/tipc/subscr.h
+++ b/net/tipc/subscr.h
@@ -57,6 +57,7 @@ struct tipc_subscriber;
* @evt: template for events generated by subscription
*/
struct tipc_subscription {
+ struct kref kref;
struct tipc_subscriber *subscriber;
struct net *net;
struct timer_list timer;
diff --git a/net/tipc/udp_media.c b/net/tipc/udp_media.c
index b58dc95..46061cf 100644
--- a/net/tipc/udp_media.c
+++ b/net/tipc/udp_media.c
@@ -113,7 +113,7 @@ static void tipc_udp_media_addr_set(struct tipc_media_addr *addr,
memcpy(addr->value, ua, sizeof(struct udp_media_addr));
if (tipc_udp_is_mcast_addr(ua))
- addr->broadcast = 1;
+ addr->broadcast = TIPC_BROADCAST_SUPPORT;
}
/* tipc_udp_addr2str - convert ip/udp address to string */
@@ -229,7 +229,7 @@ static int tipc_udp_send_msg(struct net *net, struct sk_buff *skb,
goto out;
}
- if (!addr->broadcast || list_empty(&ub->rcast.list))
+ if (addr->broadcast != TIPC_REPLICAST_SUPPORT)
return tipc_udp_xmit(net, skb, ub, src, dst);
/* Replicast, send an skb to each configured IP address */
@@ -296,7 +296,7 @@ static int tipc_udp_rcast_add(struct tipc_bearer *b,
else if (ntohs(addr->proto) == ETH_P_IPV6)
pr_info("New replicast peer: %pI6\n", &rcast->addr.ipv6);
#endif
-
+ b->bcast_addr.broadcast = TIPC_REPLICAST_SUPPORT;
list_add_rcu(&rcast->list, &ub->rcast.list);
return 0;
}
@@ -681,7 +681,7 @@ static int tipc_udp_enable(struct net *net, struct tipc_bearer *b,
goto err;
b->bcast_addr.media_id = TIPC_MEDIA_TYPE_UDP;
- b->bcast_addr.broadcast = 1;
+ b->bcast_addr.broadcast = TIPC_BROADCAST_SUPPORT;
rcu_assign_pointer(b->media_ptr, ub);
rcu_assign_pointer(ub->bearer, b);
tipc_udp_media_addr_set(&b->addr, &local);
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 127656e..cef7987 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -995,6 +995,7 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
unsigned int hash;
struct unix_address *addr;
struct hlist_head *list;
+ struct path path = { NULL, NULL };
err = -EINVAL;
if (sunaddr->sun_family != AF_UNIX)
@@ -1010,9 +1011,20 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
goto out;
addr_len = err;
+ if (sun_path[0]) {
+ umode_t mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current_umask());
+ err = unix_mknod(sun_path, mode, &path);
+ if (err) {
+ if (err == -EEXIST)
+ err = -EADDRINUSE;
+ goto out;
+ }
+ }
+
err = mutex_lock_interruptible(&u->bindlock);
if (err)
- goto out;
+ goto out_put;
err = -EINVAL;
if (u->addr)
@@ -1029,16 +1041,6 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
atomic_set(&addr->refcnt, 1);
if (sun_path[0]) {
- struct path path;
- umode_t mode = S_IFSOCK |
- (SOCK_INODE(sock)->i_mode & ~current_umask());
- err = unix_mknod(sun_path, mode, &path);
- if (err) {
- if (err == -EEXIST)
- err = -EADDRINUSE;
- unix_release_addr(addr);
- goto out_up;
- }
addr->hash = UNIX_HASH_SIZE;
hash = d_backing_inode(path.dentry)->i_ino & (UNIX_HASH_SIZE - 1);
spin_lock(&unix_table_lock);
@@ -1065,6 +1067,9 @@ out_unlock:
spin_unlock(&unix_table_lock);
out_up:
mutex_unlock(&u->bindlock);
+out_put:
+ if (err)
+ path_put(&path);
out:
return err;
}
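
unix_bind() now performs the filesystem-visible unix_mknod() before taking u->bindlock and undoes it with path_put() on any later failure, rather than creating the node while holding the lock -- the old ordering nested filesystem locking inside the socket's bindlock, presumably a lock-order hazard. A hedged sketch of the create-outside, publish-inside pattern (fragment of a bind-like function; helper names hypothetical):

/* sketch: create first, publish under the lock, undo on failure */
struct path path = { NULL, NULL };
int err;

err = create_node(&path);       /* takes fs locks; bindlock not held */
if (err)
        goto out;
err = mutex_lock_interruptible(&u->bindlock);
if (err)
        goto out_put;
/* ...publish 'path' as the socket's on-disk address... */
mutex_unlock(&u->bindlock);
out_put:
if (err)
        path_put(&path);        /* release the created node on failure */
out:
return err;
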
diff --git a/net/wireless/Makefile b/net/wireless/Makefile
index 816c933..d06e501 100644
--- a/net/wireless/Makefile
+++ b/net/wireless/Makefile
@@ -11,6 +11,7 @@ obj-$(CONFIG_WEXT_PRIV) += wext-priv.o
cfg80211-y += core.o sysfs.o radiotap.o util.o reg.o scan.o nl80211.o
cfg80211-y += mlme.o ibss.o sme.o chan.o ethtool.o mesh.o ap.o trace.o ocb.o
+cfg80211-$(CONFIG_OF) += of.o
cfg80211-$(CONFIG_CFG80211_DEBUGFS) += debugfs.o
cfg80211-$(CONFIG_CFG80211_WEXT) += wext-compat.o wext-sme.o
cfg80211-$(CONFIG_CFG80211_INTERNAL_REGDB) += regdb.o
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 158c59e..903fc419 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -1142,6 +1142,8 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
wdev->iftype == NL80211_IFTYPE_ADHOC) && !wdev->use_4addr)
dev->priv_flags |= IFF_DONT_BRIDGE;
+ INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);
+
nl80211_notify_iface(rdev, wdev, NL80211_CMD_NEW_INTERFACE);
break;
case NETDEV_GOING_DOWN:
@@ -1230,6 +1232,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,
#ifdef CONFIG_CFG80211_WEXT
kzfree(wdev->wext.keys);
#endif
+ flush_work(&wdev->disconnect_wk);
}
/*
* synchronise (so that we won't find this netdev
diff --git a/net/wireless/core.h b/net/wireless/core.h
index af6e023..58ca206 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -228,6 +228,7 @@ struct cfg80211_event {
size_t resp_ie_len;
struct cfg80211_bss *bss;
int status; /* -1 = failed; 0..65535 = status code */
+ enum nl80211_timeout_reason timeout_reason;
} cr;
struct {
const u8 *req_ie;
@@ -388,7 +389,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
int status, bool wextev,
- struct cfg80211_bss *bss);
+ struct cfg80211_bss *bss,
+ enum nl80211_timeout_reason timeout_reason);
void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
size_t ie_len, u16 reason, bool from_ap);
int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
@@ -400,6 +402,7 @@ void __cfg80211_roamed(struct wireless_dev *wdev,
const u8 *resp_ie, size_t resp_ie_len);
int cfg80211_mgd_wext_connect(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
+void cfg80211_autodisconnect_wk(struct work_struct *work);
/* SME implementation */
void cfg80211_conn_work(struct work_struct *work);
@@ -430,6 +433,9 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
void cfg80211_process_wdev_events(struct wireless_dev *wdev);
+bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+ u32 center_freq_khz, u32 bw_khz);
+
/**
* cfg80211_chandef_dfs_usable - checks if chandef is DFS usable
* @wiphy: the wiphy to validate against
diff --git a/net/wireless/mlme.c b/net/wireless/mlme.c
index 4646cf5..22b3d99 100644
--- a/net/wireless/mlme.c
+++ b/net/wireless/mlme.c
@@ -48,7 +48,8 @@ void cfg80211_rx_assoc_resp(struct net_device *dev, struct cfg80211_bss *bss,
/* update current_bss etc., consumes the bss reference */
__cfg80211_connect_result(dev, mgmt->bssid, NULL, 0, ie, len - ieoffs,
status_code,
- status_code == WLAN_STATUS_SUCCESS, bss);
+ status_code == WLAN_STATUS_SUCCESS, bss,
+ NL80211_TIMEOUT_UNSPECIFIED);
}
EXPORT_SYMBOL(cfg80211_rx_assoc_resp);
@@ -345,6 +346,11 @@ int cfg80211_mlme_deauth(struct cfg80211_registered_device *rdev,
!ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
return 0;
+ if (ether_addr_equal(wdev->disconnect_bssid, bssid) ||
+ (wdev->current_bss &&
+ ether_addr_equal(wdev->current_bss->pub.bssid, bssid)))
+ wdev->conn_owner_nlportid = 0;
+
return rdev_deauth(rdev, dev, &req);
}
@@ -657,8 +663,25 @@ int cfg80211_mlme_mgmt_tx(struct cfg80211_registered_device *rdev,
return err;
}
- if (!ether_addr_equal(mgmt->sa, wdev_address(wdev)))
- return -EINVAL;
+ if (!ether_addr_equal(mgmt->sa, wdev_address(wdev))) {
+ /* Allow random TA to be used with Public Action frames if the
+ * driver has indicated support for this. Otherwise, only allow
+ * the local address to be used.
+ */
+ if (!ieee80211_is_action(mgmt->frame_control) ||
+ mgmt->u.action.category != WLAN_CATEGORY_PUBLIC)
+ return -EINVAL;
+ if (!wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA))
+ return -EINVAL;
+ if (wdev->current_bss &&
+ !wiphy_ext_feature_isset(
+ &rdev->wiphy,
+ NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED))
+ return -EINVAL;
+ }
/* Transmit the Action frame as requested by user space */
return rdev_mgmt_tx(rdev, wdev, params, cookie);
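
The two feature checks above split the random-TA case in two: using a random transmit address while not associated requires NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA, while doing so during an association requires the _CONNECTED variant. A driver opting in would advertise the flags at wiphy setup time, roughly as follows (a sketch; the exact placement in a driver is assumed):

	/* sketch: advertise random-TA support before wiphy_register() */
	wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA);
	wiphy_ext_feature_set(wiphy,
			      NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED);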
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ef5eff93..63dfa60 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -405,6 +405,11 @@ static const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_FILS_NONCES] = { .len = 2 * FILS_NONCE_LEN },
[NL80211_ATTR_MULTICAST_TO_UNICAST_ENABLED] = { .type = NLA_FLAG, },
[NL80211_ATTR_BSSID] = { .len = ETH_ALEN },
+ [NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] = { .type = NLA_S8 },
+ [NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST] = {
+ .len = sizeof(struct nl80211_bss_select_rssi_adjust)
+ },
+ [NL80211_ATTR_TIMEOUT_REASON] = { .type = NLA_U32 },
};
/* policy for the key attributes */
@@ -4615,6 +4620,15 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
break;
}
+ /*
+ * Older kernel versions ignored this attribute entirely, so don't
+ * reject attempts to update it but mark it as unused instead so the
+ * driver won't look at the data.
+ */
+ if (statype != CFG80211_STA_AP_CLIENT_UNASSOC &&
+ statype != CFG80211_STA_TDLS_PEER_SETUP)
+ params->opmode_notif_used = false;
+
return 0;
}
EXPORT_SYMBOL(cfg80211_check_station_change);
@@ -4854,6 +4868,12 @@ static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
params.local_pm = pm;
}
+ if (info->attrs[NL80211_ATTR_OPMODE_NOTIF]) {
+ params.opmode_notif_used = true;
+ params.opmode_notif =
+ nla_get_u8(info->attrs[NL80211_ATTR_OPMODE_NOTIF]);
+ }
+
/* Include parameters for TDLS peer (will check later) */
err = nl80211_set_station_tdls(info, &params);
if (err)
@@ -6775,13 +6795,10 @@ nl80211_parse_sched_scan_plans(struct wiphy *wiphy, int n_plans,
/*
* If scan plans are not specified,
- * %NL80211_ATTR_SCHED_SCAN_INTERVAL must be specified. In this
+ * %NL80211_ATTR_SCHED_SCAN_INTERVAL will be specified. In this
* case one scan plan will be set with the specified scan
* interval and infinite number of iterations.
*/
- if (!attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
- return -EINVAL;
-
interval = nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
if (!interval)
return -EINVAL;
@@ -6953,6 +6970,12 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
if (!n_plans || n_plans > wiphy->max_sched_scan_plans)
return ERR_PTR(-EINVAL);
+ if (!wiphy_ext_feature_isset(
+ wiphy, NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI) &&
+ (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI] ||
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]))
+ return ERR_PTR(-EINVAL);
+
request = kzalloc(sizeof(*request)
+ sizeof(*request->ssids) * n_ssids
+ sizeof(*request->match_sets) * n_match_sets
@@ -7159,6 +7182,26 @@ nl80211_parse_sched_scan(struct wiphy *wiphy, struct wireless_dev *wdev,
request->delay =
nla_get_u32(attrs[NL80211_ATTR_SCHED_SCAN_DELAY]);
+ if (attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]) {
+ request->relative_rssi = nla_get_s8(
+ attrs[NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI]);
+ request->relative_rssi_set = true;
+ }
+
+ if (request->relative_rssi_set &&
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]) {
+ struct nl80211_bss_select_rssi_adjust *rssi_adjust;
+
+ rssi_adjust = nla_data(
+ attrs[NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST]);
+ request->rssi_adjust.band = rssi_adjust->band;
+ request->rssi_adjust.delta = rssi_adjust->delta;
+ if (!is_band_valid(wiphy, request->rssi_adjust.band)) {
+ err = -EINVAL;
+ goto out_free;
+ }
+ }
+
err = nl80211_parse_sched_scan_plans(wiphy, n_plans, request, attrs);
if (err)
goto out_free;
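
NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST is parsed above as a raw binary blob (its policy entry sets .len = sizeof(struct nl80211_bss_select_rssi_adjust)), so userspace fills that uapi struct directly rather than nesting attributes. A minimal sketch of the two new attributes, assuming a libnl struct nl_msg *msg already prepared for NL80211_CMD_START_SCHED_SCAN, with illustrative values:

	int8_t relative_rssi = 10;	/* illustrative threshold, in dB */
	struct nl80211_bss_select_rssi_adjust adj = {
		.band = NL80211_BAND_2GHZ,
		.delta = 5,		/* illustrative band compensation */
	};

	nla_put(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
		sizeof(relative_rssi), &relative_rssi);
	nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST, sizeof(adj), &adj);

Per the parsing code above, the adjust attribute is only consulted when the relative-RSSI attribute is present, and both are rejected unless the driver advertises NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI.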
@@ -8053,8 +8096,17 @@ static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
err = nl80211_crypto_settings(rdev, info, &req.crypto, 1);
if (!err) {
wdev_lock(dev->ieee80211_ptr);
+
err = cfg80211_mlme_assoc(rdev, dev, chan, bssid,
ssid, ssid_len, &req);
+
+ if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+ dev->ieee80211_ptr->conn_owner_nlportid =
+ info->snd_portid;
+ memcpy(dev->ieee80211_ptr->disconnect_bssid,
+ bssid, ETH_ALEN);
+ }
+
wdev_unlock(dev->ieee80211_ptr);
}
@@ -8773,11 +8825,24 @@ static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
}
wdev_lock(dev->ieee80211_ptr);
+
err = cfg80211_connect(rdev, dev, &connect, connkeys,
connect.prev_bssid);
- wdev_unlock(dev->ieee80211_ptr);
if (err)
kzfree(connkeys);
+
+ if (!err && info->attrs[NL80211_ATTR_SOCKET_OWNER]) {
+ dev->ieee80211_ptr->conn_owner_nlportid = info->snd_portid;
+ if (connect.bssid)
+ memcpy(dev->ieee80211_ptr->disconnect_bssid,
+ connect.bssid, ETH_ALEN);
+ else
+ memset(dev->ieee80211_ptr->disconnect_bssid,
+ 0, ETH_ALEN);
+ }
+
+ wdev_unlock(dev->ieee80211_ptr);
+
return err;
}
@@ -9673,6 +9738,20 @@ static int nl80211_send_wowlan_nd(struct sk_buff *msg,
if (nla_put_u32(msg, NL80211_ATTR_SCHED_SCAN_DELAY, req->delay))
return -ENOBUFS;
+ if (req->relative_rssi_set) {
+ struct nl80211_bss_select_rssi_adjust rssi_adjust;
+
+ if (nla_put_s8(msg, NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI,
+ req->relative_rssi))
+ return -ENOBUFS;
+
+ rssi_adjust.band = req->rssi_adjust.band;
+ rssi_adjust.delta = req->rssi_adjust.delta;
+ if (nla_put(msg, NL80211_ATTR_SCHED_SCAN_RSSI_ADJUST,
+ sizeof(rssi_adjust), &rssi_adjust))
+ return -ENOBUFS;
+ }
+
freqs = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
if (!freqs)
return -ENOBUFS;
@@ -11807,9 +11886,6 @@ static int nl80211_set_multicast_to_unicast(struct sk_buff *skb,
const struct nlattr *nla;
bool enabled;
- if (netif_running(dev))
- return -EBUSY;
-
if (!rdev->ops->set_multicast_to_unicast)
return -EOPNOTSUPP;
@@ -12810,7 +12886,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,
return -ENOBUFS;
}
-static int nl80211_send_scan_msg(struct sk_buff *msg,
+static int nl80211_prep_scan_msg(struct sk_buff *msg,
struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
u32 portid, u32 seq, int flags,
@@ -12841,7 +12917,7 @@ static int nl80211_send_scan_msg(struct sk_buff *msg,
}
static int
-nl80211_send_sched_scan_msg(struct sk_buff *msg,
+nl80211_prep_sched_scan_msg(struct sk_buff *msg,
struct cfg80211_registered_device *rdev,
struct net_device *netdev,
u32 portid, u32 seq, int flags, u32 cmd)
@@ -12873,7 +12949,7 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
+ if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
NL80211_CMD_TRIGGER_SCAN) < 0) {
nlmsg_free(msg);
return;
@@ -12892,7 +12968,7 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
if (!msg)
return NULL;
- if (nl80211_send_scan_msg(msg, rdev, wdev, 0, 0, 0,
+ if (nl80211_prep_scan_msg(msg, rdev, wdev, 0, 0, 0,
aborted ? NL80211_CMD_SCAN_ABORTED :
NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
nlmsg_free(msg);
@@ -12902,31 +12978,13 @@ struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
return msg;
}
-void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
- struct sk_buff *msg)
-{
- if (!msg)
- return;
-
- genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
- NL80211_MCGRP_SCAN, GFP_KERNEL);
-}
-
-void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
- struct net_device *netdev)
+/* send message created by nl80211_build_scan_msg() */
+void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg)
{
- struct sk_buff *msg;
-
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
if (!msg)
return;
- if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
- NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
- nlmsg_free(msg);
- return;
- }
-
genlmsg_multicast_netns(&nl80211_fam, wiphy_net(&rdev->wiphy), msg, 0,
NL80211_MCGRP_SCAN, GFP_KERNEL);
}
@@ -12940,7 +12998,7 @@ void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
if (!msg)
return;
- if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
+ if (nl80211_prep_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
nlmsg_free(msg);
return;
}
@@ -13042,7 +13100,7 @@ static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -13189,12 +13247,14 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- int status, gfp_t gfp)
+ int status,
+ enum nl80211_timeout_reason timeout_reason,
+ gfp_t gfp)
{
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13210,7 +13270,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
nla_put_u16(msg, NL80211_ATTR_STATUS_CODE,
status < 0 ? WLAN_STATUS_UNSPECIFIED_FAILURE :
status) ||
- (status < 0 && nla_put_flag(msg, NL80211_ATTR_TIMED_OUT)) ||
+ (status < 0 &&
+ (nla_put_flag(msg, NL80211_ATTR_TIMED_OUT) ||
+ nla_put_u32(msg, NL80211_ATTR_TIMEOUT_REASON, timeout_reason))) ||
(req_ie &&
nla_put(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie)) ||
(resp_ie &&
@@ -13236,7 +13298,7 @@ void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + req_ie_len + resp_ie_len, gfp);
if (!msg)
return;
@@ -13273,7 +13335,7 @@ void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ie_len, GFP_KERNEL);
if (!msg)
return;
@@ -13349,7 +13411,7 @@ void cfg80211_notify_new_peer_candidate(struct net_device *dev, const u8 *addr,
trace_cfg80211_notify_new_peer_candidate(dev, addr);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + ie_len, gfp);
if (!msg)
return;
@@ -13720,7 +13782,7 @@ int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
struct sk_buff *msg;
void *hdr;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return -ENOMEM;
@@ -13764,7 +13826,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
trace_cfg80211_mgmt_tx_status(wdev, cookie, ack);
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
+ msg = nlmsg_new(100 + len, gfp);
if (!msg)
return;
@@ -14519,6 +14581,8 @@ static int nl80211_netlink_notify(struct notifier_block * nb,
if (wdev->owner_nlportid == notify->portid)
schedule_destroy_work = true;
+ else if (wdev->conn_owner_nlportid == notify->portid)
+ schedule_work(&wdev->disconnect_wk);
}
spin_lock_bh(&rdev->beacon_registrations_lock);
@@ -14573,7 +14637,7 @@ void cfg80211_ft_event(struct net_device *netdev,
if (!ft_event->target_ap)
return;
- msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
+ msg = nlmsg_new(100 + ft_event->ric_ies_len, GFP_KERNEL);
if (!msg)
return;
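
Taken together, the nl80211 changes in this file let a connect or associate request carry NL80211_ATTR_SOCKET_OWNER: conn_owner_nlportid records the requesting socket's port, and nl80211_netlink_notify() schedules wdev->disconnect_wk when that port disappears. A hypothetical userspace sketch using libnl-3 (sock, family, ifindex, ssid and ssid_len are placeholders):

	struct nl_msg *msg = nlmsg_alloc();

	genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
		    NL80211_CMD_CONNECT, 0);
	nla_put_u32(msg, NL80211_ATTR_IFINDEX, ifindex);
	nla_put(msg, NL80211_ATTR_SSID, ssid_len, ssid);
	/* ask the kernel to tear the connection down if this socket closes */
	nla_put_flag(msg, NL80211_ATTR_SOCKET_OWNER);
	nl_send_auto(sock, msg);
	nlmsg_free(msg);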
diff --git a/net/wireless/nl80211.h b/net/wireless/nl80211.h
index 7e3821d..e488dca 100644
--- a/net/wireless/nl80211.h
+++ b/net/wireless/nl80211.h
@@ -14,12 +14,10 @@ void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev);
struct sk_buff *nl80211_build_scan_msg(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev, bool aborted);
-void nl80211_send_scan_result(struct cfg80211_registered_device *rdev,
- struct sk_buff *msg);
+void nl80211_send_scan_msg(struct cfg80211_registered_device *rdev,
+ struct sk_buff *msg);
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
struct net_device *netdev, u32 cmd);
-void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
- struct net_device *netdev);
void nl80211_common_reg_change_event(enum nl80211_commands cmd_id,
struct regulatory_request *request);
@@ -58,7 +56,9 @@ void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
- int status, gfp_t gfp);
+ int status,
+ enum nl80211_timeout_reason timeout_reason,
+ gfp_t gfp);
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
struct net_device *netdev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
diff --git a/net/wireless/of.c b/net/wireless/of.c
new file mode 100644
index 0000000..de221f0
--- /dev/null
+++ b/net/wireless/of.c
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2017 Rafał Miłecki <rafal@milecki.pl>
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/of.h>
+#include <net/cfg80211.h>
+#include "core.h"
+
+static bool wiphy_freq_limits_valid_chan(struct wiphy *wiphy,
+ struct ieee80211_freq_range *freq_limits,
+ unsigned int n_freq_limits,
+ struct ieee80211_channel *chan)
+{
+ u32 bw = MHZ_TO_KHZ(20);
+ int i;
+
+ for (i = 0; i < n_freq_limits; i++) {
+ struct ieee80211_freq_range *limit = &freq_limits[i];
+
+ if (cfg80211_does_bw_fit_range(limit,
+ MHZ_TO_KHZ(chan->center_freq),
+ bw))
+ return true;
+ }
+
+ return false;
+}
+
+static void wiphy_freq_limits_apply(struct wiphy *wiphy,
+ struct ieee80211_freq_range *freq_limits,
+ unsigned int n_freq_limits)
+{
+ enum nl80211_band band;
+ int i;
+
+ if (WARN_ON(!n_freq_limits))
+ return;
+
+ for (band = 0; band < NUM_NL80211_BANDS; band++) {
+ struct ieee80211_supported_band *sband = wiphy->bands[band];
+
+ if (!sband)
+ continue;
+
+ for (i = 0; i < sband->n_channels; i++) {
+ struct ieee80211_channel *chan = &sband->channels[i];
+
+ if (chan->flags & IEEE80211_CHAN_DISABLED)
+ continue;
+
+ if (!wiphy_freq_limits_valid_chan(wiphy, freq_limits,
+ n_freq_limits,
+ chan)) {
+ pr_debug("Disabling freq %d MHz as it's out of OF limits\n",
+ chan->center_freq);
+ chan->flags |= IEEE80211_CHAN_DISABLED;
+ }
+ }
+ }
+}
+
+void wiphy_read_of_freq_limits(struct wiphy *wiphy)
+{
+ struct device *dev = wiphy_dev(wiphy);
+ struct device_node *np;
+ struct property *prop;
+ struct ieee80211_freq_range *freq_limits;
+ unsigned int n_freq_limits;
+ const __be32 *p;
+ int len, i;
+ int err = 0;
+
+ if (!dev)
+ return;
+ np = dev_of_node(dev);
+ if (!np)
+ return;
+
+ prop = of_find_property(np, "ieee80211-freq-limit", &len);
+ if (!prop)
+ return;
+
+ if (!len || len % sizeof(u32) || len / sizeof(u32) % 2) {
+ dev_err(dev, "ieee80211-freq-limit wrong format");
+ return;
+ }
+ n_freq_limits = len / sizeof(u32) / 2;
+
+ freq_limits = kcalloc(n_freq_limits, sizeof(*freq_limits), GFP_KERNEL);
+ if (!freq_limits) {
+ err = -ENOMEM;
+ goto out_kfree;
+ }
+
+ p = NULL;
+ for (i = 0; i < n_freq_limits; i++) {
+ struct ieee80211_freq_range *limit = &freq_limits[i];
+
+ p = of_prop_next_u32(prop, p, &limit->start_freq_khz);
+ if (!p) {
+ err = -EINVAL;
+ goto out_kfree;
+ }
+
+ p = of_prop_next_u32(prop, p, &limit->end_freq_khz);
+ if (!p) {
+ err = -EINVAL;
+ goto out_kfree;
+ }
+
+ if (!limit->start_freq_khz ||
+ !limit->end_freq_khz ||
+ limit->start_freq_khz >= limit->end_freq_khz) {
+ err = -EINVAL;
+ goto out_kfree;
+ }
+ }
+
+ wiphy_freq_limits_apply(wiphy, freq_limits, n_freq_limits);
+
+out_kfree:
+ kfree(freq_limits);
+ if (err)
+ dev_err(dev, "Failed to get limits: %d\n", err);
+}
+EXPORT_SYMBOL(wiphy_read_of_freq_limits);
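
wiphy_read_of_freq_limits() is intended to run before wiphy_register(), once wiphy_dev() resolves to a device whose OF node carries ieee80211-freq-limit (pairs of <start end> frequencies in kHz, validated above as non-zero with start < end). A driver-side sketch; my_probe(), my_cfg80211_ops and struct my_priv are hypothetical names:

	static int my_probe(struct platform_device *pdev)
	{
		struct wiphy *wiphy = wiphy_new(&my_cfg80211_ops,
						sizeof(struct my_priv));

		if (!wiphy)
			return -ENOMEM;

		set_wiphy_dev(wiphy, &pdev->dev);
		/* ... populate wiphy->bands ... */

		/* disable channels outside the DT ieee80211-freq-limit ranges */
		wiphy_read_of_freq_limits(wiphy);

		return wiphy_register(wiphy);
	}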
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 5dbac37..753efcd 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -748,21 +748,6 @@ static bool is_valid_rd(const struct ieee80211_regdomain *rd)
return true;
}
-static bool reg_does_bw_fit(const struct ieee80211_freq_range *freq_range,
- u32 center_freq_khz, u32 bw_khz)
-{
- u32 start_freq_khz, end_freq_khz;
-
- start_freq_khz = center_freq_khz - (bw_khz/2);
- end_freq_khz = center_freq_khz + (bw_khz/2);
-
- if (start_freq_khz >= freq_range->start_freq_khz &&
- end_freq_khz <= freq_range->end_freq_khz)
- return true;
-
- return false;
-}
-
/**
* freq_in_rule_band - tells us if a frequency is in a frequency band
* @freq_range: frequency rule we want to query
@@ -1070,7 +1055,7 @@ freq_reg_info_regd(u32 center_freq,
if (!band_rule_found)
band_rule_found = freq_in_rule_band(fr, center_freq);
- bw_fits = reg_does_bw_fit(fr, center_freq, bw);
+ bw_fits = cfg80211_does_bw_fit_range(fr, center_freq, bw);
if (band_rule_found && bw_fits)
return rr;
@@ -1138,11 +1123,13 @@ static uint32_t reg_rule_to_chan_bw_flags(const struct ieee80211_regdomain *regd
max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
/* If we get a reg_rule we can assume that at least 5Mhz fit */
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(10)))
+ if (!cfg80211_does_bw_fit_range(freq_range,
+ MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(10)))
bw_flags |= IEEE80211_CHAN_NO_10MHZ;
- if (!reg_does_bw_fit(freq_range, MHZ_TO_KHZ(chan->center_freq),
- MHZ_TO_KHZ(20)))
+ if (!cfg80211_does_bw_fit_range(freq_range,
+ MHZ_TO_KHZ(chan->center_freq),
+ MHZ_TO_KHZ(20)))
bw_flags |= IEEE80211_CHAN_NO_20MHZ;
if (max_bandwidth_khz < MHZ_TO_KHZ(10))
diff --git a/net/wireless/scan.c b/net/wireless/scan.c
index 35ad69f..21be56b 100644
--- a/net/wireless/scan.c
+++ b/net/wireless/scan.c
@@ -227,7 +227,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
ASSERT_RTNL();
if (rdev->scan_msg) {
- nl80211_send_scan_result(rdev, rdev->scan_msg);
+ nl80211_send_scan_msg(rdev, rdev->scan_msg);
rdev->scan_msg = NULL;
return;
}
@@ -273,7 +273,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev,
if (!send_message)
rdev->scan_msg = msg;
else
- nl80211_send_scan_result(rdev, msg);
+ nl80211_send_scan_msg(rdev, msg);
}
void __cfg80211_scan_done(struct work_struct *wk)
@@ -321,7 +321,8 @@ void __cfg80211_sched_scan_results(struct work_struct *wk)
spin_unlock_bh(&rdev->bss_lock);
request->scan_start = jiffies;
}
- nl80211_send_sched_scan_results(rdev, request->dev);
+ nl80211_send_sched_scan(rdev, request->dev,
+ NL80211_CMD_SCHED_SCAN_RESULTS);
}
rtnl_unlock();
@@ -1147,7 +1148,7 @@ cfg80211_inform_bss_frame_data(struct wiphy *wiphy,
else
rcu_assign_pointer(tmp.pub.beacon_ies, ies);
rcu_assign_pointer(tmp.pub.ies, ies);
-
+
memcpy(tmp.pub.bssid, mgmt->bssid, ETH_ALEN);
tmp.pub.channel = channel;
tmp.pub.scan_width = data->scan_width;
diff --git a/net/wireless/sme.c b/net/wireless/sme.c
index 5e0d193..b347e63 100644
--- a/net/wireless/sme.c
+++ b/net/wireless/sme.c
@@ -34,10 +34,11 @@ struct cfg80211_conn {
CFG80211_CONN_SCAN_AGAIN,
CFG80211_CONN_AUTHENTICATE_NEXT,
CFG80211_CONN_AUTHENTICATING,
- CFG80211_CONN_AUTH_FAILED,
+ CFG80211_CONN_AUTH_FAILED_TIMEOUT,
CFG80211_CONN_ASSOCIATE_NEXT,
CFG80211_CONN_ASSOCIATING,
CFG80211_CONN_ASSOC_FAILED,
+ CFG80211_CONN_ASSOC_FAILED_TIMEOUT,
CFG80211_CONN_DEAUTH,
CFG80211_CONN_ABANDON,
CFG80211_CONN_CONNECTED,
@@ -140,7 +141,8 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)
return err;
}
-static int cfg80211_conn_do_work(struct wireless_dev *wdev)
+static int cfg80211_conn_do_work(struct wireless_dev *wdev,
+ enum nl80211_timeout_reason *treason)
{
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
struct cfg80211_connect_params *params;
@@ -171,7 +173,8 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
NULL, 0,
params->key, params->key_len,
params->key_idx, NULL, 0);
- case CFG80211_CONN_AUTH_FAILED:
+ case CFG80211_CONN_AUTH_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_AUTH;
return -ENOTCONN;
case CFG80211_CONN_ASSOCIATE_NEXT:
if (WARN_ON(!rdev->ops->assoc))
@@ -198,6 +201,9 @@ static int cfg80211_conn_do_work(struct wireless_dev *wdev)
WLAN_REASON_DEAUTH_LEAVING,
false);
return err;
+ case CFG80211_CONN_ASSOC_FAILED_TIMEOUT:
+ *treason = NL80211_TIMEOUT_ASSOC;
+ /* fall through */
case CFG80211_CONN_ASSOC_FAILED:
cfg80211_mlme_deauth(rdev, wdev->netdev, params->bssid,
NULL, 0,
@@ -223,6 +229,7 @@ void cfg80211_conn_work(struct work_struct *work)
container_of(work, struct cfg80211_registered_device, conn_work);
struct wireless_dev *wdev;
u8 bssid_buf[ETH_ALEN], *bssid = NULL;
+ enum nl80211_timeout_reason treason;
rtnl_lock();
@@ -244,10 +251,12 @@ void cfg80211_conn_work(struct work_struct *work)
memcpy(bssid_buf, wdev->conn->params.bssid, ETH_ALEN);
bssid = bssid_buf;
}
- if (cfg80211_conn_do_work(wdev)) {
+ treason = NL80211_TIMEOUT_UNSPECIFIED;
+ if (cfg80211_conn_do_work(wdev, &treason)) {
__cfg80211_connect_result(
wdev->netdev, bssid,
- NULL, 0, NULL, 0, -1, false, NULL);
+ NULL, 0, NULL, 0, -1, false, NULL,
+ treason);
}
wdev_unlock(wdev);
}
@@ -352,7 +361,8 @@ void cfg80211_sme_rx_auth(struct wireless_dev *wdev, const u8 *buf, size_t len)
} else if (status_code != WLAN_STATUS_SUCCESS) {
__cfg80211_connect_result(wdev->netdev, mgmt->bssid,
NULL, 0, NULL, 0,
- status_code, false, NULL);
+ status_code, false, NULL,
+ NL80211_TIMEOUT_UNSPECIFIED);
} else if (wdev->conn->state == CFG80211_CONN_AUTHENTICATING) {
wdev->conn->state = CFG80211_CONN_ASSOCIATE_NEXT;
schedule_work(&rdev->conn_work);
@@ -400,7 +410,7 @@ void cfg80211_sme_auth_timeout(struct wireless_dev *wdev)
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_AUTH_FAILED;
+ wdev->conn->state = CFG80211_CONN_AUTH_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
@@ -422,7 +432,7 @@ void cfg80211_sme_assoc_timeout(struct wireless_dev *wdev)
if (!wdev->conn)
return;
- wdev->conn->state = CFG80211_CONN_ASSOC_FAILED;
+ wdev->conn->state = CFG80211_CONN_ASSOC_FAILED_TIMEOUT;
schedule_work(&rdev->conn_work);
}
@@ -564,7 +574,9 @@ static int cfg80211_sme_connect(struct wireless_dev *wdev,
/* we're good if we have a matching bss struct */
if (bss) {
- err = cfg80211_conn_do_work(wdev);
+ enum nl80211_timeout_reason treason;
+
+ err = cfg80211_conn_do_work(wdev, &treason);
cfg80211_put_bss(wdev->wiphy, bss);
} else {
/* otherwise we'll need to scan for the AP first */
@@ -661,7 +673,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
const u8 *req_ie, size_t req_ie_len,
const u8 *resp_ie, size_t resp_ie_len,
int status, bool wextev,
- struct cfg80211_bss *bss)
+ struct cfg80211_bss *bss,
+ enum nl80211_timeout_reason timeout_reason)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
const u8 *country_ie;
@@ -680,7 +693,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
nl80211_send_connect_result(wiphy_to_rdev(wdev->wiphy), dev,
bssid, req_ie, req_ie_len,
resp_ie, resp_ie_len,
- status, GFP_KERNEL);
+ status, timeout_reason, GFP_KERNEL);
#ifdef CONFIG_CFG80211_WEXT
if (wextev) {
@@ -727,6 +740,7 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
wdev->ssid_len = 0;
+ wdev->conn_owner_nlportid = 0;
if (bss) {
cfg80211_unhold_bss(bss_from_pub(bss));
cfg80211_put_bss(wdev->wiphy, bss);
@@ -770,7 +784,8 @@ void __cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
struct cfg80211_bss *bss, const u8 *req_ie,
size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp)
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
@@ -810,6 +825,7 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
cfg80211_hold_bss(bss_from_pub(bss));
ev->cr.bss = bss;
ev->cr.status = status;
+ ev->cr.timeout_reason = timeout_reason;
spin_lock_irqsave(&wdev->event_lock, flags);
list_add_tail(&ev->list, &wdev->event_list);
@@ -955,6 +971,7 @@ void __cfg80211_disconnected(struct net_device *dev, const u8 *ie,
wdev->current_bss = NULL;
wdev->ssid_len = 0;
+ wdev->conn_owner_nlportid = 0;
nl80211_send_disconnected(rdev, dev, reason, ie, ie_len, from_ap);
@@ -1098,6 +1115,8 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
kzfree(wdev->connect_keys);
wdev->connect_keys = NULL;
+ wdev->conn_owner_nlportid = 0;
+
if (wdev->conn)
err = cfg80211_sme_disconnect(wdev, reason);
else if (!rdev->ops->disconnect)
@@ -1107,3 +1126,32 @@ int cfg80211_disconnect(struct cfg80211_registered_device *rdev,
return err;
}
+
+/*
+ * Used to clean up after the connection / connection attempt owner socket
+ * disconnects
+ */
+void cfg80211_autodisconnect_wk(struct work_struct *work)
+{
+ struct wireless_dev *wdev =
+ container_of(work, struct wireless_dev, disconnect_wk);
+ struct cfg80211_registered_device *rdev = wiphy_to_rdev(wdev->wiphy);
+
+ wdev_lock(wdev);
+
+ if (wdev->conn_owner_nlportid) {
+ /*
+ * Use disconnect_bssid if we are still connecting and ops->disconnect
+ * is not implemented. Otherwise we can use cfg80211_disconnect.
+ */
+ if (rdev->ops->disconnect || wdev->current_bss)
+ cfg80211_disconnect(rdev, wdev->netdev,
+ WLAN_REASON_DEAUTH_LEAVING, true);
+ else
+ cfg80211_mlme_deauth(rdev, wdev->netdev,
+ wdev->disconnect_bssid, NULL, 0,
+ WLAN_REASON_DEAUTH_LEAVING, false);
+ }
+
+ wdev_unlock(wdev);
+}
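
cfg80211_autodisconnect_wk() pairs with the flush_work(&wdev->disconnect_wk) added in core.c and the schedule_work() in nl80211_netlink_notify(); the work item itself would be initialized once per wdev, presumably in the wdev setup path in core.c, along the lines of:

	INIT_WORK(&wdev->disconnect_wk, cfg80211_autodisconnect_wk);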
diff --git a/net/wireless/sysfs.c b/net/wireless/sysfs.c
index 14b3f00..16b6b59 100644
--- a/net/wireless/sysfs.c
+++ b/net/wireless/sysfs.c
@@ -39,9 +39,11 @@ SHOW_FMT(address_mask, "%pM", wiphy.addr_mask);
static ssize_t name_show(struct device *dev,
struct device_attribute *attr,
- char *buf) {
+ char *buf)
+{
struct wiphy *wiphy = &dev_to_rdev(dev)->wiphy;
- return sprintf(buf, "%s\n", dev_name(&wiphy->dev));
+
+ return sprintf(buf, "%s\n", wiphy_name(wiphy));
}
static DEVICE_ATTR_RO(name);
diff --git a/net/wireless/util.c b/net/wireless/util.c
index e9d040d..68e5f2ec 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -114,8 +114,7 @@ int ieee80211_frequency_to_channel(int freq)
}
EXPORT_SYMBOL(ieee80211_frequency_to_channel);
-struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
- int freq)
+struct ieee80211_channel *ieee80211_get_channel(struct wiphy *wiphy, int freq)
{
enum nl80211_band band;
struct ieee80211_supported_band *sband;
@@ -135,14 +134,13 @@ struct ieee80211_channel *__ieee80211_get_channel(struct wiphy *wiphy,
return NULL;
}
-EXPORT_SYMBOL(__ieee80211_get_channel);
+EXPORT_SYMBOL(ieee80211_get_channel);
-static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
- enum nl80211_band band)
+static void set_mandatory_flags_band(struct ieee80211_supported_band *sband)
{
int i, want;
- switch (band) {
+ switch (sband->band) {
case NL80211_BAND_5GHZ:
want = 3;
for (i = 0; i < sband->n_bitrates; i++) {
@@ -192,6 +190,7 @@ static void set_mandatory_flags_band(struct ieee80211_supported_band *sband,
WARN_ON((sband->ht_cap.mcs.rx_mask[0] & 0x1e) != 0x1e);
break;
case NUM_NL80211_BANDS:
+ default:
WARN_ON(1);
break;
}
@@ -203,7 +202,7 @@ void ieee80211_set_bitrate_flags(struct wiphy *wiphy)
for (band = 0; band < NUM_NL80211_BANDS; band++)
if (wiphy->bands[band])
- set_mandatory_flags_band(wiphy->bands[band], band);
+ set_mandatory_flags_band(wiphy->bands[band]);
}
bool cfg80211_supported_cipher_suite(struct wiphy *wiphy, u32 cipher)
@@ -619,8 +618,6 @@ int ieee80211_data_from_8023(struct sk_buff *skb, const u8 *addr,
if (pskb_expand_head(skb, head_need, 0, GFP_ATOMIC))
return -ENOMEM;
-
- skb->truesize += head_need;
}
if (encaps_data) {
@@ -952,7 +949,7 @@ void cfg80211_process_wdev_events(struct wireless_dev *wdev)
ev->cr.resp_ie, ev->cr.resp_ie_len,
ev->cr.status,
ev->cr.status == WLAN_STATUS_SUCCESS,
- ev->cr.bss);
+ ev->cr.bss, ev->cr.timeout_reason);
break;
case EVENT_ROAMED:
__cfg80211_roamed(wdev, ev->rm.bss, ev->rm.req_ie,
@@ -1848,6 +1845,21 @@ void cfg80211_free_nan_func(struct cfg80211_nan_func *f)
}
EXPORT_SYMBOL(cfg80211_free_nan_func);
+bool cfg80211_does_bw_fit_range(const struct ieee80211_freq_range *freq_range,
+ u32 center_freq_khz, u32 bw_khz)
+{
+ u32 start_freq_khz, end_freq_khz;
+
+ start_freq_khz = center_freq_khz - (bw_khz / 2);
+ end_freq_khz = center_freq_khz + (bw_khz / 2);
+
+ if (start_freq_khz >= freq_range->start_freq_khz &&
+ end_freq_khz <= freq_range->end_freq_khz)
+ return true;
+
+ return false;
+}
+
/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
const unsigned char rfc1042_header[] __aligned(2) =
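
cfg80211_does_bw_fit_range(), now shared by reg.c and of.c instead of being private to the regulatory code, simply checks that both edges of the signal sit inside the range. A worked example with illustrative numbers: a 20 MHz signal centred on 2412 MHz spans 2402000-2422000 kHz, which fits the 2.4 GHz ISM band:

	const struct ieee80211_freq_range range = {
		.start_freq_khz = MHZ_TO_KHZ(2400),
		.end_freq_khz = 2483500,		/* 2483.5 MHz */
	};

	/* edges at 2402 and 2422 MHz are both inside the range -> true */
	bool fits = cfg80211_does_bw_fit_range(&range, MHZ_TO_KHZ(2412),
					       MHZ_TO_KHZ(20));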
diff --git a/net/wireless/wext-core.c b/net/wireless/wext-core.c
index 6250b1c..1a4db67 100644
--- a/net/wireless/wext-core.c
+++ b/net/wireless/wext-core.c
@@ -1119,3 +1119,70 @@ int compat_wext_handle_ioctl(struct net *net, unsigned int cmd,
return ret;
}
#endif
+
+char *iwe_stream_add_event(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, int event_len)
+{
+ int lcp_len = iwe_stream_lcp_len(info);
+
+ event_len = iwe_stream_event_len_adjust(info, event_len);
+
+ /* Check if it's possible */
+ if (likely((stream + event_len) < ends)) {
+ iwe->len = event_len;
+ /* Beware of alignment issues on 64-bit systems */
+ memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
+ memcpy(stream + lcp_len, &iwe->u,
+ event_len - lcp_len);
+ stream += event_len;
+ }
+
+ return stream;
+}
+EXPORT_SYMBOL(iwe_stream_add_event);
+
+char *iwe_stream_add_point(struct iw_request_info *info, char *stream,
+ char *ends, struct iw_event *iwe, char *extra)
+{
+ int event_len = iwe_stream_point_len(info) + iwe->u.data.length;
+ int point_len = iwe_stream_point_len(info);
+ int lcp_len = iwe_stream_lcp_len(info);
+
+ /* Check if it's possible */
+ if (likely((stream + event_len) < ends)) {
+ iwe->len = event_len;
+ memcpy(stream, (char *) iwe, IW_EV_LCP_PK_LEN);
+ memcpy(stream + lcp_len,
+ ((char *) &iwe->u) + IW_EV_POINT_OFF,
+ IW_EV_POINT_PK_LEN - IW_EV_LCP_PK_LEN);
+ if (iwe->u.data.length && extra)
+ memcpy(stream + point_len, extra, iwe->u.data.length);
+ stream += event_len;
+ }
+
+ return stream;
+}
+EXPORT_SYMBOL(iwe_stream_add_point);
+
+char *iwe_stream_add_value(struct iw_request_info *info, char *event,
+ char *value, char *ends, struct iw_event *iwe,
+ int event_len)
+{
+ int lcp_len = iwe_stream_lcp_len(info);
+
+ /* Don't duplicate LCP */
+ event_len -= IW_EV_LCP_LEN;
+
+ /* Check if it's possible */
+ if (likely((value + event_len) < ends)) {
+ /* Add new value */
+ memcpy(value, &iwe->u, event_len);
+ value += event_len;
+ /* Patch LCP */
+ iwe->len = value - event;
+ memcpy(event, (char *) iwe, lcp_len);
+ }
+
+ return value;
+}
+EXPORT_SYMBOL(iwe_stream_add_value);
diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c
index 9951638..c434f19 100644
--- a/net/wireless/wext-sme.c
+++ b/net/wireless/wext-sme.c
@@ -105,30 +105,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,
goto out;
}
-
wdev->wext.connect.channel = chan;
-
- /*
- * SSID is not set, we just want to switch monitor channel,
- * this is really just backward compatibility, if the SSID
- * is set then we use the channel to select the BSS to use
- * to connect to instead. If we were connected on another
- * channel we disconnected above and reconnect below.
- */
- if (chan && !wdev->wext.connect.ssid_len) {
- struct cfg80211_chan_def chandef = {
- .width = NL80211_CHAN_WIDTH_20_NOHT,
- .center_freq1 = freq,
- };
-
- chandef.chan = ieee80211_get_channel(&rdev->wiphy, freq);
- if (chandef.chan)
- err = cfg80211_set_monitor_channel(rdev, &chandef);
- else
- err = -EINVAL;
- goto out;
- }
-
err = cfg80211_mgd_wext_connect(rdev, wdev);
out:
wdev_unlock(wdev);
diff --git a/net/xfrm/xfrm_input.c b/net/xfrm/xfrm_input.c
index 6e3f025..3213fe8 100644
--- a/net/xfrm/xfrm_input.c
+++ b/net/xfrm/xfrm_input.c
@@ -21,6 +21,9 @@ static struct kmem_cache *secpath_cachep __read_mostly;
static DEFINE_SPINLOCK(xfrm_input_afinfo_lock);
static struct xfrm_input_afinfo __rcu *xfrm_input_afinfo[NPROTO];
+static struct gro_cells gro_cells;
+static struct net_device xfrm_napi_dev;
+
int xfrm_input_register_afinfo(struct xfrm_input_afinfo *afinfo)
{
int err = 0;
@@ -371,7 +374,7 @@ resume:
if (decaps) {
skb_dst_drop(skb);
- netif_rx(skb);
+ gro_cells_receive(&gro_cells, skb);
return 0;
} else {
return x->inner_mode->afinfo->transport_finish(skb, async);
@@ -394,6 +397,13 @@ EXPORT_SYMBOL(xfrm_input_resume);
void __init xfrm_input_init(void)
{
+ int err;
+
+ init_dummy_netdev(&xfrm_napi_dev);
+ err = gro_cells_init(&gro_cells, &xfrm_napi_dev);
+ if (err)
+ gro_cells.cells = NULL;
+
secpath_cachep = kmem_cache_create("secpath_cache",
sizeof(struct sec_path),
0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 637387b..8ba29fe 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -246,10 +246,8 @@ void xfrm_local_error(struct sk_buff *skb, int mtu)
return;
afinfo = xfrm_state_get_afinfo(proto);
- if (!afinfo)
- return;
-
- afinfo->local_error(skb, mtu);
- xfrm_state_put_afinfo(afinfo);
+ if (afinfo)
+ afinfo->local_error(skb, mtu);
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(xfrm_local_error);
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 177e208..99ad1af2 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -330,7 +330,7 @@ void xfrm_policy_destroy(struct xfrm_policy *policy)
}
EXPORT_SYMBOL(xfrm_policy_destroy);
-/* Rule must be locked. Release descentant resources, announce
+/* Rule must be locked. Release descendant resources, announce
* entry dead. The rule must be unlinked from lists to the moment.
*/
diff --git a/net/xfrm/xfrm_state.c b/net/xfrm/xfrm_state.c
index 64e3c82..5a597db 100644
--- a/net/xfrm/xfrm_state.c
+++ b/net/xfrm/xfrm_state.c
@@ -192,7 +192,7 @@ int xfrm_register_type(const struct xfrm_type *type, unsigned short family)
else
err = -EEXIST;
spin_unlock_bh(&xfrm_type_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_register_type);
@@ -213,7 +213,7 @@ int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family)
else
typemap[type->proto] = NULL;
spin_unlock_bh(&xfrm_type_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_unregister_type);
@@ -231,17 +231,18 @@ retry:
return NULL;
typemap = afinfo->type_map;
- type = typemap[proto];
+ type = READ_ONCE(typemap[proto]);
if (unlikely(type && !try_module_get(type->owner)))
type = NULL;
+
+ rcu_read_unlock();
+
if (!type && !modload_attempted) {
- xfrm_state_put_afinfo(afinfo);
request_module("xfrm-type-%d-%d", family, proto);
modload_attempted = 1;
goto retry;
}
- xfrm_state_put_afinfo(afinfo);
return type;
}
@@ -280,7 +281,7 @@ int xfrm_register_mode(struct xfrm_mode *mode, int family)
out:
spin_unlock_bh(&xfrm_mode_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_register_mode);
@@ -308,7 +309,7 @@ int xfrm_unregister_mode(struct xfrm_mode *mode, int family)
}
spin_unlock_bh(&xfrm_mode_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_unregister_mode);
@@ -327,17 +328,17 @@ retry:
if (unlikely(afinfo == NULL))
return NULL;
- mode = afinfo->mode_map[encap];
+ mode = READ_ONCE(afinfo->mode_map[encap]);
if (unlikely(mode && !try_module_get(mode->owner)))
mode = NULL;
+
+ rcu_read_unlock();
if (!mode && !modload_attempted) {
- xfrm_state_put_afinfo(afinfo);
request_module("xfrm-mode-%d-%d", family, encap);
modload_attempted = 1;
goto retry;
}
- xfrm_state_put_afinfo(afinfo);
return mode;
}
@@ -409,7 +410,7 @@ static enum hrtimer_restart xfrm_timer_handler(struct hrtimer *me)
if (x->xflags & XFRM_SOFT_EXPIRE) {
/* enter hard expire without soft expire first?!
* setting a new date could trigger this.
- * workarbound: fix x->curflt.add_time by below:
+ * workaround: fix x->curflt.add_time by below:
*/
x->curlft.add_time = now - x->saved_tmo - 1;
tmo = x->lft.hard_add_expires_seconds - x->saved_tmo;
@@ -639,26 +640,25 @@ void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si)
}
EXPORT_SYMBOL(xfrm_sad_getinfo);
-static int
+static void
xfrm_init_tempstate(struct xfrm_state *x, const struct flowi *fl,
const struct xfrm_tmpl *tmpl,
const xfrm_address_t *daddr, const xfrm_address_t *saddr,
unsigned short family)
{
- struct xfrm_state_afinfo *afinfo = xfrm_state_get_afinfo(family);
+ struct xfrm_state_afinfo *afinfo = xfrm_state_afinfo_get_rcu(family);
+
if (!afinfo)
- return -1;
+ return;
+
afinfo->init_tempsel(&x->sel, fl);
if (family != tmpl->encap_family) {
- xfrm_state_put_afinfo(afinfo);
- afinfo = xfrm_state_get_afinfo(tmpl->encap_family);
+ afinfo = xfrm_state_afinfo_get_rcu(tmpl->encap_family);
if (!afinfo)
- return -1;
+ return;
}
afinfo->init_temprop(x, tmpl, daddr, saddr);
- xfrm_state_put_afinfo(afinfo);
- return 0;
}
static struct xfrm_state *__xfrm_state_lookup(struct net *net, u32 mark,
@@ -1474,7 +1474,7 @@ xfrm_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n,
if (afinfo->tmpl_sort)
err = afinfo->tmpl_sort(dst, src, n);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_tmpl_sort);
@@ -1494,7 +1494,7 @@ xfrm_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n,
if (afinfo->state_sort)
err = afinfo->state_sort(dst, src, n);
spin_unlock_bh(&net->xfrm.xfrm_state_lock);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
return err;
}
EXPORT_SYMBOL(xfrm_state_sort);
@@ -1932,10 +1932,10 @@ EXPORT_SYMBOL(xfrm_unregister_km);
int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo)
{
int err = 0;
- if (unlikely(afinfo == NULL))
- return -EINVAL;
- if (unlikely(afinfo->family >= NPROTO))
+
+ if (WARN_ON(afinfo->family >= NPROTO))
return -EAFNOSUPPORT;
+
spin_lock_bh(&xfrm_state_afinfo_lock);
if (unlikely(xfrm_state_afinfo[afinfo->family] != NULL))
err = -EEXIST;
@@ -1948,14 +1948,14 @@ EXPORT_SYMBOL(xfrm_state_register_afinfo);
int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
{
- int err = 0;
- if (unlikely(afinfo == NULL))
- return -EINVAL;
- if (unlikely(afinfo->family >= NPROTO))
+ int err = 0, family = afinfo->family;
+
+ if (WARN_ON(family >= NPROTO))
return -EAFNOSUPPORT;
+
spin_lock_bh(&xfrm_state_afinfo_lock);
if (likely(xfrm_state_afinfo[afinfo->family] != NULL)) {
- if (unlikely(xfrm_state_afinfo[afinfo->family] != afinfo))
+ if (rcu_access_pointer(xfrm_state_afinfo[family]) != afinfo)
err = -EINVAL;
else
RCU_INIT_POINTER(xfrm_state_afinfo[afinfo->family], NULL);
@@ -1966,6 +1966,14 @@ int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo)
}
EXPORT_SYMBOL(xfrm_state_unregister_afinfo);
+struct xfrm_state_afinfo *xfrm_state_afinfo_get_rcu(unsigned int family)
+{
+ if (unlikely(family >= NPROTO))
+ return NULL;
+
+ return rcu_dereference(xfrm_state_afinfo[family]);
+}
+
struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
{
struct xfrm_state_afinfo *afinfo;
@@ -1978,11 +1986,6 @@ struct xfrm_state_afinfo *xfrm_state_get_afinfo(unsigned int family)
return afinfo;
}
-void xfrm_state_put_afinfo(struct xfrm_state_afinfo *afinfo)
-{
- rcu_read_unlock();
-}
-
/* Temporarily located here until net/xfrm/xfrm_tunnel.c is created */
void xfrm_state_delete_tunnel(struct xfrm_state *x)
{
@@ -2000,16 +2003,13 @@ EXPORT_SYMBOL(xfrm_state_delete_tunnel);
int xfrm_state_mtu(struct xfrm_state *x, int mtu)
{
- int res;
+ const struct xfrm_type *type = READ_ONCE(x->type);
- spin_lock_bh(&x->lock);
if (x->km.state == XFRM_STATE_VALID &&
- x->type && x->type->get_mtu)
- res = x->type->get_mtu(x, mtu);
- else
- res = mtu - x->props.header_len;
- spin_unlock_bh(&x->lock);
- return res;
+ type && type->get_mtu)
+ return type->get_mtu(x, mtu);
+
+ return mtu - x->props.header_len;
}
int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
@@ -2028,7 +2028,7 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay)
if (afinfo->init_flags)
err = afinfo->init_flags(x);
- xfrm_state_put_afinfo(afinfo);
+ rcu_read_unlock();
if (err)
goto error;
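
With xfrm_state_put_afinfo() deleted, the lock pairing is explicit at every call site: xfrm_state_get_afinfo() still enters an RCU read-side section on success (the removed put helper was nothing but rcu_read_unlock()), so the callers converted above all follow this pattern (error handling varies per caller):

	afinfo = xfrm_state_get_afinfo(family);
	if (!afinfo)
		return -EAFNOSUPPORT;
	/* ... use afinfo inside the RCU read-side section ... */
	rcu_read_unlock();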
diff --git a/samples/bpf/map_perf_test_kern.c b/samples/bpf/map_perf_test_kern.c
index 7ee1574..a91872a 100644
--- a/samples/bpf/map_perf_test_kern.c
+++ b/samples/bpf/map_perf_test_kern.c
@@ -57,6 +57,14 @@ struct bpf_map_def SEC("maps") percpu_hash_map_alloc = {
.map_flags = BPF_F_NO_PREALLOC,
};
+struct bpf_map_def SEC("maps") lpm_trie_map_alloc = {
+ .type = BPF_MAP_TYPE_LPM_TRIE,
+ .key_size = 8,
+ .value_size = sizeof(long),
+ .max_entries = 10000,
+ .map_flags = BPF_F_NO_PREALLOC,
+};
+
SEC("kprobe/sys_getuid")
int stress_hmap(struct pt_regs *ctx)
{
@@ -135,5 +143,27 @@ int stress_percpu_lru_hmap_alloc(struct pt_regs *ctx)
return 0;
}
+SEC("kprobe/sys_gettid")
+int stress_lpm_trie_map_alloc(struct pt_regs *ctx)
+{
+ union {
+ u32 b32[2];
+ u8 b8[8];
+ } key;
+ unsigned int i;
+
+ key.b32[0] = 32;
+ key.b8[4] = 192;
+ key.b8[5] = 168;
+ key.b8[6] = 0;
+ key.b8[7] = 1;
+
+#pragma clang loop unroll(full)
+ for (i = 0; i < 32; ++i)
+ bpf_map_lookup_elem(&lpm_trie_map_alloc, &key);
+
+ return 0;
+}
+
char _license[] SEC("license") = "GPL";
u32 _version SEC("version") = LINUX_VERSION_CODE;
diff --git a/samples/bpf/map_perf_test_user.c b/samples/bpf/map_perf_test_user.c
index 9505b4d..680260a 100644
--- a/samples/bpf/map_perf_test_user.c
+++ b/samples/bpf/map_perf_test_user.c
@@ -37,6 +37,7 @@ static __u64 time_get_ns(void)
#define PERCPU_HASH_KMALLOC (1 << 3)
#define LRU_HASH_PREALLOC (1 << 4)
#define PERCPU_LRU_HASH_PREALLOC (1 << 5)
+#define LPM_KMALLOC (1 << 6)
static int test_flags = ~0;
@@ -112,6 +113,18 @@ static void test_percpu_hash_kmalloc(int cpu)
cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
}
+static void test_lpm_kmalloc(int cpu)
+{
+ __u64 start_time;
+ int i;
+
+ start_time = time_get_ns();
+ for (i = 0; i < MAX_CNT; i++)
+ syscall(__NR_gettid);
+ printf("%d:lpm_perf kmalloc %lld events per sec\n",
+ cpu, MAX_CNT * 1000000000ll / (time_get_ns() - start_time));
+}
+
static void loop(int cpu)
{
cpu_set_t cpuset;
@@ -137,6 +150,9 @@ static void loop(int cpu)
if (test_flags & PERCPU_LRU_HASH_PREALLOC)
test_percpu_lru_hash_prealloc(cpu);
+
+ if (test_flags & LPM_KMALLOC)
+ test_lpm_kmalloc(cpu);
}
static void run_perf_test(int tasks)
@@ -162,6 +178,37 @@ static void run_perf_test(int tasks)
}
}
+static void fill_lpm_trie(void)
+{
+ struct bpf_lpm_trie_key *key;
+ unsigned long value = 0;
+ unsigned int i;
+ int r;
+
+ key = alloca(sizeof(*key) + 4);
+ key->prefixlen = 32;
+
+ for (i = 0; i < 512; ++i) {
+ key->prefixlen = rand() % 33;
+ key->data[0] = rand() & 0xff;
+ key->data[1] = rand() & 0xff;
+ key->data[2] = rand() & 0xff;
+ key->data[3] = rand() & 0xff;
+ r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+ assert(!r);
+ }
+
+ key->prefixlen = 32;
+ key->data[0] = 192;
+ key->data[1] = 168;
+ key->data[2] = 0;
+ key->data[3] = 1;
+ value = 128;
+
+ r = bpf_map_update_elem(map_fd[6], key, &value, 0);
+ assert(!r);
+}
+
int main(int argc, char **argv)
{
struct rlimit r = {RLIM_INFINITY, RLIM_INFINITY};
@@ -182,6 +229,8 @@ int main(int argc, char **argv)
return 1;
}
+ fill_lpm_trie();
+
run_perf_test(num_cpu);
return 0;
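
Both halves of the sample agree on the LPM key layout: struct bpf_lpm_trie_key is four bytes of prefix length followed by the address in network byte order, which is exactly what the kernel program's union { u32 b32[2]; u8 b8[8]; } encodes for IPv4. A sketch of a user-side lookup key for 192.168.0.0/24 (illustrative only; value type as in fill_lpm_trie() above):

	unsigned long value;
	struct bpf_lpm_trie_key *key = alloca(sizeof(*key) + 4);	/* IPv4 */

	key->prefixlen = 24;	/* only the first 24 bits participate */
	key->data[0] = 192;
	key->data[1] = 168;
	key->data[2] = 0;
	key->data[3] = 0;	/* host bits are ignored on lookup */

	/* longest-prefix-match lookup against the trie filled above */
	bpf_map_lookup_elem(map_fd[6], key, &value);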
diff --git a/samples/bpf/sock_example.h b/samples/bpf/sock_example.h
index 09f7fe7..d801406 100644
--- a/samples/bpf/sock_example.h
+++ b/samples/bpf/sock_example.h
@@ -4,7 +4,7 @@
#include <unistd.h>
#include <string.h>
#include <errno.h>
-#include <net/ethernet.h>
+#include <linux/if_ether.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
diff --git a/samples/bpf/tc_l2_redirect_kern.c b/samples/bpf/tc_l2_redirect_kern.c
index 92a4472..7ef2a12 100644
--- a/samples/bpf/tc_l2_redirect_kern.c
+++ b/samples/bpf/tc_l2_redirect_kern.c
@@ -4,6 +4,7 @@
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <uapi/linux/if_ether.h>
#include <uapi/linux/if_packet.h>
diff --git a/samples/bpf/trace_output_user.c b/samples/bpf/trace_output_user.c
index f4fa6af..ccca1e3 100644
--- a/samples/bpf/trace_output_user.c
+++ b/samples/bpf/trace_output_user.c
@@ -9,7 +9,6 @@
#include <string.h>
#include <fcntl.h>
#include <poll.h>
-#include <sys/ioctl.h>
#include <linux/perf_event.h>
#include <linux/bpf.h>
#include <errno.h>
diff --git a/samples/bpf/xdp_tx_iptunnel_kern.c b/samples/bpf/xdp_tx_iptunnel_kern.c
index 85c38ec..0f4f6e8 100644
--- a/samples/bpf/xdp_tx_iptunnel_kern.c
+++ b/samples/bpf/xdp_tx_iptunnel_kern.c
@@ -8,6 +8,7 @@
* encapsulating the incoming packet in an IPv4/v6 header
* and then XDP_TX it out.
*/
+#define KBUILD_MODNAME "foo"
#include <uapi/linux/bpf.h>
#include <linux/in.h>
#include <linux/if_ether.h>
diff --git a/samples/vfio-mdev/mtty.c b/samples/vfio-mdev/mtty.c
index 1fc57a5..ca49568 100644
--- a/samples/vfio-mdev/mtty.c
+++ b/samples/vfio-mdev/mtty.c
@@ -1073,7 +1073,7 @@ int mtty_get_region_info(struct mdev_device *mdev,
{
unsigned int size = 0;
struct mdev_state *mdev_state;
- int bar_index;
+ u32 bar_index;
if (!mdev)
return -EINVAL;
@@ -1082,8 +1082,11 @@ int mtty_get_region_info(struct mdev_device *mdev,
if (!mdev_state)
return -EINVAL;
- mutex_lock(&mdev_state->ops_lock);
bar_index = region_info->index;
+ if (bar_index >= VFIO_PCI_NUM_REGIONS)
+ return -EINVAL;
+
+ mutex_lock(&mdev_state->ops_lock);
switch (bar_index) {
case VFIO_PCI_CONFIG_REGION_INDEX:
@@ -1180,7 +1183,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
memcpy(&mdev_state->dev_info, &info, sizeof(info));
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
}
case VFIO_DEVICE_GET_REGION_INFO:
{
@@ -1201,7 +1207,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (ret)
return ret;
- return copy_to_user((void __user *)arg, &info, minsz);
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
+
+ return 0;
}
case VFIO_DEVICE_GET_IRQ_INFO:
@@ -1221,10 +1230,10 @@ static long mtty_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (ret)
return ret;
- if (info.count == -1)
- return -EINVAL;
+ if (copy_to_user((void __user *)arg, &info, minsz))
+ return -EFAULT;
- return copy_to_user((void __user *)arg, &info, minsz);
+ return 0;
}
case VFIO_DEVICE_SET_IRQS:
{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index c7c6619..53cb6da 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4365,7 +4365,8 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
inet_get_local_port_range(sock_net(sk), &low, &high);
- if (snum < max(PROT_SOCK, low) || snum > high) {
+ if (snum < max(inet_prot_sock(sock_net(sk)), low) ||
+ snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
diff --git a/sound/soc/codecs/nau8825.c b/sound/soc/codecs/nau8825.c
index efe3a44..4576f98 100644
--- a/sound/soc/codecs/nau8825.c
+++ b/sound/soc/codecs/nau8825.c
@@ -561,9 +561,9 @@ static void nau8825_xtalk_prepare(struct nau8825 *nau8825)
nau8825_xtalk_backup(nau8825);
/* Config IIS as master to output signal by codec */
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_MASTER |
- (0x2 << NAU8825_I2S_DRV_SFT) | 0x1);
+ (0x2 << NAU8825_I2S_LRC_DIV_SFT) | 0x1);
/* Ramp up headphone volume to 0dB to get better performance and
* avoid pop noise in headphone.
*/
@@ -657,7 +657,7 @@ static void nau8825_xtalk_clean(struct nau8825 *nau8825)
NAU8825_IRQ_RMS_EN, NAU8825_IRQ_RMS_EN);
/* Recover default value for IIS */
regmap_update_bits(nau8825->regmap, NAU8825_REG_I2S_PCM_CTRL2,
- NAU8825_I2S_MS_MASK | NAU8825_I2S_DRV_MASK |
+ NAU8825_I2S_MS_MASK | NAU8825_I2S_LRC_DIV_MASK |
NAU8825_I2S_BLK_DIV_MASK, NAU8825_I2S_MS_SLAVE);
/* Restore value of specific register for cross talk */
nau8825_xtalk_restore(nau8825);
@@ -2006,7 +2006,8 @@ static void nau8825_fll_apply(struct nau8825 *nau8825,
NAU8825_FLL_INTEGER_MASK, fll_param->fll_int);
/* FLL pre-scaler */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL4,
- NAU8825_FLL_REF_DIV_MASK, fll_param->clk_ref_div);
+ NAU8825_FLL_REF_DIV_MASK,
+ fll_param->clk_ref_div << NAU8825_FLL_REF_DIV_SFT);
/* select divided VCO input */
regmap_update_bits(nau8825->regmap, NAU8825_REG_FLL5,
NAU8825_FLL_CLK_SW_MASK, NAU8825_FLL_CLK_SW_REF);
diff --git a/sound/soc/codecs/nau8825.h b/sound/soc/codecs/nau8825.h
index 5d1704e..514fd13 100644
--- a/sound/soc/codecs/nau8825.h
+++ b/sound/soc/codecs/nau8825.h
@@ -137,7 +137,8 @@
#define NAU8825_FLL_CLK_SRC_FS (0x3 << NAU8825_FLL_CLK_SRC_SFT)
/* FLL4 (0x07) */
-#define NAU8825_FLL_REF_DIV_MASK (0x3 << 10)
+#define NAU8825_FLL_REF_DIV_SFT 10
+#define NAU8825_FLL_REF_DIV_MASK (0x3 << NAU8825_FLL_REF_DIV_SFT)
/* FLL5 (0x08) */
#define NAU8825_FLL_PDB_DAC_EN (0x1 << 15)
@@ -247,8 +248,8 @@
/* I2S_PCM_CTRL2 (0x1d) */
#define NAU8825_I2S_TRISTATE (1 << 15) /* 0 - normal mode, 1 - Hi-Z output */
-#define NAU8825_I2S_DRV_SFT 12
-#define NAU8825_I2S_DRV_MASK (0x3 << NAU8825_I2S_DRV_SFT)
+#define NAU8825_I2S_LRC_DIV_SFT 12
+#define NAU8825_I2S_LRC_DIV_MASK (0x3 << NAU8825_I2S_LRC_DIV_SFT)
#define NAU8825_I2S_MS_SFT 3
#define NAU8825_I2S_MS_MASK (1 << NAU8825_I2S_MS_SFT)
#define NAU8825_I2S_MS_MASTER (1 << NAU8825_I2S_MS_SFT)
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 10c2a564..1ac96ef 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -3833,6 +3833,9 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
}
}
+ regmap_update_bits(rt5645->regmap, RT5645_ADDA_CLK1,
+ RT5645_I2S_PD1_MASK, RT5645_I2S_PD1_2);
+
if (rt5645->pdata.jd_invert) {
regmap_update_bits(rt5645->regmap, RT5645_IRQ_CTRL2,
RT5645_JD_1_1_MASK, RT5645_JD_1_1_INV);
diff --git a/sound/soc/codecs/tlv320aic3x.c b/sound/soc/codecs/tlv320aic3x.c
index 8877b74..bb94d50 100644
--- a/sound/soc/codecs/tlv320aic3x.c
+++ b/sound/soc/codecs/tlv320aic3x.c
@@ -126,6 +126,16 @@ static const struct reg_default aic3x_reg[] = {
{ 108, 0x00 }, { 109, 0x00 },
};
+static bool aic3x_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case AIC3X_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
static const struct regmap_config aic3x_regmap = {
.reg_bits = 8,
.val_bits = 8,
@@ -133,6 +143,9 @@ static const struct regmap_config aic3x_regmap = {
.max_register = DAC_ICC_ADJ,
.reg_defaults = aic3x_reg,
.num_reg_defaults = ARRAY_SIZE(aic3x_reg),
+
+ .volatile_reg = aic3x_volatile_reg,
+
.cache_type = REGCACHE_RBTREE,
};
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index 593b7d1..d72ccef 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -1551,7 +1551,7 @@ static int wm_adsp_load(struct wm_adsp *dsp)
const struct wmfw_region *region;
const struct wm_adsp_region *mem;
const char *region_name;
- char *file, *text;
+ char *file, *text = NULL;
struct wm_adsp_buf *buf;
unsigned int reg;
int regions = 0;
@@ -1700,10 +1700,21 @@ static int wm_adsp_load(struct wm_adsp *dsp)
regions, le32_to_cpu(region->len), offset,
region_name);
+ if ((pos + le32_to_cpu(region->len) + sizeof(*region)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, regions, region_name,
+ le32_to_cpu(region->len), firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
if (text) {
memcpy(text, region->data, le32_to_cpu(region->len));
adsp_info(dsp, "%s: %s\n", file, text);
kfree(text);
+ text = NULL;
}
if (reg) {
@@ -1748,6 +1759,7 @@ out_fw:
regmap_async_complete(regmap);
wm_adsp_buf_free(&buf_list);
release_firmware(firmware);
+ kfree(text);
out:
kfree(file);
@@ -2233,6 +2245,17 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
}
if (reg) {
+ if ((pos + le32_to_cpu(blk->len) + sizeof(*blk)) >
+ firmware->size) {
+ adsp_err(dsp,
+ "%s.%d: %s region len %d bytes exceeds file length %zu\n",
+ file, blocks, region_name,
+ le32_to_cpu(blk->len),
+ firmware->size);
+ ret = -EINVAL;
+ goto out_fw;
+ }
+
buf = wm_adsp_buf_alloc(blk->data,
le32_to_cpu(blk->len),
&buf_list);
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index 2998954a..bdf8398 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -681,22 +681,19 @@ static int dw_i2s_probe(struct platform_device *pdev)
}
if (!pdata) {
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
- if (ret == -EPROBE_DEFER) {
- dev_err(&pdev->dev,
- "failed to register PCM, deferring probe\n");
- return ret;
- } else if (ret) {
- dev_err(&pdev->dev,
- "Could not register DMA PCM: %d\n"
- "falling back to PIO mode\n", ret);
+ if (irq >= 0) {
ret = dw_pcm_register(pdev);
- if (ret) {
- dev_err(&pdev->dev,
- "Could not register PIO PCM: %d\n",
+ dev->use_pio = true;
+ } else {
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL,
+ 0);
+ dev->use_pio = false;
+ }
+
+ if (ret) {
+ dev_err(&pdev->dev, "could not register pcm: %d\n",
ret);
- goto err_clk_disable;
- }
+ goto err_clk_disable;
}
}
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 5034943..fde08660 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -224,6 +224,12 @@ struct fsl_ssi_soc_data {
* @dbg_stats: Debugging statistics
*
* @soc: SoC specific data
+ *
+ * @fifo_watermark: the FIFO watermark setting. Notifies DMA when
+ * there are @fifo_watermark or fewer words in TX fifo or
+ * @fifo_watermark or more empty words in RX fifo.
+ * @dma_maxburst: max number of words to transfer in one go. So far,
+ * this is always the same as fifo_watermark.
*/
struct fsl_ssi_private {
struct regmap *regs;
@@ -263,6 +269,9 @@ struct fsl_ssi_private {
const struct fsl_ssi_soc_data *soc;
struct device *dev;
+
+ u32 fifo_watermark;
+ u32 dma_maxburst;
};
/*
@@ -1051,21 +1060,7 @@ static int _fsl_ssi_set_dai_fmt(struct device *dev,
regmap_write(regs, CCSR_SSI_SRCR, srcr);
regmap_write(regs, CCSR_SSI_SCR, scr);
- /*
- * Set the watermark for transmit FIFI 0 and receive FIFO 0. We don't
- * use FIFO 1. We program the transmit water to signal a DMA transfer
- * if there are only two (or fewer) elements left in the FIFO. Two
- * elements equals one frame (left channel, right channel). This value,
- * however, depends on the depth of the transmit buffer.
- *
- * We set the watermark on the same level as the DMA burstsize. For
- * fiq it is probably better to use the biggest possible watermark
- * size.
- */
- if (ssi_private->use_dma)
- wm = ssi_private->fifo_depth - 2;
- else
- wm = ssi_private->fifo_depth;
+ wm = ssi_private->fifo_watermark;
regmap_write(regs, CCSR_SSI_SFCSR,
CCSR_SSI_SFCSR_TFWM0(wm) | CCSR_SSI_SFCSR_RFWM0(wm) |
@@ -1373,12 +1368,8 @@ static int fsl_ssi_imx_probe(struct platform_device *pdev,
dev_dbg(&pdev->dev, "could not get baud clock: %ld\n",
PTR_ERR(ssi_private->baudclk));
- /*
- * We have burstsize be "fifo_depth - 2" to match the SSI
- * watermark setting in fsl_ssi_startup().
- */
- ssi_private->dma_params_tx.maxburst = ssi_private->fifo_depth - 2;
- ssi_private->dma_params_rx.maxburst = ssi_private->fifo_depth - 2;
+ ssi_private->dma_params_tx.maxburst = ssi_private->dma_maxburst;
+ ssi_private->dma_params_rx.maxburst = ssi_private->dma_maxburst;
ssi_private->dma_params_tx.addr = ssi_private->ssi_phys + CCSR_SSI_STX0;
ssi_private->dma_params_rx.addr = ssi_private->ssi_phys + CCSR_SSI_SRX0;
@@ -1543,6 +1534,47 @@ static int fsl_ssi_probe(struct platform_device *pdev)
/* Older 8610 DTs didn't have the fifo-depth property */
ssi_private->fifo_depth = 8;
+ /*
+ * Set the watermark for transmit FIFO 0 and receive FIFO 0. We don't
+ * use FIFO 1 but set the watermark appropriately nonetheless.
+ * We program the transmit watermark to signal a DMA transfer
+ * if there are N elements left in the FIFO. For chips with 15-deep
+ * FIFOs, set watermark to 8. This allows the SSI to operate at a
+ * high data rate without channel slipping. Behavior is unchanged
+ * for the older chips with a fifo depth of only 8. A value of 4
+ * might be appropriate for the older chips, but is left at
+ * fifo_depth-2 until somebody has a chance to test.
+ *
+ * We set the watermark on the same level as the DMA burstsize. For
+ * fiq it is probably better to use the biggest possible watermark
+ * size.
+ */
+ switch (ssi_private->fifo_depth) {
+ case 15:
+ /*
+ * 2 samples are not enough when running at high data
+ * rates (like 48 kHz @ 16 bits/channel, 16 channels).
+ * 8 seems to split things evenly and leaves enough time
+ * for the DMA to fill the FIFO before it over- or
+ * under-runs.
+ */
+ ssi_private->fifo_watermark = 8;
+ ssi_private->dma_maxburst = 8;
+ break;
+ case 8:
+ default:
+ /*
+ * Maintain the old behavior for older chips. It is kept
+ * unchanged because no older board was available to test
+ * with; the value could probably be lowered to leave
+ * some more space in the FIFO.
+ */
+ ssi_private->fifo_watermark = ssi_private->fifo_depth - 2;
+ ssi_private->dma_maxburst = ssi_private->fifo_depth - 2;
+ break;
+ }
+
dev_set_drvdata(&pdev->dev, ssi_private);
if (ssi_private->soc->imx) {
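A minimal sketch of the watermark policy introduced above, as a standalone helper (hypothetical name; not part of the patch):

	/* Pick watermark and DMA burst from the FIFO depth, following the
	 * same rules as the switch statement in fsl_ssi_probe() above. */
	static void fsl_ssi_pick_watermark(u32 fifo_depth,
					   u32 *fifo_watermark,
					   u32 *dma_maxburst)
	{
		switch (fifo_depth) {
		case 15:
			/* deep FIFOs: signal DMA at 8 words for headroom */
			*fifo_watermark = 8;
			break;
		case 8:
		default:
			/* older chips: keep the historical fifo_depth - 2 */
			*fifo_watermark = fifo_depth - 2;
			break;
		}
		/* the burst size always mirrors the watermark */
		*dma_maxburst = *fifo_watermark;
	}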
diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
index 507a86a..8d2fb2d 100644
--- a/sound/soc/intel/boards/bytcr_rt5640.c
+++ b/sound/soc/intel/boards/bytcr_rt5640.c
@@ -142,7 +142,7 @@ static int platform_clock_control(struct snd_soc_dapm_widget *w,
* for Jack detection and button press
*/
ret = snd_soc_dai_set_sysclk(codec_dai, RT5640_SCLK_S_RCCLK,
- 0,
+ 48000 * 512,
SND_SOC_CLOCK_IN);
if (!ret) {
if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && priv->mclk)
@@ -825,10 +825,20 @@ static int snd_byt_rt5640_mc_probe(struct platform_device *pdev)
if ((byt_rt5640_quirk & BYT_RT5640_MCLK_EN) && (is_valleyview())) {
priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
if (IS_ERR(priv->mclk)) {
+ ret_val = PTR_ERR(priv->mclk);
+
dev_err(&pdev->dev,
- "Failed to get MCLK from pmc_plt_clk_3: %ld\n",
- PTR_ERR(priv->mclk));
- return PTR_ERR(priv->mclk);
+ "Failed to get MCLK from pmc_plt_clk_3: %d\n",
+ ret_val);
+
+ /*
+ * Fall back to bit-clock usage for -ENOENT (the clock is
+ * not available, likely due to missing dependencies); bail
+ * out for all other errors, including -EPROBE_DEFER.
+ */
+ if (ret_val != -ENOENT)
+ return ret_val;
+ byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
}
}
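The shape of the error handling above generalizes to any optional clock; a minimal sketch, assuming devm_clk_get() semantics (condensed from the hunk, not additional patch content):

	priv->mclk = devm_clk_get(&pdev->dev, "pmc_plt_clk_3");
	if (IS_ERR(priv->mclk)) {
		int err = PTR_ERR(priv->mclk);

		/* -ENOENT: clock genuinely absent, degrade to bit clock */
		if (err != -ENOENT)
			return err;	/* includes -EPROBE_DEFER */
		byt_rt5640_quirk &= ~BYT_RT5640_MCLK_EN;
	}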
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index 84b5101..6c6b63a 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -180,6 +180,9 @@ static int skl_pcm_open(struct snd_pcm_substream *substream,
snd_pcm_set_sync(substream);
mconfig = skl_tplg_fe_get_cpr_module(dai, substream->stream);
+ if (!mconfig)
+ return -EINVAL;
+
skl_tplg_d0i3_get(skl, mconfig->d0i3_caps);
return 0;
diff --git a/sound/soc/intel/skylake/skl-sst.c b/sound/soc/intel/skylake/skl-sst.c
index 8fc3178..b30bd38 100644
--- a/sound/soc/intel/skylake/skl-sst.c
+++ b/sound/soc/intel/skylake/skl-sst.c
@@ -515,6 +515,9 @@ EXPORT_SYMBOL_GPL(skl_sst_init_fw);
void skl_sst_dsp_cleanup(struct device *dev, struct skl_sst *ctx)
{
+
+ if (ctx->dsp->fw)
+ release_firmware(ctx->dsp->fw);
skl_clear_module_table(ctx->dsp);
skl_freeup_uuid_list(ctx);
skl_ipc_free(&ctx->ipc);
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 4bd68de..99b5b08 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -1030,10 +1030,8 @@ static int __rsnd_kctrl_new(struct rsnd_mod *mod,
return -ENOMEM;
ret = snd_ctl_add(card, kctrl);
- if (ret < 0) {
- snd_ctl_free_one(kctrl);
+ if (ret < 0)
return ret;
- }
cfg->update = update;
cfg->card = card;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index f1901bb..baa1afa 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1748,6 +1748,7 @@ static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
component->init = aux_dev->init;
component->auxiliary = 1;
+ list_add(&component->card_aux_list, &card->aux_comp_list);
return 0;
@@ -1758,16 +1759,14 @@ err_defer:
static int soc_probe_aux_devices(struct snd_soc_card *card)
{
- struct snd_soc_component *comp;
+ struct snd_soc_component *comp, *tmp;
int order;
int ret;
for (order = SND_SOC_COMP_ORDER_FIRST; order <= SND_SOC_COMP_ORDER_LAST;
order++) {
- list_for_each_entry(comp, &card->component_dev_list, card_list) {
- if (!comp->auxiliary)
- continue;
-
+ list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
+ card_aux_list) {
if (comp->driver->probe_order == order) {
ret = soc_probe_component(card, comp);
if (ret < 0) {
@@ -1776,6 +1775,7 @@ static int soc_probe_aux_devices(struct snd_soc_card *card)
comp->name, ret);
return ret;
}
+ list_del(&comp->card_aux_list);
}
}
}
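The switch to the _safe iterator matters because the loop body now deletes the current entry; a generic sketch (probe_one() is a hypothetical stand-in for soc_probe_component()):

	struct snd_soc_component *comp, *tmp;

	list_for_each_entry_safe(comp, tmp, &card->aux_comp_list,
				 card_aux_list) {
		if (!probe_one(comp))
			/* safe: the next node is already cached in @tmp */
			list_del(&comp->card_aux_list);
	}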
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e7a1eaa..6aba140 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2184,9 +2184,11 @@ static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
- case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_STOP;
break;
+ case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
+ fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PAUSED;
+ break;
}
out:
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index 65670b2..fbfb1fa 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -514,13 +514,12 @@ static void remove_widget(struct snd_soc_component *comp,
== SND_SOC_TPLG_TYPE_MIXER)
kfree(kcontrol->tlv.p);
- snd_ctl_remove(card, kcontrol);
-
/* Private value is used as struct soc_mixer_control
* for volume mixers or soc_bytes_ext for bytes
* controls.
*/
kfree((void *)kcontrol->private_value);
+ snd_ctl_remove(card, kcontrol);
}
kfree(w->kcontrol_news);
}
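A short sketch of why the reorder above is needed, assuming snd_ctl_remove() ultimately frees @kcontrol (so any later access to it would be a use-after-free):

	kfree((void *)kcontrol->private_value); /* kcontrol still valid here */
	snd_ctl_remove(card, kcontrol);         /* last touch: frees kcontrol */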
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index b3fd2382..eb4b9f7 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1135,6 +1135,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x045E, 0x076F): /* MS Lifecam HD-6000 */
case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
+ case USB_ID(0x047F, 0x02F7): /* Plantronics BT-600 */
case USB_ID(0x047F, 0x0415): /* Plantronics BT-300 */
case USB_ID(0x047F, 0xAA05): /* Plantronics DA45 */
case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
diff --git a/tools/lib/subcmd/parse-options.c b/tools/lib/subcmd/parse-options.c
index 3284bb1..8aad811 100644
--- a/tools/lib/subcmd/parse-options.c
+++ b/tools/lib/subcmd/parse-options.c
@@ -213,6 +213,9 @@ static int get_value(struct parse_opt_ctx_t *p,
else
err = get_arg(p, opt, flags, (const char **)opt->value);
+ if (opt->set)
+ *(bool *)opt->set = true;
+
/* PARSE_OPT_NOEMPTY: Allow NULL but disallow empty string. */
if (opt->flags & PARSE_OPT_NOEMPTY) {
const char *val = *(const char **)opt->value;
diff --git a/tools/lib/subcmd/parse-options.h b/tools/lib/subcmd/parse-options.h
index 8866ac4..11c3be3 100644
--- a/tools/lib/subcmd/parse-options.h
+++ b/tools/lib/subcmd/parse-options.h
@@ -137,6 +137,11 @@ struct option {
{ .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
.value = check_vtype(v, const char **), (a), .help = (h), \
.flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
+#define OPT_STRING_OPTARG_SET(s, l, v, os, a, h, d) \
+ { .type = OPTION_STRING, .short_name = (s), .long_name = (l), \
+ .value = check_vtype(v, const char **), (a), .help = (h), \
+ .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d), \
+ .set = check_vtype(os, bool *)}
#define OPT_STRING_NOEMPTY(s, l, v, a, h) { .type = OPTION_STRING, .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
#define OPT_DATE(s, l, v, h) \
{ .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
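A usage sketch for the new macro, as a hypothetical option-table entry (names are illustrative; it records that the option was given at all, even without an argument):

	static bool switch_output_set;
	static const char *switch_output;

	struct option opts[] = {
		OPT_STRING_OPTARG_SET('S', "switch-output", &switch_output,
				      &switch_output_set, "signal",
				      "switch output on SIGUSR2", "signal"),
		OPT_END()
	};
	/* after parsing: switch_output_set is true whenever the option
	 * appeared, with or without "=<arg>"; switch_output holds the
	 * argument or the default "signal" */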
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index 14a4f62..f2ea780 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -831,6 +831,7 @@ static void free_arg(struct print_arg *arg)
free_flag_sym(arg->symbol.symbols);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
free_arg(arg->hex.field);
free_arg(arg->hex.size);
break;
@@ -2629,10 +2630,11 @@ out_free:
}
static enum event_type
-process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+process_hex_common(struct event_format *event, struct print_arg *arg,
+ char **tok, enum print_arg_type type)
{
memset(arg, 0, sizeof(*arg));
- arg->type = PRINT_HEX;
+ arg->type = type;
if (alloc_and_process_delim(event, ",", &arg->hex.field))
goto out;
@@ -2651,6 +2653,19 @@ out:
}
static enum event_type
+process_hex(struct event_format *event, struct print_arg *arg, char **tok)
+{
+ return process_hex_common(event, arg, tok, PRINT_HEX);
+}
+
+static enum event_type
+process_hex_str(struct event_format *event, struct print_arg *arg,
+ char **tok)
+{
+ return process_hex_common(event, arg, tok, PRINT_HEX_STR);
+}
+
+static enum event_type
process_int_array(struct event_format *event, struct print_arg *arg, char **tok)
{
memset(arg, 0, sizeof(*arg));
@@ -3009,6 +3024,10 @@ process_function(struct event_format *event, struct print_arg *arg,
free_token(token);
return process_hex(event, arg, tok);
}
+ if (strcmp(token, "__print_hex_str") == 0) {
+ free_token(token);
+ return process_hex_str(event, arg, tok);
+ }
if (strcmp(token, "__print_array") == 0) {
free_token(token);
return process_int_array(event, arg, tok);
@@ -3547,6 +3566,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
case PRINT_SYMBOL:
case PRINT_INT_ARRAY:
case PRINT_HEX:
+ case PRINT_HEX_STR:
break;
case PRINT_TYPE:
val = eval_num_arg(data, size, event, arg->typecast.item);
@@ -3962,6 +3982,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
if (arg->hex.field->type == PRINT_DYNAMIC_ARRAY) {
unsigned long offset;
offset = pevent_read_number(pevent,
@@ -3981,7 +4002,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
}
len = eval_num_arg(data, size, event, arg->hex.size);
for (i = 0; i < len; i++) {
- if (i)
+ if (i && arg->type == PRINT_HEX)
trace_seq_putc(s, ' ');
trace_seq_printf(s, "%02x", hex[i]);
}
@@ -5727,6 +5748,13 @@ static void print_args(struct print_arg *args)
print_args(args->hex.size);
printf(")");
break;
+ case PRINT_HEX_STR:
+ printf("__print_hex_str(");
+ print_args(args->hex.field);
+ printf(", ");
+ print_args(args->hex.size);
+ printf(")");
+ break;
case PRINT_INT_ARRAY:
printf("__print_array(");
print_args(args->int_array.field);
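The only rendering difference between the two helpers is the separator, handled by the `i && arg->type == PRINT_HEX` check above; for the bytes {0x0a, 0x1b} the outputs differ as sketched:

	/* __print_hex()     -> "0a 1b"  (space-separated)
	 * __print_hex_str() -> "0a1b"   (contiguous string) */
	for (i = 0; i < len; i++) {
		if (i && arg->type == PRINT_HEX) /* no space for HEX_STR */
			trace_seq_putc(s, ' ');
		trace_seq_printf(s, "%02x", hex[i]);
	}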
diff --git a/tools/lib/traceevent/event-parse.h b/tools/lib/traceevent/event-parse.h
index 7aae746..74cecba 100644
--- a/tools/lib/traceevent/event-parse.h
+++ b/tools/lib/traceevent/event-parse.h
@@ -292,6 +292,7 @@ enum print_arg_type {
PRINT_FUNC,
PRINT_BITMASK,
PRINT_DYNAMIC_ARRAY_LEN,
+ PRINT_HEX_STR,
};
struct print_arg {
diff --git a/tools/lib/traceevent/plugin_sched_switch.c b/tools/lib/traceevent/plugin_sched_switch.c
index f1ce600..ec30c2f 100644
--- a/tools/lib/traceevent/plugin_sched_switch.c
+++ b/tools/lib/traceevent/plugin_sched_switch.c
@@ -111,7 +111,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld ", val);
if (pevent_get_field_val(s, event, "prev_prio", record, &val, 0) == 0)
- trace_seq_printf(s, "[%lld] ", val);
+ trace_seq_printf(s, "[%d] ", (int) val);
if (pevent_get_field_val(s, event, "prev_state", record, &val, 0) == 0)
write_state(s, val);
@@ -129,7 +129,7 @@ static int sched_switch_handler(struct trace_seq *s,
trace_seq_printf(s, "%lld", val);
if (pevent_get_field_val(s, event, "next_prio", record, &val, 0) == 0)
- trace_seq_printf(s, " [%lld]", val);
+ trace_seq_printf(s, " [%d]", (int) val);
return 0;
}
diff --git a/tools/perf/Documentation/perf-record.txt b/tools/perf/Documentation/perf-record.txt
index 27fc361..5054d91 100644
--- a/tools/perf/Documentation/perf-record.txt
+++ b/tools/perf/Documentation/perf-record.txt
@@ -430,6 +430,10 @@ that gets then processed, possibly via a perf script, to decide if that
particular perf.data snapshot should be kept or not.
Implies --timestamp-filename, --no-buildid and --no-buildid-cache.
+The reason for the latter two is to reduce the data file switching
+overhead. You can still switch them on with:
+
+ --switch-output --no-no-buildid --no-no-buildid-cache
--dry-run::
Parse options then exit. --dry-run can be used to detect errors in cmdline
diff --git a/tools/perf/Makefile.perf b/tools/perf/Makefile.perf
index 8fc2482..8bb16aa 100644
--- a/tools/perf/Makefile.perf
+++ b/tools/perf/Makefile.perf
@@ -704,9 +704,9 @@ install-tests: all install-gtk
$(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
$(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
-install-bin: install-tools install-tests
+install-bin: install-tools install-tests install-traceevent-plugins
-install: install-bin try-install-man install-traceevent-plugins
+install: install-bin try-install-man
install-python_ext:
$(PYTHON_WORD) util/setup.py --quiet install --root='/$(DESTDIR_SQ)'
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 35a02f8..915869e 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -655,7 +655,6 @@ static const struct {
{ "__GFP_RECLAIM", "R" },
{ "__GFP_DIRECT_RECLAIM", "DR" },
{ "__GFP_KSWAPD_RECLAIM", "KR" },
- { "__GFP_OTHER_NODE", "ON" },
};
static size_t max_gfp_len;
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 74d6a03..4ec10e9 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -1405,7 +1405,7 @@ static bool dry_run;
* perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
* using pipes, etc.
*/
-struct option __record_options[] = {
+static struct option __record_options[] = {
OPT_CALLBACK('e', "event", &record.evlist, "event",
"event selector. use 'perf list' to list available events",
parse_events_option),
@@ -1636,7 +1636,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
* overhead. Still generate buildid if they are required
* explicitly using
*
- * perf record --signal-trigger --no-no-buildid \
+ * perf record --switch-output --no-no-buildid \
* --no-no-buildid-cache
*
* Following code equals to:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
index d53e706..5b134b0 100644
--- a/tools/perf/builtin-sched.c
+++ b/tools/perf/builtin-sched.c
@@ -209,6 +209,7 @@ struct perf_sched {
u64 skipped_samples;
const char *time_str;
struct perf_time_interval ptime;
+ struct perf_time_interval hist_time;
};
/* per thread run time data */
@@ -2460,6 +2461,11 @@ static int timehist_sched_change_event(struct perf_tool *tool,
timehist_print_sample(sched, sample, &al, thread, t);
out:
+ if (sched->hist_time.start == 0 && t >= ptime->start)
+ sched->hist_time.start = t;
+ if (ptime->end == 0 || t <= ptime->end)
+ sched->hist_time.end = t;
+
if (tr) {
/* time of this sched_switch event becomes last time task seen */
tr->last_time = sample->time;
@@ -2624,6 +2630,7 @@ static void timehist_print_summary(struct perf_sched *sched,
struct thread *t;
struct thread_runtime *r;
int i;
+ u64 hist_time = sched->hist_time.end - sched->hist_time.start;
memset(&totals, 0, sizeof(totals));
@@ -2665,7 +2672,7 @@ static void timehist_print_summary(struct perf_sched *sched,
totals.sched_count += r->run_stats.n;
printf(" CPU %2d idle for ", i);
print_sched_time(r->total_run_time, 6);
- printf(" msec\n");
+ printf(" msec (%6.2f%%)\n", 100.0 * r->total_run_time / hist_time);
} else
printf(" CPU %2d idle entire time window\n", i);
}
@@ -2701,12 +2708,16 @@ static void timehist_print_summary(struct perf_sched *sched,
printf("\n"
" Total number of unique tasks: %" PRIu64 "\n"
- "Total number of context switches: %" PRIu64 "\n"
- " Total run time (msec): ",
+ "Total number of context switches: %" PRIu64 "\n",
totals.task_count, totals.sched_count);
+ printf(" Total run time (msec): ");
print_sched_time(totals.total_run_time, 2);
printf("\n");
+
+ printf(" Total scheduling time (msec): ");
+ print_sched_time(hist_time, 2);
+ printf(" (x %d)\n", sched->max_cpu);
}
typedef int (*sched_handler)(struct perf_tool *tool,
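A worked example of the new idle percentage (made-up numbers; the units cancel, so nsec vs. msec does not matter for the ratio):

	u64 hist_time      = 10000;	/* histogram window: end - start */
	u64 total_run_time = 2500;	/* time this CPU sat idle */
	double pct = 100.0 * total_run_time / hist_time;	/* 25.00 */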
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d281ae2..6a6f44d 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -163,7 +163,7 @@ static struct map *kernel_get_module_map(const char *module)
/* A file path -- this is an offline module */
if (module && strchr(module, '/'))
- return machine__findnew_module_map(host_machine, 0, module);
+ return dso__new_map(module);
if (!module)
module = "kernel";
@@ -173,6 +173,7 @@ static struct map *kernel_get_module_map(const char *module)
if (strncmp(pos->dso->short_name + 1, module,
pos->dso->short_name_len - 2) == 0 &&
module[pos->dso->short_name_len - 2] == '\0') {
+ map__get(pos);
return pos;
}
}
@@ -188,15 +189,6 @@ struct map *get_target_map(const char *target, bool user)
return kernel_get_module_map(target);
}
-static void put_target_map(struct map *map, bool user)
-{
- if (map && user) {
- /* Only the user map needs to be released */
- map__put(map);
- }
-}
-
-
static int convert_exec_to_group(const char *exec, char **result)
{
char *ptr1, *ptr2, *exec_copy;
@@ -268,21 +260,6 @@ static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
}
/*
- * NOTE:
- * '.gnu.linkonce.this_module' section of kernel module elf directly
- * maps to 'struct module' from linux/module.h. This section contains
- * actual module name which will be used by kernel after loading it.
- * But, we cannot use 'struct module' here since linux/module.h is not
- * exposed to user-space. Offset of 'name' has remained same from long
- * time, so hardcoding it here.
- */
-#ifdef __LP64__
-#define MOD_NAME_OFFSET 24
-#else
-#define MOD_NAME_OFFSET 12
-#endif
-
-/*
* @module can be module name of module file path. In case of path,
* inspect elf and find out what is actual module name.
* Caller has to free mod_name after using it.
@@ -296,6 +273,7 @@ static char *find_module_name(const char *module)
Elf_Data *data;
Elf_Scn *sec;
char *mod_name = NULL;
+ int name_offset;
fd = open(module, O_RDONLY);
if (fd < 0)
@@ -317,7 +295,21 @@ static char *find_module_name(const char *module)
if (!data || !data->d_buf)
goto ret_err;
- mod_name = strdup((char *)data->d_buf + MOD_NAME_OFFSET);
+ /*
+ * NOTE:
+ * The '.gnu.linkonce.this_module' section of a kernel module ELF
+ * maps directly to 'struct module' from linux/module.h. This section
+ * contains the actual module name, which the kernel uses after
+ * loading it. But we cannot use 'struct module' here since
+ * linux/module.h is not exposed to user-space. The offset of 'name'
+ * has remained the same for a long time, so it is hardcoded here.
+ */
+ if (ehdr.e_ident[EI_CLASS] == ELFCLASS32)
+ name_offset = 12;
+ else /* expect ELFCLASS64 by default */
+ name_offset = 24;
+
+ mod_name = strdup((char *)data->d_buf + name_offset);
ret_err:
elf_end(elf);
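The 12/24 offsets follow from the historical layout of 'struct module' (enum module_state state; struct list_head list; char name[...];), and are now keyed off the ELF class of the .ko itself rather than the class perf was built for:

	/* 32-bit: 4 (enum)           + 2*4 (list_head) = 12
	 * 64-bit: 4 (enum) + 4 (pad) + 2*8 (list_head) = 24 */
	int name_offset = (ehdr.e_ident[EI_CLASS] == ELFCLASS32) ? 12 : 24;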
@@ -412,7 +404,7 @@ static int find_alternative_probe_point(struct debuginfo *dinfo,
}
out:
- put_target_map(map, uprobes);
+ map__put(map);
return ret;
}
@@ -618,6 +610,67 @@ error:
return ret ? : -ENOENT;
}
+/* Adjust symbol name and address */
+static int post_process_probe_trace_point(struct probe_trace_point *tp,
+ struct map *map, unsigned long offs)
+{
+ struct symbol *sym;
+ u64 addr = tp->address + tp->offset - offs;
+
+ sym = map__find_symbol(map, addr);
+ if (!sym)
+ return -ENOENT;
+
+ if (strcmp(sym->name, tp->symbol)) {
+ /* If we have no realname, use symbol for it */
+ if (!tp->realname)
+ tp->realname = tp->symbol;
+ else
+ free(tp->symbol);
+ tp->symbol = strdup(sym->name);
+ if (!tp->symbol)
+ return -ENOMEM;
+ }
+ tp->offset = addr - sym->start;
+ tp->address -= offs;
+
+ return 0;
+}
+
+/*
+ * Rename DWARF symbols to ELF symbols -- gcc sometimes optimizes functions
+ * and generates new symbols with suffixes such as .constprop.N or
+ * .isra.N. Since those symbols are not recorded in DWARF, we have to
+ * find the correct generated symbols in the offline ELF binary.
+ * For an online kernel or uprobes we don't need this, because those
+ * addresses are rebased on _text or are already section-relative.
+ */
+static int
+post_process_offline_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *pathname)
+{
+ struct map *map;
+ unsigned long stext = 0;
+ int i, ret = 0;
+
+ /* Prepare a map for offline binary */
+ map = dso__new_map(pathname);
+ if (!map || get_text_start_address(pathname, &stext) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", pathname);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, stext);
+ if (ret < 0)
+ break;
+ }
+ map__put(map);
+
+ return ret;
+}
+
static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
int ntevs, const char *exec)
{
@@ -645,18 +698,31 @@ static int add_exec_to_probe_trace_events(struct probe_trace_event *tevs,
return ret;
}
-static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
- int ntevs, const char *module)
+static int
+post_process_module_probe_trace_events(struct probe_trace_event *tevs,
+ int ntevs, const char *module,
+ struct debuginfo *dinfo)
{
+ Dwarf_Addr text_offs = 0;
int i, ret = 0;
char *mod_name = NULL;
+ struct map *map;
if (!module)
return 0;
- mod_name = find_module_name(module);
+ map = get_target_map(module, false);
+ if (!map || debuginfo__get_text_offset(dinfo, &text_offs, true) < 0) {
+ pr_warning("Failed to get ELF symbols for %s\n", module);
+ return -EINVAL;
+ }
+ mod_name = find_module_name(module);
for (i = 0; i < ntevs; i++) {
+ ret = post_process_probe_trace_point(&tevs[i].point,
+ map, (unsigned long)text_offs);
+ if (ret < 0)
+ break;
tevs[i].point.module =
strdup(mod_name ? mod_name : module);
if (!tevs[i].point.module) {
@@ -666,6 +732,8 @@ static int add_module_to_probe_trace_events(struct probe_trace_event *tevs,
}
free(mod_name);
+ map__put(map);
+
return ret;
}
@@ -679,7 +747,8 @@ post_process_kernel_probe_trace_events(struct probe_trace_event *tevs,
/* Skip post process if the target is an offline kernel */
if (symbol_conf.ignore_vmlinux_buildid)
- return 0;
+ return post_process_offline_probe_trace_events(tevs, ntevs,
+ symbol_conf.vmlinux_name);
reloc_sym = kernel_get_ref_reloc_sym();
if (!reloc_sym) {
@@ -722,7 +791,7 @@ arch__post_process_probe_trace_events(struct perf_probe_event *pev __maybe_unuse
static int post_process_probe_trace_events(struct perf_probe_event *pev,
struct probe_trace_event *tevs,
int ntevs, const char *module,
- bool uprobe)
+ bool uprobe, struct debuginfo *dinfo)
{
int ret;
@@ -730,7 +799,8 @@ static int post_process_probe_trace_events(struct perf_probe_event *pev,
ret = add_exec_to_probe_trace_events(tevs, ntevs, module);
else if (module)
/* Currently ref_reloc_sym based probe is not for drivers */
- ret = add_module_to_probe_trace_events(tevs, ntevs, module);
+ ret = post_process_module_probe_trace_events(tevs, ntevs,
+ module, dinfo);
else
ret = post_process_kernel_probe_trace_events(tevs, ntevs);
@@ -774,30 +844,27 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
}
}
- debuginfo__delete(dinfo);
-
if (ntevs > 0) { /* Succeeded to find trace events */
pr_debug("Found %d probe_trace_events.\n", ntevs);
ret = post_process_probe_trace_events(pev, *tevs, ntevs,
- pev->target, pev->uprobes);
+ pev->target, pev->uprobes, dinfo);
if (ret < 0 || ret == ntevs) {
+ pr_debug("Post processing failed or all events are skipped. (%d)\n", ret);
clear_probe_trace_events(*tevs, ntevs);
zfree(tevs);
+ ntevs = 0;
}
- if (ret != ntevs)
- return ret < 0 ? ret : ntevs;
- ntevs = 0;
- /* Fall through */
}
+ debuginfo__delete(dinfo);
+
if (ntevs == 0) { /* No error but failed to find probe point. */
pr_warning("Probe point '%s' not found.\n",
synthesize_perf_probe_point(&pev->point));
return -ENOENT;
- }
- /* Error path : ntevs < 0 */
- pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
- if (ntevs < 0) {
+ } else if (ntevs < 0) {
+ /* Error path : ntevs < 0 */
+ pr_debug("An error occurred in debuginfo analysis (%d).\n", ntevs);
if (ntevs == -EBADF)
pr_warning("Warning: No dwarf info found in the vmlinux - "
"please rebuild kernel with CONFIG_DEBUG_INFO=y.\n");
@@ -2869,7 +2936,7 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
}
out:
- put_target_map(map, pev->uprobes);
+ map__put(map);
free(syms);
return ret;
@@ -3362,10 +3429,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
return ret;
/* Get a symbol map */
- if (user)
- map = dso__new_map(target);
- else
- map = kernel_get_module_map(target);
+ map = get_target_map(target, user);
if (!map) {
pr_err("Failed to get a map for %s\n", (target) ? : "kernel");
return -EINVAL;
@@ -3397,9 +3461,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
}
end:
- if (user) {
- map__put(map);
- }
+ map__put(map);
exit_probe_symbol_maps();
return ret;
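The refcount contract after this patch, sketched: get_target_map() always returns a referenced map (kernel_get_module_map() now takes a reference via map__get()), so every caller pairs it with an unconditional map__put():

	struct map *map = get_target_map(target, user);

	if (!map)
		return -EINVAL;
	/* ... use the map ... */
	map__put(map);	/* no put_target_map()/user special-casing needed */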
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index df4debe..0d9d6e0 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -1501,7 +1501,8 @@ int debuginfo__find_available_vars_at(struct debuginfo *dbg,
}
/* For the kernel module, we need a special code to get a DIE */
-static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset)
{
int n, i;
Elf32_Word shndx;
@@ -1530,6 +1531,8 @@ static int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs)
if (!shdr)
return -ENOENT;
*offs = shdr->sh_addr;
+ if (adjust_offset)
+ *offs -= shdr->sh_offset;
}
}
return 0;
@@ -1543,16 +1546,12 @@ int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
Dwarf_Addr _addr = 0, baseaddr = 0;
const char *fname = NULL, *func = NULL, *basefunc = NULL, *tmp;
int baseline = 0, lineno = 0, ret = 0;
- bool reloc = false;
-retry:
+ /* We always need to relocate the address for aranges */
+ if (debuginfo__get_text_offset(dbg, &baseaddr, false) == 0)
+ addr += baseaddr;
/* Find cu die */
if (!dwarf_addrdie(dbg->dbg, (Dwarf_Addr)addr, &cudie)) {
- if (!reloc && debuginfo__get_text_offset(dbg, &baseaddr) == 0) {
- addr += baseaddr;
- reloc = true;
- goto retry;
- }
pr_warning("Failed to find debug information for address %lx\n",
addr);
ret = -EINVAL;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index f1d8558..2956c51 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -46,6 +46,9 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
int debuginfo__find_probe_point(struct debuginfo *dbg, unsigned long addr,
struct perf_probe_point *ppt);
+int debuginfo__get_text_offset(struct debuginfo *dbg, Dwarf_Addr *offs,
+ bool adjust_offset);
+
/* Find a line range */
int debuginfo__find_line_range(struct debuginfo *dbg, struct line_range *lr);
diff --git a/tools/perf/util/scripting-engines/trace-event-perl.c b/tools/perf/util/scripting-engines/trace-event-perl.c
index e55a132..e74adfb 100644
--- a/tools/perf/util/scripting-engines/trace-event-perl.c
+++ b/tools/perf/util/scripting-engines/trace-event-perl.c
@@ -217,6 +217,7 @@ static void define_event_symbols(struct event_format *event,
cur_field_name);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 089438d..581e0ef 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -236,6 +236,7 @@ static void define_event_symbols(struct event_format *event,
cur_field_name);
break;
case PRINT_HEX:
+ case PRINT_HEX_STR:
define_event_symbols(event, ev_name, args->hex.field);
define_event_symbols(event, ev_name, args->hex.size);
break;
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 99400b0..adbc6c0 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -537,6 +537,12 @@ int sysfs__read_build_id(const char *filename, void *build_id, size_t size)
break;
} else {
int n = namesz + descsz;
+
+ if (n > (int)sizeof(bf)) {
+ n = sizeof(bf);
+ pr_debug("%s: truncating reading of build id in sysfs file %s: n_namesz=%u, n_descsz=%u.\n",
+ __func__, filename, nhdr.n_namesz, nhdr.n_descsz);
+ }
if (read(fd, bf, n) != n)
break;
}
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 71b0589..831022b 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -90,7 +90,7 @@ ifdef INSTALL_PATH
done;
@# Ask all targets to emit their test scripts
- echo "#!/bin/bash" > $(ALL_SCRIPT)
+ echo "#!/bin/sh" > $(ALL_SCRIPT)
echo "cd \$$(dirname \$$0)" >> $(ALL_SCRIPT)
echo "ROOT=\$$PWD" >> $(ALL_SCRIPT)
diff --git a/tools/testing/selftests/bpf/.gitignore b/tools/testing/selftests/bpf/.gitignore
index 071431b..d3b1c9b 100644
--- a/tools/testing/selftests/bpf/.gitignore
+++ b/tools/testing/selftests/bpf/.gitignore
@@ -1,3 +1,4 @@
test_verifier
test_maps
test_lru_map
+test_lpm_map
diff --git a/tools/testing/selftests/bpf/Makefile b/tools/testing/selftests/bpf/Makefile
index 7a5f245..769a6cb 100644
--- a/tools/testing/selftests/bpf/Makefile
+++ b/tools/testing/selftests/bpf/Makefile
@@ -1,8 +1,8 @@
CFLAGS += -Wall -O2 -I../../../../usr/include
-test_objs = test_verifier test_maps test_lru_map
+test_objs = test_verifier test_tag test_maps test_lru_map test_lpm_map
-TEST_PROGS := test_verifier test_maps test_lru_map test_kmod.sh
+TEST_PROGS := $(test_objs) test_kmod.sh
TEST_FILES := $(test_objs)
all: $(test_objs)
diff --git a/tools/testing/selftests/bpf/test_kmod.sh b/tools/testing/selftests/bpf/test_kmod.sh
index 92e627a..6d58cca 100755
--- a/tools/testing/selftests/bpf/test_kmod.sh
+++ b/tools/testing/selftests/bpf/test_kmod.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
SRC_TREE=../../../../
diff --git a/tools/testing/selftests/bpf/test_lpm_map.c b/tools/testing/selftests/bpf/test_lpm_map.c
new file mode 100644
index 0000000..26775c0
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_lpm_map.c
@@ -0,0 +1,358 @@
+/*
+ * Randomized tests for eBPF longest-prefix-match maps
+ *
+ * This program runs randomized tests against the lpm-bpf-map. It implements a
+ * "Trivial Longest Prefix Match" (tlpm) based on simple, linear, singly linked
+ * lists. The implementation should be pretty straightforward.
+ *
+ * Based on tlpm, this inserts randomized data into bpf-lpm-maps and verifies
+ * the trie-based bpf-map implementation behaves the same way as tlpm.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include <inttypes.h>
+#include <linux/bpf.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <arpa/inet.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include "bpf_sys.h"
+#include "bpf_util.h"
+
+struct tlpm_node {
+ struct tlpm_node *next;
+ size_t n_bits;
+ uint8_t key[];
+};
+
+static struct tlpm_node *tlpm_add(struct tlpm_node *list,
+ const uint8_t *key,
+ size_t n_bits)
+{
+ struct tlpm_node *node;
+ size_t n;
+
+ /* add new entry with @key/@n_bits to @list and return new head */
+
+ n = (n_bits + 7) / 8;
+ node = malloc(sizeof(*node) + n);
+ assert(node);
+
+ node->next = list;
+ node->n_bits = n_bits;
+ memcpy(node->key, key, n);
+
+ return node;
+}
+
+static void tlpm_clear(struct tlpm_node *list)
+{
+ struct tlpm_node *node;
+
+ /* free all entries in @list */
+
+ while ((node = list)) {
+ list = list->next;
+ free(node);
+ }
+}
+
+static struct tlpm_node *tlpm_match(struct tlpm_node *list,
+ const uint8_t *key,
+ size_t n_bits)
+{
+ struct tlpm_node *best = NULL;
+ size_t i;
+
+ /* Perform longest prefix-match on @key/@n_bits. That is, iterate all
+ * entries and match each prefix against @key. Remember the "best"
+ * entry we find (i.e., the longest prefix that matches) and return it
+ * to the caller when done.
+ */
+
+ for ( ; list; list = list->next) {
+ for (i = 0; i < n_bits && i < list->n_bits; ++i) {
+ if ((key[i / 8] & (1 << (7 - i % 8))) !=
+ (list->key[i / 8] & (1 << (7 - i % 8))))
+ break;
+ }
+
+ if (i >= list->n_bits) {
+ if (!best || i > best->n_bits)
+ best = list;
+ }
+ }
+
+ return best;
+}
+
+static void test_lpm_basic(void)
+{
+ struct tlpm_node *list = NULL, *t1, *t2;
+
+ /* very basic, static tests to verify tlpm works as expected */
+
+ assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+
+ t1 = list = tlpm_add(list, (uint8_t[]){ 0xff }, 8);
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0x00 }, 16));
+ assert(!tlpm_match(list, (uint8_t[]){ 0x7f }, 8));
+ assert(!tlpm_match(list, (uint8_t[]){ 0xfe }, 8));
+ assert(!tlpm_match(list, (uint8_t[]){ 0xff }, 7));
+
+ t2 = list = tlpm_add(list, (uint8_t[]){ 0xff, 0xff }, 16);
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff }, 8));
+ assert(t2 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 16));
+ assert(t1 == tlpm_match(list, (uint8_t[]){ 0xff, 0xff }, 15));
+ assert(!tlpm_match(list, (uint8_t[]){ 0x7f, 0xff }, 16));
+
+ tlpm_clear(list);
+}
+
+static void test_lpm_order(void)
+{
+ struct tlpm_node *t1, *t2, *l1 = NULL, *l2 = NULL;
+ size_t i, j;
+
+ /* Verify the tlpm implementation works correctly regardless of the
+ * order of entries. Insert a random set of entries into @l1, and copy
+ * the same data in reverse order into @l2. Then verify a lookup of
+ * random keys will yield the same result in both sets.
+ */
+
+ for (i = 0; i < (1 << 12); ++i)
+ l1 = tlpm_add(l1, (uint8_t[]){
+ rand() % 0xff,
+ rand() % 0xff,
+ }, rand() % 16 + 1);
+
+ for (t1 = l1; t1; t1 = t1->next)
+ l2 = tlpm_add(l2, t1->key, t1->n_bits);
+
+ for (i = 0; i < (1 << 8); ++i) {
+ uint8_t key[] = { rand() % 0xff, rand() % 0xff };
+
+ t1 = tlpm_match(l1, key, 16);
+ t2 = tlpm_match(l2, key, 16);
+
+ assert(!t1 == !t2);
+ if (t1) {
+ assert(t1->n_bits == t2->n_bits);
+ for (j = 0; j < t1->n_bits; ++j)
+ assert((t1->key[j / 8] & (1 << (7 - j % 8))) ==
+ (t2->key[j / 8] & (1 << (7 - j % 8))));
+ }
+ }
+
+ tlpm_clear(l1);
+ tlpm_clear(l2);
+}
+
+static void test_lpm_map(int keysize)
+{
+ size_t i, j, n_matches, n_nodes, n_lookups;
+ struct tlpm_node *t, *list = NULL;
+ struct bpf_lpm_trie_key *key;
+ uint8_t *data, *value;
+ int r, map;
+
+ /* Compare behavior of tlpm vs. bpf-lpm. Create a randomized set of
+ * prefixes and insert it into both tlpm and bpf-lpm. Then run some
+ * randomized lookups and verify both maps return the same result.
+ */
+
+ n_matches = 0;
+ n_nodes = 1 << 8;
+ n_lookups = 1 << 16;
+
+ data = alloca(keysize);
+ memset(data, 0, keysize);
+
+ value = alloca(keysize + 1);
+ memset(value, 0, keysize + 1);
+
+ key = alloca(sizeof(*key) + keysize);
+ memset(key, 0, sizeof(*key) + keysize);
+
+ map = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE,
+ sizeof(*key) + keysize,
+ keysize + 1,
+ 4096,
+ BPF_F_NO_PREALLOC);
+ assert(map >= 0);
+
+ for (i = 0; i < n_nodes; ++i) {
+ for (j = 0; j < keysize; ++j)
+ value[j] = rand() & 0xff;
+ value[keysize] = rand() % (8 * keysize + 1);
+
+ list = tlpm_add(list, value, value[keysize]);
+
+ key->prefixlen = value[keysize];
+ memcpy(key->data, value, keysize);
+ r = bpf_map_update(map, key, value, 0);
+ assert(!r);
+ }
+
+ for (i = 0; i < n_lookups; ++i) {
+ for (j = 0; j < keysize; ++j)
+ data[j] = rand() & 0xff;
+
+ t = tlpm_match(list, data, 8 * keysize);
+
+ key->prefixlen = 8 * keysize;
+ memcpy(key->data, data, keysize);
+ r = bpf_map_lookup(map, key, value);
+ assert(!r || errno == ENOENT);
+ assert(!t == !!r);
+
+ if (t) {
+ ++n_matches;
+ assert(t->n_bits == value[keysize]);
+ for (j = 0; j < t->n_bits; ++j)
+ assert((t->key[j / 8] & (1 << (7 - j % 8))) ==
+ (value[j / 8] & (1 << (7 - j % 8))));
+ }
+ }
+
+ close(map);
+ tlpm_clear(list);
+
+ /* With 255 random nodes in the map, we are pretty likely to match
+ * something on every lookup. For statistics, use this:
+ *
+ * printf(" nodes: %zu\n"
+ * "lookups: %zu\n"
+ * "matches: %zu\n", n_nodes, n_lookups, n_matches);
+ */
+}
+
+/* Test the implementation with some 'real world' examples */
+
+static void test_lpm_ipaddr(void)
+{
+ struct bpf_lpm_trie_key *key_ipv4;
+ struct bpf_lpm_trie_key *key_ipv6;
+ size_t key_size_ipv4;
+ size_t key_size_ipv6;
+ int map_fd_ipv4;
+ int map_fd_ipv6;
+ __u64 value;
+
+ key_size_ipv4 = sizeof(*key_ipv4) + sizeof(__u32);
+ key_size_ipv6 = sizeof(*key_ipv6) + sizeof(__u32) * 4;
+ key_ipv4 = alloca(key_size_ipv4);
+ key_ipv6 = alloca(key_size_ipv6);
+
+ map_fd_ipv4 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE,
+ key_size_ipv4, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd_ipv4 >= 0);
+
+ map_fd_ipv6 = bpf_map_create(BPF_MAP_TYPE_LPM_TRIE,
+ key_size_ipv6, sizeof(value),
+ 100, BPF_F_NO_PREALLOC);
+ assert(map_fd_ipv6 >= 0);
+
+ /* Fill the maps with some IPv4 and IPv6 address ranges */
+ value = 1;
+ key_ipv4->prefixlen = 16;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 2;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 3;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.128.0", key_ipv4->data);
+ assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 5;
+ key_ipv4->prefixlen = 24;
+ inet_pton(AF_INET, "192.168.1.0", key_ipv4->data);
+ assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 4;
+ key_ipv4->prefixlen = 23;
+ inet_pton(AF_INET, "192.168.0.0", key_ipv4->data);
+ assert(bpf_map_update(map_fd_ipv4, key_ipv4, &value, 0) == 0);
+
+ value = 0xdeadbeef;
+ key_ipv6->prefixlen = 64;
+ inet_pton(AF_INET6, "2a00:1450:4001:814::200e", key_ipv6->data);
+ assert(bpf_map_update(map_fd_ipv6, key_ipv6, &value, 0) == 0);
+
+ /* Set prefixlen to maximum for lookups */
+ key_ipv4->prefixlen = 32;
+ key_ipv6->prefixlen = 128;
+
+ /* Test some lookups that should come back with a value */
+ inet_pton(AF_INET, "192.168.128.23", key_ipv4->data);
+ assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0);
+ assert(value == 3);
+
+ inet_pton(AF_INET, "192.168.0.1", key_ipv4->data);
+ assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == 0);
+ assert(value == 2);
+
+ inet_pton(AF_INET6, "2a00:1450:4001:814::", key_ipv6->data);
+ assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0);
+ assert(value == 0xdeadbeef);
+
+ inet_pton(AF_INET6, "2a00:1450:4001:814::1", key_ipv6->data);
+ assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == 0);
+ assert(value == 0xdeadbeef);
+
+ /* Test some lookups that should not match any entry */
+ inet_pton(AF_INET, "10.0.0.1", key_ipv4->data);
+ assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 &&
+ errno == ENOENT);
+
+ inet_pton(AF_INET, "11.11.11.11", key_ipv4->data);
+ assert(bpf_map_lookup(map_fd_ipv4, key_ipv4, &value) == -1 &&
+ errno == ENOENT);
+
+ inet_pton(AF_INET6, "2a00:ffff::", key_ipv6->data);
+ assert(bpf_map_lookup(map_fd_ipv6, key_ipv6, &value) == -1 &&
+ errno == ENOENT);
+
+ close(map_fd_ipv4);
+ close(map_fd_ipv6);
+}
+
+int main(void)
+{
+ struct rlimit limit = { RLIM_INFINITY, RLIM_INFINITY };
+ int i, ret;
+
+ /* we want predictable, pseudo-random tests */
+ srand(0xf00ba1);
+
+ /* allow unlimited locked memory */
+ ret = setrlimit(RLIMIT_MEMLOCK, &limit);
+ if (ret < 0)
+ perror("Unable to lift memlock rlimit");
+
+ test_lpm_basic();
+ test_lpm_order();
+
+ /* Test with 8, 16, 24, 32, ... 128 bit prefix length */
+ for (i = 1; i <= 16; ++i)
+ test_lpm_map(i);
+
+ test_lpm_ipaddr();
+
+ printf("test_lpm: OK\n");
+ return 0;
+}
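A condensed sketch of the key layout the test exercises: an LPM key is a 4-byte prefix length followed by the big-endian address bytes, e.g. for 192.168.0.0/24 as in test_lpm_ipaddr():

	size_t key_size = sizeof(struct bpf_lpm_trie_key) + sizeof(__u32);
	struct bpf_lpm_trie_key *key = alloca(key_size);

	key->prefixlen = 24;
	inet_pton(AF_INET, "192.168.0.0", key->data);
	/* insert with bpf_map_update(map_fd, key, &value, 0);
	 * for lookups, set key->prefixlen to the maximum (32 here) */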
diff --git a/tools/testing/selftests/bpf/test_lru_map.c b/tools/testing/selftests/bpf/test_lru_map.c
index b13fed5..9f7bd19 100644
--- a/tools/testing/selftests/bpf/test_lru_map.c
+++ b/tools/testing/selftests/bpf/test_lru_map.c
@@ -67,21 +67,23 @@ static int map_equal(int lru_map, int expected)
return map_subset(lru_map, expected) && map_subset(expected, lru_map);
}
-static int sched_next_online(int pid, int next_to_try)
+static int sched_next_online(int pid, int *next_to_try)
{
cpu_set_t cpuset;
+ int next = *next_to_try;
+ int ret = -1;
- if (next_to_try == nr_cpus)
- return -1;
-
- while (next_to_try < nr_cpus) {
+ while (next < nr_cpus) {
CPU_ZERO(&cpuset);
- CPU_SET(next_to_try++, &cpuset);
- if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset))
+ CPU_SET(next++, &cpuset);
+ if (!sched_setaffinity(pid, sizeof(cpuset), &cpuset)) {
+ ret = 0;
break;
+ }
}
- return next_to_try;
+ *next_to_try = next;
+ return ret;
}
/* Size of the LRU map is 2
@@ -96,11 +98,12 @@ static void test_lru_sanity0(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
int lru_map_fd, expected_map_fd;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
lru_map_fd = create_map(map_type, map_flags, 2 * nr_cpus);
@@ -183,6 +186,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
if (map_flags & BPF_F_NO_COMMON_LRU)
/* The per-cpu LRU list (i.e. each cpu has its own LRU
@@ -196,7 +200,7 @@ static void test_lru_sanity1(int map_type, int map_flags, unsigned int tgt_free)
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -262,6 +266,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
if (map_flags & BPF_F_NO_COMMON_LRU)
/* The per-cpu LRU list (i.e. each cpu has its own LRU
@@ -275,7 +280,7 @@ static void test_lru_sanity2(int map_type, int map_flags, unsigned int tgt_free)
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -370,11 +375,12 @@ static void test_lru_sanity3(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned int batch_size;
unsigned int map_size;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
batch_size = tgt_free / 2;
assert(batch_size * 2 == tgt_free);
@@ -430,11 +436,12 @@ static void test_lru_sanity4(int map_type, int map_flags, unsigned int tgt_free)
int lru_map_fd, expected_map_fd;
unsigned long long key, value[nr_cpus];
unsigned long long end_key;
+ int next_cpu = 0;
printf("%s (map_type:%d map_flags:0x%X): ", __func__, map_type,
map_flags);
- assert(sched_next_online(0, 0) != -1);
+ assert(sched_next_online(0, &next_cpu) != -1);
if (map_flags & BPF_F_NO_COMMON_LRU)
lru_map_fd = create_map(map_type, map_flags,
@@ -502,9 +509,8 @@ static void do_test_lru_sanity5(unsigned long long last_key, int map_fd)
static void test_lru_sanity5(int map_type, int map_flags)
{
unsigned long long key, value[nr_cpus];
- int next_sched_cpu = 0;
+ int next_cpu = 0;
int map_fd;
- int i;
if (map_flags & BPF_F_NO_COMMON_LRU)
return;
@@ -519,27 +525,20 @@ static void test_lru_sanity5(int map_type, int map_flags)
key = 0;
assert(!bpf_map_update(map_fd, &key, value, BPF_NOEXIST));
- for (i = 0; i < nr_cpus; i++) {
+ while (sched_next_online(0, &next_cpu) != -1) {
pid_t pid;
pid = fork();
if (pid == 0) {
- next_sched_cpu = sched_next_online(0, next_sched_cpu);
- if (next_sched_cpu != -1)
- do_test_lru_sanity5(key, map_fd);
+ do_test_lru_sanity5(key, map_fd);
exit(0);
} else if (pid == -1) {
- printf("couldn't spawn #%d process\n", i);
+ printf("couldn't spawn process to test key:%llu\n",
+ key);
exit(1);
} else {
int status;
- /* It is mostly redundant and just allow the parent
- * process to update next_shced_cpu for the next child
- * process
- */
- next_sched_cpu = sched_next_online(pid, next_sched_cpu);
-
assert(waitpid(pid, &status, 0) == pid);
assert(status == 0);
key++;
@@ -547,6 +546,8 @@ static void test_lru_sanity5(int map_type, int map_flags)
}
close(map_fd);
+ /* At least one key should be tested */
+ assert(key > 0);
printf("Pass\n");
}
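The new iteration contract for sched_next_online(), sketched: *next_to_try advances past the CPU actually used, so repeated calls walk all online CPUs and the function returns -1 once none remain:

	int next_cpu = 0;

	while (sched_next_online(0, &next_cpu) != -1) {
		/* pinned to some online CPU < next_cpu; run one pass */
	}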
diff --git a/tools/testing/selftests/bpf/test_tag.c b/tools/testing/selftests/bpf/test_tag.c
new file mode 100644
index 0000000..5f7c602
--- /dev/null
+++ b/tools/testing/selftests/bpf/test_tag.c
@@ -0,0 +1,202 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <string.h>
+#include <sched.h>
+#include <limits.h>
+#include <assert.h>
+
+#include <sys/socket.h>
+#include <sys/resource.h>
+
+#include <linux/filter.h>
+#include <linux/bpf.h>
+#include <linux/if_alg.h>
+
+#include "../../../include/linux/filter.h"
+
+#include "bpf_sys.h"
+
+static struct bpf_insn prog[BPF_MAXINSNS];
+
+static void bpf_gen_imm_prog(unsigned int insns, int fd_map)
+{
+ int i;
+
+ srand(time(NULL));
+ for (i = 0; i < insns; i++)
+ prog[i] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, rand());
+ prog[i - 1] = BPF_EXIT_INSN();
+}
+
+static void bpf_gen_map_prog(unsigned int insns, int fd_map)
+{
+ int i, j = 0;
+
+ for (i = 0; i + 1 < insns; i += 2) {
+ struct bpf_insn tmp[] = {
+ BPF_LD_MAP_FD(j++ % BPF_REG_10, fd_map)
+ };
+
+ memcpy(&prog[i], tmp, sizeof(tmp));
+ }
+ if (insns % 2 == 0)
+ prog[insns - 2] = BPF_ALU64_IMM(BPF_MOV, i % BPF_REG_10, 42);
+ prog[insns - 1] = BPF_EXIT_INSN();
+}
+
+static int bpf_try_load_prog(int insns, int fd_map,
+ void (*bpf_filler)(unsigned int insns,
+ int fd_map))
+{
+ int fd_prog;
+
+ bpf_filler(insns, fd_map);
+ fd_prog = bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, prog, insns *
+ sizeof(struct bpf_insn), "", NULL, 0);
+ assert(fd_prog > 0);
+ if (fd_map > 0)
+ bpf_filler(insns, 0);
+ return fd_prog;
+}
+
+static int __hex2bin(char ch)
+{
+ if ((ch >= '0') && (ch <= '9'))
+ return ch - '0';
+ ch = tolower(ch);
+ if ((ch >= 'a') && (ch <= 'f'))
+ return ch - 'a' + 10;
+ return -1;
+}
+
+static int hex2bin(uint8_t *dst, const char *src, size_t count)
+{
+ while (count--) {
+ int hi = __hex2bin(*src++);
+ int lo = __hex2bin(*src++);
+
+ if ((hi < 0) || (lo < 0))
+ return -1;
+ *dst++ = (hi << 4) | lo;
+ }
+ return 0;
+}
+
+static void tag_from_fdinfo(int fd_prog, uint8_t *tag, uint32_t len)
+{
+ const int prefix_len = sizeof("prog_tag:\t") - 1;
+ char buff[256];
+ int ret = -1;
+ FILE *fp;
+
+ snprintf(buff, sizeof(buff), "/proc/%d/fdinfo/%d", getpid(),
+ fd_prog);
+ fp = fopen(buff, "r");
+ assert(fp);
+
+ while (fgets(buff, sizeof(buff), fp)) {
+ if (strncmp(buff, "prog_tag:\t", prefix_len))
+ continue;
+ ret = hex2bin(tag, buff + prefix_len, len);
+ break;
+ }
+
+ fclose(fp);
+ assert(!ret);
+}
+
+static void tag_from_alg(int insns, uint8_t *tag, uint32_t len)
+{
+ static const struct sockaddr_alg alg = {
+ .salg_family = AF_ALG,
+ .salg_type = "hash",
+ .salg_name = "sha1",
+ };
+ int fd_base, fd_alg, ret;
+ ssize_t size;
+
+ fd_base = socket(AF_ALG, SOCK_SEQPACKET, 0);
+ assert(fd_base > 0);
+
+ ret = bind(fd_base, (struct sockaddr *)&alg, sizeof(alg));
+ assert(!ret);
+
+ fd_alg = accept(fd_base, NULL, 0);
+ assert(fd_alg > 0);
+
+ insns *= sizeof(struct bpf_insn);
+ size = write(fd_alg, prog, insns);
+ assert(size == insns);
+
+ size = read(fd_alg, tag, len);
+ assert(size == len);
+
+ close(fd_alg);
+ close(fd_base);
+}
+
+static void tag_dump(const char *prefix, uint8_t *tag, uint32_t len)
+{
+ int i;
+
+ printf("%s", prefix);
+ for (i = 0; i < len; i++)
+ printf("%02x", tag[i]);
+ printf("\n");
+}
+
+static void tag_exit_report(int insns, int fd_map, uint8_t *ftag,
+ uint8_t *atag, uint32_t len)
+{
+ printf("Program tag mismatch for %d insns%s!\n", insns,
+ fd_map < 0 ? "" : " with map");
+
+ tag_dump(" fdinfo result: ", ftag, len);
+ tag_dump(" af_alg result: ", atag, len);
+ exit(1);
+}
+
+static void do_test(uint32_t *tests, int start_insns, int fd_map,
+ void (*bpf_filler)(unsigned int insns, int fd))
+{
+ int i, fd_prog;
+
+ for (i = start_insns; i <= BPF_MAXINSNS; i++) {
+ uint8_t ftag[8], atag[sizeof(ftag)];
+
+ fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler);
+ tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));
+ tag_from_alg(i, atag, sizeof(atag));
+ if (memcmp(ftag, atag, sizeof(ftag)))
+ tag_exit_report(i, fd_map, ftag, atag, sizeof(ftag));
+
+ close(fd_prog);
+ sched_yield();
+ (*tests)++;
+ }
+}
+
+int main(void)
+{
+ struct rlimit rinf = { RLIM_INFINITY, RLIM_INFINITY };
+ uint32_t tests = 0;
+ int i, fd_map;
+
+ setrlimit(RLIMIT_MEMLOCK, &rinf);
+ fd_map = bpf_map_create(BPF_MAP_TYPE_HASH, sizeof(int),
+ sizeof(int), 1, BPF_F_NO_PREALLOC);
+ assert(fd_map > 0);
+
+ for (i = 0; i < 5; i++) {
+ do_test(&tests, 2, -1, bpf_gen_imm_prog);
+ do_test(&tests, 3, fd_map, bpf_gen_map_prog);
+ }
+
+ printf("test_tag: OK (%u tests)\n", tests);
+ close(fd_map);
+ return 0;
+}
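The invariant test_tag checks, sketched with its own helpers: the kernel-reported tag must equal the truncated SHA-1 over the instruction image with map fd immediates rewritten to 0, which is what the bpf_filler(insns, 0) call after a successful load arranges:

	uint8_t ftag[8], atag[sizeof(ftag)];

	fd_prog = bpf_try_load_prog(i, fd_map, bpf_filler);
	tag_from_fdinfo(fd_prog, ftag, sizeof(ftag));	/* via /proc fdinfo */
	tag_from_alg(i, atag, sizeof(atag));		/* SHA-1 via AF_ALG */
	assert(!memcmp(ftag, atag, sizeof(ftag)));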
diff --git a/tools/testing/selftests/bpf/test_verifier.c b/tools/testing/selftests/bpf/test_verifier.c
index 9bb4534..0d0912c 100644
--- a/tools/testing/selftests/bpf/test_verifier.c
+++ b/tools/testing/selftests/bpf/test_verifier.c
@@ -859,15 +859,451 @@ static struct bpf_test tests[] = {
.result = REJECT,
},
{
- "check non-u32 access to cb",
+ "check cb access: byte",
.insns = {
- BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_1,
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 3),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: byte, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: byte, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ },
+ {
+ "check cb access: half",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3]) + 2),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: half, unaligned",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 4),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: half, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_H, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
+ },
+ {
+ "check cb access: word",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: word, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 1),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 2),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: word, unaligned 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 3),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0])),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[2])),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ },
+ {
+ "check cb access: double, unaligned 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[1])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, unaligned 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[3])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "misaligned access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 1",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 2",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[4]) + 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 3",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
+ offsetof(struct __sk_buff, cb[0]) - 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 4",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4])),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 5",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[4]) + 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, oob 6",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
+ offsetof(struct __sk_buff, cb[0]) - 8),
+ BPF_EXIT_INSN(),
+ },
+ .errstr = "invalid bpf_context access",
+ .result = REJECT,
+ },
+ {
+ "check cb access: double, wrong type",
+ .insns = {
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, cb[0])),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
- .errstr_unpriv = "R1 leaks addr",
.result = REJECT,
+ .prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"check out of range skb->cb access",
@@ -1890,6 +2326,84 @@ static struct bpf_test tests[] = {
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
+ "direct packet access: test11 (shift, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_RSH, BPF_REG_5, 3),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test12 (and, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 8),
+ BPF_MOV64_IMM(BPF_REG_3, 144),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
+ "direct packet access: test13 (branches, good access)",
+ .insns = {
+ BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+ offsetof(struct __sk_buff, data)),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, data_end)),
+ BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 22),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 13),
+ BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
+ offsetof(struct __sk_buff, mark)),
+ BPF_MOV64_IMM(BPF_REG_4, 1),
+ BPF_JMP_REG(BPF_JGT, BPF_REG_3, BPF_REG_4, 2),
+ BPF_MOV64_IMM(BPF_REG_3, 14),
+ BPF_JMP_IMM(BPF_JA, 0, 0, 1),
+ BPF_MOV64_IMM(BPF_REG_3, 24),
+ BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+ BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 23),
+ BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 15),
+ BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
+ BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
+ BPF_MOV64_IMM(BPF_REG_0, 1),
+ BPF_EXIT_INSN(),
+ BPF_MOV64_IMM(BPF_REG_0, 0),
+ BPF_EXIT_INSN(),
+ },
+ .result = ACCEPT,
+ .prog_type = BPF_PROG_TYPE_SCHED_CLS,
+ },
+ {
"helper access to packet: test1, valid packet_ptr range",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
@@ -3772,6 +4286,8 @@ static struct bpf_test tests[] = {
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 64),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_IMM(BPF_REG_4, 0),
@@ -3815,6 +4331,8 @@ static struct bpf_test tests[] = {
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -16),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_MOV64_IMM(BPF_REG_2, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_2, -128),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_10, -128),
BPF_ALU64_IMM(BPF_AND, BPF_REG_2, 63),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_3, 0),
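
The two spill/fill pairs added in the hunks above force REG_2 through a
stack slot before it is masked, so the verifier has to track the value
across a store/load round trip instead of straight through registers.
New entries in this table all follow the same skeleton; as a hypothetical
illustration only (not part of the patch), an extra case pinning down the
last in-bounds byte of the cb[] window could be written as:

	{
		"check cb access: byte, last in-bounds offset (sketch)",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0,
				    offsetof(struct __sk_buff, cb[4]) + 3),
			BPF_EXIT_INSN(),
		},
		.result = ACCEPT,
	},
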
diff --git a/tools/testing/selftests/net/psock_lib.h b/tools/testing/selftests/net/psock_lib.h
index 24bc7ec..a77da88 100644
--- a/tools/testing/selftests/net/psock_lib.h
+++ b/tools/testing/selftests/net/psock_lib.h
@@ -40,14 +40,39 @@
static __maybe_unused void sock_setfilter(int fd, int lvl, int optnum)
{
+ /* The filter below checks for all of the following conditions,
+ * which are based on the contents of create_payload():
+ * ether type 0x800 and
+ * ip proto udp and
+ * skb->len == DATA_LEN and
+ * udp[38] == 'a' or udp[38] == 'b'
+ * It can be generated from the following bpf_asm input:
+ * ldh [12]
+ * jne #0x800, drop ; ETH_P_IP
+ * ldb [23]
+ * jneq #17, drop ; IPPROTO_UDP
+ * ld len ; ld skb->len
+ * jlt #100, drop ; DATA_LEN
+ * ldb [80]
+ * jeq #97, pass ; DATA_CHAR
+ * jne #98, drop ; DATA_CHAR_1
+ * pass:
+ * ret #-1
+ * drop:
+ * ret #0
+ */
struct sock_filter bpf_filter[] = {
- { 0x80, 0, 0, 0x00000000 }, /* LD pktlen */
- { 0x35, 0, 4, DATA_LEN }, /* JGE DATA_LEN [f goto nomatch]*/
- { 0x30, 0, 0, 0x00000050 }, /* LD ip[80] */
- { 0x15, 1, 0, DATA_CHAR }, /* JEQ DATA_CHAR [t goto match]*/
- { 0x15, 0, 1, DATA_CHAR_1}, /* JEQ DATA_CHAR_1 [t goto match]*/
- { 0x06, 0, 0, 0x00000060 }, /* RET match */
- { 0x06, 0, 0, 0x00000000 }, /* RET no match */
+ { 0x28, 0, 0, 0x0000000c },
+ { 0x15, 0, 8, 0x00000800 },
+ { 0x30, 0, 0, 0x00000017 },
+ { 0x15, 0, 6, 0x00000011 },
+ { 0x80, 0, 0, 0x00000000 },
+ { 0x35, 0, 4, 0x00000064 },
+ { 0x30, 0, 0, 0x00000050 },
+ { 0x15, 1, 0, 0x00000061 },
+ { 0x15, 0, 1, 0x00000062 },
+ { 0x06, 0, 0, 0xffffffff },
+ { 0x06, 0, 0, 0x00000000 },
};
struct sock_fprog bpf_prog;
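
The array above is the assembled form of the bpf_asm listing in the new
comment. The remainder of sock_setfilter() is not shown in this diff; a
minimal sketch of how such an array is conventionally attached (assuming
the usual SO_ATTACH_FILTER-style wiring, which is what the fd/lvl/optnum
parameters suggest):

	bpf_prog.len = sizeof(bpf_filter) / sizeof(struct sock_filter);
	bpf_prog.filter = bpf_filter;
	if (setsockopt(fd, lvl, optnum, &bpf_prog, sizeof(bpf_prog)))
		error(1, errno, "setsockopt filter");
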
diff --git a/tools/testing/selftests/net/run_netsocktests b/tools/testing/selftests/net/run_netsocktests
index c09a682..16058bb 100755
--- a/tools/testing/selftests/net/run_netsocktests
+++ b/tools/testing/selftests/net/run_netsocktests
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/sh
echo "--------------------"
echo "running socket test"
diff --git a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
index c22860a..30e1ac6 100644
--- a/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
+++ b/tools/testing/selftests/powerpc/pmu/ebb/pmc56_overflow_test.c
@@ -66,7 +66,7 @@ int pmc56_overflow(void)
FAIL_IF(ebb_event_enable(&event));
- mtspr(SPRN_PMC1, pmc_sample_period(sample_period));
+ mtspr(SPRN_PMC2, pmc_sample_period(sample_period));
mtspr(SPRN_PMC5, 0);
mtspr(SPRN_PMC6, 0);
diff --git a/tools/testing/selftests/x86/protection_keys.c b/tools/testing/selftests/x86/protection_keys.c
index bdd58c7..df9e0a0 100644
--- a/tools/testing/selftests/x86/protection_keys.c
+++ b/tools/testing/selftests/x86/protection_keys.c
@@ -1367,7 +1367,7 @@ void run_tests_once(void)
tracing_off();
close_test_fds();
- printf("test %2d PASSED (itertation %d)\n", test_nr, iteration_nr);
+ printf("test %2d PASSED (iteration %d)\n", test_nr, iteration_nr);
dprintf1("======================\n\n");
}
iteration_nr++;
diff --git a/tools/virtio/ringtest/main.h b/tools/virtio/ringtest/main.h
index 34e63cc..14142fa 100644
--- a/tools/virtio/ringtest/main.h
+++ b/tools/virtio/ringtest/main.h
@@ -26,6 +26,16 @@ static inline void wait_cycles(unsigned long long cycles)
#define VMEXIT_CYCLES 500
#define VMENTRY_CYCLES 500
+#elif defined(__s390x__)
+static inline void wait_cycles(unsigned long long cycles)
+{
+ asm volatile("0: brctg %0,0b" : : "d" (cycles));
+}
+
+/* tweak me */
+#define VMEXIT_CYCLES 200
+#define VMENTRY_CYCLES 200
+
#else
static inline void wait_cycles(unsigned long long cycles)
{
@@ -81,6 +91,8 @@ extern unsigned ring_size;
/* Is there a portable way to do this? */
#if defined(__x86_64__) || defined(__i386__)
#define cpu_relax() asm ("rep; nop" ::: "memory")
+#elif defined(__s390x__)
+#define cpu_relax() barrier()
#else
#define cpu_relax() assert(0)
#endif
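
The s390x additions follow the pattern of the other arches in this
header: brctg decrements its operand and branches back while it is
non-zero, so wait_cycles() loops once per requested count, and
cpu_relax() falls back to a plain compiler barrier since there is no
pause-style hint to issue here. The typical call site is a spin-wait,
e.g. (illustrative sketch only):

	static volatile int ready;

	/* spin until another thread flips the flag */
	while (!ready)
		cpu_relax();
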
diff --git a/tools/virtio/ringtest/run-on-all.sh b/tools/virtio/ringtest/run-on-all.sh
index 2e69ca8..29b0d39 100755
--- a/tools/virtio/ringtest/run-on-all.sh
+++ b/tools/virtio/ringtest/run-on-all.sh
@@ -1,12 +1,13 @@
#!/bin/sh
+CPUS_ONLINE=$(lscpu --online -p=cpu|grep -v -e '#')
#use last CPU for host. Why not the first?
#many devices tend to use cpu0 by default so
#it tends to be busier
-HOST_AFFINITY=$(lscpu -p=cpu | tail -1)
+HOST_AFFINITY=$(echo "${CPUS_ONLINE}"|tail -n 1)
#run command on all cpus
-for cpu in $(seq 0 $HOST_AFFINITY)
+for cpu in $CPUS_ONLINE
do
#Don't run guest and host on same CPU
#It actually works ok if using signalling
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index a2dbbcc..6a084cd 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -24,6 +24,7 @@
#include <clocksource/arm_arch_timer.h>
#include <asm/arch_timer.h>
+#include <asm/kvm_hyp.h>
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
@@ -89,9 +90,6 @@ static void kvm_timer_inject_irq_work(struct work_struct *work)
struct kvm_vcpu *vcpu;
vcpu = container_of(work, struct kvm_vcpu, arch.timer_cpu.expired);
- vcpu->arch.timer_cpu.armed = false;
-
- WARN_ON(!kvm_timer_should_fire(vcpu));
/*
* If the vcpu is blocked we want to wake it up so that it will see
@@ -512,3 +510,25 @@ void kvm_timer_init(struct kvm *kvm)
{
kvm->arch.timer.cntvoff = kvm_phys_timer_read();
}
+
+/*
+ * On a VHE system, the traps on physical timer and counter accesses from
+ * EL0 and EL1 only need to be configured once, not at every world switch.
+ * The host kernel runs at EL2 with HCR_EL2.TGE == 1, which renders these
+ * bits ineffective during host kernel execution anyway.
+ */
+void kvm_timer_init_vhe(void)
+{
+ /* When HCR_EL2.E2H == 1, EL1PCEN and EL1PCTEN are shifted up by 10 bits */
+ u32 cnthctl_shift = 10;
+ u64 val;
+
+ /*
+ * Disallow physical timer access for the guest.
+ * Physical counter access is allowed.
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~(CNTHCTL_EL1PCEN << cnthctl_shift);
+ val |= (CNTHCTL_EL1PCTEN << cnthctl_shift);
+ write_sysreg(val, cnthctl_el2);
+}
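
kvm_timer_init_vhe() is intended to run once per CPU from the host's VHE
init path rather than on every world switch; the actual call site is
outside this diff, so the surrounding function below is an assumption,
but a caller would be shaped like:

	static void cpu_init_vhe_sketch(void)
	{
		/* hypothetical hook: runs once per CPU on a VHE host */
		if (has_vhe())
			kvm_timer_init_vhe();
	}
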
diff --git a/virt/kvm/arm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
index 798866a..63e28dd 100644
--- a/virt/kvm/arm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -35,10 +35,16 @@ void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
/* Disable the virtual timer */
write_sysreg_el0(0, cntv_ctl);
- /* Allow physical timer/counter access for the host */
- val = read_sysreg(cnthctl_el2);
- val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
- write_sysreg(val, cnthctl_el2);
+ /*
+ * We don't need to do this for VHE, since the host kernel runs in EL2
+ * with HCR_EL2.TGE == 1, which makes these bits have no effect.
+ */
+ if (!has_vhe()) {
+ /* Allow physical timer/counter access for the host */
+ val = read_sysreg(cnthctl_el2);
+ val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
+ write_sysreg(val, cnthctl_el2);
+ }
/* Clear cntvoff for the host */
write_sysreg(0, cntvoff_el2);
@@ -50,14 +56,17 @@ void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
u64 val;
- /*
- * Disallow physical timer access for the guest
- * Physical counter access is allowed
- */
- val = read_sysreg(cnthctl_el2);
- val &= ~CNTHCTL_EL1PCEN;
- val |= CNTHCTL_EL1PCTEN;
- write_sysreg(val, cnthctl_el2);
+ /* These bits are already configured at boot on a VHE system */
+ if (!has_vhe()) {
+ /*
+ * Disallow physical timer access for the guest
+ * Physical counter access is allowed
+ */
+ val = read_sysreg(cnthctl_el2);
+ val &= ~CNTHCTL_EL1PCEN;
+ val |= CNTHCTL_EL1PCTEN;
+ write_sysreg(val, cnthctl_el2);
+ }
if (timer->enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
index 5114391..c737ea0 100644
--- a/virt/kvm/arm/vgic/vgic-init.c
+++ b/virt/kvm/arm/vgic/vgic-init.c
@@ -268,15 +268,11 @@ static void kvm_vgic_dist_destroy(struct kvm *kvm)
{
struct vgic_dist *dist = &kvm->arch.vgic;
- mutex_lock(&kvm->lock);
-
dist->ready = false;
dist->initialized = false;
kfree(dist->spis);
dist->nr_spis = 0;
-
- mutex_unlock(&kvm->lock);
}
void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
@@ -286,7 +282,8 @@ void kvm_vgic_vcpu_destroy(struct kvm_vcpu *vcpu)
INIT_LIST_HEAD(&vgic_cpu->ap_list_head);
}
-void kvm_vgic_destroy(struct kvm *kvm)
+/* To be called with kvm->lock held */
+static void __kvm_vgic_destroy(struct kvm *kvm)
{
struct kvm_vcpu *vcpu;
int i;
@@ -297,6 +294,13 @@ void kvm_vgic_destroy(struct kvm *kvm)
kvm_vgic_vcpu_destroy(vcpu);
}
+void kvm_vgic_destroy(struct kvm *kvm)
+{
+ mutex_lock(&kvm->lock);
+ __kvm_vgic_destroy(kvm);
+ mutex_unlock(&kvm->lock);
+}
+
/**
* vgic_lazy_init: Lazy init is only allowed if the GIC exposed to the guest
* is a GICv2. A GICv3 must be explicitly initialized by the guest using the
@@ -348,6 +352,10 @@ int kvm_vgic_map_resources(struct kvm *kvm)
ret = vgic_v2_map_resources(kvm);
else
ret = vgic_v3_map_resources(kvm);
+
+ if (ret)
+ __kvm_vgic_destroy(kvm);
+
out:
mutex_unlock(&kvm->lock);
return ret;
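
The kvm_vgic_destroy()/__kvm_vgic_destroy() split is the usual
locked/unlocked-helper idiom: kvm_vgic_map_resources() already holds
kvm->lock when it has to tear the distributor down on failure, so calling
the public, locking variant from there would deadlock on the same mutex.
Stripped of the vgic specifics, the pattern is (generic sketch, not code
from this patch):

	/* caller must hold f->lock */
	static void __foo_destroy(struct foo *f)
	{
		/* actual teardown, done lock-free */
	}

	void foo_destroy(struct foo *f)
	{
		mutex_lock(&f->lock);
		__foo_destroy(f);
		mutex_unlock(&f->lock);
	}
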
diff --git a/virt/kvm/arm/vgic/vgic-v2.c b/virt/kvm/arm/vgic/vgic-v2.c
index 9bab867..834137e 100644
--- a/virt/kvm/arm/vgic/vgic-v2.c
+++ b/virt/kvm/arm/vgic/vgic-v2.c
@@ -293,8 +293,6 @@ int vgic_v2_map_resources(struct kvm *kvm)
dist->ready = true;
out:
- if (ret)
- kvm_vgic_destroy(kvm);
return ret;
}
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index 5c9f974..e6b03fd 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -302,8 +302,6 @@ int vgic_v3_map_resources(struct kvm *kvm)
dist->ready = true;
out:
- if (ret)
- kvm_vgic_destroy(kvm);
return ret;
}
diff --git a/virt/lib/irqbypass.c b/virt/lib/irqbypass.c
index 52abac4..6d2fcd6 100644
--- a/virt/lib/irqbypass.c
+++ b/virt/lib/irqbypass.c
@@ -195,7 +195,7 @@ int irq_bypass_register_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token == consumer->token) {
+ if (tmp->token == consumer->token || tmp == consumer) {
mutex_unlock(&lock);
module_put(THIS_MODULE);
return -EBUSY;
@@ -245,7 +245,7 @@ void irq_bypass_unregister_consumer(struct irq_bypass_consumer *consumer)
mutex_lock(&lock);
list_for_each_entry(tmp, &consumers, node) {
- if (tmp->token != consumer->token)
+ if (tmp != consumer)
continue;
list_for_each_entry(producer, &producers, node) {
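
Matching list entries by pointer as well as by token tightens both paths:
registering the very same consumer object twice now fails with -EBUSY
even if its token changed in between, and unregistering only ever removes
the exact object that was registered. As a usage sketch (the struct is
only partially initialized here for illustration):

	struct irq_bypass_consumer cons = { .token = token };

	ret = irq_bypass_register_consumer(&cons);	/* first call: 0 */
	ret = irq_bypass_register_consumer(&cons);	/* same object: -EBUSY */
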