author    Takashi Iwai <tiwai@suse.de>    2015-05-18 08:23:37 +0200
committer Takashi Iwai <tiwai@suse.de>    2015-05-18 08:23:37 +0200
commit    5a6cc82171dba297ceab49cda47f2bf5616f87b3 (patch)
tree      1e12036476a4faf4b404a0a4e070d148efafc221
parent    09ea997677cd44ebe7f42573119aaf46b775c683 (diff)
parent    7730c0b55043f6ff1c27e9cb45e13679995b2361 (diff)
Merge tag 'asoc-fix-v4.1-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie/sound into for-linus
ASoC: Fixes for v4.1

A few more fixes for v4.1, some driver fixes plus one core fix which fixes registration of DAI links when adding prefixes to CODECs to deduplicate in multi-CODEC systems.
-rw-r--r--CREDITS7
-rw-r--r--Documentation/IPMI.txt5
-rw-r--r--Documentation/acpi/enumeration.txt2
-rw-r--r--Documentation/acpi/gpio-properties.txt6
-rw-r--r--Documentation/devicetree/bindings/arm/omap/l3-noc.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/rtc/abracon,abx80x.txt30
-rw-r--r--Documentation/kasan.txt8
-rw-r--r--Documentation/kernel-parameters.txt2
-rw-r--r--Documentation/module-signing.txt6
-rw-r--r--Documentation/networking/mpls-sysctl.txt9
-rw-r--r--Documentation/networking/scaling.txt2
-rw-r--r--Documentation/powerpc/transactional_memory.txt32
-rw-r--r--MAINTAINERS34
-rw-r--r--Makefile2
-rw-r--r--arch/arm/boot/dts/am437x-sk-evm.dts4
-rw-r--r--arch/arm/boot/dts/am57xx-beagle-x15.dts11
-rw-r--r--arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts4
-rw-r--r--arch/arm/boot/dts/dra7.dtsi10
-rw-r--r--arch/arm/boot/dts/exynos4412-odroid-common.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos5250-snow.dts1
-rw-r--r--arch/arm/boot/dts/exynos5420-trip-points.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi1
-rw-r--r--arch/arm/boot/dts/exynos5440-trip-points.dtsi2
-rw-r--r--arch/arm/boot/dts/imx23-olinuxino.dts4
-rw-r--r--arch/arm/boot/dts/imx25.dtsi1
-rw-r--r--arch/arm/boot/dts/imx28.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi1
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts4
-rw-r--r--arch/arm/boot/dts/omap3.dtsi2
-rw-r--r--arch/arm/boot/dts/omap5.dtsi2
-rw-r--r--arch/arm/boot/dts/r8a7791-koelsch.dts2
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi17
-rw-r--r--arch/arm/boot/dts/ste-href.dtsi15
-rw-r--r--arch/arm/boot/dts/ste-snowball.dts13
-rw-r--r--arch/arm/configs/multi_v7_defconfig3
-rw-r--r--arch/arm/configs/omap2plus_defconfig2
-rw-r--r--arch/arm/include/asm/dma-iommu.h2
-rw-r--r--arch/arm/include/asm/xen/page.h1
-rw-r--r--arch/arm/kernel/perf_event_cpu.c9
-rw-r--r--arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c2
-rw-r--r--arch/arm/mach-omap2/prm-regbits-34xx.h1
-rw-r--r--arch/arm/mach-omap2/prm-regbits-44xx.h1
-rw-r--r--arch/arm/mach-omap2/vc.c12
-rw-r--r--arch/arm/mach-omap2/vc.h2
-rw-r--r--arch/arm/mach-omap2/vc3xxx_data.c1
-rw-r--r--arch/arm/mach-omap2/vc44xx_data.c1
-rw-r--r--arch/arm/mach-pxa/Kconfig9
-rw-r--r--arch/arm/mach-pxa/Makefile1
-rw-r--r--arch/arm/mach-pxa/include/mach/lubbock.h7
-rw-r--r--arch/arm/mach-pxa/include/mach/mainstone.h6
-rw-r--r--arch/arm/mach-pxa/lubbock.c108
-rw-r--r--arch/arm/mach-pxa/mainstone.c115
-rw-r--r--arch/arm/mach-pxa/pxa_cplds_irqs.c200
-rw-r--r--arch/arm/mach-rockchip/pm.c33
-rw-r--r--arch/arm/mach-rockchip/pm.h8
-rw-r--r--arch/arm/mach-rockchip/rockchip.c19
-rw-r--r--arch/arm/mm/dma-mapping.c13
-rw-r--r--arch/arm/mm/proc-arm1020.S2
-rw-r--r--arch/arm/mm/proc-arm1020e.S2
-rw-r--r--arch/arm/mm/proc-arm925.S3
-rw-r--r--arch/arm/mm/proc-feroceon.S1
-rw-r--r--arch/arm/xen/mm.c15
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/barrier.h16
-rw-r--r--arch/arm64/kernel/perf_event.c9
-rw-r--r--arch/arm64/mm/dma-mapping.c9
-rw-r--r--arch/m32r/kernel/smp.c6
-rw-r--r--arch/powerpc/include/uapi/asm/tm.h2
-rw-r--r--arch/powerpc/kernel/eeh.c11
-rw-r--r--arch/powerpc/kernel/entry_64.S19
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kvm/book3s_xics.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c2
-rw-r--r--arch/powerpc/platforms/pseries/dlpar.c10
-rw-r--r--arch/s390/Kconfig2
-rw-r--r--arch/s390/crypto/crypt_s390.h122
-rw-r--r--arch/s390/crypto/prng.c850
-rw-r--r--arch/s390/include/asm/kexec.h3
-rw-r--r--arch/s390/include/asm/mmu.h4
-rw-r--r--arch/s390/include/asm/mmu_context.h3
-rw-r--r--arch/s390/include/asm/pgalloc.h1
-rw-r--r--arch/s390/include/asm/pgtable.h167
-rw-r--r--arch/s390/mm/hugetlbpage.c66
-rw-r--r--arch/s390/mm/pgtable.c142
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/x86/boot/compressed/eboot.c2
-rw-r--r--arch/x86/include/asm/hypervisor.h2
-rw-r--r--arch/x86/include/asm/pvclock.h1
-rw-r--r--arch/x86/include/asm/spinlock.h2
-rw-r--r--arch/x86/include/asm/xen/page.h5
-rw-r--r--arch/x86/kernel/cpu/hypervisor.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c66
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c12
-rw-r--r--arch/x86/kernel/process.c14
-rw-r--r--arch/x86/kernel/pvclock.c44
-rw-r--r--arch/x86/kvm/x86.c33
-rw-r--r--arch/x86/mm/ioremap.c14
-rw-r--r--arch/x86/pci/acpi.c24
-rw-r--r--arch/x86/vdso/vclock_gettime.c34
-rw-r--r--arch/x86/xen/enlighten.c27
-rw-r--r--arch/x86/xen/suspend.c10
-rw-r--r--block/blk-core.c2
-rw-r--r--block/blk-mq.c60
-rw-r--r--block/blk-sysfs.c2
-rw-r--r--block/bounce.c2
-rw-r--r--block/elevator.c6
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/resource.c2
-rw-r--r--drivers/acpi/sbs.c2
-rw-r--r--drivers/acpi/sbshc.c22
-rw-r--r--drivers/block/loop.c2
-rw-r--r--drivers/block/nvme-scsi.c3
-rw-r--r--drivers/block/rbd.c5
-rw-r--r--drivers/block/xen-blkback/blkback.c35
-rw-r--r--drivers/block/zram/zram_drv.c23
-rw-r--r--drivers/bus/arm-cci.c2
-rw-r--r--drivers/bus/omap_l3_noc.c5
-rw-r--r--drivers/bus/omap_l3_noc.h54
-rw-r--r--drivers/char/hw_random/bcm63xx-rng.c18
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c4
-rw-r--r--drivers/char/ipmi/ipmi_si_intf.c16
-rw-r--r--drivers/char/ipmi/ipmi_ssif.c213
-rw-r--r--drivers/cpuidle/cpuidle.c16
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/dmaengine.c4
-rw-r--r--drivers/dma/sh/usb-dmac.c2
-rw-r--r--drivers/firmware/efi/runtime-map.c6
-rw-r--r--drivers/gpio/gpio-omap.c48
-rw-r--r--drivers/gpio/gpiolib-acpi.c2
-rw-r--r--drivers/gpio/gpiolib-sysfs.c19
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c7
-rw-r--r--drivers/gpu/drm/amd/amdkfd/kfd_topology.c4
-rw-r--r--drivers/gpu/drm/drm_irq.c9
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h2
-rw-r--r--drivers/gpu/drm/i915/intel_display.c3
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c9
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c26
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c6
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c25
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c53
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c34
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mn.c13
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_uvd.c144
-rw-r--r--drivers/gpu/drm/radeon/radeon_vce.c65
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c36
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h3
-rw-r--r--drivers/gpu/drm/radeon/si_dpm.c1
-rw-r--r--drivers/gpu/drm/radeon/uvd_v1_0.c14
-rw-r--r--drivers/gpu/drm/radeon/uvd_v2_2.c29
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c9
-rw-r--r--drivers/gpu/drm/tegra/drm.c1
-rw-r--r--drivers/infiniband/core/addr.c13
-rw-r--r--drivers/infiniband/core/cm.c23
-rw-r--r--drivers/infiniband/core/cm_msgs.h4
-rw-r--r--drivers/infiniband/core/cma.c27
-rw-r--r--drivers/infiniband/core/iwpm_msg.c73
-rw-r--r--drivers/infiniband/core/iwpm_util.c208
-rw-r--r--drivers/infiniband/core/iwpm_util.h15
-rw-r--r--drivers/infiniband/core/umem_odp.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c87
-rw-r--r--drivers/infiniband/hw/cxgb4/cq.c22
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c37
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h7
-rw-r--r--drivers/infiniband/hw/cxgb4/mem.c6
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/t4.h7
-rw-r--r--drivers/infiniband/hw/cxgb4/t4fw_ri_api.h4
-rw-r--r--drivers/infiniband/hw/nes/nes.c1
-rw-r--r--drivers/infiniband/hw/nes/nes_cm.c65
-rw-r--r--drivers/infiniband/hw/qib/qib.h1
-rw-r--r--drivers/infiniband/hw/qib/qib_file_ops.c3
-rw-r--r--drivers/infiniband/hw/qib/qib_iba6120.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7220.c8
-rw-r--r--drivers/infiniband/hw/qib/qib_iba7322.c41
-rw-r--r--drivers/infiniband/hw/qib/qib_init.c26
-rw-r--r--drivers/infiniband/hw/qib/qib_wc_x86_64.c31
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_cm.c4
-rw-r--r--drivers/irqchip/irq-gic.c71
-rw-r--r--drivers/md/dm-crypt.c12
-rw-r--r--drivers/md/dm-ioctl.c17
-rw-r--r--drivers/md/dm.c19
-rw-r--r--drivers/md/md.c4
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.c14
-rw-r--r--drivers/media/platform/marvell-ccic/mcam-core.h8
-rw-r--r--drivers/media/platform/soc_camera/rcar_vin.c7
-rw-r--r--drivers/mmc/card/block.c12
-rw-r--r--drivers/mmc/card/queue.c2
-rw-r--r--drivers/mmc/card/queue.h2
-rw-r--r--drivers/mmc/core/core.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c7
-rw-r--r--drivers/mmc/host/sh_mmcif.c2
-rw-r--r--drivers/net/bonding/bond_main.c12
-rw-r--r--drivers/net/bonding/bond_procfs.c1
-rw-r--r--drivers/net/bonding/bonding_priv.h25
-rw-r--r--drivers/net/can/Kconfig2
-rw-r--r--drivers/net/can/usb/kvaser_usb.c2
-rw-r--r--drivers/net/ethernet/8390/etherh.c2
-rw-r--r--drivers/net/ethernet/altera/altera_msgdmahw.h5
-rw-r--r--drivers/net/ethernet/altera/altera_tse_main.c43
-rw-r--r--drivers/net/ethernet/amd/Kconfig2
-rw-r--r--drivers/net/ethernet/arc/Kconfig5
-rw-r--r--drivers/net/ethernet/atheros/atl1e/atl1e_hw.h2
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.h2
-rw-r--r--drivers/net/ethernet/broadcom/bgmac.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c116
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c4
-rw-r--r--drivers/net/ethernet/cadence/macb.c7
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c5
-rw-r--r--drivers/net/ethernet/ibm/ehea/ehea_main.c6
-rw-r--r--drivers/net/ethernet/ibm/ibmveth.c4
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_main.c7
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c4
-rw-r--r--drivers/net/ethernet/marvell/pxa168_eth.c16
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c29
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c26
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c8
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c18
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h1
-rw-r--r--drivers/net/ethernet/myricom/myri10ge/myri10ge.c38
-rw-r--r--drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c4
-rw-r--r--drivers/net/ethernet/rocker/rocker.c5
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c8
-rw-r--r--drivers/net/hyperv/hyperv_net.h13
-rw-r--r--drivers/net/hyperv/netvsc.c8
-rw-r--r--drivers/net/hyperv/netvsc_drv.c47
-rw-r--r--drivers/net/hyperv/rndis_filter.c3
-rw-r--r--drivers/net/phy/mdio-gpio.c14
-rw-r--r--drivers/net/phy/mdio-mux-gpio.c60
-rw-r--r--drivers/net/ppp/ppp_mppe.c36
-rw-r--r--drivers/net/vxlan.c6
-rw-r--r--drivers/pinctrl/core.c10
-rw-r--r--drivers/pinctrl/core.h2
-rw-r--r--drivers/pinctrl/devicetree.c2
-rw-r--r--drivers/pinctrl/mediatek/pinctrl-mtk-common.c2
-rw-r--r--drivers/pinctrl/mvebu/pinctrl-armada-370.c2
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-gpio.c14
-rw-r--r--drivers/pinctrl/qcom/pinctrl-spmi-mpp.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c7
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/rtc/Kconfig10
-rw-r--r--drivers/rtc/Makefile1
-rw-r--r--drivers/rtc/rtc-abx80x.c307
-rw-r--r--drivers/rtc/rtc-armada38x.c24
-rw-r--r--drivers/s390/char/con3215.c2
-rw-r--r--drivers/scsi/3w-9xxx.c57
-rw-r--r--drivers/scsi/3w-9xxx.h5
-rw-r--r--drivers/scsi/3w-sas.c50
-rw-r--r--drivers/scsi/3w-sas.h4
-rw-r--r--drivers/scsi/3w-xxxx.c42
-rw-r--r--drivers/scsi/3w-xxxx.h5
-rw-r--r--drivers/scsi/aha1542.c23
-rw-r--r--drivers/scsi/scsi_devinfo.c1
-rw-r--r--drivers/scsi/scsi_scan.c6
-rw-r--r--drivers/sh/pm_runtime.c7
-rw-r--r--drivers/staging/media/omap4iss/Kconfig1
-rw-r--r--drivers/staging/media/omap4iss/iss.c11
-rw-r--r--drivers/staging/media/omap4iss/iss.h4
-rw-r--r--drivers/staging/media/omap4iss/iss_csiphy.c12
-rw-r--r--drivers/tty/hvc/hvc_xen.c18
-rw-r--r--drivers/tty/serial/8250/8250_pci.c25
-rw-r--r--drivers/tty/serial/atmel_serial.c2
-rw-r--r--drivers/tty/serial/of_serial.c1
-rw-r--r--drivers/tty/serial/samsung.c5
-rw-r--r--drivers/tty/serial/serial_core.c2
-rw-r--r--drivers/tty/serial/uartlite.c11
-rw-r--r--drivers/tty/serial/xilinx_uartps.c12
-rw-r--r--drivers/tty/tty_ioctl.c3
-rw-r--r--drivers/usb/chipidea/otg_fsm.c4
-rw-r--r--drivers/usb/class/cdc-acm.c7
-rw-r--r--drivers/usb/host/ehci-msm.c13
-rw-r--r--drivers/usb/storage/uas-detect.h11
-rw-r--r--drivers/usb/storage/uas.c16
-rw-r--r--drivers/usb/storage/usb.c8
-rw-r--r--drivers/vfio/pci/vfio_pci.c8
-rw-r--r--drivers/vfio/vfio.c21
-rw-r--r--drivers/xen/events/events_2l.c10
-rw-r--r--drivers/xen/events/events_base.c7
-rw-r--r--drivers/xen/gntdev.c28
-rw-r--r--drivers/xen/grant-table.c28
-rw-r--r--drivers/xen/manage.c9
-rw-r--r--drivers/xen/swiotlb-xen.c2
-rw-r--r--drivers/xen/xen-pciback/conf_space.c6
-rw-r--r--drivers/xen/xen-pciback/conf_space.h2
-rw-r--r--drivers/xen/xen-pciback/conf_space_header.c2
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c29
-rw-r--r--fs/btrfs/delayed-inode.c2
-rw-r--r--fs/btrfs/extent-tree.c90
-rw-r--r--fs/btrfs/extent_io.c54
-rw-r--r--fs/btrfs/free-space-cache.c12
-rw-r--r--fs/btrfs/inode.c21
-rw-r--r--fs/btrfs/ioctl.c3
-rw-r--r--fs/btrfs/volumes.c15
-rw-r--r--fs/configfs/mount.c2
-rw-r--r--fs/efivarfs/super.c2
-rw-r--r--fs/ext4/Kconfig9
-rw-r--r--fs/ext4/crypto_fname.c280
-rw-r--r--fs/ext4/crypto_key.c1
-rw-r--r--fs/ext4/crypto_policy.c14
-rw-r--r--fs/ext4/dir.c2
-rw-r--r--fs/ext4/ext4.h16
-rw-r--r--fs/ext4/ext4_crypto.h11
-rw-r--r--fs/ext4/extents.c15
-rw-r--r--fs/ext4/extents_status.c8
-rw-r--r--fs/ext4/inode.c2
-rw-r--r--fs/ext4/namei.c72
-rw-r--r--fs/ext4/resize.c7
-rw-r--r--fs/ext4/symlink.c2
-rw-r--r--fs/f2fs/data.c7
-rw-r--r--fs/f2fs/f2fs.h1
-rw-r--r--fs/f2fs/namei.c8
-rw-r--r--fs/f2fs/super.c1
-rw-r--r--fs/namei.c22
-rw-r--r--fs/namespace.c6
-rw-r--r--fs/nilfs2/btree.c2
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c13
-rw-r--r--fs/splice.c12
-rw-r--r--include/acpi/actypes.h1
-rw-r--r--include/linux/blk_types.h2
-rw-r--r--include/linux/compiler-gcc.h16
-rw-r--r--include/linux/compiler-intel.h3
-rw-r--r--include/linux/compiler.h4
-rw-r--r--include/linux/ftrace_event.h2
-rw-r--r--include/linux/irqchip/arm-gic.h2
-rw-r--r--include/linux/kexec.h4
-rw-r--r--include/linux/netdevice.h16
-rw-r--r--include/linux/netfilter_bridge.h16
-rw-r--r--include/linux/nilfs2_fs.h2
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/rhashtable.h3
-rw-r--r--include/linux/rtnetlink.h2
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/linux/skbuff.h1
-rw-r--r--include/linux/tty.h1
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/util_macros.h2
-rw-r--r--include/net/bonding.h7
-rw-r--r--include/net/inet_connection_sock.h20
-rw-r--r--include/net/request_sock.h18
-rw-r--r--include/rdma/ib_addr.h3
-rw-r--r--include/rdma/ib_cm.h7
-rw-r--r--include/rdma/iw_portmap.h25
-rw-r--r--include/scsi/scsi_devinfo.h1
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/rdma/rdma_netlink.h1
-rw-r--r--include/xen/grant_table.h1
-rw-r--r--include/xen/xen-ops.h1
-rw-r--r--init/do_mounts.c5
-rw-r--r--kernel/Makefile6
-rw-r--r--kernel/bpf/core.c12
-rw-r--r--kernel/irq/dummychip.c1
-rw-r--r--kernel/kexec.c2
-rw-r--r--kernel/rcu/tree.c16
-rw-r--r--kernel/sched/core.c15
-rw-r--r--kernel/sched/idle.c16
-rw-r--r--kernel/time/clockevents.c6
-rw-r--r--kernel/trace/trace_output.c3
-rw-r--r--lib/Kconfig.debug1
-rw-r--r--lib/Kconfig.kasan8
-rw-r--r--lib/find_last_bit.c41
-rw-r--r--lib/rhashtable.c11
-rw-r--r--lib/string.c2
-rw-r--r--mm/hwpoison-inject.c13
-rw-r--r--mm/memory-failure.c16
-rw-r--r--mm/page-writeback.c6
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_netlink.c4
-rw-r--r--net/bridge/br_private.h2
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/rtnetlink.c12
-rw-r--r--net/core/skbuff.c30
-rw-r--r--net/dccp/ipv4.c3
-rw-r--r--net/dccp/ipv6.c3
-rw-r--r--net/dccp/minisocks.c3
-rw-r--r--net/dsa/dsa.c2
-rw-r--r--net/ipv4/inet_connection_sock.c34
-rw-r--r--net/ipv4/ping.c1
-rw-r--r--net/ipv4/route.c5
-rw-r--r--net/ipv4/tcp_ipv4.c3
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_output.c64
-rw-r--r--net/ipv6/ip6_gre.c9
-rw-r--r--net/ipv6/tcp_ipv6.c3
-rw-r--r--net/mpls/af_mpls.c125
-rw-r--r--net/mpls/internal.h6
-rw-r--r--net/netfilter/nf_tables_api.c3
-rw-r--r--net/netfilter/nft_reject.c2
-rw-r--r--net/netfilter/nft_reject_inet.c2
-rw-r--r--net/netlink/af_netlink.c6
-rw-r--r--net/sched/act_connmark.c2
-rw-r--r--net/tipc/bearer.c17
-rw-r--r--net/tipc/link.c16
-rw-r--r--net/tipc/server.c9
-rw-r--r--net/tipc/socket.c3
-rw-r--r--net/unix/garbage.c70
-rw-r--r--sound/soc/codecs/mc13783.c4
-rw-r--r--sound/soc/codecs/uda1380.c2
-rw-r--r--sound/soc/codecs/wm8960.c2
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/davinci/davinci-mcasp.c2
-rw-r--r--sound/soc/soc-dapm.c11
-rw-r--r--tools/lib/api/Makefile2
-rw-r--r--tools/lib/traceevent/event-parse.c2
-rw-r--r--tools/perf/bench/futex-requeue.c15
-rw-r--r--tools/perf/bench/numa.c12
-rw-r--r--tools/perf/builtin-kmem.c58
-rw-r--r--tools/perf/builtin-report.c2
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/builtin-trace.c10
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/probe-finder.c4
-rw-r--r--tools/testing/selftests/powerpc/pmu/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
431 files changed, 5137 insertions, 2707 deletions
diff --git a/CREDITS b/CREDITS
index 40cc4bf..ec7e6c7 100644
--- a/CREDITS
+++ b/CREDITS
@@ -3709,6 +3709,13 @@ N: Dirk Verworner
D: Co-author of German book ``Linux-Kernel-Programmierung''
D: Co-founder of Berlin Linux User Group
+N: Andrew Victor
+E: linux@maxim.org.za
+W: http://maxim.org.za/at91_26.html
+D: First maintainer of Atmel ARM-based SoC, aka AT91
+D: Introduced support for at91rm9200, the first chip of AT91 family
+S: South Africa
+
N: Riku Voipio
E: riku.voipio@iki.fi
D: Author of PCA9532 LED and Fintek f75375s hwmon driver
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 653d5d7..31d1d65 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -505,7 +505,10 @@ at module load time (for a module) with:
The addresses are normal I2C addresses. The adapter is the string
name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
-It is *NOT* i2c-<n> itself.
+It is *NOT* i2c-<n> itself. Also, the comparison is done ignoring
+spaces, so if the name is "This is an I2C chip" you can say
+adapter_name=ThisisanI2cchip. This is because it's hard to pass in
+spaces in kernel parameters.
The debug flags are bit flags for each BMC found, they are:
IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index 750401f..15dfce7 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -253,7 +253,7 @@ input driver:
GPIO support
~~~~~~~~~~~~
ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
-and GpioInt. These resources are used be used to pass GPIO numbers used by
+and GpioInt. These resources can be used to pass GPIO numbers used by
the device to the driver. ACPI 5.1 extended this with _DSD (Device
Specific Data) which made it possible to name the GPIOs among other things.
diff --git a/Documentation/acpi/gpio-properties.txt b/Documentation/acpi/gpio-properties.txt
index ae36fcf..f35dad1 100644
--- a/Documentation/acpi/gpio-properties.txt
+++ b/Documentation/acpi/gpio-properties.txt
@@ -1,9 +1,9 @@
_DSD Device Properties Related to GPIO
--------------------------------------
-With the release of ACPI 5.1 and the _DSD configuration objecte names
-can finally be given to GPIOs (and other things as well) returned by
-_CRS. Previously, we were only able to use an integer index to find
+With the release of ACPI 5.1, the _DSD configuration object finally
+allows names to be given to GPIOs (and other things as well) returned
+by _CRS. Previously, we were only able to use an integer index to find
the corresponding GPIO, which is pretty error prone (it depends on
the _CRS output ordering, for example).
diff --git a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
index 974624e..161448d 100644
--- a/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
+++ b/Documentation/devicetree/bindings/arm/omap/l3-noc.txt
@@ -6,6 +6,7 @@ provided by Arteris.
Required properties:
- compatible : Should be "ti,omap3-l3-smx" for OMAP3 family
Should be "ti,omap4-l3-noc" for OMAP4 family
+ Should be "ti,omap5-l3-noc" for OMAP5 family
Should be "ti,dra7-l3-noc" for DRA7 family
Should be "ti,am4372-l3-noc" for AM43 family
- reg: Contains L3 register address range for each noc domain.
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
index a4873e5..e30e184 100644
--- a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
80 81 68 69
70 71 72 73
74 75 76 77>;
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
"saif0", "saif1", "i2c0", "i2c1",
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff --git a/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
new file mode 100644
index 0000000..be78968
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/abracon,abx80x.txt
@@ -0,0 +1,30 @@
+Abracon ABX80X I2C ultra low power RTC/Alarm chip
+
+The Abracon ABX80X family consist of the ab0801, ab0803, ab0804, ab0805, ab1801,
+ab1803, ab1804 and ab1805. The ab0805 is the superset of ab080x and the ab1805
+is the superset of ab180x.
+
+Required properties:
+
+ - "compatible": should one of:
+ "abracon,abx80x"
+ "abracon,ab0801"
+ "abracon,ab0803"
+ "abracon,ab0804"
+ "abracon,ab0805"
+ "abracon,ab1801"
+ "abracon,ab1803"
+ "abracon,ab1804"
+ "abracon,ab1805"
+ Using "abracon,abx80x" will enable chip autodetection.
+ - "reg": I2C bus address of the device
+
+Optional properties:
+
+The abx804 and abx805 have a trickle charger that is able to charge the
+connected battery or supercap. Both the following properties have to be defined
+and valid to enable charging:
+
+ - "abracon,tc-diode": should be "standard" (0.6V) or "schottky" (0.3V)
+ - "abracon,tc-resistor": should be <0>, <3>, <6> or <11>. 0 disables the output
+ resistor, the other values are in ohm.
diff --git a/Documentation/kasan.txt b/Documentation/kasan.txt
index 092fc10..4692241 100644
--- a/Documentation/kasan.txt
+++ b/Documentation/kasan.txt
@@ -9,7 +9,9 @@ a fast and comprehensive solution for finding use-after-free and out-of-bounds
bugs.
KASan uses compile-time instrumentation for checking every memory access,
-therefore you will need a certain version of GCC > 4.9.2
+therefore you will need a gcc version of 4.9.2 or later. KASan could detect out
+of bounds accesses to stack or global variables, but only if gcc 5.0 or later was
+used to built the kernel.
Currently KASan is supported only for x86_64 architecture and requires that the
kernel be built with the SLUB allocator.
@@ -23,8 +25,8 @@ To enable KASAN configure kernel with:
and choose between CONFIG_KASAN_OUTLINE and CONFIG_KASAN_INLINE. Outline/inline
is compiler instrumentation types. The former produces smaller binary the
-latter is 1.1 - 2 times faster. Inline instrumentation requires GCC 5.0 or
-latter.
+latter is 1.1 - 2 times faster. Inline instrumentation requires a gcc version
+of 5.0 or later.
Currently KASAN works only with the SLUB memory allocator.
For better bug detection and nicer report, enable CONFIG_STACKTRACE and put
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f6befa9..61ab162 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3787,6 +3787,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
READ_CAPACITY_16 command);
f = NO_REPORT_OPCODES (don't use report opcodes
command, uas only);
+ g = MAX_SECTORS_240 (don't transfer more than
+ 240 sectors at a time, uas only);
h = CAPACITY_HEURISTICS (decrease the
reported device capacity by one
sector if the number is odd);
diff --git a/Documentation/module-signing.txt b/Documentation/module-signing.txt
index 09c2382..c72702e 100644
--- a/Documentation/module-signing.txt
+++ b/Documentation/module-signing.txt
@@ -119,9 +119,9 @@ Most notably, in the x509.genkey file, the req_distinguished_name section
should be altered from the default:
[ req_distinguished_name ]
- O = Magrathea
- CN = Glacier signing key
- emailAddress = slartibartfast@magrathea.h2g2
+ #O = Unspecified company
+ CN = Build time autogenerated kernel key
+ #emailAddress = unspecified.user@unspecified.company
The generated RSA key size can also be set with:
diff --git a/Documentation/networking/mpls-sysctl.txt b/Documentation/networking/mpls-sysctl.txt
index 639ddf0..9ed15f8 100644
--- a/Documentation/networking/mpls-sysctl.txt
+++ b/Documentation/networking/mpls-sysctl.txt
@@ -18,3 +18,12 @@ platform_labels - INTEGER
Possible values: 0 - 1048575
Default: 0
+
+conf/<interface>/input - BOOL
+ Control whether packets can be input on this interface.
+
+ If disabled, packets will be discarded without further
+ processing.
+
+ 0 - disabled (default)
+ not 0 - enabled
diff --git a/Documentation/networking/scaling.txt b/Documentation/networking/scaling.txt
index cbfac09..59f4db2 100644
--- a/Documentation/networking/scaling.txt
+++ b/Documentation/networking/scaling.txt
@@ -282,7 +282,7 @@ following is true:
- The current CPU's queue head counter >= the recorded tail counter
value in rps_dev_flow[i]
-- The current CPU is unset (equal to RPS_NO_CPU)
+- The current CPU is unset (>= nr_cpu_ids)
- The current CPU is offline
After this check, the packet is sent to the (possibly updated) current
diff --git a/Documentation/powerpc/transactional_memory.txt b/Documentation/powerpc/transactional_memory.txt
index ba0a2a4..ded6979 100644
--- a/Documentation/powerpc/transactional_memory.txt
+++ b/Documentation/powerpc/transactional_memory.txt
@@ -74,23 +74,22 @@ Causes of transaction aborts
Syscalls
========
-Syscalls made from within an active transaction will not be performed and the
-transaction will be doomed by the kernel with the failure code TM_CAUSE_SYSCALL
-| TM_CAUSE_PERSISTENT.
+Performing syscalls from within transaction is not recommended, and can lead
+to unpredictable results.
-Syscalls made from within a suspended transaction are performed as normal and
-the transaction is not explicitly doomed by the kernel. However, what the
-kernel does to perform the syscall may result in the transaction being doomed
-by the hardware. The syscall is performed in suspended mode so any side
-effects will be persistent, independent of transaction success or failure. No
-guarantees are provided by the kernel about which syscalls will affect
-transaction success.
+Syscalls do not by design abort transactions, but beware: The kernel code will
+not be running in transactional state. The effect of syscalls will always
+remain visible, but depending on the call they may abort your transaction as a
+side-effect, read soon-to-be-aborted transactional data that should not remain
+invisible, etc. If you constantly retry a transaction that constantly aborts
+itself by calling a syscall, you'll have a livelock & make no progress.
-Care must be taken when relying on syscalls to abort during active transactions
-if the calls are made via a library. Libraries may cache values (which may
-give the appearance of success) or perform operations that cause transaction
-failure before entering the kernel (which may produce different failure codes).
-Examples are glibc's getpid() and lazy symbol resolution.
+Simple syscalls (e.g. sigprocmask()) "could" be OK. Even things like write()
+from, say, printf() should be OK as long as the kernel does not access any
+memory that was accessed transactionally.
+
+Consider any syscalls that happen to work as debug-only -- not recommended for
+production use. Best to queue them up till after the transaction is over.
Signals
@@ -177,7 +176,8 @@ kernel aborted a transaction:
TM_CAUSE_RESCHED Thread was rescheduled.
TM_CAUSE_TLBI Software TLB invalid.
TM_CAUSE_FAC_UNAV FP/VEC/VSX unavailable trap.
- TM_CAUSE_SYSCALL Syscall from active transaction.
+ TM_CAUSE_SYSCALL Currently unused; future syscalls that must abort
+ transactions for consistency will use this.
TM_CAUSE_SIGNAL Signal delivered.
TM_CAUSE_MISC Currently unused.
TM_CAUSE_ALIGNMENT Alignment fault.
diff --git a/MAINTAINERS b/MAINTAINERS
index 2e5bbc0..590304b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -892,11 +892,10 @@ S: Maintained
F: arch/arm/mach-alpine/
ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
-M: Andrew Victor <linux@maxim.org.za>
M: Nicolas Ferre <nicolas.ferre@atmel.com>
+M: Alexandre Belloni <alexandre.belloni@free-electrons.com>
M: Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W: http://maxim.org.za/at91_26.html
W: http://www.linux4sam.org
S: Supported
F: arch/arm/mach-at91/
@@ -990,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c
F: drivers/clocksource/timer-atlas7.c
N: [^a-z]sirf
+ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
+M: Baruch Siach <baruch@tkos.co.il>
+L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S: Maintained
+N: digicolor
+
ARM/EBSA110 MACHINE SUPPORT
M: Russell King <linux@arm.linux.org.uk>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1439,9 +1444,10 @@ ARM/SOCFPGA ARCHITECTURE
M: Dinh Nguyen <dinguyen@opensource.altera.com>
S: Maintained
F: arch/arm/mach-socfpga/
+F: arch/arm/boot/dts/socfpga*
+F: arch/arm/configs/socfpga_defconfig
W: http://www.rocketboards.org
-T: git://git.rocketboards.org/linux-socfpga.git
-T: git://git.rocketboards.org/linux-socfpga-next.git
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
M: Dinh Nguyen <dinguyen@opensource.altera.com>
@@ -2116,8 +2122,9 @@ S: Supported
F: drivers/net/ethernet/broadcom/bnx2x/
BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
-M: Christian Daudt <bcm@fixthebug.org>
M: Florian Fainelli <f.fainelli@gmail.com>
+M: Ray Jui <rjui@broadcom.com>
+M: Scott Branden <sbranden@broadcom.com>
L: bcm-kernel-feedback-list@broadcom.com
T: git git://github.com/broadcom/mach-bcm
S: Maintained
@@ -2168,7 +2175,6 @@ S: Maintained
F: drivers/usb/gadget/udc/bcm63xx_udc.*
BROADCOM BCM7XXX ARM ARCHITECTURE
-M: Marc Carino <marc.ceeeee@gmail.com>
M: Brian Norris <computersforpeace@gmail.com>
M: Gregory Fong <gregory.0xf0@gmail.com>
M: Florian Fainelli <f.fainelli@gmail.com>
@@ -3413,6 +3419,13 @@ F: drivers/gpu/drm/rcar-du/
F: drivers/gpu/drm/shmobile/
F: include/linux/platform_data/shmob_drm.h
+DRM DRIVERS FOR ROCKCHIP
+M: Mark Yao <mark.yao@rock-chips.com>
+L: dri-devel@lists.freedesktop.org
+S: Maintained
+F: drivers/gpu/drm/rockchip/
+F: Documentation/devicetree/bindings/video/rockchip*
+
DSBR100 USB FM RADIO DRIVER
M: Alexey Klimov <klimov.linux@gmail.com>
L: linux-media@vger.kernel.org
@@ -5035,17 +5048,19 @@ S: Orphan
F: drivers/video/fbdev/imsttfb.c
INFINIBAND SUBSYSTEM
-M: Roland Dreier <roland@kernel.org>
+M: Doug Ledford <dledford@redhat.com>
M: Sean Hefty <sean.hefty@intel.com>
M: Hal Rosenstock <hal.rosenstock@gmail.com>
L: linux-rdma@vger.kernel.org
W: http://www.openfabrics.org/
Q: http://patchwork.kernel.org/project/linux-rdma/list/
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T: git git://github.com/dledford/linux.git
S: Supported
F: Documentation/infiniband/
F: drivers/infiniband/
F: include/uapi/linux/if_infiniband.h
+F: include/uapi/rdma/
+F: include/rdma/
INOTIFY
M: John McCutchan <john@johnmccutchan.com>
@@ -5798,6 +5813,7 @@ F: drivers/scsi/53c700*
LED SUBSYSTEM
M: Bryan Wu <cooloney@gmail.com>
M: Richard Purdie <rpurdie@rpsys.net>
+M: Jacek Anaszewski <j.anaszewski@samsung.com>
L: linux-leds@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
S: Maintained
@@ -10523,7 +10539,6 @@ F: include/linux/virtio_console.h
F: include/uapi/linux/virtio_console.h
VIRTIO CORE, NET AND BLOCK DRIVERS
-M: Rusty Russell <rusty@rustcorp.com.au>
M: "Michael S. Tsirkin" <mst@redhat.com>
L: virtualization@lists.linux-foundation.org
S: Maintained
@@ -11031,6 +11046,7 @@ F: drivers/media/pci/zoran/
ZRAM COMPRESSED RAM BLOCK DEVICE DRVIER
M: Minchan Kim <minchan@kernel.org>
M: Nitin Gupta <ngupta@vflare.org>
+R: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/block/zram/
diff --git a/Makefile b/Makefile
index 7ff1239..eae539d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 1
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc3
NAME = Hurr durr I'ma sheep
# *DOCUMENTATION*
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 8ae29c9..c17097d 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -49,7 +49,7 @@
pinctrl-0 = <&matrix_keypad_pins>;
debounce-delay-ms = <5>;
- col-scan-delay-us = <1500>;
+ col-scan-delay-us = <5>;
row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH /* Bank5, pin5 */
&gpio5 6 GPIO_ACTIVE_HIGH>; /* Bank5, pin6 */
@@ -473,7 +473,7 @@
interrupt-parent = <&gpio0>;
interrupts = <31 0>;
- wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
touchscreen-size-x = <480>;
touchscreen-size-y = <272>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 15f198e..7128fad 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -18,6 +18,7 @@
aliases {
rtc0 = &mcp_rtc;
rtc1 = &tps659038_rtc;
+ rtc2 = &rtc;
};
memory {
@@ -83,7 +84,7 @@
gpio_fan: gpio_fan {
/* Based on 5v 500mA AFB02505HHB */
compatible = "gpio-fan";
- gpios = <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>;
+ gpios = <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
gpio-fan,speed-map = <0 0>,
<13000 1>;
#cooling-cells = <2>;
@@ -130,8 +131,8 @@
uart3_pins_default: uart3_pins_default {
pinctrl-single,pins = <
- 0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */
- 0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */
+ 0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */
+ 0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */
>;
};
@@ -455,7 +456,7 @@
mcp_rtc: rtc@6f {
compatible = "microchip,mcp7941x";
reg = <0x6f>;
- interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>; /* IRQ_SYS_1N */
+ interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>; /* IRQ_SYS_1N */
pinctrl-names = "default";
pinctrl-0 = <&mcp79410_pins_default>;
@@ -478,7 +479,7 @@
&uart3 {
status = "okay";
interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
- <&dra7_pmx_core 0x248>;
+ <&dra7_pmx_core 0x3f8>;
pinctrl-names = "default";
pinctrl-0 = <&uart3_pins_default>;
diff --git a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
index e3b08fb..990e8a2 100644
--- a/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
+++ b/arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
@@ -105,6 +105,10 @@
};
internal-regs {
+ rtc@10300 {
+ /* No crystal connected to the internal RTC */
+ status = "disabled";
+ };
serial@12000 {
status = "okay";
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 5332b57..f03a091 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -911,7 +911,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>,
- <0x4ae06014 0x4>, <0x4a003b20 0x8>,
+ <0x4ae06014 0x4>, <0x4a003b20 0xc>,
<0x4ae0c158 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -944,7 +944,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>,
- <0x4ae06010 0x4>, <0x4a0025cc 0x8>,
+ <0x4ae06010 0x4>, <0x4a0025cc 0xc>,
<0x4a002470 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -977,7 +977,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>,
- <0x4ae06010 0x4>, <0x4a0025e0 0x8>,
+ <0x4ae06010 0x4>, <0x4a0025e0 0xc>,
<0x4a00246c 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -1010,7 +1010,7 @@
ti,clock-cycles = <16>;
reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>,
- <0x4ae06010 0x4>, <0x4a003b08 0x8>,
+ <0x4ae06010 0x4>, <0x4a003b08 0xc>,
<0x4ae0c154 0x4>;
reg-names = "setup-address", "control-address",
"int-address", "efuse-address",
@@ -1203,7 +1203,7 @@
status = "disabled";
};
- rtc@48838000 {
+ rtc: rtc@48838000 {
compatible = "ti,am3352-rtc";
reg = <0x48838000 0x100>;
interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index 8de12af7..d6b49e5 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -9,6 +9,7 @@
#include <dt-bindings/sound/samsung-i2s.h>
#include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/maxim,max77686.h>
#include "exynos4412.dtsi"
/ {
@@ -105,6 +106,8 @@
rtc@10070000 {
status = "okay";
+ clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>;
+ clock-names = "rtc", "rtc_src";
};
g2d@10800000 {
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index 2657e84..1eca97e 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -567,6 +567,7 @@
num-slots = <1>;
broken-cd;
cap-sdio-irq;
+ keep-power-in-suspend;
card-detect-delay = <200>;
samsung,dw-mshc-ciu-div = <3>;
samsung,dw-mshc-sdr-timing = <2 3>;
diff --git a/arch/arm/boot/dts/exynos5420-trip-points.dtsi b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
index 5d31fc1..2180a01 100644
--- a/arch/arm/boot/dts/exynos5420-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5420-trip-points.dtsi
@@ -28,7 +28,7 @@ trips {
type = "active";
};
cpu-crit-0 {
- temperature = <1200000>; /* millicelsius */
+ temperature = <120000>; /* millicelsius */
hysteresis = <0>; /* millicelsius */
type = "critical";
};
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index f67b23f..4531753 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -536,6 +536,7 @@
clock-names = "dp";
phys = <&dp_phy>;
phy-names = "dp";
+ power-domains = <&disp_pd>;
};
mipi_phy: video-phy@10040714 {
diff --git a/arch/arm/boot/dts/exynos5440-trip-points.dtsi b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
index 48adfa8..356e963 100644
--- a/arch/arm/boot/dts/exynos5440-trip-points.dtsi
+++ b/arch/arm/boot/dts/exynos5440-trip-points.dtsi
@@ -18,7 +18,7 @@ trips {
type = "active";
};
cpu-crit-0 {
- temperature = <1050000>; /* millicelsius */
+ temperature = <105000>; /* millicelsius */
hysteresis = <0>; /* millicelsius */
type = "critical";
};
diff --git a/arch/arm/boot/dts/imx23-olinuxino.dts b/arch/arm/boot/dts/imx23-olinuxino.dts
index 7e6eef2..8204539 100644
--- a/arch/arm/boot/dts/imx23-olinuxino.dts
+++ b/arch/arm/boot/dts/imx23-olinuxino.dts
@@ -12,6 +12,7 @@
*/
/dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
#include "imx23.dtsi"
/ {
@@ -93,6 +94,7 @@
ahb@80080000 {
usb0: usb@80080000 {
+ dr_mode = "host";
vbus-supply = <&reg_usb0_vbus>;
status = "okay";
};
@@ -122,7 +124,7 @@
user {
label = "green";
- gpios = <&gpio2 1 1>;
+ gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
};
};
};
diff --git a/arch/arm/boot/dts/imx25.dtsi b/arch/arm/boot/dts/imx25.dtsi
index e4d3aec..677f81d 100644
--- a/arch/arm/boot/dts/imx25.dtsi
+++ b/arch/arm/boot/dts/imx25.dtsi
@@ -428,6 +428,7 @@
pwm4: pwm@53fc8000 {
compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+ #pwm-cells = <2>;
reg = <0x53fc8000 0x4000>;
clocks = <&clks 108>, <&clks 52>;
clock-names = "ipg", "per";
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
index 25e25f8..4e073e8 100644
--- a/arch/arm/boot/dts/imx28.dtsi
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -913,7 +913,7 @@
80 81 68 69
70 71 72 73
74 75 76 77>;
- interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+ interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
"saif0", "saif1", "i2c0", "i2c1",
"auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
"auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 19cc269..1ce6133 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -31,6 +31,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio4 15 0>;
+ enable-active-high;
};
reg_usb_h1_vbus: regulator@1 {
@@ -40,6 +41,7 @@
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
gpio = <&gpio1 0 0>;
+ enable-active-high;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 46b2fed..3b24b126 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -185,7 +185,6 @@
&i2c3 {
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_i2c3>;
- pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
status = "okay";
max7310_a: gpio@30 {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index a293158..5c16145 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -498,6 +498,8 @@
DRVDD-supply = <&vmmc2>;
IOVDD-supply = <&vio>;
DVDD-supply = <&vio>;
+
+ ai3x-micbias-vg = <1>;
};
tlv320aic3x_aux: tlv320aic3x@19 {
@@ -509,6 +511,8 @@
DRVDD-supply = <&vmmc2>;
IOVDD-supply = <&vio>;
DVDD-supply = <&vio>;
+
+ ai3x-micbias-vg = <2>;
};
tsl2563: tsl2563@29 {
diff --git a/arch/arm/boot/dts/omap3.dtsi b/arch/arm/boot/dts/omap3.dtsi
index d18a90f..69a40cf 100644
--- a/arch/arm/boot/dts/omap3.dtsi
+++ b/arch/arm/boot/dts/omap3.dtsi
@@ -456,6 +456,7 @@
};
mmu_isp: mmu@480bd400 {
+ #iommu-cells = <0>;
compatible = "ti,omap2-iommu";
reg = <0x480bd400 0x80>;
interrupts = <24>;
@@ -464,6 +465,7 @@
};
mmu_iva: mmu@5d000000 {
+ #iommu-cells = <0>;
compatible = "ti,omap2-iommu";
reg = <0x5d000000 0x80>;
interrupts = <28>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index efe5f73..7d24ae0 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -128,7 +128,7 @@
* hierarchy.
*/
ocp {
- compatible = "ti,omap4-l3-noc", "simple-bus";
+ compatible = "ti,omap5-l3-noc", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm/boot/dts/r8a7791-koelsch.dts b/arch/arm/boot/dts/r8a7791-koelsch.dts
index 74c3212..824ddab 100644
--- a/arch/arm/boot/dts/r8a7791-koelsch.dts
+++ b/arch/arm/boot/dts/r8a7791-koelsch.dts
@@ -545,7 +545,7 @@
compatible = "adi,adv7511w";
reg = <0x39>;
interrupt-parent = <&gpio3>;
- interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+ interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
adi,input-depth = <8>;
adi,input-colorspace = "rgb";
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index bfd3f1c..2201cd5 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -1017,23 +1017,6 @@
status = "disabled";
};
- vmmci: regulator-gpio {
- compatible = "regulator-gpio";
-
- regulator-min-microvolt = <1800000>;
- regulator-max-microvolt = <2900000>;
- regulator-name = "mmci-reg";
- regulator-type = "voltage";
-
- startup-delay-us = <100>;
- enable-active-high;
-
- states = <1800000 0x1
- 2900000 0x0>;
-
- status = "disabled";
- };
-
mcde@a0350000 {
compatible = "stericsson,mcde";
reg = <0xa0350000 0x1000>, /* MCDE */
diff --git a/arch/arm/boot/dts/ste-href.dtsi b/arch/arm/boot/dts/ste-href.dtsi
index bf8f0ed..744c1e3 100644
--- a/arch/arm/boot/dts/ste-href.dtsi
+++ b/arch/arm/boot/dts/ste-href.dtsi
@@ -111,6 +111,21 @@
pinctrl-1 = <&i2c3_sleep_mode>;
};
+ vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+ enable-active-high;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
+ };
+
// External Micro SD slot
sdi0_per1@80126000 {
arm,primecell-periphid = <0x10480180>;
diff --git a/arch/arm/boot/dts/ste-snowball.dts b/arch/arm/boot/dts/ste-snowball.dts
index 206826a..1bc84eb 100644
--- a/arch/arm/boot/dts/ste-snowball.dts
+++ b/arch/arm/boot/dts/ste-snowball.dts
@@ -146,8 +146,21 @@
};
vmmci: regulator-gpio {
+ compatible = "regulator-gpio";
+
gpios = <&gpio7 4 0x4>;
enable-gpio = <&gpio6 25 0x4>;
+
+ regulator-min-microvolt = <1800000>;
+ regulator-max-microvolt = <2900000>;
+ regulator-name = "mmci-reg";
+ regulator-type = "voltage";
+
+ startup-delay-us = <100>;
+ enable-active-high;
+
+ states = <1800000 0x1
+ 2900000 0x0>;
};
// External Micro SD slot
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index ab86655..0ca4a3e 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -39,11 +39,14 @@ CONFIG_ARCH_HIP04=y
CONFIG_ARCH_KEYSTONE=y
CONFIG_ARCH_MESON=y
CONFIG_ARCH_MXC=y
+CONFIG_SOC_IMX50=y
CONFIG_SOC_IMX51=y
CONFIG_SOC_IMX53=y
CONFIG_SOC_IMX6Q=y
CONFIG_SOC_IMX6SL=y
+CONFIG_SOC_IMX6SX=y
CONFIG_SOC_VF610=y
+CONFIG_SOC_LS1021A=y
CONFIG_ARCH_OMAP3=y
CONFIG_ARCH_OMAP4=y
CONFIG_SOC_OMAP5=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 9ff7b54..3743ca2 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -393,7 +393,7 @@ CONFIG_TI_EDMA=y
CONFIG_DMA_OMAP=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXTCON=m
-CONFIG_EXTCON_GPIO=m
+CONFIG_EXTCON_USB_GPIO=m
CONFIG_EXTCON_PALMAS=m
CONFIG_TI_EMIF=m
CONFIG_PWM=y
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
index 8e3fcb9..2ef282f 100644
--- a/arch/arm/include/asm/dma-iommu.h
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -25,7 +25,7 @@ struct dma_iommu_mapping {
};
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 2f7e6ff..0b579b2 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -110,5 +110,6 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
bool xen_arch_need_swiotlb(struct device *dev,
unsigned long pfn,
unsigned long mfn);
+unsigned long xen_get_swiotlb_free_pages(unsigned int order);
#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 91c7ba1..213919b 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -303,12 +303,17 @@ static int probe_current_pmu(struct arm_pmu *pmu)
static int of_pmu_irq_cfg(struct platform_device *pdev)
{
- int i;
+ int i, irq;
int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
if (!irqs)
return -ENOMEM;
+ /* Don't bother with PPIs; they're already affine */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 && irq_is_percpu(irq))
+ return 0;
+
for (i = 0; i < pdev->num_resources; ++i) {
struct device_node *dn;
int cpu;
@@ -317,7 +322,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev)
i);
if (!dn) {
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
- of_node_full_name(dn), i);
+ of_node_full_name(pdev->dev.of_node), i);
break;
}
diff --git a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
index fb8d4a2..a5edd7d 100644
--- a/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
+++ b/arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de>
+ * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index cbefbd7..661d753 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -112,6 +112,7 @@
#define OMAP3430_VC_CMD_ONLP_SHIFT 16
#define OMAP3430_VC_CMD_RET_SHIFT 8
#define OMAP3430_VC_CMD_OFF_SHIFT 0
+#define OMAP3430_SREN_MASK (1 << 4)
#define OMAP3430_HSEN_MASK (1 << 3)
#define OMAP3430_MCODE_MASK (0x7 << 0)
#define OMAP3430_VALID_MASK (1 << 24)
diff --git a/arch/arm/mach-omap2/prm-regbits-44xx.h b/arch/arm/mach-omap2/prm-regbits-44xx.h
index b1c7a33..e794828 100644
--- a/arch/arm/mach-omap2/prm-regbits-44xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-44xx.h
@@ -35,6 +35,7 @@
#define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT 1
#define OMAP4430_GLOBAL_WUEN_MASK (1 << 16)
#define OMAP4430_HSMCODE_MASK (0x7 << 0)
+#define OMAP4430_SRMODEEN_MASK (1 << 4)
#define OMAP4430_HSMODEEN_MASK (1 << 3)
#define OMAP4430_HSSCLL_SHIFT 24
#define OMAP4430_ICEPICK_RST_SHIFT 9
diff --git a/arch/arm/mach-omap2/vc.c b/arch/arm/mach-omap2/vc.c
index be9ef83..076fd20 100644
--- a/arch/arm/mach-omap2/vc.c
+++ b/arch/arm/mach-omap2/vc.c
@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
* idle. And we can also scale voltages to zero for off-idle.
* Note that no actual voltage scaling during off-idle will
* happen unless the board specific twl4030 PMIC scripts are
- * loaded.
+ * loaded. See also omap_vc_i2c_init for comments regarding
+ * erratum i531.
*/
val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
return;
}
+ /*
+ * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
+ * erratum i531 "Extra Power Consumed When Repeated Start Operation
+ * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
+ * Otherwise I2C4 eventually leads into about 23mW extra power being
+ * consumed even during off idle using VMODE.
+ */
i2c_high_speed = voltdm->pmic->i2c_high_speed;
if (i2c_high_speed)
- voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
+ voltdm->rmw(vc->common->i2c_cfg_clear_mask,
vc->common->i2c_cfg_hsen_mask,
vc->common->i2c_cfg_reg);
diff --git a/arch/arm/mach-omap2/vc.h b/arch/arm/mach-omap2/vc.h
index cdbdd78..89b83b7 100644
--- a/arch/arm/mach-omap2/vc.h
+++ b/arch/arm/mach-omap2/vc.h
@@ -34,6 +34,7 @@ struct voltagedomain;
* @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
* @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
* @i2c_cfg_reg: I2C configuration register offset
+ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
* @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
* @i2c_mcode_mask: MCODE field mask for I2C config register
*
@@ -52,6 +53,7 @@ struct omap_vc_common {
u8 cmd_ret_shift;
u8 cmd_off_shift;
u8 i2c_cfg_reg;
+ u8 i2c_cfg_clear_mask;
u8 i2c_cfg_hsen_mask;
u8 i2c_mcode_mask;
};
diff --git a/arch/arm/mach-omap2/vc3xxx_data.c b/arch/arm/mach-omap2/vc3xxx_data.c
index 75bc4aa..71d74c9 100644
--- a/arch/arm/mach-omap2/vc3xxx_data.c
+++ b/arch/arm/mach-omap2/vc3xxx_data.c
@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
.cmd_onlp_shift = OMAP3430_VC_CMD_ONLP_SHIFT,
.cmd_ret_shift = OMAP3430_VC_CMD_RET_SHIFT,
.cmd_off_shift = OMAP3430_VC_CMD_OFF_SHIFT,
+ .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
.i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
.i2c_cfg_reg = OMAP3_PRM_VC_I2C_CFG_OFFSET,
.i2c_mcode_mask = OMAP3430_MCODE_MASK,
diff --git a/arch/arm/mach-omap2/vc44xx_data.c b/arch/arm/mach-omap2/vc44xx_data.c
index 085e5d6..2abd5fa 100644
--- a/arch/arm/mach-omap2/vc44xx_data.c
+++ b/arch/arm/mach-omap2/vc44xx_data.c
@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
.cmd_ret_shift = OMAP4430_RET_SHIFT,
.cmd_off_shift = OMAP4430_OFF_SHIFT,
.i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
+ .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
.i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
.i2c_mcode_mask = OMAP4430_HSMCODE_MASK,
};
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index 8896e71..f096836 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111
config PXA310_ULPI
bool
+config PXA_SYSTEMS_CPLDS
+ tristate "Motherboard cplds"
+ default ARCH_LUBBOCK || MACH_MAINSTONE
+ help
+ This driver supports the Lubbock and Mainstone multifunction chip
+ found on the pxa25x development platform system (Lubbock) and pxa27x
+ development platform system (Mainstone). This IO board supports the
+ interrupts handling, ethernet controller, flash chips, etc ...
+
endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index eb0bf76..4087d33 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
obj-$(CONFIG_MACH_RAUMFELD_SPEAKER) += raumfeld.o
obj-$(CONFIG_MACH_ZIPIT2) += z2.o
+obj-$(CONFIG_PXA_SYSTEMS_CPLDS) += pxa_cplds_irqs.o
obj-$(CONFIG_TOSA_BT) += tosa-bt.o
diff --git a/arch/arm/mach-pxa/include/mach/lubbock.h b/arch/arm/mach-pxa/include/mach/lubbock.h
index 958cd6af..1eecf79 100644
--- a/arch/arm/mach-pxa/include/mach/lubbock.h
+++ b/arch/arm/mach-pxa/include/mach/lubbock.h
@@ -37,7 +37,9 @@
#define LUB_GP __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)
/* Board specific IRQs */
-#define LUBBOCK_IRQ(x) (IRQ_BOARD_START + (x))
+#define LUBBOCK_NR_IRQS IRQ_BOARD_START
+
+#define LUBBOCK_IRQ(x) (LUBBOCK_NR_IRQS + (x))
#define LUBBOCK_SD_IRQ LUBBOCK_IRQ(0)
#define LUBBOCK_SA1111_IRQ LUBBOCK_IRQ(1)
#define LUBBOCK_USB_IRQ LUBBOCK_IRQ(2) /* usb connect */
@@ -47,8 +49,7 @@
#define LUBBOCK_USB_DISC_IRQ LUBBOCK_IRQ(6) /* usb disconnect */
#define LUBBOCK_LAST_IRQ LUBBOCK_IRQ(6)
-#define LUBBOCK_SA1111_IRQ_BASE (IRQ_BOARD_START + 16)
-#define LUBBOCK_NR_IRQS (IRQ_BOARD_START + 16 + 55)
+#define LUBBOCK_SA1111_IRQ_BASE (LUBBOCK_NR_IRQS + 32)
#ifndef __ASSEMBLY__
extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);
diff --git a/arch/arm/mach-pxa/include/mach/mainstone.h b/arch/arm/mach-pxa/include/mach/mainstone.h
index 1bfc4e8..e82a7d3 100644
--- a/arch/arm/mach-pxa/include/mach/mainstone.h
+++ b/arch/arm/mach-pxa/include/mach/mainstone.h
@@ -120,7 +120,9 @@
#define MST_PCMCIA_PWR_VCC_50 0x4 /* voltage VCC = 5.0V */
/* board specific IRQs */
-#define MAINSTONE_IRQ(x) (IRQ_BOARD_START + (x))
+#define MAINSTONE_NR_IRQS IRQ_BOARD_START
+
+#define MAINSTONE_IRQ(x) (MAINSTONE_NR_IRQS + (x))
#define MAINSTONE_MMC_IRQ MAINSTONE_IRQ(0)
#define MAINSTONE_USIM_IRQ MAINSTONE_IRQ(1)
#define MAINSTONE_USBC_IRQ MAINSTONE_IRQ(2)
@@ -136,6 +138,4 @@
#define MAINSTONE_S1_STSCHG_IRQ MAINSTONE_IRQ(14)
#define MAINSTONE_S1_IRQ MAINSTONE_IRQ(15)
-#define MAINSTONE_NR_IRQS (IRQ_BOARD_START + 16)
-
#endif
diff --git a/arch/arm/mach-pxa/lubbock.c b/arch/arm/mach-pxa/lubbock.c
index d8a1be6..4ac9ab8 100644
--- a/arch/arm/mach-pxa/lubbock.c
+++ b/arch/arm/mach-pxa/lubbock.c
@@ -12,6 +12,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
@@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
}
EXPORT_SYMBOL(lubbock_set_misc_wr);
-static unsigned long lubbock_irq_enabled;
-
-static void lubbock_mask_irq(struct irq_data *d)
-{
- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
- LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
-}
-
-static void lubbock_unmask_irq(struct irq_data *d)
-{
- int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
- /* the irq can be acknowledged only if deasserted, so it's done here */
- LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
- LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
-}
-
-static struct irq_chip lubbock_irq_chip = {
- .name = "FPGA",
- .irq_ack = lubbock_mask_irq,
- .irq_mask = lubbock_mask_irq,
- .irq_unmask = lubbock_unmask_irq,
-};
-
-static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
- do {
- /* clear our parent irq */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- if (likely(pending)) {
- irq = LUBBOCK_IRQ(0) + __ffs(pending);
- generic_handle_irq(irq);
- }
- pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
- } while (pending);
-}
-
-static void __init lubbock_init_irq(void)
-{
- int irq;
-
- pxa25x_init_irq();
-
- /* setup extra lubbock irqs */
- for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
- irq_set_chip_and_handler(irq, &lubbock_irq_chip,
- handle_level_irq);
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
-
- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void lubbock_irq_resume(void)
-{
- LUB_IRQ_MASK_EN = lubbock_irq_enabled;
-}
-
-static struct syscore_ops lubbock_irq_syscore_ops = {
- .resume = lubbock_irq_resume,
-};
-
-static int __init lubbock_irq_device_init(void)
-{
- if (machine_is_lubbock()) {
- register_syscore_ops(&lubbock_irq_syscore_ops);
- return 0;
- }
- return -ENODEV;
-}
-
-device_initcall(lubbock_irq_device_init);
-
-#endif
-
static int lubbock_udc_is_connected(void)
{
return (LUB_MISC_RD & (1 << 9)) == 0;
@@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = {
},
};
+static struct resource lubbock_cplds_resources[] = {
+ [0] = {
+ .start = LUBBOCK_FPGA_PHYS + 0xc0,
+ .end = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PXA_GPIO_TO_IRQ(0),
+ .end = PXA_GPIO_TO_IRQ(0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+ [2] = {
+ .start = LUBBOCK_IRQ(0),
+ .end = LUBBOCK_IRQ(6),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device lubbock_cplds_device = {
+ .name = "pxa_cplds_irqs",
+ .id = -1,
+ .resource = &lubbock_cplds_resources[0],
+ .num_resources = 3,
+};
+
+
static struct platform_device *devices[] __initdata = {
&sa1111_device,
&smc91x_device,
&lubbock_flash_device[0],
&lubbock_flash_device[1],
+ &lubbock_cplds_device,
};
static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
/* Maintainer: MontaVista Software Inc. */
.map_io = lubbock_map_io,
.nr_irqs = LUBBOCK_NR_IRQS,
- .init_irq = lubbock_init_irq,
+ .init_irq = pxa25x_init_irq,
.handle_irq = pxa25x_handle_irq,
.init_time = pxa_timer_init,
.init_machine = lubbock_init,
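The board file no longer implements its own irqchip; it only describes the FPGA to the new pxa_cplds_irqs driver through three resources: the register window, the chained GPIO0 interrupt, and the Linux IRQ range the demuxed sources should occupy. Because that range is pre-bound by the driver, existing consumers can keep requesting the historical LUBBOCK_*_IRQ numbers. A minimal consumer sketch; the handler, init function and dev_id are hypothetical:

#include <linux/interrupt.h>
#include <mach/lubbock.h>

static irqreturn_t sd_detect_handler(int irq, void *dev_id)
{
	/* card-detect handling would go here */
	return IRQ_HANDLED;
}

static int __init lubbock_sd_detect_init(void *dev_id)
{
	/* LUBBOCK_SD_IRQ still resolves to a valid virq because cplds_probe()
	 * pre-binds hwirqs 0..31 starting at LUBBOCK_IRQ(0) */
	return request_irq(LUBBOCK_SD_IRQ, sd_detect_handler, 0,
			   "lubbock-sd-detect", dev_id);
}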
diff --git a/arch/arm/mach-pxa/mainstone.c b/arch/arm/mach-pxa/mainstone.c
index 78b84c0..2c0658c 100644
--- a/arch/arm/mach-pxa/mainstone.c
+++ b/arch/arm/mach-pxa/mainstone.c
@@ -13,6 +13,7 @@
* published by the Free Software Foundation.
*/
#include <linux/gpio.h>
+#include <linux/gpio/machine.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
@@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = {
GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
};
-static unsigned long mainstone_irq_enabled;
-
-static void mainstone_mask_irq(struct irq_data *d)
-{
- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
- MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
-}
-
-static void mainstone_unmask_irq(struct irq_data *d)
-{
- int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
- /* the irq can be acknowledged only if deasserted, so it's done here */
- MST_INTSETCLR &= ~(1 << mainstone_irq);
- MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
-}
-
-static struct irq_chip mainstone_irq_chip = {
- .name = "FPGA",
- .irq_ack = mainstone_mask_irq,
- .irq_mask = mainstone_mask_irq,
- .irq_unmask = mainstone_unmask_irq,
-};
-
-static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
- unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
- do {
- /* clear useless edge notification */
- desc->irq_data.chip->irq_ack(&desc->irq_data);
- if (likely(pending)) {
- irq = MAINSTONE_IRQ(0) + __ffs(pending);
- generic_handle_irq(irq);
- }
- pending = MST_INTSETCLR & mainstone_irq_enabled;
- } while (pending);
-}
-
-static void __init mainstone_init_irq(void)
-{
- int irq;
-
- pxa27x_init_irq();
-
- /* setup extra Mainstone irqs */
- for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
- irq_set_chip_and_handler(irq, &mainstone_irq_chip,
- handle_level_irq);
- if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
- else
- set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
- }
- set_irq_flags(MAINSTONE_IRQ(8), 0);
- set_irq_flags(MAINSTONE_IRQ(12), 0);
-
- MST_INTMSKENA = 0;
- MST_INTSETCLR = 0;
-
- irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
- irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void mainstone_irq_resume(void)
-{
- MST_INTMSKENA = mainstone_irq_enabled;
-}
-
-static struct syscore_ops mainstone_irq_syscore_ops = {
- .resume = mainstone_irq_resume,
-};
-
-static int __init mainstone_irq_device_init(void)
-{
- if (machine_is_mainstone())
- register_syscore_ops(&mainstone_irq_syscore_ops);
-
- return 0;
-}
-
-device_initcall(mainstone_irq_device_init);
-
-#endif
-
-
static struct resource smc91x_resources[] = {
[0] = {
.start = (MST_ETH_PHYS + 0x300),
@@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = {
},
};
+static struct resource mst_cplds_resources[] = {
+ [0] = {
+ .start = MST_FPGA_PHYS + 0xc0,
+ .end = MST_FPGA_PHYS + 0xe0 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = PXA_GPIO_TO_IRQ(0),
+ .end = PXA_GPIO_TO_IRQ(0),
+ .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+ },
+ [2] = {
+ .start = MAINSTONE_IRQ(0),
+ .end = MAINSTONE_IRQ(15),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mst_cplds_device = {
+ .name = "pxa_cplds_irqs",
+ .id = -1,
+ .resource = &mst_cplds_resources[0],
+ .num_resources = 3,
+};
+
static struct platform_device *platform_devices[] __initdata = {
&smc91x_device,
&mst_flash_device[0],
&mst_flash_device[1],
&mst_gpio_keys_device,
+ &mst_cplds_device,
};
static struct pxaohci_platform_data mainstone_ohci_platform_data = {
@@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
.atag_offset = 0x100, /* BLOB boot parameter setting */
.map_io = mainstone_map_io,
.nr_irqs = MAINSTONE_NR_IRQS,
- .init_irq = mainstone_init_irq,
+ .init_irq = pxa27x_init_irq,
.handle_irq = pxa27x_handle_irq,
.init_time = pxa_timer_init,
.init_machine = mainstone_init,
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
new file mode 100644
index 0000000..f1aeb54
--- /dev/null
+++ b/arch/arm/mach-pxa/pxa_cplds_irqs.c
@@ -0,0 +1,200 @@
+/*
+ * Intel Reference Systems cplds
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * CPLDs motherboard driver, supporting the Lubbock and Mainstone SoC boards.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#define FPGA_IRQ_MASK_EN 0x0
+#define FPGA_IRQ_SET_CLR 0x10
+
+#define CPLDS_NB_IRQ 32
+
+struct cplds {
+ void __iomem *base;
+ int irq;
+ unsigned int irq_mask;
+ struct gpio_desc *gpio0;
+ struct irq_domain *irqdomain;
+};
+
+static irqreturn_t cplds_irq_handler(int in_irq, void *d)
+{
+ struct cplds *fpga = d;
+ unsigned long pending;
+ unsigned int bit;
+
+ pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+ for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+ generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+
+ return IRQ_HANDLED;
+}
+
+static void cplds_irq_mask_ack(struct irq_data *d)
+{
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+ unsigned int set, bit = BIT(cplds_irq);
+
+ fpga->irq_mask &= ~bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+ set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+ writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
+}
+
+static void cplds_irq_unmask(struct irq_data *d)
+{
+ struct cplds *fpga = irq_data_get_irq_chip_data(d);
+ unsigned int cplds_irq = irqd_to_hwirq(d);
+ unsigned int bit = BIT(cplds_irq);
+
+ fpga->irq_mask |= bit;
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+}
+
+static struct irq_chip cplds_irq_chip = {
+ .name = "pxa_cplds",
+ .irq_mask_ack = cplds_irq_mask_ack,
+ .irq_unmask = cplds_irq_unmask,
+ .flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
+ irq_hw_number_t hwirq)
+{
+ struct cplds *fpga = d->host_data;
+
+ irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
+ irq_set_chip_data(irq, fpga);
+
+ return 0;
+}
+
+static const struct irq_domain_ops cplds_irq_domain_ops = {
+ .xlate = irq_domain_xlate_twocell,
+ .map = cplds_irq_domain_map,
+};
+
+static int cplds_resume(struct platform_device *pdev)
+{
+ struct cplds *fpga = platform_get_drvdata(pdev);
+
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+
+ return 0;
+}
+
+static int cplds_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct cplds *fpga;
+ int ret;
+ unsigned int base_irq = 0;
+ unsigned long irqflags = 0;
+
+ fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
+ if (!fpga)
+ return -ENOMEM;
+
+ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (res) {
+ fpga->irq = (unsigned int)res->start;
+ irqflags = res->flags;
+ }
+ if (!fpga->irq)
+ return -ENODEV;
+
+ base_irq = platform_get_irq(pdev, 1);
+ if (base_irq < 0)
+ base_irq = 0;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ fpga->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(fpga->base))
+ return PTR_ERR(fpga->base);
+
+ platform_set_drvdata(pdev, fpga);
+
+ writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+ writel(0, fpga->base + FPGA_IRQ_SET_CLR);
+
+ ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
+ irqflags, dev_name(&pdev->dev), fpga);
+ if (ret == -ENOSYS)
+ return -EPROBE_DEFER;
+
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
+ fpga->irq, ret);
+ return ret;
+ }
+
+ irq_set_irq_wake(fpga->irq, 1);
+ fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+ CPLDS_NB_IRQ,
+ &cplds_irq_domain_ops, fpga);
+ if (!fpga->irqdomain)
+ return -ENODEV;
+
+ if (base_irq) {
+ ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
+ CPLDS_NB_IRQ);
+ if (ret) {
+ dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
+ base_irq, base_irq + CPLDS_NB_IRQ);
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int cplds_remove(struct platform_device *pdev)
+{
+ struct cplds *fpga = platform_get_drvdata(pdev);
+
+ irq_set_chip_and_handler(fpga->irq, NULL, NULL);
+
+ return 0;
+}
+
+static const struct of_device_id cplds_id_table[] = {
+ { .compatible = "intel,lubbock-cplds-irqs", },
+ { .compatible = "intel,mainstone-cplds-irqs", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, cplds_id_table);
+
+static struct platform_driver cplds_driver = {
+ .driver = {
+ .name = "pxa_cplds_irqs",
+ .of_match_table = of_match_ptr(cplds_id_table),
+ },
+ .probe = cplds_probe,
+ .remove = cplds_remove,
+ .resume = cplds_resume,
+};
+
+module_platform_driver(cplds_driver);
+
+MODULE_DESCRIPTION("PXA Cplds interrupts driver");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_LICENSE("GPL");
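The driver keeps the legacy boards working without device tree: when the optional third resource carries a fixed virq range, cplds_probe() binds virqs base_irq..base_irq+31 to hwirqs 0..31 up front with irq_create_strict_mappings(), so the old LUBBOCK_IRQ()/MAINSTONE_IRQ() constants stay valid; DT users instead get mappings created through the two-cell xlate when their interrupts properties are parsed. A hedged sketch of how a hwirq resolves to a Linux virq once the domain exists; the helper name is ours, not part of the patch, and fpga is the driver state defined above:

/* sketch only */
static unsigned int cplds_hwirq_to_virq(struct cplds *fpga,
					irq_hw_number_t hwirq)
{
	/* legacy boards: always hits the mapping created in cplds_probe(),
	 * so this returns base_irq + hwirq.
	 * DT boards: the mapping was created when the consumer's
	 * "interrupts" property was translated via irq_domain_xlate_twocell. */
	return irq_find_mapping(fpga->irqdomain, hwirq);
}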
diff --git a/arch/arm/mach-rockchip/pm.c b/arch/arm/mach-rockchip/pm.c
index b07d886..22812fe 100644
--- a/arch/arm/mach-rockchip/pm.c
+++ b/arch/arm/mach-rockchip/pm.c
@@ -44,9 +44,11 @@ static void __iomem *rk3288_bootram_base;
static phys_addr_t rk3288_bootram_phy;
static struct regmap *pmu_regmap;
+static struct regmap *grf_regmap;
static struct regmap *sgrf_regmap;
static u32 rk3288_pmu_pwr_mode_con;
+static u32 rk3288_grf_soc_con0;
static u32 rk3288_sgrf_soc_con0;
static inline u32 rk3288_l2_config(void)
@@ -70,12 +72,26 @@ static void rk3288_slp_mode_set(int level)
{
u32 mode_set, mode_set1;
+ regmap_read(grf_regmap, RK3288_GRF_SOC_CON0, &rk3288_grf_soc_con0);
+
regmap_read(sgrf_regmap, RK3288_SGRF_SOC_CON0, &rk3288_sgrf_soc_con0);
regmap_read(pmu_regmap, RK3288_PMU_PWRMODE_CON,
&rk3288_pmu_pwr_mode_con);
/*
+	 * We need to set the GRF_FORCE_JTAG bit here for the debug module,
+	 * otherwise it may become inaccessible after resume.
+	 * This creates a potential security issue, as the sdmmc pins may
+	 * accept jtag data for a short time during resume if no card is
+	 * inserted.
+	 * But this is of course also true for the regular boot, before we
+	 * turn off the jtag/sdmmc autodetect.
+ */
+ regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, GRF_FORCE_JTAG |
+ GRF_FORCE_JTAG_WRITE);
+
+ /*
* SGRF_FAST_BOOT_EN - system to boot from FAST_BOOT_ADDR
* PCLK_WDT_GATE - disable WDT during suspend.
*/
@@ -83,6 +99,13 @@ static void rk3288_slp_mode_set(int level)
SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN
| SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE);
+ /*
+	 * The dapswjdp cannot reset itself before resume, which may cause it
+	 * to access an illegal address during resume. Disable it before
+	 * suspend; the MASKROM will enable it again.
+ */
+ regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE);
+
/* booting address of resuming system is from this register value */
regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
rk3288_bootram_phy);
@@ -128,6 +151,9 @@ static void rk3288_slp_mode_set_resume(void)
regmap_write(sgrf_regmap, RK3288_SGRF_SOC_CON0,
rk3288_sgrf_soc_con0 | SGRF_PCLK_WDT_GATE_WRITE
| SGRF_FAST_BOOT_EN_WRITE);
+
+ regmap_write(grf_regmap, RK3288_GRF_SOC_CON0, rk3288_grf_soc_con0 |
+ GRF_FORCE_JTAG_WRITE);
}
static int rockchip_lpmode_enter(unsigned long arg)
@@ -186,6 +212,13 @@ static int rk3288_suspend_init(struct device_node *np)
return PTR_ERR(pmu_regmap);
}
+ grf_regmap = syscon_regmap_lookup_by_compatible(
+ "rockchip,rk3288-grf");
+ if (IS_ERR(grf_regmap)) {
+ pr_err("%s: could not find grf regmap\n", __func__);
+		return PTR_ERR(grf_regmap);
+ }
+
sram_np = of_find_compatible_node(NULL, NULL,
"rockchip,rk3288-pmu-sram");
if (!sram_np) {
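The GRF and SGRF writes above rely on Rockchip's "hiword mask" register convention: the low 16 bits of a write carry the new value and the high 16 bits select which of those bits actually change. That is why GRF_FORCE_JTAG (BIT(12)) is always paired with GRF_FORCE_JTAG_WRITE (BIT(28)), and why the resume path only needs to OR the write-enable bit into the saved value. A small sketch of the convention; the helper name is ours, not from the patch:

#include <linux/bitops.h>

/* sketch: build a hiword-mask write; only bits set in 'mask' are updated */
static inline u32 rk_hiword_update(u32 mask, u32 val)
{
	return (mask << 16) | (val & mask);
}

/* suspend: force the pins to JTAG   -> rk_hiword_update(BIT(12), BIT(12))
 * resume:  restore the saved bit 12 -> rk_hiword_update(BIT(12), rk3288_grf_soc_con0) */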
diff --git a/arch/arm/mach-rockchip/pm.h b/arch/arm/mach-rockchip/pm.h
index 03ff31d..f8a747b 100644
--- a/arch/arm/mach-rockchip/pm.h
+++ b/arch/arm/mach-rockchip/pm.h
@@ -48,6 +48,10 @@ static inline void rockchip_suspend_init(void)
#define RK3288_PMU_WAKEUP_RST_CLR_CNT 0x44
#define RK3288_PMU_PWRMODE_CON1 0x90
+#define RK3288_GRF_SOC_CON0 0x244
+#define GRF_FORCE_JTAG BIT(12)
+#define GRF_FORCE_JTAG_WRITE BIT(28)
+
#define RK3288_SGRF_SOC_CON0 (0x0000)
#define RK3288_SGRF_FAST_BOOT_ADDR (0x0120)
#define SGRF_PCLK_WDT_GATE BIT(6)
@@ -55,6 +59,10 @@ static inline void rockchip_suspend_init(void)
#define SGRF_FAST_BOOT_EN BIT(8)
#define SGRF_FAST_BOOT_EN_WRITE BIT(24)
+#define RK3288_SGRF_CPU_CON0 (0x40)
+#define SGRF_DAPDEVICEEN BIT(0)
+#define SGRF_DAPDEVICEEN_WRITE BIT(16)
+
#define RK3288_CRU_MODE_CON 0x50
#define RK3288_CRU_SEL0_CON 0x60
#define RK3288_CRU_SEL1_CON 0x64
diff --git a/arch/arm/mach-rockchip/rockchip.c b/arch/arm/mach-rockchip/rockchip.c
index d360ec0..b6cf3b4 100644
--- a/arch/arm/mach-rockchip/rockchip.c
+++ b/arch/arm/mach-rockchip/rockchip.c
@@ -30,11 +30,30 @@
#include "pm.h"
#define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_TIMER6_7_PHYS 0xff810000
static void __init rockchip_timer_init(void)
{
if (of_machine_is_compatible("rockchip,rk3288")) {
struct regmap *grf;
+ void __iomem *reg_base;
+
+ /*
+		 * Most/all U-Boot versions for rk3288 don't enable timer7,
+		 * which is needed for the architected timer to work.
+ * So make sure it is running during early boot.
+ */
+ reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K);
+ if (reg_base) {
+ writel(0, reg_base + 0x30);
+ writel(0xffffffff, reg_base + 0x20);
+ writel(0xffffffff, reg_base + 0x24);
+ writel(1, reg_base + 0x30);
+ dsb();
+ iounmap(reg_base);
+ } else {
+ pr_err("rockchip: could not map timer7 registers\n");
+ }
/*
* Disable auto jtag/sdmmc switching that causes issues
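The early fixup above keeps the architected timer alive by reprogramming timer7 inside the TIMER6_7 block before the clocksource code runs. A sketch of the same sequence with the register offsets named; the names follow the usual Rockchip timer layout for timer7 within that block and should be read as assumptions, not definitions from this patch:

#include <linux/io.h>
#include <asm/barrier.h>

#define RK_TIMER7_LOAD_COUNT0	0x20	/* load count, low word */
#define RK_TIMER7_LOAD_COUNT1	0x24	/* load count, high word */
#define RK_TIMER7_CTRL		0x30	/* bit 0: timer enable */

static void rk3288_restart_timer7(void __iomem *base)
{
	writel(0, base + RK_TIMER7_CTRL);		  /* stop the timer */
	writel(0xffffffff, base + RK_TIMER7_LOAD_COUNT0); /* free-running count */
	writel(0xffffffff, base + RK_TIMER7_LOAD_COUNT1);
	writel(1, base + RK_TIMER7_CTRL);		  /* enable it again */
	dsb();						  /* make sure the writes reach the device */
}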
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 09c5fe3..7e7583d 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1878,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = {
* arm_iommu_attach_device function.
*/
struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
{
unsigned int bits = size >> PAGE_SHIFT;
unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1886,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
int extensions = 1;
int err = -ENOMEM;
+ /* currently only 32-bit DMA address space is supported */
+ if (size > DMA_BIT_MASK(32) + 1)
+ return ERR_PTR(-ERANGE);
+
if (!bitmap_size)
return ERR_PTR(-EINVAL);
@@ -2057,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
if (!iommu)
return false;
- /*
- * currently arm_iommu_create_mapping() takes a max of size_t
- * for size param. So check this limit for now.
- */
- if (size > SIZE_MAX)
- return false;
-
mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
if (IS_ERR(mapping)) {
pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S
index aa0519e..774ef13 100644
--- a/arch/arm/mm/proc-arm1020.S
+++ b/arch/arm/mm/proc-arm1020.S
@@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S
index bff4c7f..ae3c27b 100644
--- a/arch/arm/mm/proc-arm1020e.S
+++ b/arch/arm/mm/proc-arm1020e.S
@@ -22,8 +22,6 @@
*
* These are the low level assembler for performing cache and TLB
* functions on the arm1020e.
- *
- * CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
*/
#include <linux/linkage.h>
#include <linux/init.h>
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index ede8c54..32a47cc 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
.type __arm925_setup, #function
__arm925_setup:
mov r0, #0
-#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
- orr r0,r0,#1 << 7
-#endif
/* Transparent on, D-cache clean & flush mode. See NOTE2 above */
orr r0,r0,#1 << 1 @ transparent mode on
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S
index e494d6d..92e08bf 100644
--- a/arch/arm/mm/proc-feroceon.S
+++ b/arch/arm/mm/proc-feroceon.S
@@ -602,7 +602,6 @@ __\name\()_proc_info:
PMD_SECT_AP_WRITE | \
PMD_SECT_AP_READ
initfn __feroceon_setup, __\name\()_proc_info
- .long __feroceon_setup
.long cpu_arch_name
.long cpu_elf_name
.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index 793551d..4983250 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -4,6 +4,7 @@
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/export.h>
+#include <linux/memblock.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -21,6 +22,20 @@
#include <asm/xen/hypercall.h>
#include <asm/xen/interface.h>
+unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+ struct memblock_region *reg;
+ gfp_t flags = __GFP_NOWARN;
+
+ for_each_memblock(memory, reg) {
+ if (reg->base < (phys_addr_t)0xffffffff) {
+ flags |= __GFP_DMA;
+ break;
+ }
+ }
+ return __get_free_pages(flags, order);
+}
+
enum dma_cache_op {
DMA_UNMAP,
DMA_MAP,
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 4269dba..7796af4 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -31,6 +31,7 @@ config ARM64
select GENERIC_EARLY_IOREMAP
select GENERIC_IRQ_PROBE
select GENERIC_IRQ_SHOW
+ select GENERIC_IRQ_SHOW_LEVEL
select GENERIC_PCI_IOMAP
select GENERIC_SCHED_CLOCK
select GENERIC_SMP_IDLE_THREAD
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index a5abb00..71f19c4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -65,6 +65,14 @@ do { \
do { \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("stlrb %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("stlrh %w1, %0" \
+ : "=Q" (*p) : "r" (v) : "memory"); \
+ break; \
case 4: \
asm volatile ("stlr %w1, %0" \
: "=Q" (*p) : "r" (v) : "memory"); \
@@ -81,6 +89,14 @@ do { \
typeof(*p) ___p1; \
compiletime_assert_atomic_type(*p); \
switch (sizeof(*p)) { \
+ case 1: \
+ asm volatile ("ldarb %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
+ case 2: \
+ asm volatile ("ldarh %w0, %1" \
+ : "=r" (___p1) : "Q" (*p) : "memory"); \
+ break; \
case 4: \
asm volatile ("ldar %w0, %1" \
: "=r" (___p1) : "Q" (*p) : "memory"); \
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 195991d..23f25ac 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1310,7 +1310,7 @@ static const struct of_device_id armpmu_of_device_ids[] = {
static int armpmu_device_probe(struct platform_device *pdev)
{
- int i, *irqs;
+ int i, irq, *irqs;
if (!cpu_pmu)
return -ENODEV;
@@ -1319,6 +1319,11 @@ static int armpmu_device_probe(struct platform_device *pdev)
if (!irqs)
return -ENOMEM;
+ /* Don't bother with PPIs; they're already affine */
+ irq = platform_get_irq(pdev, 0);
+ if (irq >= 0 && irq_is_percpu(irq))
+ return 0;
+
for (i = 0; i < pdev->num_resources; ++i) {
struct device_node *dn;
int cpu;
@@ -1327,7 +1332,7 @@ static int armpmu_device_probe(struct platform_device *pdev)
i);
if (!dn) {
pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
- of_node_full_name(dn), i);
+ of_node_full_name(pdev->dev.of_node), i);
break;
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index ef7d112..b0bd4e5 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -67,8 +67,7 @@ static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
*ret_page = phys_to_page(phys);
ptr = (void *)val;
- if (flags & __GFP_ZERO)
- memset(ptr, 0, size);
+ memset(ptr, 0, size);
}
return ptr;
@@ -105,7 +104,6 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
struct page *page;
void *addr;
- size = PAGE_ALIGN(size);
page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
get_order(size));
if (!page)
@@ -113,8 +111,7 @@ static void *__dma_alloc_coherent(struct device *dev, size_t size,
*dma_handle = phys_to_dma(dev, page_to_phys(page));
addr = page_address(page);
- if (flags & __GFP_ZERO)
- memset(addr, 0, size);
+ memset(addr, 0, size);
return addr;
} else {
return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
@@ -195,6 +192,8 @@ static void __dma_free(struct device *dev, size_t size,
{
void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
+ size = PAGE_ALIGN(size);
+
if (!is_device_dma_coherent(dev)) {
if (__free_from_pool(vaddr, size))
return;
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c
index ce7aea3..c18ddc7 100644
--- a/arch/m32r/kernel/smp.c
+++ b/arch/m32r/kernel/smp.c
@@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0;
/*
* For flush_tlb_others()
*/
-static volatile cpumask_t flush_cpumask;
+static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static struct vm_area_struct *flush_vma;
static volatile unsigned long flush_va;
@@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
*/
send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
- while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
+ while (!cpumask_empty(&flush_cpumask)) {
/* nothing. lockup detection does not belong here */
mb();
}
@@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void)
__flush_tlb_page(va);
}
}
- cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
+ cpumask_clear_cpu(cpu_id, &flush_cpumask);
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
diff --git a/arch/powerpc/include/uapi/asm/tm.h b/arch/powerpc/include/uapi/asm/tm.h
index 5047659..5d836b7 100644
--- a/arch/powerpc/include/uapi/asm/tm.h
+++ b/arch/powerpc/include/uapi/asm/tm.h
@@ -11,7 +11,7 @@
#define TM_CAUSE_RESCHED 0xde
#define TM_CAUSE_TLBI 0xdc
#define TM_CAUSE_FAC_UNAV 0xda
-#define TM_CAUSE_SYSCALL 0xd8
+#define TM_CAUSE_SYSCALL 0xd8 /* future use */
#define TM_CAUSE_MISC 0xd6 /* future use */
#define TM_CAUSE_SIGNAL 0xd4
#define TM_CAUSE_ALIGNMENT 0xd2
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 44b480e3..9ee61d1 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -749,21 +749,24 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
eeh_unfreeze_pe(pe, false);
eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
eeh_pe_dev_traverse(pe, eeh_restore_dev_state, dev);
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
break;
case pcie_hot_reset:
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_HOT);
break;
case pcie_warm_reset:
+ eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
eeh_pe_dev_traverse(pe, eeh_disable_and_save_dev_state, dev);
eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
break;
default:
- eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
+ eeh_pe_state_clear(pe, EEH_PE_ISOLATED | EEH_PE_CFG_BLOCKED);
return -EINVAL;
};
@@ -1058,6 +1061,9 @@ void eeh_add_device_early(struct pci_dn *pdn)
if (!edev || !eeh_enabled())
return;
+ if (!eeh_has_flag(EEH_PROBE_MODE_DEVTREE))
+ return;
+
/* USB Bus children of PCI devices will not have BUID's */
phb = edev->phb;
if (NULL == phb ||
@@ -1112,6 +1118,9 @@ void eeh_add_device_late(struct pci_dev *dev)
return;
}
+ if (eeh_has_flag(EEH_PROBE_MODE_DEV))
+ eeh_ops->probe(pdn, NULL);
+
/*
* The EEH cache might not be removed correctly because of
* unbalanced kref to the device during unplug time, which
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 8ca9434..afbc200 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -34,7 +34,6 @@
#include <asm/ftrace.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
-#include <asm/tm.h>
/*
* System calls.
@@ -146,24 +145,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
andi. r11,r10,_TIF_SYSCALL_DOTRACE
bne syscall_dotrace
.Lsyscall_dotrace_cont:
-#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
-BEGIN_FTR_SECTION
- b 1f
-END_FTR_SECTION_IFCLR(CPU_FTR_TM)
- extrdi. r11, r12, 1, (63-MSR_TS_T_LG) /* transaction active? */
- beq+ 1f
-
- /* Doom the transaction and don't perform the syscall: */
- mfmsr r11
- li r12, 1
- rldimi r11, r12, MSR_TM_LG, 63-MSR_TM_LG
- mtmsrd r11, 0
- li r11, (TM_CAUSE_SYSCALL|TM_CAUSE_PERSISTENT)
- TABORT(R11)
-
- b .Lsyscall_exit
-1:
-#endif
cmpldi 0,r0,NR_syscalls
bge- syscall_enosys
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index eeaa0d5..ccde8f0 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -501,9 +501,11 @@ BEGIN_FTR_SECTION
CHECK_HMI_INTERRUPT
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
ld r1,PACAR1(r13)
+ ld r6,_CCR(r1)
ld r4,_MSR(r1)
ld r5,_NIP(r1)
addi r1,r1,INT_FRAME_SIZE
+ mtcr r6
mtspr SPRN_SRR1,r4
mtspr SPRN_SRR0,r5
rfid
diff --git a/arch/powerpc/kvm/book3s_xics.c b/arch/powerpc/kvm/book3s_xics.c
index 8f3e6cc..c6ca7db 100644
--- a/arch/powerpc/kvm/book3s_xics.c
+++ b/arch/powerpc/kvm/book3s_xics.c
@@ -12,6 +12,7 @@
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/anon_inodes.h>
+#include <linux/spinlock.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
@@ -20,7 +21,6 @@
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/time.h>
-#include <asm/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 920c252..f8bc950 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2693,7 +2693,6 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
hose->last_busno = 0xff;
}
hose->private_data = phb;
- hose->controller_ops = pnv_pci_controller_ops;
phb->hub_id = hub_id;
phb->opal_id = phb_id;
phb->type = ioda_type;
@@ -2812,6 +2811,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
pnv_pci_controller_ops.enable_device_hook = pnv_pci_enable_device_hook;
pnv_pci_controller_ops.window_alignment = pnv_pci_window_alignment;
pnv_pci_controller_ops.reset_secondary_bus = pnv_pci_reset_secondary_bus;
+ hose->controller_ops = pnv_pci_controller_ops;
#ifdef CONFIG_PCI_IOV
ppc_md.pcibios_fixup_sriov = pnv_pci_ioda_fixup_iov_resources;
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index b4b1109..019d34a 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -412,6 +412,10 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
if (rc)
return -EINVAL;
+ rc = dlpar_acquire_drc(drc_index);
+ if (rc)
+ return -EINVAL;
+
parent = of_find_node_by_path("/cpus");
if (!parent)
return -ENODEV;
@@ -422,12 +426,6 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
of_node_put(parent);
- rc = dlpar_acquire_drc(drc_index);
- if (rc) {
- dlpar_free_cc_nodes(dn);
- return -EINVAL;
- }
-
rc = dlpar_attach_node(dn);
if (rc) {
dlpar_release_drc(drc_index);
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 8e58c61..b06dc38 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -115,7 +115,7 @@ config S390
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_TRANSPARENT_HUGEPAGE
- select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z9_109_FEATURES
+ select HAVE_BPF_JIT if PACK_STACK && HAVE_MARCH_Z196_FEATURES
select HAVE_CMPXCHG_DOUBLE
select HAVE_CMPXCHG_LOCAL
select HAVE_DEBUG_KMEMLEAK
diff --git a/arch/s390/crypto/crypt_s390.h b/arch/s390/crypto/crypt_s390.h
index ba3b2ae..d9c4c31 100644
--- a/arch/s390/crypto/crypt_s390.h
+++ b/arch/s390/crypto/crypt_s390.h
@@ -3,9 +3,10 @@
*
* Support for s390 cryptographic instructions.
*
- * Copyright IBM Corp. 2003, 2007
+ * Copyright IBM Corp. 2003, 2015
* Author(s): Thomas Spatzier
* Jan Glauber (jan.glauber@de.ibm.com)
+ * Harald Freudenberger (freude@de.ibm.com)
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -28,15 +29,17 @@
#define CRYPT_S390_MSA 0x1
#define CRYPT_S390_MSA3 0x2
#define CRYPT_S390_MSA4 0x4
+#define CRYPT_S390_MSA5 0x8
/* s390 cryptographic operations */
enum crypt_s390_operations {
- CRYPT_S390_KM = 0x0100,
- CRYPT_S390_KMC = 0x0200,
- CRYPT_S390_KIMD = 0x0300,
- CRYPT_S390_KLMD = 0x0400,
- CRYPT_S390_KMAC = 0x0500,
- CRYPT_S390_KMCTR = 0x0600
+ CRYPT_S390_KM = 0x0100,
+ CRYPT_S390_KMC = 0x0200,
+ CRYPT_S390_KIMD = 0x0300,
+ CRYPT_S390_KLMD = 0x0400,
+ CRYPT_S390_KMAC = 0x0500,
+ CRYPT_S390_KMCTR = 0x0600,
+ CRYPT_S390_PPNO = 0x0700
};
/*
@@ -138,6 +141,16 @@ enum crypt_s390_kmac_func {
KMAC_TDEA_192 = CRYPT_S390_KMAC | 3
};
+/*
+ * function codes for PPNO (PERFORM PSEUDORANDOM NUMBER
+ * OPERATION) instruction
+ */
+enum crypt_s390_ppno_func {
+ PPNO_QUERY = CRYPT_S390_PPNO | 0,
+ PPNO_SHA512_DRNG_GEN = CRYPT_S390_PPNO | 3,
+ PPNO_SHA512_DRNG_SEED = CRYPT_S390_PPNO | 0x83
+};
+
/**
* crypt_s390_km:
* @func: the function code passed to KM; see crypt_s390_km_func
@@ -162,11 +175,11 @@ static inline int crypt_s390_km(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb92e0000,%3,%1 \n" /* KM opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92e0000,%3,%1\n" /* KM opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -198,11 +211,11 @@ static inline int crypt_s390_kmc(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb92f0000,%3,%1 \n" /* KMC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92f0000,%3,%1\n" /* KMC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -233,11 +246,11 @@ static inline int crypt_s390_kimd(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb93e0000,%1,%1 \n" /* KIMD opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb93e0000,%1,%1\n" /* KIMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -267,11 +280,11 @@ static inline int crypt_s390_klmd(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb93f0000,%1,%1 \n" /* KLMD opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb93f0000,%1,%1\n" /* KLMD opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -302,11 +315,11 @@ static inline int crypt_s390_kmac(long func, void *param,
int ret;
asm volatile(
- "0: .insn rre,0xb91e0000,%1,%1 \n" /* KLAC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb91e0000,%1,%1\n" /* KLAC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "=d" (ret), "+a" (__src), "+d" (__src_len)
: "d" (__func), "a" (__param), "0" (-1) : "cc", "memory");
if (ret < 0)
@@ -340,11 +353,11 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
int ret = -1;
asm volatile(
- "0: .insn rrf,0xb92d0000,%3,%1,%4,0 \n" /* KMCTR opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rrf,0xb92d0000,%3,%1,%4,0\n" /* KMCTR opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret), "+a" (__src), "+d" (__src_len), "+a" (__dest),
"+a" (__ctr)
: "d" (__func), "a" (__param) : "cc", "memory");
@@ -354,6 +367,47 @@ static inline int crypt_s390_kmctr(long func, void *param, u8 *dest,
}
/**
+ * crypt_s390_ppno:
+ * @func: the function code passed to PPNO; see crypt_s390_ppno_func
+ * @param: address of parameter block; see POP for details on each func
+ * @dest: address of destination memory area
+ * @dest_len: size of destination memory area in bytes
+ * @seed: address of seed data
+ * @seed_len: size of seed data in bytes
+ *
+ * Executes the PPNO (PERFORM PSEUDORANDOM NUMBER OPERATION)
+ * operation of the CPU.
+ *
+ * Returns -1 on failure, 0 for the query function, or the number of
+ * random bytes stored in the dest buffer for the generate function
+ */
+static inline int crypt_s390_ppno(long func, void *param,
+ u8 *dest, long dest_len,
+ const u8 *seed, long seed_len)
+{
+ register long __func asm("0") = func & CRYPT_S390_FUNC_MASK;
+ register void *__param asm("1") = param; /* param block (240 bytes) */
+ register u8 *__dest asm("2") = dest; /* buf for recv random bytes */
+ register long __dest_len asm("3") = dest_len; /* requested random bytes */
+ register const u8 *__seed asm("4") = seed; /* buf with seed data */
+ register long __seed_len asm("5") = seed_len; /* bytes in seed buf */
+ int ret = -1;
+
+ asm volatile (
+ "0: .insn rre,0xb93c0000,%1,%5\n" /* PPNO opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
+ " la %0,0\n"
+ "2:\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : "+d" (ret), "+a"(__dest), "+d"(__dest_len)
+ : "d"(__func), "a"(__param), "a"(__seed), "d"(__seed_len)
+ : "cc", "memory");
+ if (ret < 0)
+ return ret;
+ return (func & CRYPT_S390_FUNC_MASK) ? dest_len - __dest_len : 0;
+}
+
+/**
* crypt_s390_func_available:
* @func: the function code of the specific function; 0 if op in general
*
@@ -373,6 +427,9 @@ static inline int crypt_s390_func_available(int func,
return 0;
if (facility_mask & CRYPT_S390_MSA4 && !test_facility(77))
return 0;
+ if (facility_mask & CRYPT_S390_MSA5 && !test_facility(57))
+ return 0;
+
switch (func & CRYPT_S390_OP_MASK) {
case CRYPT_S390_KM:
ret = crypt_s390_km(KM_QUERY, &status, NULL, NULL, 0);
@@ -390,8 +447,12 @@ static inline int crypt_s390_func_available(int func,
ret = crypt_s390_kmac(KMAC_QUERY, &status, NULL, 0);
break;
case CRYPT_S390_KMCTR:
- ret = crypt_s390_kmctr(KMCTR_QUERY, &status, NULL, NULL, 0,
- NULL);
+ ret = crypt_s390_kmctr(KMCTR_QUERY, &status,
+ NULL, NULL, 0, NULL);
+ break;
+ case CRYPT_S390_PPNO:
+ ret = crypt_s390_ppno(PPNO_QUERY, &status,
+ NULL, 0, NULL, 0);
break;
default:
return 0;
@@ -419,15 +480,14 @@ static inline int crypt_s390_pcc(long func, void *param)
int ret = -1;
asm volatile(
- "0: .insn rre,0xb92c0000,0,0 \n" /* PCC opcode */
- "1: brc 1,0b \n" /* handle partial completion */
+ "0: .insn rre,0xb92c0000,0,0\n" /* PCC opcode */
+ "1: brc 1,0b\n" /* handle partial completion */
" la %0,0\n"
"2:\n"
- EX_TABLE(0b,2b) EX_TABLE(1b,2b)
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
: "+d" (ret)
: "d" (__func), "a" (__param) : "cc", "memory");
return ret;
}
-
#endif /* _CRYPTO_ARCH_S390_CRYPT_S390_H */
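The new crypt_s390_ppno() wrapper follows the same calling pattern as the existing KM/KMC helpers: a function code, a parameter block, and the data areas. For the SHA-512 DRNG the parameter block is the 240-byte working state that prng.c (further down) defines as struct ppno_ws_s. A hedged seed-then-generate sketch; the surrounding function and buffers are illustrative:

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include "crypt_s390.h"

static int example_ppno_generate(u8 *buf, long len, const u8 *seed, long seedlen)
{
	u8 ws[240];	/* parameter block: the SHA-512 DRNG working state */
	int rc;

	memset(ws, 0, sizeof(ws));

	/* instantiate the SHA-512 DRNG from the seed material */
	rc = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED, ws, NULL, 0, seed, seedlen);
	if (rc < 0)
		return -EIO;

	/* generate: returns the number of random bytes placed in buf */
	rc = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN, ws, buf, len, NULL, 0);
	return (rc < 0 || rc != len) ? -EIO : 0;
}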
diff --git a/arch/s390/crypto/prng.c b/arch/s390/crypto/prng.c
index 94a35a4..1f374b3 100644
--- a/arch/s390/crypto/prng.c
+++ b/arch/s390/crypto/prng.c
@@ -1,106 +1,529 @@
/*
- * Copyright IBM Corp. 2006, 2007
+ * Copyright IBM Corp. 2006, 2015
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
+ * Harald Freudenberger <freude@de.ibm.com>
* Driver for the s390 pseudo random number generator
*/
+
+#define KMSG_COMPONENT "prng"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/fs.h>
+#include <linux/fips.h>
#include <linux/init.h>
#include <linux/kernel.h>
+#include <linux/device.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
+#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <asm/debug.h>
#include <asm/uaccess.h>
+#include <asm/timex.h>
#include "crypt_s390.h"
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Jan Glauber <jan.glauber@de.ibm.com>");
+MODULE_AUTHOR("IBM Corporation");
MODULE_DESCRIPTION("s390 PRNG interface");
-static int prng_chunk_size = 256;
-module_param(prng_chunk_size, int, S_IRUSR | S_IRGRP | S_IROTH);
+
+#define PRNG_MODE_AUTO 0
+#define PRNG_MODE_TDES 1
+#define PRNG_MODE_SHA512 2
+
+static unsigned int prng_mode = PRNG_MODE_AUTO;
+module_param_named(mode, prng_mode, int, 0);
+MODULE_PARM_DESC(prng_mode, "PRNG mode: 0 - auto, 1 - TDES, 2 - SHA512");
+
+
+#define PRNG_CHUNKSIZE_TDES_MIN 8
+#define PRNG_CHUNKSIZE_TDES_MAX (64*1024)
+#define PRNG_CHUNKSIZE_SHA512_MIN 64
+#define PRNG_CHUNKSIZE_SHA512_MAX (64*1024)
+
+static unsigned int prng_chunk_size = 256;
+module_param_named(chunksize, prng_chunk_size, int, 0);
MODULE_PARM_DESC(prng_chunk_size, "PRNG read chunk size in bytes");
-static int prng_entropy_limit = 4096;
-module_param(prng_entropy_limit, int, S_IRUSR | S_IRGRP | S_IROTH | S_IWUSR);
-MODULE_PARM_DESC(prng_entropy_limit,
- "PRNG add entropy after that much bytes were produced");
+
+#define PRNG_RESEED_LIMIT_TDES 4096
+#define PRNG_RESEED_LIMIT_TDES_LOWER 4096
+#define PRNG_RESEED_LIMIT_SHA512 100000
+#define PRNG_RESEED_LIMIT_SHA512_LOWER 10000
+
+static unsigned int prng_reseed_limit;
+module_param_named(reseed_limit, prng_reseed_limit, int, 0);
+MODULE_PARM_DESC(prng_reseed_limit, "PRNG reseed limit");
+
/*
* Any one who considers arithmetical methods of producing random digits is,
* of course, in a state of sin. -- John von Neumann
*/
-struct s390_prng_data {
- unsigned long count; /* how many bytes were produced */
- char *buf;
+static int prng_errorflag;
+
+#define PRNG_GEN_ENTROPY_FAILED 1
+#define PRNG_SELFTEST_FAILED 2
+#define PRNG_INSTANTIATE_FAILED 3
+#define PRNG_SEED_FAILED 4
+#define PRNG_RESEED_FAILED 5
+#define PRNG_GEN_FAILED 6
+
+struct prng_ws_s {
+ u8 parm_block[32];
+ u32 reseed_counter;
+ u64 byte_counter;
};
-static struct s390_prng_data *p;
+struct ppno_ws_s {
+ u32 res;
+ u32 reseed_counter;
+ u64 stream_bytes;
+ u8 V[112];
+ u8 C[112];
+};
-/* copied from libica, use a non-zero initial parameter block */
-static unsigned char parm_block[32] = {
-0x0F,0x2B,0x8E,0x63,0x8C,0x8E,0xD2,0x52,0x64,0xB7,0xA0,0x7B,0x75,0x28,0xB8,0xF4,
-0x75,0x5F,0xD2,0xA6,0x8D,0x97,0x11,0xFF,0x49,0xD8,0x23,0xF3,0x7E,0x21,0xEC,0xA0,
+struct prng_data_s {
+ struct mutex mutex;
+ union {
+ struct prng_ws_s prngws;
+ struct ppno_ws_s ppnows;
+ };
+ u8 *buf;
+ u32 rest;
+ u8 *prev;
};
-static int prng_open(struct inode *inode, struct file *file)
+static struct prng_data_s *prng_data;
+
+/* initial parameter block for tdes mode, copied from libica */
+static const u8 initial_parm_block[32] __initconst = {
+ 0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
+ 0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
+ 0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
+ 0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0 };
+
+
+/*** helper functions ***/
+
+static int generate_entropy(u8 *ebuf, size_t nbytes)
{
- return nonseekable_open(inode, file);
+ int n, ret = 0;
+ u8 *pg, *h, hash[32];
+
+ pg = (u8 *) __get_free_page(GFP_KERNEL);
+ if (!pg) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ return -ENOMEM;
+ }
+
+ while (nbytes) {
+ /* fill page with urandom bytes */
+ get_random_bytes(pg, PAGE_SIZE);
+ /* exor page with stckf values */
+ for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
+ u64 *p = ((u64 *)pg) + n;
+ *p ^= get_tod_clock_fast();
+ }
+ n = (nbytes < sizeof(hash)) ? nbytes : sizeof(hash);
+ if (n < sizeof(hash))
+ h = hash;
+ else
+ h = ebuf;
+ /* generate sha256 from this page */
+ if (crypt_s390_kimd(KIMD_SHA_256, h,
+ pg, PAGE_SIZE) != PAGE_SIZE) {
+ prng_errorflag = PRNG_GEN_ENTROPY_FAILED;
+ ret = -EIO;
+ goto out;
+ }
+ if (n < sizeof(hash))
+ memcpy(ebuf, hash, n);
+ ret += n;
+ ebuf += n;
+ nbytes -= n;
+ }
+
+out:
+ free_page((unsigned long)pg);
+ return ret;
}
-static void prng_add_entropy(void)
+
+/*** tdes functions ***/
+
+static void prng_tdes_add_entropy(void)
{
__u64 entropy[4];
unsigned int i;
int ret;
for (i = 0; i < 16; i++) {
- ret = crypt_s390_kmc(KMC_PRNG, parm_block, (char *)entropy,
- (char *)entropy, sizeof(entropy));
+ ret = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ (char *)entropy, (char *)entropy,
+ sizeof(entropy));
BUG_ON(ret < 0 || ret != sizeof(entropy));
- memcpy(parm_block, entropy, sizeof(entropy));
+ memcpy(prng_data->prngws.parm_block, entropy, sizeof(entropy));
}
}
-static void prng_seed(int nbytes)
+
+static void prng_tdes_seed(int nbytes)
{
char buf[16];
int i = 0;
- BUG_ON(nbytes > 16);
+ BUG_ON(nbytes > sizeof(buf));
+
get_random_bytes(buf, nbytes);
/* Add the entropy */
while (nbytes >= 8) {
- *((__u64 *)parm_block) ^= *((__u64 *)(buf+i));
- prng_add_entropy();
+ *((__u64 *)prng_data->prngws.parm_block) ^= *((__u64 *)(buf+i));
+ prng_tdes_add_entropy();
i += 8;
nbytes -= 8;
}
- prng_add_entropy();
+ prng_tdes_add_entropy();
+ prng_data->prngws.reseed_counter = 0;
+}
+
+
+static int __init prng_tdes_instantiate(void)
+{
+ int datalen;
+
+ pr_debug("prng runs in TDES mode with "
+ "chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+ memcpy(prng_data->prngws.parm_block, initial_parm_block, 32);
+
+ /* initialize the PRNG, add 128 bits of entropy */
+ prng_tdes_seed(16);
+
+ return 0;
}
-static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
- loff_t *ppos)
+
+static void prng_tdes_deinstantiate(void)
+{
+ pr_debug("The prng module stopped "
+ "after running in triple DES mode\n");
+ kzfree(prng_data);
+}
+
+
+/*** sha512 functions ***/
+
+static int __init prng_sha512_selftest(void)
{
- int chunk, n;
+ /* NIST DRBG testvector for Hash Drbg, Sha-512, Count #0 */
+ static const u8 seed[] __initconst = {
+ 0x6b, 0x50, 0xa7, 0xd8, 0xf8, 0xa5, 0x5d, 0x7a,
+ 0x3d, 0xf8, 0xbb, 0x40, 0xbc, 0xc3, 0xb7, 0x22,
+ 0xd8, 0x70, 0x8d, 0xe6, 0x7f, 0xda, 0x01, 0x0b,
+ 0x03, 0xc4, 0xc8, 0x4d, 0x72, 0x09, 0x6f, 0x8c,
+ 0x3e, 0xc6, 0x49, 0xcc, 0x62, 0x56, 0xd9, 0xfa,
+ 0x31, 0xdb, 0x7a, 0x29, 0x04, 0xaa, 0xf0, 0x25 };
+ static const u8 V0[] __initconst = {
+ 0x00, 0xad, 0xe3, 0x6f, 0x9a, 0x01, 0xc7, 0x76,
+ 0x61, 0x34, 0x35, 0xf5, 0x4e, 0x24, 0x74, 0x22,
+ 0x21, 0x9a, 0x29, 0x89, 0xc7, 0x93, 0x2e, 0x60,
+ 0x1e, 0xe8, 0x14, 0x24, 0x8d, 0xd5, 0x03, 0xf1,
+ 0x65, 0x5d, 0x08, 0x22, 0x72, 0xd5, 0xad, 0x95,
+ 0xe1, 0x23, 0x1e, 0x8a, 0xa7, 0x13, 0xd9, 0x2b,
+ 0x5e, 0xbc, 0xbb, 0x80, 0xab, 0x8d, 0xe5, 0x79,
+ 0xab, 0x5b, 0x47, 0x4e, 0xdd, 0xee, 0x6b, 0x03,
+ 0x8f, 0x0f, 0x5c, 0x5e, 0xa9, 0x1a, 0x83, 0xdd,
+ 0xd3, 0x88, 0xb2, 0x75, 0x4b, 0xce, 0x83, 0x36,
+ 0x57, 0x4b, 0xf1, 0x5c, 0xca, 0x7e, 0x09, 0xc0,
+ 0xd3, 0x89, 0xc6, 0xe0, 0xda, 0xc4, 0x81, 0x7e,
+ 0x5b, 0xf9, 0xe1, 0x01, 0xc1, 0x92, 0x05, 0xea,
+ 0xf5, 0x2f, 0xc6, 0xc6, 0xc7, 0x8f, 0xbc, 0xf4 };
+ static const u8 C0[] __initconst = {
+ 0x00, 0xf4, 0xa3, 0xe5, 0xa0, 0x72, 0x63, 0x95,
+ 0xc6, 0x4f, 0x48, 0xd0, 0x8b, 0x5b, 0x5f, 0x8e,
+ 0x6b, 0x96, 0x1f, 0x16, 0xed, 0xbc, 0x66, 0x94,
+ 0x45, 0x31, 0xd7, 0x47, 0x73, 0x22, 0xa5, 0x86,
+ 0xce, 0xc0, 0x4c, 0xac, 0x63, 0xb8, 0x39, 0x50,
+ 0xbf, 0xe6, 0x59, 0x6c, 0x38, 0x58, 0x99, 0x1f,
+ 0x27, 0xa7, 0x9d, 0x71, 0x2a, 0xb3, 0x7b, 0xf9,
+ 0xfb, 0x17, 0x86, 0xaa, 0x99, 0x81, 0xaa, 0x43,
+ 0xe4, 0x37, 0xd3, 0x1e, 0x6e, 0xe5, 0xe6, 0xee,
+ 0xc2, 0xed, 0x95, 0x4f, 0x53, 0x0e, 0x46, 0x8a,
+ 0xcc, 0x45, 0xa5, 0xdb, 0x69, 0x0d, 0x81, 0xc9,
+ 0x32, 0x92, 0xbc, 0x8f, 0x33, 0xe6, 0xf6, 0x09,
+ 0x7c, 0x8e, 0x05, 0x19, 0x0d, 0xf1, 0xb6, 0xcc,
+ 0xf3, 0x02, 0x21, 0x90, 0x25, 0xec, 0xed, 0x0e };
+ static const u8 random[] __initconst = {
+ 0x95, 0xb7, 0xf1, 0x7e, 0x98, 0x02, 0xd3, 0x57,
+ 0x73, 0x92, 0xc6, 0xa9, 0xc0, 0x80, 0x83, 0xb6,
+ 0x7d, 0xd1, 0x29, 0x22, 0x65, 0xb5, 0xf4, 0x2d,
+ 0x23, 0x7f, 0x1c, 0x55, 0xbb, 0x9b, 0x10, 0xbf,
+ 0xcf, 0xd8, 0x2c, 0x77, 0xa3, 0x78, 0xb8, 0x26,
+ 0x6a, 0x00, 0x99, 0x14, 0x3b, 0x3c, 0x2d, 0x64,
+ 0x61, 0x1e, 0xee, 0xb6, 0x9a, 0xcd, 0xc0, 0x55,
+ 0x95, 0x7c, 0x13, 0x9e, 0x8b, 0x19, 0x0c, 0x7a,
+ 0x06, 0x95, 0x5f, 0x2c, 0x79, 0x7c, 0x27, 0x78,
+ 0xde, 0x94, 0x03, 0x96, 0xa5, 0x01, 0xf4, 0x0e,
+ 0x91, 0x39, 0x6a, 0xcf, 0x8d, 0x7e, 0x45, 0xeb,
+ 0xdb, 0xb5, 0x3b, 0xbf, 0x8c, 0x97, 0x52, 0x30,
+ 0xd2, 0xf0, 0xff, 0x91, 0x06, 0xc7, 0x61, 0x19,
+ 0xae, 0x49, 0x8e, 0x7f, 0xbc, 0x03, 0xd9, 0x0f,
+ 0x8e, 0x4c, 0x51, 0x62, 0x7a, 0xed, 0x5c, 0x8d,
+ 0x42, 0x63, 0xd5, 0xd2, 0xb9, 0x78, 0x87, 0x3a,
+ 0x0d, 0xe5, 0x96, 0xee, 0x6d, 0xc7, 0xf7, 0xc2,
+ 0x9e, 0x37, 0xee, 0xe8, 0xb3, 0x4c, 0x90, 0xdd,
+ 0x1c, 0xf6, 0xa9, 0xdd, 0xb2, 0x2b, 0x4c, 0xbd,
+ 0x08, 0x6b, 0x14, 0xb3, 0x5d, 0xe9, 0x3d, 0xa2,
+ 0xd5, 0xcb, 0x18, 0x06, 0x69, 0x8c, 0xbd, 0x7b,
+ 0xbb, 0x67, 0xbf, 0xe3, 0xd3, 0x1f, 0xd2, 0xd1,
+ 0xdb, 0xd2, 0xa1, 0xe0, 0x58, 0xa3, 0xeb, 0x99,
+ 0xd7, 0xe5, 0x1f, 0x1a, 0x93, 0x8e, 0xed, 0x5e,
+ 0x1c, 0x1d, 0xe2, 0x3a, 0x6b, 0x43, 0x45, 0xd3,
+ 0x19, 0x14, 0x09, 0xf9, 0x2f, 0x39, 0xb3, 0x67,
+ 0x0d, 0x8d, 0xbf, 0xb6, 0x35, 0xd8, 0xe6, 0xa3,
+ 0x69, 0x32, 0xd8, 0x10, 0x33, 0xd1, 0x44, 0x8d,
+ 0x63, 0xb4, 0x03, 0xdd, 0xf8, 0x8e, 0x12, 0x1b,
+ 0x6e, 0x81, 0x9a, 0xc3, 0x81, 0x22, 0x6c, 0x13,
+ 0x21, 0xe4, 0xb0, 0x86, 0x44, 0xf6, 0x72, 0x7c,
+ 0x36, 0x8c, 0x5a, 0x9f, 0x7a, 0x4b, 0x3e, 0xe2 };
+
int ret = 0;
- int tmp;
+ u8 buf[sizeof(random)];
+ struct ppno_ws_s ws;
+
+ memset(&ws, 0, sizeof(ws));
+
+ /* initial seed */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &ws, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ pr_err("The prng self test seed operation for the "
+ "SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check working states V and C */
+ if (memcmp(ws.V, V0, sizeof(V0)) != 0
+ || memcmp(ws.C, C0, sizeof(C0)) != 0) {
+ pr_err("The prng self test state test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* generate random bytes */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &ws, buf, sizeof(buf),
+ NULL, 0);
+ if (ret < 0) {
+ pr_err("The prng self test generate operation for "
+ "the SHA-512 mode failed with rc=%d\n", ret);
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ /* check against expected data */
+ if (memcmp(buf, random, sizeof(random)) != 0) {
+ pr_err("The prng self test data test "
+ "for the SHA-512 mode failed\n");
+ prng_errorflag = PRNG_SELFTEST_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int __init prng_sha512_instantiate(void)
+{
+ int ret, datalen;
+ u8 seed[64];
+
+ pr_debug("prng runs in SHA-512 mode "
+ "with chunksize=%d and reseed_limit=%u\n",
+ prng_chunk_size, prng_reseed_limit);
+
+ /* memory allocation, prng_data struct init, mutex init */
+ datalen = sizeof(struct prng_data_s) + prng_chunk_size;
+ if (fips_enabled)
+ datalen += prng_chunk_size;
+ prng_data = kzalloc(datalen, GFP_KERNEL);
+ if (!prng_data) {
+ prng_errorflag = PRNG_INSTANTIATE_FAILED;
+ return -ENOMEM;
+ }
+ mutex_init(&prng_data->mutex);
+ prng_data->buf = ((u8 *)prng_data) + sizeof(struct prng_data_s);
+
+ /* selftest */
+ ret = prng_sha512_selftest();
+ if (ret)
+ goto outfree;
+
+ /* generate initial seed bytestring, first 48 bytes of entropy */
+ ret = generate_entropy(seed, 48);
+ if (ret != 48)
+ goto outfree;
+ /* followed by 16 bytes of unique nonce */
+ get_tod_clock_ext(seed + 48);
+
+ /* initial seed of the ppno drng */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret < 0) {
+ prng_errorflag = PRNG_SEED_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+
+ /* if fips mode is enabled, generate a first block of random
+ bytes for the FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ prng_data->prev = prng_data->buf + prng_chunk_size;
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows,
+ prng_data->prev,
+ prng_chunk_size,
+ NULL, 0);
+ if (ret < 0 || ret != prng_chunk_size) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ ret = -EIO;
+ goto outfree;
+ }
+ }
+
+ return 0;
+
+outfree:
+ kfree(prng_data);
+ return ret;
+}
+
+
+static void prng_sha512_deinstantiate(void)
+{
+ pr_debug("The prng module stopped after running in SHA-512 mode\n");
+ kzfree(prng_data);
+}
+
+
+static int prng_sha512_reseed(void)
+{
+ int ret;
+ u8 seed[32];
+
+ /* generate 32 bytes of fresh entropy */
+ ret = generate_entropy(seed, sizeof(seed));
+ if (ret != sizeof(seed))
+ return ret;
+
+ /* do a reseed of the ppno drng with this bytestring */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_SEED,
+ &prng_data->ppnows, NULL, 0,
+ seed, sizeof(seed));
+ if (ret) {
+ prng_errorflag = PRNG_RESEED_FAILED;
+ return -EIO;
+ }
+
+ return 0;
+}
+
+
+static int prng_sha512_generate(u8 *buf, size_t nbytes)
+{
+ int ret;
+
+ /* reseed needed ? */
+ if (prng_data->ppnows.reseed_counter > prng_reseed_limit) {
+ ret = prng_sha512_reseed();
+ if (ret)
+ return ret;
+ }
+
+ /* PPNO generate */
+ ret = crypt_s390_ppno(PPNO_SHA512_DRNG_GEN,
+ &prng_data->ppnows, buf, nbytes,
+ NULL, 0);
+ if (ret < 0 || ret != nbytes) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EIO;
+ }
+
+ /* FIPS 140-2 Conditional Self Test */
+ if (fips_enabled) {
+ if (!memcmp(prng_data->prev, buf, nbytes)) {
+ prng_errorflag = PRNG_GEN_FAILED;
+ return -EILSEQ;
+ }
+ memcpy(prng_data->prev, buf, nbytes);
+ }
+
+ return ret;
+}
+
+
+/*** file io functions ***/
+
+static int prng_open(struct inode *inode, struct file *file)
+{
+ return nonseekable_open(inode, file);
+}
+
+
+static ssize_t prng_tdes_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int chunk, n, tmp, ret = 0;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
- /* nbytes can be arbitrary length, we split it into chunks */
while (nbytes) {
- /* same as in extract_entropy_user in random.c */
if (need_resched()) {
if (signal_pending(current)) {
if (ret == 0)
ret = -ERESTARTSYS;
break;
}
+			/* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
schedule();
+			/* re-acquire the mutex */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
}
/*
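Both generator modes end up behind the same misc device, registered under the name "prandom", so user space keeps a single interface whether the TDES or the SHA-512 backend is active; reads are served one prng_chunk_size block at a time, with leftovers carried over in prng_data->rest. A minimal user-space sketch; the /dev/prandom path is the usual udev name for the misc node and is assumed here:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];		/* matches the default chunksize */
	ssize_t n;
	int fd = open("/dev/prandom", O_RDONLY);

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));	/* served by prng_sha512_read or prng_tdes_read */
	printf("read %zd pseudo-random bytes\n", n);
	close(fd);
	return 0;
}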
@@ -112,12 +535,11 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
/* PRNG only likes multiples of 8 bytes */
n = (chunk + 7) & -8;
- if (p->count > prng_entropy_limit)
- prng_seed(8);
+ if (prng_data->prngws.reseed_counter > prng_reseed_limit)
+ prng_tdes_seed(8);
/* if the CPU supports PRNG stckf is present too */
- asm volatile(".insn s,0xb27c0000,%0"
- : "=m" (*((unsigned long long *)p->buf)) : : "cc");
+ *((unsigned long long *)prng_data->buf) = get_tod_clock_fast();
/*
* Beside the STCKF the input for the TDES-EDE is the output
@@ -132,35 +554,259 @@ static ssize_t prng_read(struct file *file, char __user *ubuf, size_t nbytes,
* Note: you can still get strict X9.17 conformity by setting
* prng_chunk_size to 8 bytes.
*/
- tmp = crypt_s390_kmc(KMC_PRNG, parm_block, p->buf, p->buf, n);
- BUG_ON((tmp < 0) || (tmp != n));
+ tmp = crypt_s390_kmc(KMC_PRNG, prng_data->prngws.parm_block,
+ prng_data->buf, prng_data->buf, n);
+ if (tmp < 0 || tmp != n) {
+ ret = -EIO;
+ break;
+ }
- p->count += n;
+ prng_data->prngws.byte_counter += n;
+ prng_data->prngws.reseed_counter += n;
- if (copy_to_user(ubuf, p->buf, chunk))
+ if (copy_to_user(ubuf, prng_data->buf, chunk))
return -EFAULT;
nbytes -= chunk;
ret += chunk;
ubuf += chunk;
}
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
return ret;
}
-static const struct file_operations prng_fops = {
+
+static ssize_t prng_sha512_read(struct file *file, char __user *ubuf,
+ size_t nbytes, loff_t *ppos)
+{
+ int n, ret = 0;
+ u8 *p;
+
+ /* if errorflag is set do nothing and return 'broken pipe' */
+ if (prng_errorflag)
+ return -EPIPE;
+
+ /* lock prng_data struct */
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+
+ while (nbytes) {
+ if (need_resched()) {
+ if (signal_pending(current)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ break;
+ }
+			/* release the mutex before calling schedule() */
+ mutex_unlock(&prng_data->mutex);
+ schedule();
+			/* reacquire the mutex afterwards */
+ if (mutex_lock_interruptible(&prng_data->mutex)) {
+ if (ret == 0)
+ ret = -ERESTARTSYS;
+ return ret;
+ }
+ }
+ if (prng_data->rest) {
+			/* push leftover random bytes from the previous read */
+ p = prng_data->buf + prng_chunk_size - prng_data->rest;
+ n = (nbytes < prng_data->rest) ?
+ nbytes : prng_data->rest;
+ prng_data->rest -= n;
+ } else {
+ /* generate one chunk of random bytes into read buf */
+ p = prng_data->buf;
+ n = prng_sha512_generate(p, prng_chunk_size);
+ if (n < 0) {
+ ret = n;
+ break;
+ }
+ if (nbytes < prng_chunk_size) {
+ n = nbytes;
+ prng_data->rest = prng_chunk_size - n;
+ } else {
+ n = prng_chunk_size;
+ prng_data->rest = 0;
+ }
+ }
+ if (copy_to_user(ubuf, p, n)) {
+ ret = -EFAULT;
+ break;
+ }
+ ubuf += n;
+ nbytes -= n;
+ ret += n;
+ }
+
+ /* unlock prng_data struct */
+ mutex_unlock(&prng_data->mutex);
+
+ return ret;
+}
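Both read paths are served through a misc device registered under the name "prandom" (see the miscdevice definitions below), so the node is expected to show up as /dev/prandom; that path is an assumption for this sketch. A minimal user-space reader:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[256];	/* any length; the driver serves it chunk-wise */
	int fd = open("/dev/prandom", O_RDONLY);
	ssize_t n;

	if (fd < 0) {
		perror("open /dev/prandom");
		return 1;
	}
	n = read(fd, buf, sizeof(buf));
	if (n < 0)
		perror("read");
	else
		printf("read %zd pseudo-random bytes\n", n);
	close(fd);
	return 0;
}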
+
+
+/*** sysfs stuff ***/
+
+static const struct file_operations prng_sha512_fops = {
+ .owner = THIS_MODULE,
+ .open = &prng_open,
+ .release = NULL,
+ .read = &prng_sha512_read,
+ .llseek = noop_llseek,
+};
+static const struct file_operations prng_tdes_fops = {
.owner = THIS_MODULE,
.open = &prng_open,
.release = NULL,
- .read = &prng_read,
+ .read = &prng_tdes_read,
.llseek = noop_llseek,
};
-static struct miscdevice prng_dev = {
+static struct miscdevice prng_sha512_dev = {
+ .name = "prandom",
+ .minor = MISC_DYNAMIC_MINOR,
+ .fops = &prng_sha512_fops,
+};
+static struct miscdevice prng_tdes_dev = {
.name = "prandom",
.minor = MISC_DYNAMIC_MINOR,
- .fops = &prng_fops,
+ .fops = &prng_tdes_fops,
};
+
+/* chunksize attribute (ro) */
+static ssize_t prng_chunksize_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_chunk_size);
+}
+static DEVICE_ATTR(chunksize, 0444, prng_chunksize_show, NULL);
+
+/* counter attribute (ro) */
+static ssize_t prng_counter_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ u64 counter;
+
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ if (prng_mode == PRNG_MODE_SHA512)
+ counter = prng_data->ppnows.stream_bytes;
+ else
+ counter = prng_data->prngws.byte_counter;
+ mutex_unlock(&prng_data->mutex);
+
+ return snprintf(buf, PAGE_SIZE, "%llu\n", counter);
+}
+static DEVICE_ATTR(byte_counter, 0444, prng_counter_show, NULL);
+
+/* errorflag attribute (ro) */
+static ssize_t prng_errorflag_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%d\n", prng_errorflag);
+}
+static DEVICE_ATTR(errorflag, 0444, prng_errorflag_show, NULL);
+
+/* mode attribute (ro) */
+static ssize_t prng_mode_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ if (prng_mode == PRNG_MODE_TDES)
+ return snprintf(buf, PAGE_SIZE, "TDES\n");
+ else
+ return snprintf(buf, PAGE_SIZE, "SHA512\n");
+}
+static DEVICE_ATTR(mode, 0444, prng_mode_show, NULL);
+
+/* reseed attribute (w) */
+static ssize_t prng_reseed_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ if (mutex_lock_interruptible(&prng_data->mutex))
+ return -ERESTARTSYS;
+ prng_sha512_reseed();
+ mutex_unlock(&prng_data->mutex);
+
+ return count;
+}
+static DEVICE_ATTR(reseed, 0200, NULL, prng_reseed_store);
+
+/* reseed limit attribute (rw) */
+static ssize_t prng_reseed_limit_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "%u\n", prng_reseed_limit);
+}
+static ssize_t prng_reseed_limit_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ unsigned limit;
+
+ if (sscanf(buf, "%u\n", &limit) != 1)
+ return -EINVAL;
+
+ if (prng_mode == PRNG_MODE_SHA512) {
+ if (limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+ } else {
+ if (limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+ }
+
+ prng_reseed_limit = limit;
+
+ return count;
+}
+static DEVICE_ATTR(reseed_limit, 0644,
+ prng_reseed_limit_show, prng_reseed_limit_store);
+
+/* strength attribute (ro) */
+static ssize_t prng_strength_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ return snprintf(buf, PAGE_SIZE, "256\n");
+}
+static DEVICE_ATTR(strength, 0444, prng_strength_show, NULL);
+
+static struct attribute *prng_sha512_dev_attrs[] = {
+ &dev_attr_errorflag.attr,
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ &dev_attr_reseed.attr,
+ &dev_attr_reseed_limit.attr,
+ &dev_attr_strength.attr,
+ NULL
+};
+static struct attribute *prng_tdes_dev_attrs[] = {
+ &dev_attr_chunksize.attr,
+ &dev_attr_byte_counter.attr,
+ &dev_attr_mode.attr,
+ NULL
+};
+
+static struct attribute_group prng_sha512_dev_attr_group = {
+ .attrs = prng_sha512_dev_attrs
+};
+static struct attribute_group prng_tdes_dev_attr_group = {
+ .attrs = prng_tdes_dev_attrs
+};
+
+
+/*** module init and exit ***/
+
static int __init prng_init(void)
{
int ret;
@@ -169,43 +815,105 @@ static int __init prng_init(void)
if (!crypt_s390_func_available(KMC_PRNG, CRYPT_S390_MSA))
return -EOPNOTSUPP;
- if (prng_chunk_size < 8)
- return -EINVAL;
+ /* choose prng mode */
+ if (prng_mode != PRNG_MODE_TDES) {
+ /* check for MSA5 support for PPNO operations */
+ if (!crypt_s390_func_available(PPNO_SHA512_DRNG_GEN,
+ CRYPT_S390_MSA5)) {
+ if (prng_mode == PRNG_MODE_SHA512) {
+				pr_err("The prng module cannot start in SHA-512 mode\n");
+ return -EOPNOTSUPP;
+ }
+ prng_mode = PRNG_MODE_TDES;
+		} else {
+			prng_mode = PRNG_MODE_SHA512;
+		}
+ }
- p = kmalloc(sizeof(struct s390_prng_data), GFP_KERNEL);
- if (!p)
- return -ENOMEM;
- p->count = 0;
+ if (prng_mode == PRNG_MODE_SHA512) {
- p->buf = kmalloc(prng_chunk_size, GFP_KERNEL);
- if (!p->buf) {
- ret = -ENOMEM;
- goto out_free;
- }
+ /* SHA512 mode */
- /* initialize the PRNG, add 128 bits of entropy */
- prng_seed(16);
+ if (prng_chunk_size < PRNG_CHUNKSIZE_SHA512_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_SHA512_MAX)
+ return -EINVAL;
+ prng_chunk_size = (prng_chunk_size + 0x3f) & ~0x3f;
- ret = misc_register(&prng_dev);
- if (ret)
- goto out_buf;
- return 0;
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_SHA512;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_SHA512_LOWER)
+ return -EINVAL;
+
+ ret = prng_sha512_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_sha512_dev);
+ if (ret) {
+ prng_sha512_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ goto out;
+ }
-out_buf:
- kfree(p->buf);
-out_free:
- kfree(p);
+ } else {
+
+ /* TDES mode */
+
+ if (prng_chunk_size < PRNG_CHUNKSIZE_TDES_MIN
+ || prng_chunk_size > PRNG_CHUNKSIZE_TDES_MAX)
+ return -EINVAL;
+ prng_chunk_size = (prng_chunk_size + 0x07) & ~0x07;
+
+ if (prng_reseed_limit == 0)
+ prng_reseed_limit = PRNG_RESEED_LIMIT_TDES;
+ else if (prng_reseed_limit < PRNG_RESEED_LIMIT_TDES_LOWER)
+ return -EINVAL;
+
+ ret = prng_tdes_instantiate();
+ if (ret)
+ goto out;
+
+ ret = misc_register(&prng_tdes_dev);
+ if (ret) {
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+ ret = sysfs_create_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ if (ret) {
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ goto out;
+ }
+
+ }
+
+out:
return ret;
}
+
static void __exit prng_exit(void)
{
- /* wipe me */
- kzfree(p->buf);
- kfree(p);
-
- misc_deregister(&prng_dev);
+ if (prng_mode == PRNG_MODE_SHA512) {
+ sysfs_remove_group(&prng_sha512_dev.this_device->kobj,
+ &prng_sha512_dev_attr_group);
+ misc_deregister(&prng_sha512_dev);
+ prng_sha512_deinstantiate();
+ } else {
+ sysfs_remove_group(&prng_tdes_dev.this_device->kobj,
+ &prng_tdes_dev_attr_group);
+ misc_deregister(&prng_tdes_dev);
+ prng_tdes_deinstantiate();
+ }
}
+
module_init(prng_init);
module_exit(prng_exit);
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 694bcd6..2f924bc 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -26,6 +26,9 @@
/* Not more than 2GB */
#define KEXEC_CONTROL_MEMORY_LIMIT (1UL<<31)
+/* Allocate control page with GFP_DMA */
+#define KEXEC_CONTROL_MEMORY_GFP GFP_DMA
+
/* Maximum address we can use for the crash control pages */
#define KEXEC_CRASH_CONTROL_MEMORY_LIMIT (-1UL)
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index a5e6562..d29ad95 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -14,7 +14,9 @@ typedef struct {
unsigned long asce_bits;
unsigned long asce_limit;
unsigned long vdso_base;
- /* The mmu context has extended page tables. */
+ /* The mmu context allocates 4K page tables. */
+ unsigned int alloc_pgste:1;
+ /* The mmu context uses extended page tables. */
unsigned int has_pgste:1;
/* The mmu context uses storage keys. */
unsigned int use_skey:1;
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index d25d9ff..fb1b93e 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -20,8 +20,11 @@ static inline int init_new_context(struct task_struct *tsk,
mm->context.flush_mm = 0;
mm->context.asce_bits = _ASCE_TABLE_LENGTH | _ASCE_USER_BITS;
mm->context.asce_bits |= _ASCE_TYPE_REGION3;
+#ifdef CONFIG_PGSTE
+ mm->context.alloc_pgste = page_table_allocate_pgste;
mm->context.has_pgste = 0;
mm->context.use_skey = 0;
+#endif
mm->context.asce_limit = STACK_TOP_MAX;
crst_table_init((unsigned long *) mm->pgd, pgd_entry_type(mm));
return 0;
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 51e7fb6..7b7858f 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -21,6 +21,7 @@ void crst_table_free(struct mm_struct *, unsigned long *);
unsigned long *page_table_alloc(struct mm_struct *);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
+extern int page_table_allocate_pgste;
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
unsigned long key, bool nq);
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 989cfae..fc64239 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -12,12 +12,9 @@
#define _ASM_S390_PGTABLE_H
/*
- * The Linux memory management assumes a three-level page table setup. For
- * s390 31 bit we "fold" the mid level into the top-level page table, so
- * that we physically have the same two-level page table as the s390 mmu
- * expects in 31 bit mode. For s390 64 bit we use three of the five levels
- * the hardware provides (region first and region second tables are not
- * used).
+ * The Linux memory management assumes a three-level page table setup.
+ * For s390 64 bit we use up to four of the five levels the hardware
+ * provides (region first tables are not used).
*
* The "pgd_xxx()" functions are trivial for a folded two-level
* setup: the pgd is never bad, and a pmd always exists (as it's folded
@@ -101,8 +98,8 @@ extern unsigned long zero_page_mask;
#ifndef __ASSEMBLY__
/*
- * The vmalloc and module area will always be on the topmost area of the kernel
- * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
+ * The vmalloc and module area will always be on the topmost area of the
+ * kernel mapping. We reserve 128GB (64bit) for vmalloc and modules.
* On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
* modules will reside. That makes sure that inter module branches always
* happen without trampolines and in addition the placement within a 2GB frame
@@ -131,38 +128,6 @@ static inline int is_module_addr(void *addr)
}
/*
- * A 31 bit pagetable entry of S390 has following format:
- * | PFRA | | OS |
- * 0 0IP0
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Page-Invalid Bit: Page is not available for address-translation
- * P Page-Protection Bit: Store access not possible for page
- *
- * A 31 bit segmenttable entry of S390 has following format:
- * | P-table origin | |PTL
- * 0 IC
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * I Segment-Invalid Bit: Segment is not available for address-translation
- * C Common-Segment Bit: Segment is not private (PoP 3-30)
- * PTL Page-Table-Length: Page-table length (PTL+1*16 entries -> up to 256)
- *
- * The 31 bit segmenttable origin of S390 has following format:
- *
- * |S-table origin | | STL |
- * X **GPS
- * 00000000001111111111222222222233
- * 01234567890123456789012345678901
- *
- * X Space-Switch event:
- * G Segment-Invalid Bit: *
- * P Private-Space Bit: Segment is not private (PoP 3-30)
- * S Storage-Alteration:
- * STL Segment-Table-Length: Segment-table length (STL+1*16 entries -> up to 2048)
- *
* A 64 bit pagetable entry of S390 has following format:
* | PFRA |0IPC| OS |
* 0000000000111111111122222222223333333333444444444455555555556666
@@ -220,7 +185,6 @@ static inline int is_module_addr(void *addr)
/* Software bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* SW pte present bit */
-#define _PAGE_TYPE 0x002 /* SW pte type bit */
#define _PAGE_YOUNG 0x004 /* SW pte young bit */
#define _PAGE_DIRTY 0x008 /* SW pte dirty bit */
#define _PAGE_READ 0x010 /* SW pte read bit */
@@ -240,31 +204,34 @@ static inline int is_module_addr(void *addr)
* table lock held.
*
* The following table gives the different possible bit combinations for
- * the pte hardware and software bits in the last 12 bits of a pte:
+ * the pte hardware and software bits in the last 12 bits of a pte
+ * (. unassigned bit, x don't care, t swap type):
*
* 842100000000
* 000084210000
* 000000008421
- * .IR...wrdytp
- * empty .10...000000
- * swap .10...xxxx10
- * file .11...xxxxx0
- * prot-none, clean, old .11...000001
- * prot-none, clean, young .11...000101
- * prot-none, dirty, old .10...001001
- * prot-none, dirty, young .10...001101
- * read-only, clean, old .11...010001
- * read-only, clean, young .01...010101
- * read-only, dirty, old .11...011001
- * read-only, dirty, young .01...011101
- * read-write, clean, old .11...110001
- * read-write, clean, young .01...110101
- * read-write, dirty, old .10...111001
- * read-write, dirty, young .00...111101
+ * .IR.uswrdy.p
+ * empty .10.00000000
+ * swap .11..ttttt.0
+ * prot-none, clean, old .11.xx0000.1
+ * prot-none, clean, young .11.xx0001.1
+ * prot-none, dirty, old .10.xx0010.1
+ * prot-none, dirty, young .10.xx0011.1
+ * read-only, clean, old .11.xx0100.1
+ * read-only, clean, young .01.xx0101.1
+ * read-only, dirty, old .11.xx0110.1
+ * read-only, dirty, young .01.xx0111.1
+ * read-write, clean, old .11.xx1100.1
+ * read-write, clean, young .01.xx1101.1
+ * read-write, dirty, old .10.xx1110.1
+ * read-write, dirty, young .00.xx1111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*
- * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
- * pte_none is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
- * pte_swap is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
+ * pte_none is true for the bit pattern .10.00000000, pte == 0x400
+ * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
+ * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
*/
/* Bits in the segment/region table address-space-control-element */
@@ -335,6 +302,8 @@ static inline int is_module_addr(void *addr)
* read-write, dirty, young 11..0...0...11
* The segment table origin is used to distinguish empty (origin==0) from
* read-write, old segment table entries (origin!=0)
+ * HW-bits: R read-only, I invalid
+ * SW-bits: y young, d dirty, r read, w write
*/
#define _SEGMENT_ENTRY_SPLIT_BIT 11 /* THP splitting bit number */
@@ -423,6 +392,15 @@ static inline int mm_has_pgste(struct mm_struct *mm)
return 0;
}
+static inline int mm_alloc_pgste(struct mm_struct *mm)
+{
+#ifdef CONFIG_PGSTE
+ if (unlikely(mm->context.alloc_pgste))
+ return 1;
+#endif
+ return 0;
+}
+
/*
* In the case that a guest uses storage keys
* faults should no longer be backed by zero pages
@@ -582,10 +560,9 @@ static inline int pte_none(pte_t pte)
static inline int pte_swap(pte_t pte)
{
- /* Bit pattern: (pte & 0x603) == 0x402 */
- return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
- _PAGE_TYPE | _PAGE_PRESENT))
- == (_PAGE_INVALID | _PAGE_TYPE);
+ /* Bit pattern: (pte & 0x201) == 0x200 */
+ return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
+ == _PAGE_PROTECT;
}
static inline int pte_special(pte_t pte)
@@ -1586,51 +1563,51 @@ static inline int has_transparent_hugepage(void)
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
- * 31 bit swap entry format:
- * A page-table entry has some bits we have to treat in a special way.
- * Bits 0, 20 and bit 23 have to be zero, otherwise an specification
- * exception will occur instead of a page translation exception. The
- * specifiation exception has the bad habit not to store necessary
- * information in the lowcore.
- * Bits 21, 22, 30 and 31 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 1-19 and bits 24-29 to store type and offset.
- * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
- * plus 24 for the offset.
- * 0| offset |0110|o|type |00|
- * 0 0000000001111111111 2222 2 22222 33
- * 0 1234567890123456789 0123 4 56789 01
- *
* 64 bit swap entry format:
* A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
* exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing necessary
* information in the lowcore.
- * Bits 53, 54, 62 and 63 are used to indicate the page type.
- * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
- * This leaves the bits 0-51 and bits 56-61 to store type and offset.
- * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
- * plus 56 for the offset.
- * | offset |0110|o|type |00|
- * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
- * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
+ * Bits 54 and 63 are used to indicate the page type.
+ * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
+ * This leaves the bits 0-51 and bits 56-62 to store type and offset.
+ * We use the 5 bits from 57-61 for the type and the 52 bits from 0-51
+ * for the offset.
+ * | offset |01100|type |00|
+ * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
+ * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
*/
-#define __SWP_OFFSET_MASK (~0UL >> 11)
+#define __SWP_OFFSET_MASK ((1UL << 52) - 1)
+#define __SWP_OFFSET_SHIFT 12
+#define __SWP_TYPE_MASK ((1UL << 5) - 1)
+#define __SWP_TYPE_SHIFT 2
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
pte_t pte;
- offset &= __SWP_OFFSET_MASK;
- pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
- ((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
+
+ pte_val(pte) = _PAGE_INVALID | _PAGE_PROTECT;
+ pte_val(pte) |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
+ pte_val(pte) |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
return pte;
}
-#define __swp_type(entry) (((entry).val >> 2) & 0x1f)
-#define __swp_offset(entry) (((entry).val >> 11) | (((entry).val >> 7) & 1))
-#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })
+static inline unsigned long __swp_type(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
+}
+
+static inline unsigned long __swp_offset(swp_entry_t entry)
+{
+ return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
+}
+
+static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
+{
+ return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
+}
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
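A small worked example of the new swap pte layout, using the shift/mask definitions above; the constants 0x400 (_PAGE_INVALID) and 0x200 (_PAGE_PROTECT) are assumed from the bit-pattern comments earlier in this header.

#include <stdio.h>

#define SWP_OFFSET_MASK		((1UL << 52) - 1)
#define SWP_OFFSET_SHIFT	12
#define SWP_TYPE_MASK		((1UL << 5) - 1)
#define SWP_TYPE_SHIFT		2

int main(void)
{
	unsigned long type = 3, offset = 0x12345, pte;

	pte  = 0x400UL | 0x200UL;	/* _PAGE_INVALID | _PAGE_PROTECT (assumed values) */
	pte |= (offset & SWP_OFFSET_MASK) << SWP_OFFSET_SHIFT;
	pte |= (type & SWP_TYPE_MASK) << SWP_TYPE_SHIFT;

	printf("pte=%#lx type=%lu offset=%#lx\n", pte,
	       (pte >> SWP_TYPE_SHIFT) & SWP_TYPE_MASK,
	       (pte >> SWP_OFFSET_SHIFT) & SWP_OFFSET_MASK);
	/* prints: pte=0x1234560c type=3 offset=0x12345,
	 * and (pte & 0x201) == 0x200 as required for a swap pte */
	return 0;
}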
diff --git a/arch/s390/mm/hugetlbpage.c b/arch/s390/mm/hugetlbpage.c
index 210ffed..e617e74 100644
--- a/arch/s390/mm/hugetlbpage.c
+++ b/arch/s390/mm/hugetlbpage.c
@@ -14,20 +14,23 @@ static inline pmd_t __pte_to_pmd(pte_t pte)
/*
* Convert encoding pte bits pmd bits
- * .IR...wrdytp dy..R...I...wr
- * empty .10...000000 -> 00..0...1...00
- * prot-none, clean, old .11...000001 -> 00..1...1...00
- * prot-none, clean, young .11...000101 -> 01..1...1...00
- * prot-none, dirty, old .10...001001 -> 10..1...1...00
- * prot-none, dirty, young .10...001101 -> 11..1...1...00
- * read-only, clean, old .11...010001 -> 00..1...1...01
- * read-only, clean, young .01...010101 -> 01..1...0...01
- * read-only, dirty, old .11...011001 -> 10..1...1...01
- * read-only, dirty, young .01...011101 -> 11..1...0...01
- * read-write, clean, old .11...110001 -> 00..0...1...11
- * read-write, clean, young .01...110101 -> 01..0...0...11
- * read-write, dirty, old .10...111001 -> 10..0...1...11
- * read-write, dirty, young .00...111101 -> 11..0...0...11
+ * lIR.uswrdy.p dy..R...I...wr
+ * empty 010.000000.0 -> 00..0...1...00
+ * prot-none, clean, old 111.000000.1 -> 00..1...1...00
+ * prot-none, clean, young 111.000001.1 -> 01..1...1...00
+ * prot-none, dirty, old 111.000010.1 -> 10..1...1...00
+ * prot-none, dirty, young 111.000011.1 -> 11..1...1...00
+ * read-only, clean, old 111.000100.1 -> 00..1...1...01
+ * read-only, clean, young 101.000101.1 -> 01..1...0...01
+ * read-only, dirty, old 111.000110.1 -> 10..1...1...01
+ * read-only, dirty, young 101.000111.1 -> 11..1...0...01
+ * read-write, clean, old 111.001100.1 -> 00..1...1...11
+ * read-write, clean, young 101.001101.1 -> 01..1...0...11
+ * read-write, dirty, old 110.001110.1 -> 10..0...1...11
+ * read-write, dirty, young 100.001111.1 -> 11..0...0...11
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*/
if (pte_present(pte)) {
pmd_val(pmd) = pte_val(pte) & PAGE_MASK;
@@ -48,20 +51,23 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
/*
* Convert encoding pmd bits pte bits
- * dy..R...I...wr .IR...wrdytp
- * empty 00..0...1...00 -> .10...001100
- * prot-none, clean, old 00..0...1...00 -> .10...000001
- * prot-none, clean, young 01..0...1...00 -> .10...000101
- * prot-none, dirty, old 10..0...1...00 -> .10...001001
- * prot-none, dirty, young 11..0...1...00 -> .10...001101
- * read-only, clean, old 00..1...1...01 -> .11...010001
- * read-only, clean, young 01..1...1...01 -> .11...010101
- * read-only, dirty, old 10..1...1...01 -> .11...011001
- * read-only, dirty, young 11..1...1...01 -> .11...011101
- * read-write, clean, old 00..0...1...11 -> .10...110001
- * read-write, clean, young 01..0...1...11 -> .10...110101
- * read-write, dirty, old 10..0...1...11 -> .10...111001
- * read-write, dirty, young 11..0...1...11 -> .10...111101
+ * dy..R...I...wr lIR.uswrdy.p
+ * empty 00..0...1...00 -> 010.000000.0
+ * prot-none, clean, old 00..1...1...00 -> 111.000000.1
+ * prot-none, clean, young 01..1...1...00 -> 111.000001.1
+ * prot-none, dirty, old 10..1...1...00 -> 111.000010.1
+ * prot-none, dirty, young 11..1...1...00 -> 111.000011.1
+ * read-only, clean, old 00..1...1...01 -> 111.000100.1
+ * read-only, clean, young 01..1...0...01 -> 101.000101.1
+ * read-only, dirty, old 10..1...1...01 -> 111.000110.1
+ * read-only, dirty, young 11..1...0...01 -> 101.000111.1
+ * read-write, clean, old 00..1...1...11 -> 111.001100.1
+ * read-write, clean, young 01..1...0...11 -> 101.001101.1
+ * read-write, dirty, old 10..0...1...11 -> 110.001110.1
+ * read-write, dirty, young 11..0...0...11 -> 100.001111.1
+ * HW-bits: R read-only, I invalid
+ * SW-bits: p present, y young, d dirty, r read, w write, s special,
+ * u unused, l large
*/
if (pmd_present(pmd)) {
pte_val(pte) = pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN_LARGE;
@@ -70,8 +76,8 @@ static inline pte_t __pmd_to_pte(pmd_t pmd)
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) << 4;
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) << 5;
pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT);
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_DIRTY) << 10;
- pmd_val(pmd) |= (pte_val(pte) & _PAGE_YOUNG) << 10;
+ pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) >> 10;
+ pte_val(pte) |= (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) >> 10;
} else
pte_val(pte) = _PAGE_INVALID;
return pte;
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 33f5894..b33f661 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,7 @@
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
+#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>
@@ -920,6 +921,40 @@ unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
}
EXPORT_SYMBOL(get_guest_storage_key);
+static int page_table_allocate_pgste_min = 0;
+static int page_table_allocate_pgste_max = 1;
+int page_table_allocate_pgste = 0;
+EXPORT_SYMBOL(page_table_allocate_pgste);
+
+static struct ctl_table page_table_sysctl[] = {
+ {
+ .procname = "allocate_pgste",
+ .data = &page_table_allocate_pgste,
+ .maxlen = sizeof(int),
+ .mode = S_IRUGO | S_IWUSR,
+ .proc_handler = proc_dointvec,
+ .extra1 = &page_table_allocate_pgste_min,
+ .extra2 = &page_table_allocate_pgste_max,
+ },
+ { }
+};
+
+static struct ctl_table page_table_sysctl_dir[] = {
+ {
+ .procname = "vm",
+ .maxlen = 0,
+ .mode = 0555,
+ .child = page_table_sysctl,
+ },
+ { }
+};
+
+static int __init page_table_register_sysctl(void)
+{
+ return register_sysctl_table(page_table_sysctl_dir) ? 0 : -ENOMEM;
+}
+__initcall(page_table_register_sysctl);
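The new knob is registered under the "vm" directory, so it should surface as /proc/sys/vm/allocate_pgste; that path is inferred from the ctl_table names above and assumed here. A minimal sketch of a wrapper that enables PGSTE page tables before starting a KVM-capable workload:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* path assumed from the "vm"/"allocate_pgste" ctl_table entries */
	int fd = open("/proc/sys/vm/allocate_pgste", O_WRONLY);

	if (fd < 0) {
		perror("open allocate_pgste");
		return 1;
	}
	if (write(fd, "1\n", 2) != 2) {
		perror("write");
		close(fd);
		return 1;
	}
	close(fd);
	/* processes started after this point get 4K page tables with PGSTEs */
	return 0;
}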
+
#else /* CONFIG_PGSTE */
static inline int page_table_with_pgste(struct page *page)
@@ -963,7 +998,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
struct page *uninitialized_var(page);
unsigned int mask, bit;
- if (mm_has_pgste(mm))
+ if (mm_alloc_pgste(mm))
return page_table_alloc_pgste(mm);
/* Allocate fragments of a 4K page as 1K/2K page table */
spin_lock_bh(&mm->context.list_lock);
@@ -1165,116 +1200,25 @@ static inline void thp_split_mm(struct mm_struct *mm)
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
-static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
- struct mm_struct *mm, pud_t *pud,
- unsigned long addr, unsigned long end)
-{
- unsigned long next, *table, *new;
- struct page *page;
- spinlock_t *ptl;
- pmd_t *pmd;
-
- pmd = pmd_offset(pud, addr);
- do {
- next = pmd_addr_end(addr, end);
-again:
- if (pmd_none_or_clear_bad(pmd))
- continue;
- table = (unsigned long *) pmd_deref(*pmd);
- page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
- if (page_table_with_pgste(page))
- continue;
- /* Allocate new page table with pgstes */
- new = page_table_alloc_pgste(mm);
- if (!new)
- return -ENOMEM;
-
- ptl = pmd_lock(mm, pmd);
- if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
- /* Nuke pmd entry pointing to the "short" page table */
- pmdp_flush_lazy(mm, addr, pmd);
- pmd_clear(pmd);
- /* Copy ptes from old table to new table */
- memcpy(new, table, PAGE_SIZE/2);
- clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
- /* Establish new table */
- pmd_populate(mm, pmd, (pte_t *) new);
- /* Free old table with rcu, there might be a walker! */
- page_table_free_rcu(tlb, table, addr);
- new = NULL;
- }
- spin_unlock(ptl);
- if (new) {
- page_table_free_pgste(new);
- goto again;
- }
- } while (pmd++, addr = next, addr != end);
-
- return addr;
-}
-
-static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
- struct mm_struct *mm, pgd_t *pgd,
- unsigned long addr, unsigned long end)
-{
- unsigned long next;
- pud_t *pud;
-
- pud = pud_offset(pgd, addr);
- do {
- next = pud_addr_end(addr, end);
- if (pud_none_or_clear_bad(pud))
- continue;
- next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
- if (unlikely(IS_ERR_VALUE(next)))
- return next;
- } while (pud++, addr = next, addr != end);
-
- return addr;
-}
-
-static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
- unsigned long addr, unsigned long end)
-{
- unsigned long next;
- pgd_t *pgd;
-
- pgd = pgd_offset(mm, addr);
- do {
- next = pgd_addr_end(addr, end);
- if (pgd_none_or_clear_bad(pgd))
- continue;
- next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
- if (unlikely(IS_ERR_VALUE(next)))
- return next;
- } while (pgd++, addr = next, addr != end);
-
- return 0;
-}
-
/*
* switch on pgstes for its userspace process (for kvm)
*/
int s390_enable_sie(void)
{
- struct task_struct *tsk = current;
- struct mm_struct *mm = tsk->mm;
- struct mmu_gather tlb;
+ struct mm_struct *mm = current->mm;
/* Do we have pgstes? if yes, we are done */
- if (mm_has_pgste(tsk->mm))
+ if (mm_has_pgste(mm))
return 0;
-
+ /* Fail if the page tables are 2K */
+ if (!mm_alloc_pgste(mm))
+ return -EINVAL;
down_write(&mm->mmap_sem);
+ mm->context.has_pgste = 1;
/* split thp mappings and disable thp for future mappings */
thp_split_mm(mm);
- /* Reallocate the page tables with pgstes */
- tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
- if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
- mm->context.has_pgste = 1;
- tlb_finish_mmu(&tlb, 0, TASK_SIZE);
up_write(&mm->mmap_sem);
- return mm->context.has_pgste ? 0 : -ENOMEM;
+ return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6873f00..d366675 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -774,7 +774,7 @@ static void __init zone_sizes_init(void)
* though, there'll be no lowmem, so we just alloc_bootmem
* the memmap. There will be no percpu memory either.
*/
- if (i != 0 && cpumask_test_cpu(i, &isolnodes)) {
+ if (i != 0 && node_isset(i, isolnodes)) {
node_memmap_pfn[i] =
alloc_bootmem_pfn(0, memmap_size, 0);
BUG_ON(node_percpu[i] != 0);
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index ef17683..48304b8 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1109,6 +1109,8 @@ struct boot_params *make_boot_params(struct efi_config *c)
if (!cmdline_ptr)
goto fail;
hdr->cmd_line_ptr = (unsigned long)cmdline_ptr;
+ /* Fill in upper bits of command line address, NOP on 32 bit */
+ boot_params->ext_cmd_line_ptr = (u64)(unsigned long)cmdline_ptr >> 32;
hdr->ramdisk_image = 0;
hdr->ramdisk_size = 0;
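For reference, a tiny sketch of the split the new ext_cmd_line_ptr assignment performs when the command line buffer lives above 4 GiB; the address value is an arbitrary example.

#include <stdio.h>

int main(void)
{
	unsigned long long addr = 0x123456789ULL;	/* example buffer above 4 GiB */
	unsigned int lo = (unsigned int)addr;		/* -> hdr->cmd_line_ptr       */
	unsigned int hi = (unsigned int)(addr >> 32);	/* -> ext_cmd_line_ptr        */

	printf("lo=%#x hi=%#x\n", lo, hi);	/* prints: lo=0x23456789 hi=0x1 */
	return 0;
}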
diff --git a/arch/x86/include/asm/hypervisor.h b/arch/x86/include/asm/hypervisor.h
index e42f758..055ea99 100644
--- a/arch/x86/include/asm/hypervisor.h
+++ b/arch/x86/include/asm/hypervisor.h
@@ -50,7 +50,7 @@ extern const struct hypervisor_x86 *x86_hyper;
/* Recognized hypervisors */
extern const struct hypervisor_x86 x86_hyper_vmware;
extern const struct hypervisor_x86 x86_hyper_ms_hyperv;
-extern const struct hypervisor_x86 x86_hyper_xen_hvm;
+extern const struct hypervisor_x86 x86_hyper_xen;
extern const struct hypervisor_x86 x86_hyper_kvm;
extern void init_hypervisor(struct cpuinfo_x86 *c);
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index 25b1cc0..d6b078e 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -95,7 +95,6 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
struct pvclock_vsyscall_time_info {
struct pvclock_vcpu_time_info pvti;
- u32 migrate_count;
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define PVTI_SIZE sizeof(struct pvclock_vsyscall_time_info)
diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index cf87de3..64b6117 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -169,7 +169,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
struct __raw_tickets tmp = READ_ONCE(lock->tickets);
tmp.head &= ~TICKET_SLOWPATH_FLAG;
- return (tmp.tail - tmp.head) > TICKET_LOCK_INC;
+ return (__ticket_t)(tmp.tail - tmp.head) > TICKET_LOCK_INC;
}
#define arch_spin_is_contended arch_spin_is_contended
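The added __ticket_t cast matters once the ticket counters wrap. A small stand-alone illustration with 8-bit tickets; TICKET_LOCK_INC is taken to be 2 (the paravirt-spinlock value), which is an assumption for this example only.

#include <stdio.h>

typedef unsigned char ticket_t;	/* stand-in for an 8-bit __ticket_t */

int main(void)
{
	ticket_t tail = 0x02, head = 0xfe;	/* tail has wrapped past head */
	int lock_inc = 2;			/* assumed TICKET_LOCK_INC    */

	/* without the cast: 2 - 254 = -252 in int, so "not contended" (wrong) */
	printf("without cast: %d\n", (tail - head) > lock_inc);
	/* with the cast: (ticket_t)(2 - 254) = 4, so "contended" (right) */
	printf("with cast:    %d\n", (ticket_t)(tail - head) > lock_inc);
	return 0;
}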
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index 358dcd3..c44a5d5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -269,4 +269,9 @@ static inline bool xen_arch_need_swiotlb(struct device *dev,
return false;
}
+static inline unsigned long xen_get_swiotlb_free_pages(unsigned int order)
+{
+ return __get_free_pages(__GFP_NOWARN, order);
+}
+
#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/kernel/cpu/hypervisor.c b/arch/x86/kernel/cpu/hypervisor.c
index 36ce402..d820d8e 100644
--- a/arch/x86/kernel/cpu/hypervisor.c
+++ b/arch/x86/kernel/cpu/hypervisor.c
@@ -27,8 +27,8 @@
static const __initconst struct hypervisor_x86 * const hypervisors[] =
{
-#ifdef CONFIG_XEN_PVHVM
- &x86_hyper_xen_hvm,
+#ifdef CONFIG_XEN
+ &x86_hyper_xen,
#endif
&x86_hyper_vmware,
&x86_hyper_ms_hyperv,
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 219d3fb..960e85d 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2533,34 +2533,6 @@ ssize_t intel_event_sysfs_show(char *page, u64 config)
return x86_event_sysfs_show(page, config, event);
}
-static __initconst const struct x86_pmu core_pmu = {
- .name = "core",
- .handle_irq = x86_pmu_handle_irq,
- .disable_all = x86_pmu_disable_all,
- .enable_all = core_pmu_enable_all,
- .enable = core_pmu_enable_event,
- .disable = x86_pmu_disable_event,
- .hw_config = x86_pmu_hw_config,
- .schedule_events = x86_schedule_events,
- .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
- .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
- .event_map = intel_pmu_event_map,
- .max_events = ARRAY_SIZE(intel_perfmon_event_map),
- .apic = 1,
- /*
- * Intel PMCs cannot be accessed sanely above 32 bit width,
- * so we install an artificial 1<<31 period regardless of
- * the generic event period:
- */
- .max_period = (1ULL << 31) - 1,
- .get_event_constraints = intel_get_event_constraints,
- .put_event_constraints = intel_put_event_constraints,
- .event_constraints = intel_core_event_constraints,
- .guest_get_msrs = core_guest_get_msrs,
- .format_attrs = intel_arch_formats_attr,
- .events_sysfs_show = intel_event_sysfs_show,
-};
-
struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
@@ -2743,6 +2715,44 @@ static struct attribute *intel_arch3_formats_attr[] = {
NULL,
};
+static __initconst const struct x86_pmu core_pmu = {
+ .name = "core",
+ .handle_irq = x86_pmu_handle_irq,
+ .disable_all = x86_pmu_disable_all,
+ .enable_all = core_pmu_enable_all,
+ .enable = core_pmu_enable_event,
+ .disable = x86_pmu_disable_event,
+ .hw_config = x86_pmu_hw_config,
+ .schedule_events = x86_schedule_events,
+ .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
+ .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
+ .event_map = intel_pmu_event_map,
+ .max_events = ARRAY_SIZE(intel_perfmon_event_map),
+ .apic = 1,
+ /*
+ * Intel PMCs cannot be accessed sanely above 32-bit width,
+ * so we install an artificial 1<<31 period regardless of
+ * the generic event period:
+ */
+ .max_period = (1ULL<<31) - 1,
+ .get_event_constraints = intel_get_event_constraints,
+ .put_event_constraints = intel_put_event_constraints,
+ .event_constraints = intel_core_event_constraints,
+ .guest_get_msrs = core_guest_get_msrs,
+ .format_attrs = intel_arch_formats_attr,
+ .events_sysfs_show = intel_event_sysfs_show,
+
+ /*
+ * Virtual (or funny metal) CPU can define x86_pmu.extra_regs
+ * together with PMU version 1 and thus be using core_pmu with
+	 * shared_regs. We need the following callbacks here to allocate
+ * it properly.
+ */
+ .cpu_prepare = intel_pmu_cpu_prepare,
+ .cpu_starting = intel_pmu_cpu_starting,
+ .cpu_dying = intel_pmu_cpu_dying,
+};
+
static __initconst const struct x86_pmu intel_pmu = {
.name = "Intel",
.handle_irq = intel_pmu_handle_irq,
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
index 3001015..4562e9e 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
@@ -1,6 +1,13 @@
/* Nehalem/SandBridge/Haswell uncore support */
#include "perf_event_intel_uncore.h"
+/* Uncore IMC PCI IDs */
+#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
+#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
+#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
+#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
+#define PCI_DEVICE_ID_INTEL_HSW_U_IMC 0x0a04
+
/* SNB event control */
#define SNB_UNC_CTL_EV_SEL_MASK 0x000000ff
#define SNB_UNC_CTL_UMASK_MASK 0x0000ff00
@@ -472,6 +479,10 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_IMC),
.driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
},
+ { /* IMC */
+ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_HSW_U_IMC),
+ .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+ },
{ /* end: all zeroes */ },
};
@@ -502,6 +513,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
IMC_DEV(IVB_IMC, &ivb_uncore_pci_driver), /* 3rd Gen Core processor */
IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core Processor */
+ IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver), /* 4th Gen Core ULT Mobile Processor */
{ /* end marker */ }
};
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 8213da6..6e338e3 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -57,7 +57,7 @@ __visible DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, cpu_tss) = {
.io_bitmap = { [0 ... IO_BITMAP_LONGS] = ~0 },
#endif
};
-EXPORT_PER_CPU_SYMBOL_GPL(cpu_tss);
+EXPORT_PER_CPU_SYMBOL(cpu_tss);
#ifdef CONFIG_X86_64
static DEFINE_PER_CPU(unsigned char, is_idle);
@@ -156,11 +156,13 @@ void flush_thread(void)
/* FPU state will be reallocated lazily at the first use. */
drop_fpu(tsk);
free_thread_xstate(tsk);
- } else if (!used_math()) {
- /* kthread execs. TODO: cleanup this horror. */
- if (WARN_ON(init_fpu(tsk)))
- force_sig(SIGKILL, tsk);
- user_fpu_begin();
+ } else {
+ if (!tsk_used_math(tsk)) {
+ /* kthread execs. TODO: cleanup this horror. */
+ if (WARN_ON(init_fpu(tsk)))
+ force_sig(SIGKILL, tsk);
+ user_fpu_begin();
+ }
restore_init_xstate();
}
}
diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
index e5ecd20..2f355d2 100644
--- a/arch/x86/kernel/pvclock.c
+++ b/arch/x86/kernel/pvclock.c
@@ -141,46 +141,7 @@ void pvclock_read_wallclock(struct pvclock_wall_clock *wall_clock,
set_normalized_timespec(ts, now.tv_sec, now.tv_nsec);
}
-static struct pvclock_vsyscall_time_info *pvclock_vdso_info;
-
-static struct pvclock_vsyscall_time_info *
-pvclock_get_vsyscall_user_time_info(int cpu)
-{
- if (!pvclock_vdso_info) {
- BUG();
- return NULL;
- }
-
- return &pvclock_vdso_info[cpu];
-}
-
-struct pvclock_vcpu_time_info *pvclock_get_vsyscall_time_info(int cpu)
-{
- return &pvclock_get_vsyscall_user_time_info(cpu)->pvti;
-}
-
#ifdef CONFIG_X86_64
-static int pvclock_task_migrate(struct notifier_block *nb, unsigned long l,
- void *v)
-{
- struct task_migration_notifier *mn = v;
- struct pvclock_vsyscall_time_info *pvti;
-
- pvti = pvclock_get_vsyscall_user_time_info(mn->from_cpu);
-
- /* this is NULL when pvclock vsyscall is not initialized */
- if (unlikely(pvti == NULL))
- return NOTIFY_DONE;
-
- pvti->migrate_count++;
-
- return NOTIFY_DONE;
-}
-
-static struct notifier_block pvclock_migrate = {
- .notifier_call = pvclock_task_migrate,
-};
-
/*
* Initialize the generic pvclock vsyscall state. This will allocate
* a/some page(s) for the per-vcpu pvclock information, set up a
@@ -194,17 +155,12 @@ int __init pvclock_init_vsyscall(struct pvclock_vsyscall_time_info *i,
WARN_ON (size != PVCLOCK_VSYSCALL_NR_PAGES*PAGE_SIZE);
- pvclock_vdso_info = i;
-
for (idx = 0; idx <= (PVCLOCK_FIXMAP_END-PVCLOCK_FIXMAP_BEGIN); idx++) {
__set_fixmap(PVCLOCK_FIXMAP_BEGIN + idx,
__pa(i) + (idx*PAGE_SIZE),
PAGE_KERNEL_VVAR);
}
-
- register_task_migration_notifier(&pvclock_migrate);
-
return 0;
}
#endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ed31c31..c73efcd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1669,12 +1669,28 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
&guest_hv_clock, sizeof(guest_hv_clock))))
return 0;
- /*
- * The interface expects us to write an even number signaling that the
- * update is finished. Since the guest won't see the intermediate
- * state, we just increase by 2 at the end.
+ /* This VCPU is paused, but it's legal for a guest to read another
+	 * VCPU's kvmclock, so we really have to follow the specification,
+	 * which says that the version is odd while data is being modified
+	 * and even once it is consistent.
+ *
+ * Version field updates must be kept separate. This is because
+ * kvm_write_guest_cached might use a "rep movs" instruction, and
+ * writes within a string instruction are weakly ordered. So there
+ * are three writes overall.
+ *
+ * As a small optimization, only write the version field in the first
+ * and third write. The vcpu->pv_time cache is still valid, because the
+ * version field is the first in the struct.
*/
- vcpu->hv_clock.version = guest_hv_clock.version + 2;
+ BUILD_BUG_ON(offsetof(struct pvclock_vcpu_time_info, version) != 0);
+
+ vcpu->hv_clock.version = guest_hv_clock.version + 1;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
+
+ smp_wmb();
/* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
@@ -1695,6 +1711,13 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
&vcpu->hv_clock,
sizeof(vcpu->hv_clock));
+
+ smp_wmb();
+
+ vcpu->hv_clock.version++;
+ kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+ &vcpu->hv_clock,
+ sizeof(vcpu->hv_clock.version));
return 0;
}
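The comment above describes a seqcount-style protocol: the version is odd while the host rewrites the record and even once it is consistent. Below is a guest-side sketch of the matching read loop; the structure is simplified and __sync_synchronize() stands in for the architecture's read barriers, so treat it as an illustration rather than the guest's actual code.

/* Simplified guest-side reader for the odd/even version protocol. */
struct pvti_sketch {
	unsigned int version;		/* first field, as the BUILD_BUG_ON asserts */
	unsigned long long system_time;	/* placeholder for the time fields */
};

static unsigned long long read_kvmclock(volatile struct pvti_sketch *p)
{
	unsigned int v;
	unsigned long long t;

	do {
		do {
			v = p->version;
		} while (v & 1);	/* odd: update in progress, retry */
		__sync_synchronize();	/* read the data only after the version */
		t = p->system_time;
		__sync_synchronize();	/* and re-check the version afterwards */
	} while (p->version != v);

	return t;
}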
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 5ead4d6c..70e7444 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -351,18 +351,20 @@ int arch_ioremap_pmd_supported(void)
*/
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
- void *addr;
- unsigned long start = phys & PAGE_MASK;
+ unsigned long start = phys & PAGE_MASK;
+ unsigned long offset = phys & ~PAGE_MASK;
+ unsigned long vaddr;
/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
if (page_is_ram(start >> PAGE_SHIFT))
return __va(phys);
- addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
- if (addr)
- addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));
+ vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+ /* Only add the offset on success and return NULL if the ioremap() failed: */
+ if (vaddr)
+ vaddr += offset;
- return addr;
+ return (void *)vaddr;
}
void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index e469598..d939633 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
kfree(info);
}
+/*
+ * An IO port or MMIO resource assigned to a PCI host bridge may be
+ * consumed by the host bridge itself or available to its child
+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
+ * to tell whether the resource is consumed by the host bridge itself,
+ * but firmware hasn't used that bit consistently, so we can't rely on it.
+ *
+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
+ * to be available to child bus/devices except one special case:
+ * IO port [0xCF8-0xCFF] is consumed by the host bridge itself
+ * to access PCI configuration space.
+ *
+ * So explicitly filter out the PCI CFG IO ports [0xCF8-0xCFF].
+ */
+static bool resource_is_pcicfg_ioport(struct resource *res)
+{
+ return (res->flags & IORESOURCE_IO) &&
+ res->start == 0xCF8 && res->end == 0xCFF;
+}
+
static void probe_pci_root_info(struct pci_root_info *info,
struct acpi_device *device,
int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
"no IO and memory resources present in _CRS\n");
else
resource_list_for_each_entry_safe(entry, tmp, list) {
- if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
- (entry->res->flags & IORESOURCE_DISABLED))
+ if ((entry->res->flags & IORESOURCE_DISABLED) ||
+ resource_is_pcicfg_ioport(entry->res))
resource_list_destroy_entry(entry);
else
entry->res->name = info->name;
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index 40d2473..9793322 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -82,15 +82,18 @@ static notrace cycle_t vread_pvclock(int *mode)
cycle_t ret;
u64 last;
u32 version;
- u32 migrate_count;
u8 flags;
unsigned cpu, cpu1;
/*
- * When looping to get a consistent (time-info, tsc) pair, we
- * also need to deal with the possibility we can switch vcpus,
- * so make sure we always re-fetch time-info for the current vcpu.
+ * Note: hypervisor must guarantee that:
+ * 1. cpu ID number maps 1:1 to per-CPU pvclock time info.
+	 * 2. per-CPU pvclock time info is updated if the
+	 *    underlying CPU changes.
+	 * 3. the version is increased whenever the underlying
+	 *    CPU changes.
+ *
*/
do {
cpu = __getcpu() & VGETCPU_CPU_MASK;
@@ -99,27 +102,20 @@ static notrace cycle_t vread_pvclock(int *mode)
* __getcpu() calls (Gleb).
*/
- /* Make sure migrate_count will change if we leave the VCPU. */
- do {
- pvti = get_pvti(cpu);
- migrate_count = pvti->migrate_count;
-
- cpu1 = cpu;
- cpu = __getcpu() & VGETCPU_CPU_MASK;
- } while (unlikely(cpu != cpu1));
+ pvti = get_pvti(cpu);
version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
/*
* Test we're still on the cpu as well as the version.
- * - We must read TSC of pvti's VCPU.
- * - KVM doesn't follow the versioning protocol, so data could
- * change before version if we left the VCPU.
+ * We could have been migrated just after the first
+ * vgetcpu but before fetching the version, so we
+ * wouldn't notice a version change.
*/
- smp_rmb();
- } while (unlikely((pvti->pvti.version & 1) ||
- pvti->pvti.version != version ||
- pvti->migrate_count != migrate_count));
+ cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+ } while (unlikely(cpu != cpu1 ||
+ (pvti->pvti.version & 1) ||
+ pvti->pvti.version != version));
if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
*mode = VCLOCK_NONE;
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 94578ef..46957ea 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1760,6 +1760,9 @@ static struct notifier_block xen_hvm_cpu_notifier = {
static void __init xen_hvm_guest_init(void)
{
+ if (xen_pv_domain())
+ return;
+
init_hvm_pv_info();
xen_hvm_init_shared_info();
@@ -1775,6 +1778,7 @@ static void __init xen_hvm_guest_init(void)
xen_hvm_init_time_ops();
xen_hvm_init_mmu_ops();
}
+#endif
static bool xen_nopv = false;
static __init int xen_parse_nopv(char *arg)
@@ -1784,14 +1788,11 @@ static __init int xen_parse_nopv(char *arg)
}
early_param("xen_nopv", xen_parse_nopv);
-static uint32_t __init xen_hvm_platform(void)
+static uint32_t __init xen_platform(void)
{
if (xen_nopv)
return 0;
- if (xen_pv_domain())
- return 0;
-
return xen_cpuid_base();
}
@@ -1809,11 +1810,19 @@ bool xen_hvm_need_lapic(void)
}
EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
-const struct hypervisor_x86 x86_hyper_xen_hvm __refconst = {
- .name = "Xen HVM",
- .detect = xen_hvm_platform,
+static void xen_set_cpu_features(struct cpuinfo_x86 *c)
+{
+ if (xen_pv_domain())
+ clear_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+}
+
+const struct hypervisor_x86 x86_hyper_xen = {
+ .name = "Xen",
+ .detect = xen_platform,
+#ifdef CONFIG_XEN_PVHVM
.init_platform = xen_hvm_guest_init,
+#endif
.x2apic_available = xen_x2apic_para_available,
+ .set_cpu_features = xen_set_cpu_features,
};
-EXPORT_SYMBOL(x86_hyper_xen_hvm);
-#endif
+EXPORT_SYMBOL(x86_hyper_xen);
diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c
index d949769..53b4c08 100644
--- a/arch/x86/xen/suspend.c
+++ b/arch/x86/xen/suspend.c
@@ -88,7 +88,17 @@ static void xen_vcpu_notify_restore(void *data)
tick_resume_local();
}
+static void xen_vcpu_notify_suspend(void *data)
+{
+ tick_suspend_local();
+}
+
void xen_arch_resume(void)
{
on_each_cpu(xen_vcpu_notify_restore, NULL, 1);
}
+
+void xen_arch_suspend(void)
+{
+ on_each_cpu(xen_vcpu_notify_suspend, NULL, 1);
+}
diff --git a/block/blk-core.c b/block/blk-core.c
index fd154b9..7871603 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
q->queue_lock = &q->__queue_lock;
spin_unlock_irq(lock);
+ bdi_destroy(&q->backing_dev_info);
+
/* @q is and will stay empty, shutdown and put */
blk_put_queue(q);
}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ade8a2d..e68b71b 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
data.next = blk_rq_timeout(round_jiffies_up(data.next));
mod_timer(&q->timeout, data.next);
} else {
- queue_for_each_hw_ctx(q, hctx, i)
- blk_mq_tag_idle(hctx);
+ queue_for_each_hw_ctx(q, hctx, i) {
+ /* the hctx may be unmapped, so check it here */
+ if (blk_mq_hw_queue_mapped(hctx))
+ blk_mq_tag_idle(hctx);
+ }
}
}
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
spin_lock(&hctx->lock);
list_splice(&rq_list, &hctx->dispatch);
spin_unlock(&hctx->lock);
+ /*
+		 * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY, but
+ * it's possible the queue is stopped and restarted again
+ * before this. Queue restart will dispatch requests. And since
+ * requests in rq_list aren't added into hctx->dispatch yet,
+ * the requests in rq_list might get lost.
+ *
+ * blk_mq_run_hw_queue() already checks the STOPPED bit
+		 */
+ blk_mq_run_hw_queue(hctx, true);
}
}
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
return NOTIFY_OK;
}
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
- struct request_queue *q = hctx->queue;
- struct blk_mq_tag_set *set = q->tag_set;
-
- if (set->tags[hctx->queue_num])
- return NOTIFY_OK;
-
- set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
- if (!set->tags[hctx->queue_num])
- return NOTIFY_STOP;
-
- hctx->tags = set->tags[hctx->queue_num];
- return NOTIFY_OK;
-}
-
static int blk_mq_hctx_notify(void *data, unsigned long action,
unsigned int cpu)
{
@@ -1594,8 +1591,11 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
return blk_mq_hctx_cpu_offline(hctx, cpu);
- else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
- return blk_mq_hctx_cpu_online(hctx, cpu);
+
+ /*
+ * In case of CPU online, tags may be reallocated
+ * in blk_mq_map_swqueue() after mapping is updated.
+ */
return NOTIFY_OK;
}
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
unsigned int i;
struct blk_mq_hw_ctx *hctx;
struct blk_mq_ctx *ctx;
+ struct blk_mq_tag_set *set = q->tag_set;
queue_for_each_hw_ctx(q, hctx, i) {
cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
* disable it and free the request entries.
*/
if (!hctx->nr_ctx) {
- struct blk_mq_tag_set *set = q->tag_set;
-
if (set->tags[i]) {
blk_mq_free_rq_map(set, set->tags[i], i);
set->tags[i] = NULL;
- hctx->tags = NULL;
}
+ hctx->tags = NULL;
continue;
}
+		/* an unmapped hw queue can be remapped after the CPU topology changes */
+ if (!set->tags[i])
+ set->tags[i] = blk_mq_init_rq_map(set, i);
+ hctx->tags = set->tags[i];
+ WARN_ON(!hctx->tags);
+
/*
* Set the map size to the number of mapped software queues.
* This is more accurate and more efficient than looping
@@ -2090,9 +2095,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
*/
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_freeze_queue_start(q);
- list_for_each_entry(q, &all_q_list, all_q_node)
+ list_for_each_entry(q, &all_q_list, all_q_node) {
blk_mq_freeze_queue_wait(q);
+ /*
+ * timeout handler can't touch hw queue during the
+ * reinitialization
+ */
+ del_timer_sync(&q->timeout);
+ }
+
list_for_each_entry(q, &all_q_list, all_q_node)
blk_mq_queue_reinit(q);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index faaf36a..2b8fd30 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
blk_trace_shutdown(q);
- bdi_destroy(&q->backing_dev_info);
-
ida_simple_remove(&blk_queue_ida, q->id);
call_rcu(&q->rcu_head, blk_free_queue_rcu);
}
diff --git a/block/bounce.c b/block/bounce.c
index ab21ba2..ed9dd80 100644
--- a/block/bounce.c
+++ b/block/bounce.c
@@ -221,8 +221,8 @@ bounce:
if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
continue;
- inc_zone_page_state(to->bv_page, NR_BOUNCE);
to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+ inc_zone_page_state(to->bv_page, NR_BOUNCE);
if (rw == WRITE) {
char *vto, *vfrom;
diff --git a/block/elevator.c b/block/elevator.c
index 59794d0..8985038 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
if (unlikely(!eq))
- goto err;
+ return NULL;
eq->type = e;
kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
hash_init(eq->hash);
return eq;
-err:
- kfree(eq);
- elevator_put(e);
- return NULL;
}
EXPORT_SYMBOL(elevator_alloc);
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index b193f84..ff6d8ad 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
{"PNPb006"},
/* cs423x-pnpbios */
{"CSC0100"},
+ {"CSC0103"},
+ {"CSC0110"},
{"CSC0000"},
{"GIM0100"}, /* Guillemot Turtlebeach something appears to be cs4232 compatible */
/* es18xx-pnpbios */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 5589a6e..8244f01 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
* @ares: Input ACPI resource object.
* @types: Valid resource types of IORESOURCE_XXX
*
- * This is a hepler function to support acpi_dev_get_resources(), which filters
+ * This is a helper function to support acpi_dev_get_resources(), which filters
* ACPI resource objects according to resource types.
*/
int acpi_dev_filter_resource_type(struct acpi_resource *ares,
diff --git a/drivers/acpi/sbs.c b/drivers/acpi/sbs.c
index cd82762..01504c8 100644
--- a/drivers/acpi/sbs.c
+++ b/drivers/acpi/sbs.c
@@ -684,7 +684,7 @@ static int acpi_sbs_add(struct acpi_device *device)
if (!sbs_manager_broken) {
result = acpi_manager_get_info(sbs);
if (!result) {
- sbs->manager_present = 0;
+ sbs->manager_present = 1;
for (id = 0; id < MAX_SBS_BAT; ++id)
if ((sbs->batteries_supported & (1 << id)))
acpi_battery_add(sbs, id);
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index 26e5b50..bf034f8 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/interrupt.h>
+#include <linux/dmi.h>
#include "sbshc.h"
#define PREFIX "ACPI: "
@@ -87,6 +88,8 @@ enum acpi_smb_offset {
ACPI_SMB_ALARM_DATA = 0x26, /* 2 bytes alarm data */
};
+static bool macbook;
+
static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
{
return ec_read(hc->offset + address, data);
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
}
mutex_lock(&hc->lock);
+ if (macbook)
+ udelay(5);
if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
goto end;
if (temp) {
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
acpi_handle handle, acpi_ec_query_func func,
void *data);
+static int macbook_dmi_match(const struct dmi_system_id *d)
+{
+ pr_debug("Detected MacBook, enabling workaround\n");
+ macbook = true;
+ return 0;
+}
+
+static struct dmi_system_id acpi_smbus_dmi_table[] = {
+ { macbook_dmi_match, "Apple MacBook", {
+ DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+ },
+ { },
+};
+
static int acpi_smbus_hc_add(struct acpi_device *device)
{
int status;
unsigned long long val;
struct acpi_smb_hc *hc;
+ dmi_check_system(acpi_smbus_dmi_table);
+
if (!device)
return -EINVAL;
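
For reference, the sbshc.c hunk above uses the stock kernel DMI-quirk shape: a file-scope flag, a dmi_system_id table whose callback sets it, and a dmi_check_system() call at probe time that gates the udelay() workaround. A minimal sketch of that same pattern, with hypothetical my_* names rather than the driver's own, could look like:

    #include <linux/dmi.h>
    #include <linux/delay.h>

    static bool my_quirk;    /* set when the DMI table matches this machine */

    static int my_quirk_match(const struct dmi_system_id *d)
    {
            my_quirk = true;
            return 0;
    }

    static const struct dmi_system_id my_dmi_table[] = {
            { my_quirk_match, "Apple MacBook", {
                    DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
                    DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
            },
            { },    /* terminator */
    };

    static void my_transaction(void)
    {
            if (my_quirk)
                    udelay(5);    /* settle time before talking to the EC, as above */
            /* ... issue the SMBus transaction ... */
    }

    static int my_probe(void)
    {
            dmi_check_system(my_dmi_table);    /* run the table once at probe */
            return 0;
    }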
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index ae3fcb4..d7173cb 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1620,8 +1620,8 @@ out:
static void loop_remove(struct loop_device *lo)
{
- del_gendisk(lo->lo_disk);
blk_cleanup_queue(lo->lo_queue);
+ del_gendisk(lo->lo_disk);
blk_mq_free_tag_set(&lo->tag_set);
put_disk(lo->lo_disk);
kfree(lo);
diff --git a/drivers/block/nvme-scsi.c b/drivers/block/nvme-scsi.c
index 6b736b0..88f13c5 100644
--- a/drivers/block/nvme-scsi.c
+++ b/drivers/block/nvme-scsi.c
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
u8 *inq_response, int alloc_len)
{
- __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+ __be32 max_sectors = cpu_to_be32(
+ nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
__be32 discard_desc_count = cpu_to_be32(0x100);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 8125233..ec6c5c6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -2264,6 +2264,11 @@ static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
result, xferred);
if (!img_request->result)
img_request->result = result;
+ /*
+ * Need to end I/O on the entire obj_request worth of
+ * bytes in case of error.
+ */
+ xferred = obj_request->length;
}
/* Image object requests don't own their page array */
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index bd2b3bb..713fc9f 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -265,17 +265,6 @@ static void put_persistent_gnt(struct xen_blkif *blkif,
atomic_dec(&blkif->persistent_gnt_in_use);
}
-static void free_persistent_gnts_unmap_callback(int result,
- struct gntab_unmap_queue_data *data)
-{
- struct completion *c = data->data;
-
- /* BUG_ON used to reproduce existing behaviour,
- but is this the best way to deal with this? */
- BUG_ON(result);
- complete(c);
-}
-
static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
unsigned int num)
{
@@ -285,12 +274,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
struct rb_node *n;
int segs_to_unmap = 0;
struct gntab_unmap_queue_data unmap_data;
- struct completion unmap_completion;
- init_completion(&unmap_completion);
-
- unmap_data.data = &unmap_completion;
- unmap_data.done = &free_persistent_gnts_unmap_callback;
unmap_data.pages = pages;
unmap_data.unmap_ops = unmap;
unmap_data.kunmap_ops = NULL;
@@ -310,8 +294,7 @@ static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
!rb_next(&persistent_gnt->node)) {
unmap_data.count = segs_to_unmap;
- gnttab_unmap_refs_async(&unmap_data);
- wait_for_completion(&unmap_completion);
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
@@ -329,8 +312,13 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct persistent_gnt *persistent_gnt;
- int ret, segs_to_unmap = 0;
+ int segs_to_unmap = 0;
struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);
+ struct gntab_unmap_queue_data unmap_data;
+
+ unmap_data.pages = pages;
+ unmap_data.unmap_ops = unmap;
+ unmap_data.kunmap_ops = NULL;
while(!list_empty(&blkif->persistent_purge_list)) {
persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
@@ -346,17 +334,16 @@ void xen_blkbk_unmap_purged_grants(struct work_struct *work)
pages[segs_to_unmap] = persistent_gnt->page;
if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
- ret = gnttab_unmap_refs(unmap, NULL, pages,
- segs_to_unmap);
- BUG_ON(ret);
+ unmap_data.count = segs_to_unmap;
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
segs_to_unmap = 0;
}
kfree(persistent_gnt);
}
if (segs_to_unmap > 0) {
- ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
- BUG_ON(ret);
+ unmap_data.count = segs_to_unmap;
+ BUG_ON(gnttab_unmap_refs_sync(&unmap_data));
put_free_pages(blkif, pages, segs_to_unmap);
}
}
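
Both xen-blkback hunks above replace the open-coded completion handshake with gnttab_unmap_refs_sync(). Judging from the code being removed, the synchronous helper presumably does the same work internally — queue the async unmap and block until its callback reports the result — roughly along these lines (a sketch of the presumed helper, not a quote of the grant-table code; wrapper names are illustrative):

    #include <linux/completion.h>
    #include <xen/grant_table.h>

    struct unmap_refs_callback_data {
            struct completion completion;
            int result;
    };

    static void unmap_refs_callback(int result,
                                    struct gntab_unmap_queue_data *data)
    {
            struct unmap_refs_callback_data *d = data->data;

            d->result = result;             /* hand the outcome back to the waiter */
            complete(&d->completion);
    }

    int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
    {
            struct unmap_refs_callback_data data;

            init_completion(&data.completion);
            item->data = &data;
            item->done = &unmap_refs_callback;
            gnttab_unmap_refs_async(item);  /* same async entry point the old code used */
            wait_for_completion(&data.completion);

            return data.result;
    }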
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index c94386a..8dcbced 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -74,6 +74,27 @@ static inline struct zram *dev_to_zram(struct device *dev)
return (struct zram *)dev_to_disk(dev)->private_data;
}
+static ssize_t compact_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t len)
+{
+ unsigned long nr_migrated;
+ struct zram *zram = dev_to_zram(dev);
+ struct zram_meta *meta;
+
+ down_read(&zram->init_lock);
+ if (!init_done(zram)) {
+ up_read(&zram->init_lock);
+ return -EINVAL;
+ }
+
+ meta = zram->meta;
+ nr_migrated = zs_compact(meta->mem_pool);
+ atomic64_add(nr_migrated, &zram->stats.num_migrated);
+ up_read(&zram->init_lock);
+
+ return len;
+}
+
static ssize_t disksize_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -1038,6 +1059,7 @@ static const struct block_device_operations zram_devops = {
.owner = THIS_MODULE
};
+static DEVICE_ATTR_WO(compact);
static DEVICE_ATTR_RW(disksize);
static DEVICE_ATTR_RO(initstate);
static DEVICE_ATTR_WO(reset);
@@ -1114,6 +1136,7 @@ static struct attribute *zram_disk_attrs[] = {
&dev_attr_num_writes.attr,
&dev_attr_failed_reads.attr,
&dev_attr_failed_writes.attr,
+ &dev_attr_compact.attr,
&dev_attr_invalid_io.attr,
&dev_attr_notify_free.attr,
&dev_attr_zero_pages.attr,
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index b854125..5340604 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -660,7 +660,7 @@ validate_group(struct perf_event *event)
* Initialise the fake PMU. We only need to populate the
* used_mask for the purposes of validation.
*/
- .used_mask = CPU_BITS_NONE,
+ .used_mask = { 0 },
};
if (!validate_event(event->pmu, &fake_pmu, leader))
diff --git a/drivers/bus/omap_l3_noc.c b/drivers/bus/omap_l3_noc.c
index 11f7982..ebee57d 100644
--- a/drivers/bus/omap_l3_noc.c
+++ b/drivers/bus/omap_l3_noc.c
@@ -1,7 +1,7 @@
/*
* OMAP L3 Interconnect error handling driver
*
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* Sricharan <r.sricharan@ti.com>
*
@@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
}
static const struct of_device_id l3_noc_match[] = {
- {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+ {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+ {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
{.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
{.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
{},
diff --git a/drivers/bus/omap_l3_noc.h b/drivers/bus/omap_l3_noc.h
index 9525458..73431f8 100644
--- a/drivers/bus/omap_l3_noc.h
+++ b/drivers/bus/omap_l3_noc.h
@@ -1,7 +1,7 @@
/*
* OMAP L3 Interconnect error handling driver header
*
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
* Santosh Shilimkar <santosh.shilimkar@ti.com>
* sricharan <r.sricharan@ti.com>
*
@@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
};
-static struct l3_target_data omap_l3_target_data_clk3[] = {
- {0x0100, "EMUSS",},
- {0x0300, "DEBUG SOURCE",},
- {0x0, "HOST CLK3",},
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+ {0x0100, "DEBUGSS",},
};
-static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
.offset = 0x0200,
- .l3_targ = omap_l3_target_data_clk3,
- .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
+ .l3_targ = omap4_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
};
static struct l3_masters_data omap_l3_masters[] = {
@@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = {
{ 0x32, "USBHOSTFS"}
};
-static struct l3_flagmux_data *omap_l3_flagmux[] = {
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
&omap_l3_flagmux_clk1,
&omap_l3_flagmux_clk2,
- &omap_l3_flagmux_clk3,
+ &omap4_l3_flagmux_clk3,
};
-static const struct omap_l3 omap_l3_data = {
- .l3_flagmux = omap_l3_flagmux,
- .num_modules = ARRAY_SIZE(omap_l3_flagmux),
+static const struct omap_l3 omap4_l3_data = {
+ .l3_flagmux = omap4_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
.l3_masters = omap_l3_masters,
.num_masters = ARRAY_SIZE(omap_l3_masters),
/* The 6 MSBs of register field used to distinguish initiator */
.mst_addr_mask = 0xFC,
};
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+ {0x0100, "L3INSTR",},
+ {0x0300, "DEBUGSS",},
+ {0x0, "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+ .offset = 0x0200,
+ .l3_targ = omap5_l3_target_data_clk3,
+ .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+ &omap_l3_flagmux_clk1,
+ &omap_l3_flagmux_clk2,
+ &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+ .l3_flagmux = omap5_l3_flagmux,
+ .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+ .l3_masters = omap_l3_masters,
+ .num_masters = ARRAY_SIZE(omap_l3_masters),
+ /* The 6 MSBs of register field used to distinguish initiator */
+ .mst_addr_mask = 0x7E0,
+};
+
/* DRA7 data */
static struct l3_target_data dra_l3_target_data_clk1[] = {
{0x2a00, "AES1",},
@@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
static struct l3_target_data dra_l3_target_data_clk2[] = {
{0x0, "HOST CLK1",},
- {0x0, "HOST CLK2",},
+ {0x800000, "HOST CLK2",},
{0xdead, L3_TARGET_NOT_SUPPORTED,},
{0x3400, "SHA2_2",},
{0x0900, "BB2D",},
diff --git a/drivers/char/hw_random/bcm63xx-rng.c b/drivers/char/hw_random/bcm63xx-rng.c
index d1494ec..4b31f13 100644
--- a/drivers/char/hw_random/bcm63xx-rng.c
+++ b/drivers/char/hw_random/bcm63xx-rng.c
@@ -57,7 +57,7 @@ static void bcm63xx_rng_cleanup(struct hwrng *rng)
val &= ~RNG_EN;
__raw_writel(val, priv->regs + RNG_CTRL);
- clk_didsable_unprepare(prov->clk);
+ clk_disable_unprepare(priv->clk);
}
static int bcm63xx_rng_data_present(struct hwrng *rng, int wait)
@@ -97,14 +97,14 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
priv->rng.name = pdev->name;
priv->rng.init = bcm63xx_rng_init;
priv->rng.cleanup = bcm63xx_rng_cleanup;
- prov->rng.data_present = bcm63xx_rng_data_present;
+ priv->rng.data_present = bcm63xx_rng_data_present;
priv->rng.data_read = bcm63xx_rng_data_read;
priv->clk = devm_clk_get(&pdev->dev, "ipsec");
if (IS_ERR(priv->clk)) {
- error = PTR_ERR(priv->clk);
- dev_err(&pdev->dev, "no clock for device: %d\n", error);
- return error;
+ ret = PTR_ERR(priv->clk);
+ dev_err(&pdev->dev, "no clock for device: %d\n", ret);
+ return ret;
}
if (!devm_request_mem_region(&pdev->dev, r->start,
@@ -120,11 +120,11 @@ static int bcm63xx_rng_probe(struct platform_device *pdev)
return -ENOMEM;
}
- error = devm_hwrng_register(&pdev->dev, &priv->rng);
- if (error) {
+ ret = devm_hwrng_register(&pdev->dev, &priv->rng);
+ if (ret) {
dev_err(&pdev->dev, "failed to register rng device: %d\n",
- error);
- return error;
+ ret);
+ return ret;
}
dev_info(&pdev->dev, "registered RNG driver\n");
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 9bb5928..bf75f63 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2000,7 +2000,7 @@ static int smi_ipmb_proc_show(struct seq_file *m, void *v)
seq_printf(m, " %x", intf->channels[i].address);
seq_putc(m, '\n');
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_ipmb_proc_open(struct inode *inode, struct file *file)
@@ -2023,7 +2023,7 @@ static int smi_version_proc_show(struct seq_file *m, void *v)
ipmi_version_major(&intf->bmc->id),
ipmi_version_minor(&intf->bmc->id));
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_version_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5e90a18..8a45e92 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -942,8 +942,7 @@ static void sender(void *send_info,
* If we are running to completion, start it and run
* transactions until everything is clear.
*/
- smi_info->curr_msg = msg;
- smi_info->waiting_msg = NULL;
+ smi_info->waiting_msg = msg;
/*
* Run to completion means we are single-threaded, no
@@ -2244,7 +2243,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
acpi_handle handle;
acpi_status status;
unsigned long long tmp;
- int rv;
+ int rv = -EINVAL;
acpi_dev = pnp_acpi_device(dev);
if (!acpi_dev)
@@ -2262,8 +2261,10 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
/* _IFT tells us the interface type: KCS, BT, etc */
status = acpi_evaluate_integer(handle, "_IFT", NULL, &tmp);
- if (ACPI_FAILURE(status))
+ if (ACPI_FAILURE(status)) {
+ dev_err(&dev->dev, "Could not find ACPI IPMI interface type\n");
goto err_free;
+ }
switch (tmp) {
case 1:
@@ -2276,6 +2277,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
info->si_type = SI_BT;
break;
case 4: /* SSIF, just ignore */
+ rv = -ENODEV;
goto err_free;
default:
dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
@@ -2336,7 +2338,7 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
err_free:
kfree(info);
- return -EINVAL;
+ return rv;
}
static void ipmi_pnp_remove(struct pnp_dev *dev)
@@ -3080,7 +3082,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
seq_printf(m, "%s\n", si_to_str[smi->si_type]);
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -3153,7 +3155,7 @@ static int smi_params_proc_show(struct seq_file *m, void *v)
smi->irq,
smi->slave_addr);
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_params_proc_open(struct inode *inode, struct file *file)
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index f40e3bd..207689c 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -31,7 +31,6 @@
* interface into the I2C driver, I believe.
*/
-#include <linux/version.h>
#if defined(MODVERSIONS)
#include <linux/modversions.h>
#endif
@@ -166,6 +165,9 @@ enum ssif_stat_indexes {
/* Number of watchdog pretimeouts. */
SSIF_STAT_watchdog_pretimeouts,
+ /* Number of alerts received. */
+ SSIF_STAT_alerts,
+
/* Always add statistics before this value, it must be last. */
SSIF_NUM_STATS
};
@@ -214,7 +216,16 @@ struct ssif_info {
#define WDT_PRE_TIMEOUT_INT 0x08
unsigned char msg_flags;
+ u8 global_enables;
bool has_event_buffer;
+ bool supports_alert;
+
+ /*
+ * Used to tell what we should do with alerts. If we are
+ * waiting on a response, read the data immediately.
+ */
+ bool got_alert;
+ bool waiting_alert;
/*
* If set to true, this will request events the next time the
@@ -478,13 +489,13 @@ static int ipmi_ssif_thread(void *data)
if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
result = i2c_smbus_write_block_data(
- ssif_info->client, SSIF_IPMI_REQUEST,
+ ssif_info->client, ssif_info->i2c_command,
ssif_info->i2c_data[0],
ssif_info->i2c_data + 1);
ssif_info->done_handler(ssif_info, result, NULL, 0);
} else {
result = i2c_smbus_read_block_data(
- ssif_info->client, SSIF_IPMI_RESPONSE,
+ ssif_info->client, ssif_info->i2c_command,
ssif_info->i2c_data);
if (result < 0)
ssif_info->done_handler(ssif_info, result,
@@ -518,15 +529,12 @@ static int ssif_i2c_send(struct ssif_info *ssif_info,
static void msg_done_handler(struct ssif_info *ssif_info, int result,
unsigned char *data, unsigned int len);
-static void retry_timeout(unsigned long data)
+static void start_get(struct ssif_info *ssif_info)
{
- struct ssif_info *ssif_info = (void *) data;
int rv;
- if (ssif_info->stopping)
- return;
-
ssif_info->rtc_us_timer = 0;
+ ssif_info->multi_pos = 0;
rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
SSIF_IPMI_RESPONSE,
@@ -540,6 +548,46 @@ static void retry_timeout(unsigned long data)
}
}
+static void retry_timeout(unsigned long data)
+{
+ struct ssif_info *ssif_info = (void *) data;
+ unsigned long oflags, *flags;
+ bool waiting;
+
+ if (ssif_info->stopping)
+ return;
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ waiting = ssif_info->waiting_alert;
+ ssif_info->waiting_alert = false;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+
+ if (waiting)
+ start_get(ssif_info);
+}
+
+
+static void ssif_alert(struct i2c_client *client, unsigned int data)
+{
+ struct ssif_info *ssif_info = i2c_get_clientdata(client);
+ unsigned long oflags, *flags;
+ bool do_get = false;
+
+ ssif_inc_stat(ssif_info, alerts);
+
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ if (ssif_info->waiting_alert) {
+ ssif_info->waiting_alert = false;
+ del_timer(&ssif_info->retry_timer);
+ do_get = true;
+ } else if (ssif_info->curr_msg) {
+ ssif_info->got_alert = true;
+ }
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ if (do_get)
+ start_get(ssif_info);
+}
+
static int start_resend(struct ssif_info *ssif_info);
static void msg_done_handler(struct ssif_info *ssif_info, int result,
@@ -559,9 +607,12 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
if (ssif_info->retries_left > 0) {
ssif_inc_stat(ssif_info, receive_retries);
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ ssif_info->waiting_alert = true;
+ ssif_info->rtc_us_timer = SSIF_MSG_USEC;
mod_timer(&ssif_info->retry_timer,
jiffies + SSIF_MSG_JIFFIES);
- ssif_info->rtc_us_timer = SSIF_MSG_USEC;
+ ipmi_ssif_unlock_cond(ssif_info, flags);
return;
}
@@ -581,9 +632,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
ssif_inc_stat(ssif_info, received_message_parts);
/* Remove the multi-part read marker. */
- for (i = 0; i < (len-2); i++)
- ssif_info->data[i] = data[i+2];
len -= 2;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i] = data[i+2];
ssif_info->multi_len = len;
ssif_info->multi_pos = 1;
@@ -610,9 +661,9 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
goto continue_op;
}
- blocknum = data[ssif_info->multi_len];
+ blocknum = data[0];
- if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
+ if (ssif_info->multi_len + len - 1 > IPMI_MAX_MSG_LENGTH) {
/* Received message too big, abort the operation. */
result = -E2BIG;
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
@@ -622,15 +673,15 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
}
/* Remove the blocknum from the data. */
- for (i = 0; i < (len-1); i++)
- ssif_info->data[i+ssif_info->multi_len] = data[i+1];
len--;
+ for (i = 0; i < len; i++)
+ ssif_info->data[i + ssif_info->multi_len] = data[i + 1];
ssif_info->multi_len += len;
if (blocknum == 0xff) {
/* End of read */
len = ssif_info->multi_len;
data = ssif_info->data;
- } else if ((blocknum+1) != ssif_info->multi_pos) {
+ } else if (blocknum + 1 != ssif_info->multi_pos) {
/*
* Out of sequence block, just abort. Block
* numbers start at zero for the second block,
@@ -650,7 +701,7 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
if (rv < 0) {
if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
pr_info(PFX
- "Error from i2c_non_blocking_op(2)\n");
+ "Error from ssif_i2c_send\n");
result = -EIO;
} else
@@ -830,7 +881,11 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
}
if (ssif_info->multi_data) {
- /* In the middle of a multi-data write. */
+ /*
+ * In the middle of a multi-data write. See the comment
+ * in the SSIF_MULTI_n_PART case in the probe function
+ * for details on the intricacies of this.
+ */
int left;
ssif_inc_stat(ssif_info, sent_messages_parts);
@@ -864,15 +919,32 @@ static void msg_written_handler(struct ssif_info *ssif_info, int result,
msg_done_handler(ssif_info, -EIO, NULL, 0);
}
} else {
+ unsigned long oflags, *flags;
+ bool got_alert;
+
ssif_inc_stat(ssif_info, sent_messages);
ssif_inc_stat(ssif_info, sent_messages_parts);
- /* Wait a jiffie then request the next message */
- ssif_info->retries_left = SSIF_RECV_RETRIES;
- ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
- mod_timer(&ssif_info->retry_timer,
- jiffies + SSIF_MSG_PART_JIFFIES);
- return;
+ flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
+ got_alert = ssif_info->got_alert;
+ if (got_alert) {
+ ssif_info->got_alert = false;
+ ssif_info->waiting_alert = false;
+ }
+
+ if (got_alert) {
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ /* The alert already happened, try now. */
+ retry_timeout((unsigned long) ssif_info);
+ } else {
+ /* Wait a jiffie then request the next message */
+ ssif_info->waiting_alert = true;
+ ssif_info->retries_left = SSIF_RECV_RETRIES;
+ ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
+ mod_timer(&ssif_info->retry_timer,
+ jiffies + SSIF_MSG_PART_JIFFIES);
+ ipmi_ssif_unlock_cond(ssif_info, flags);
+ }
}
}
@@ -881,6 +953,8 @@ static int start_resend(struct ssif_info *ssif_info)
int rv;
int command;
+ ssif_info->got_alert = false;
+
if (ssif_info->data_len > 32) {
command = SSIF_IPMI_MULTI_PART_REQUEST_START;
ssif_info->multi_data = ssif_info->data;
@@ -915,7 +989,7 @@ static int start_send(struct ssif_info *ssif_info,
return -E2BIG;
ssif_info->retries_left = SSIF_SEND_RETRIES;
- memcpy(ssif_info->data+1, data, len);
+ memcpy(ssif_info->data + 1, data, len);
ssif_info->data_len = len;
return start_resend(ssif_info);
}
@@ -1200,7 +1274,7 @@ static int smi_type_proc_show(struct seq_file *m, void *v)
{
seq_puts(m, "ssif\n");
- return seq_has_overflowed(m);
+ return 0;
}
static int smi_type_proc_open(struct inode *inode, struct file *file)
@@ -1243,6 +1317,8 @@ static int smi_stats_proc_show(struct seq_file *m, void *v)
ssif_get_stat(ssif_info, events));
seq_printf(m, "watchdog_pretimeouts: %u\n",
ssif_get_stat(ssif_info, watchdog_pretimeouts));
+ seq_printf(m, "alerts: %u\n",
+ ssif_get_stat(ssif_info, alerts));
return 0;
}
@@ -1258,6 +1334,23 @@ static const struct file_operations smi_stats_proc_ops = {
.release = single_release,
};
+static int strcmp_nospace(char *s1, char *s2)
+{
+ while (*s1 && *s2) {
+ while (isspace(*s1))
+ s1++;
+ while (isspace(*s2))
+ s2++;
+ if (*s1 > *s2)
+ return 1;
+ if (*s1 < *s2)
+ return -1;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
static struct ssif_addr_info *ssif_info_find(unsigned short addr,
char *adapter_name,
bool match_null_name)
@@ -1272,8 +1365,10 @@ restart:
/* One is NULL and one is not */
continue;
}
- if (strcmp(info->adapter_name, adapter_name))
- /* Names to not match */
+ if (adapter_name &&
+ strcmp_nospace(info->adapter_name,
+ adapter_name))
+ /* Names do not match */
continue;
}
found = info;
@@ -1306,6 +1401,12 @@ static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
return false;
}
+/*
+ * Global enables we care about.
+ */
+#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
+ IPMI_BMC_EVT_MSG_INTR)
+
static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
unsigned char msg[3];
@@ -1391,13 +1492,33 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
break;
case SSIF_MULTI_2_PART:
- if (ssif_info->max_xmit_msg_size > 64)
- ssif_info->max_xmit_msg_size = 64;
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
if (ssif_info->max_recv_msg_size > 62)
ssif_info->max_recv_msg_size = 62;
break;
case SSIF_MULTI_n_PART:
+ /*
+ * The specification is rather confusing at
+ * this point, but I think I understand what
+ * is meant. At least I have a workable
+ * solution. With multi-part messages, you
+ * cannot send a message that is a multiple of
+ * 32-bytes in length, because the start and
+ * middle messages are 32-bytes and the end
+ * message must be at least one byte. You
+ * can't fudge on an extra byte, that would
+ * screw up things like fru data writes. So
+ * we limit the length to 63 bytes. That way
+ * a 32-byte message gets sent as a single
+ * part. A larger message will be a 32-byte
+ * start and the next message is always going
+ * to be 1-31 bytes in length. Not ideal, but
+ * it should work.
+ */
+ if (ssif_info->max_xmit_msg_size > 63)
+ ssif_info->max_xmit_msg_size = 63;
break;
default:
@@ -1407,7 +1528,7 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
} else {
no_support:
/* Assume no multi-part or PEC support */
- pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
+ pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
rv, len, resp[2]);
ssif_info->max_xmit_msg_size = 32;
@@ -1436,6 +1557,8 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
goto found;
}
+ ssif_info->global_enables = resp[3];
+
if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
ssif_info->has_event_buffer = true;
/* buffer is already enabled, nothing to do. */
@@ -1444,18 +1567,37 @@ static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
msg[0] = IPMI_NETFN_APP_REQUEST << 2;
msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
- msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_EVT_MSG_BUFF;
rv = do_cmd(client, 3, msg, &len, resp);
if (rv || (len < 2)) {
- pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
+ pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
rv, len, resp[2]);
rv = 0; /* Not fatal */
goto found;
}
- if (resp[2] == 0)
+ if (resp[2] == 0) {
/* A successful return means the event buffer is supported. */
ssif_info->has_event_buffer = true;
+ ssif_info->global_enables |= IPMI_BMC_EVT_MSG_BUFF;
+ }
+
+ msg[0] = IPMI_NETFN_APP_REQUEST << 2;
+ msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
+ msg[2] = ssif_info->global_enables | IPMI_BMC_RCV_MSG_INTR;
+ rv = do_cmd(client, 3, msg, &len, resp);
+ if (rv || (len < 2)) {
+ pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
+ rv, len, resp[2]);
+ rv = 0; /* Not fatal */
+ goto found;
+ }
+
+ if (resp[2] == 0) {
+ /* A successful return means the alert is supported. */
+ ssif_info->supports_alert = true;
+ ssif_info->global_enables |= IPMI_BMC_RCV_MSG_INTR;
+ }
found:
ssif_info->intf_num = atomic_inc_return(&next_intf);
@@ -1813,6 +1955,7 @@ static struct i2c_driver ssif_i2c_driver = {
},
.probe = ssif_probe,
.remove = ssif_remove,
+ .alert = ssif_alert,
.id_table = ssif_id,
.detect = ssif_detect
};
@@ -1832,7 +1975,7 @@ static int init_ipmi_ssif(void)
rv = new_ssif_client(addr[i], adapter_name[i],
dbg[i], slave_addrs[i],
SI_HARDCODED);
- if (!rv)
+ if (rv)
pr_err(PFX
"Couldn't add hardcoded device at addr 0x%x\n",
addr[i]);
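
The 63-byte transmit cap added in both multi-part cases above is the arithmetic from the SSIF_MULTI_n_PART comment made concrete; spelled out (assuming 32-byte start/middle parts and a 1-31 byte end part, as that comment states):

    len = 32      -> fits in a single-part write
    len = 33..63  -> 32-byte start + 1..31-byte end, always a valid split
    len = 64      -> 32-byte start + 32-byte middle + empty end part, which is not
                     allowed, and padding out the end would corrupt e.g. FRU writes

Hence max_xmit_msg_size is clamped to 63 so no message ever lands on a multiple of 32 bytes.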
diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
index 7a73a27..61c417b 100644
--- a/drivers/cpuidle/cpuidle.c
+++ b/drivers/cpuidle/cpuidle.c
@@ -158,9 +158,18 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
int entered_state;
struct cpuidle_state *target_state = &drv->states[index];
+ bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
ktime_t time_start, time_end;
s64 diff;
+ /*
+ * Tell the time framework to switch to a broadcast timer because our
+ * local timer will be shut down. If a local timer is used from another
+ * CPU as a broadcast timer, this call may fail if it is not available.
+ */
+ if (broadcast && tick_broadcast_enter())
+ return -EBUSY;
+
trace_cpu_idle_rcuidle(index, dev->cpu);
time_start = ktime_get();
@@ -169,6 +178,13 @@ int cpuidle_enter_state(struct cpuidle_device *dev, struct cpuidle_driver *drv,
time_end = ktime_get();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, dev->cpu);
+ if (broadcast) {
+ if (WARN_ON_ONCE(!irqs_disabled()))
+ local_irq_disable();
+
+ tick_broadcast_exit();
+ }
+
if (!cpuidle_state_is_coupled(dev, drv, entered_state))
local_irq_enable();
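
Stripped of tracing and statistics, the broadcast handling the cpuidle hunk adds brackets the idle-state entry as below (a condensed sketch of the hunk's own flow; the wrapper name is illustrative):

    static int enter_state_sketch(struct cpuidle_device *dev,
                                  struct cpuidle_driver *drv, int index)
    {
            struct cpuidle_state *target_state = &drv->states[index];
            bool broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
            int entered_state;

            /* This state stops the local timer; hand timekeeping to a broadcast
             * timer first, and refuse the state if no broadcast device is usable. */
            if (broadcast && tick_broadcast_enter())
                    return -EBUSY;

            entered_state = target_state->enter(dev, drv, index);

            if (broadcast) {
                    /* ->enter() is expected to return with interrupts off */
                    if (WARN_ON_ONCE(!irqs_disabled()))
                            local_irq_disable();
                    tick_broadcast_exit();  /* switch back to the local timer */
            }

            return entered_state;
    }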
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fd7ac13..bda2cb0 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -437,6 +437,7 @@ config IMG_MDC_DMA
config XGENE_DMA
tristate "APM X-Gene DMA support"
+ depends on ARCH_XGENE || COMPILE_TEST
select DMA_ENGINE
select DMA_ENGINE_RAID
select ASYNC_TX_ENABLE_CHANNEL_SWITCH
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 0e035a8..2890d74 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -571,11 +571,15 @@ struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
chan = private_candidate(&mask, device, NULL, NULL);
if (chan) {
+ dma_cap_set(DMA_PRIVATE, device->cap_mask);
+ device->privatecnt++;
err = dma_chan_get(chan);
if (err) {
pr_debug("%s: failed to get %s: (%d)\n",
__func__, dma_chan_name(chan), err);
chan = NULL;
+ if (--device->privatecnt == 0)
+ dma_cap_clear(DMA_PRIVATE, device->cap_mask);
}
}
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index f705798..ebd8a5f 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -673,6 +673,7 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
* Power management
*/
+#ifdef CONFIG_PM
static int usb_dmac_runtime_suspend(struct device *dev)
{
struct usb_dmac *dmac = dev_get_drvdata(dev);
@@ -690,6 +691,7 @@ static int usb_dmac_runtime_resume(struct device *dev)
return usb_dmac_init(dmac);
}
+#endif /* CONFIG_PM */
static const struct dev_pm_ops usb_dmac_pm = {
SET_RUNTIME_PM_OPS(usb_dmac_runtime_suspend, usb_dmac_runtime_resume,
diff --git a/drivers/firmware/efi/runtime-map.c b/drivers/firmware/efi/runtime-map.c
index 87b8e3b..5c55227 100644
--- a/drivers/firmware/efi/runtime-map.c
+++ b/drivers/firmware/efi/runtime-map.c
@@ -120,7 +120,8 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry) {
kset_unregister(map_kset);
- return entry;
+ map_kset = NULL;
+ return ERR_PTR(-ENOMEM);
}
memcpy(&entry->md, efi_runtime_map + nr * efi_memdesc_size,
@@ -132,6 +133,7 @@ add_sysfs_runtime_map_entry(struct kobject *kobj, int nr)
if (ret) {
kobject_put(&entry->kobj);
kset_unregister(map_kset);
+ map_kset = NULL;
return ERR_PTR(ret);
}
@@ -195,8 +197,6 @@ out_add_entry:
entry = *(map_entries + j);
kobject_put(&entry->kobj);
}
- if (map_kset)
- kset_unregister(map_kset);
out:
return ret;
}
diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c
index cd1d5bf..b232397 100644
--- a/drivers/gpio/gpio-omap.c
+++ b/drivers/gpio/gpio-omap.c
@@ -1054,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
dev_err(bank->dev, "Could not get gpio dbck\n");
}
-static void
-omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
- unsigned int num)
-{
- struct irq_chip_generic *gc;
- struct irq_chip_type *ct;
-
- gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
- handle_simple_irq);
- if (!gc) {
- dev_err(bank->dev, "Memory alloc failed for gc\n");
- return;
- }
-
- ct = gc->chip_types;
-
- /* NOTE: No ack required, reading IRQ status clears it. */
- ct->chip.irq_mask = irq_gc_mask_set_bit;
- ct->chip.irq_unmask = irq_gc_mask_clr_bit;
- ct->chip.irq_set_type = omap_gpio_irq_type;
-
- if (bank->regs->wkup_en)
- ct->chip.irq_set_wake = omap_gpio_wake_enable;
-
- ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
- irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
- IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-}
-
static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
{
- int j;
static int gpio;
int irq_base = 0;
int ret;
@@ -1132,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
}
#endif
+ /* MPUIO is a bit different, reading IRQ status clears it */
+ if (bank->is_mpuio) {
+ irqc->irq_ack = dummy_irq_chip.irq_ack;
+ irqc->irq_mask = irq_gc_mask_set_bit;
+ irqc->irq_unmask = irq_gc_mask_clr_bit;
+ if (!bank->regs->wkup_en)
+ irqc->irq_set_wake = NULL;
+ }
+
ret = gpiochip_irqchip_add(&bank->chip, irqc,
irq_base, omap_gpio_irq_handler,
IRQ_TYPE_NONE);
@@ -1145,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
gpiochip_set_chained_irqchip(&bank->chip, irqc,
bank->irq, omap_gpio_irq_handler);
- for (j = 0; j < bank->width; j++) {
- int irq = irq_find_mapping(bank->chip.irqdomain, j);
- if (bank->is_mpuio) {
- omap_mpuio_alloc_gc(bank, irq, bank->width);
- irq_set_chip_and_handler(irq, NULL, NULL);
- set_irq_flags(irq, 0);
- }
- }
-
return 0;
}
diff --git a/drivers/gpio/gpiolib-acpi.c b/drivers/gpio/gpiolib-acpi.c
index d2303d5..725d161 100644
--- a/drivers/gpio/gpiolib-acpi.c
+++ b/drivers/gpio/gpiolib-acpi.c
@@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
length = min(agpio->pin_table_length, (u16)(pin_index + bits));
for (i = pin_index; i < length; ++i) {
- unsigned pin = agpio->pin_table[i];
+ int pin = agpio->pin_table[i];
struct acpi_gpio_connection *conn;
struct gpio_desc *desc;
bool found;
diff --git a/drivers/gpio/gpiolib-sysfs.c b/drivers/gpio/gpiolib-sysfs.c
index 7722ed5..af3bc7a 100644
--- a/drivers/gpio/gpiolib-sysfs.c
+++ b/drivers/gpio/gpiolib-sysfs.c
@@ -551,6 +551,7 @@ static struct class gpio_class = {
*/
int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
{
+ struct gpio_chip *chip;
unsigned long flags;
int status;
const char *ioname = NULL;
@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
return -EINVAL;
}
+ chip = desc->chip;
+
mutex_lock(&sysfs_lock);
+ /* check if chip is being removed */
+ if (!chip || !chip->exported) {
+ status = -ENODEV;
+ goto fail_unlock;
+ }
+
spin_lock_irqsave(&gpio_lock, flags);
if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
test_bit(FLAG_EXPORT, &desc->flags)) {
@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
{
int status;
struct device *dev;
+ struct gpio_desc *desc;
+ unsigned int i;
mutex_lock(&sysfs_lock);
dev = class_find_device(&gpio_class, NULL, chip, match_export);
if (dev) {
put_device(dev);
device_unregister(dev);
+ /* prevent further gpiod exports */
chip->exported = false;
status = 0;
} else
@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
if (status)
chip_dbg(chip, "%s: status %d\n", __func__, status);
+
+ /* unregister gpiod class devices owned by sysfs */
+ for (i = 0; i < chip->ngpio; i++) {
+ desc = &chip->desc[i];
+ if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+ gpiod_free(desc);
+ }
}
static int __init gpiolib_sysfs_init(void)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 69af73f..596ee5c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
BUG_ON(!dqm || !qpd);
- BUG_ON(!list_empty(&qpd->queues_list));
+ pr_debug("In func %s\n", __func__);
- pr_debug("kfd: In func %s\n", __func__);
+ pr_debug("qpd->queues_list is %s\n",
+ list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
mutex_lock(&dqm->lock);
@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
return -ENOMEM;
}
+ init_sdma_vm(dqm, q, qpd);
+
retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval != 0)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 661c660..e469c4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -728,9 +728,9 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
dev->gpu->kgd));
+
sysfs_show_64bit_prop(buffer, "local_mem_size",
- dev->gpu->kfd2kgd->get_vmem_size(
- dev->gpu->kgd));
+ (unsigned long long int) 0);
sysfs_show_32bit_prop(buffer, "fw_version",
dev->gpu->kfd2kgd->get_fw_version(
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index c8a3447..af9662e 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
/* Reinitialize corresponding vblank timestamp if high-precision query
* available. Skip this step if query unsupported or failed. Will
- * reinitialize delayed at next vblank interrupt in that case.
+ * reinitialize delayed at next vblank interrupt in that case and
+ * assign 0 for now, to mark the vblanktimestamp as invalid.
*/
- if (rc) {
- tslot = atomic_read(&vblank->count) + diff;
- vblanktimestamp(dev, crtc, tslot) = t_vblank;
- }
+ tslot = atomic_read(&vblank->count) + diff;
+ vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
smp_mb__before_atomic();
atomic_add(diff, &vblank->count);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 3da1af4..773d1d2 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -6074,6 +6074,8 @@ enum skl_disp_power_wells {
#define GTFIFOCTL 0x120008
#define GT_FIFO_FREE_ENTRIES_MASK 0x7f
#define GT_FIFO_NUM_RESERVED_ENTRIES 20
+#define GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL (1 << 12)
+#define GT_FIFO_CTL_RC6_POLICY_STALL (1 << 11)
#define HSW_IDICR 0x9008
#define IDIHASHMSK(x) (((x) & 0x3f) << 16)
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index d547d9c8..d0f3cbc 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
};
static struct intel_quirk intel_quirks[] = {
- /* HP Mini needs pipe A force quirk (LP: #322104) */
- { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
-
/* Toshiba Protege R-205, S-209 needs pipe A force quirk */
{ 0x2592, 0x1179, 0x0001, quirk_pipea_force },
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index d023710..f27346e 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -1348,7 +1348,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
pipe_config->has_dp_encoder = true;
pipe_config->has_drrs = false;
- pipe_config->has_audio = intel_dp->has_audio;
+ pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -2211,8 +2211,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
int dotclock;
tmp = I915_READ(intel_dp->output_reg);
- if (tmp & DP_AUDIO_OUTPUT_ENABLE)
- pipe_config->has_audio = true;
+
+ pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
if (tmp & DP_SYNC_HS_HIGH)
@@ -3812,7 +3812,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
if (val == 0)
break;
- intel_dp->sink_rates[i] = val * 200;
+ /* Value read is in kHz while drm clock is saved in deca-kHz */
+ intel_dp->sink_rates[i] = (val * 200) / 10;
}
intel_dp->num_sink_rates = i;
}
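
The sink-rate conversion above is a unit fix: each DPCD supported-link-rate entry is in units of 200 kHz (hence the existing * 200, which yields kHz), while the drm side stores clocks in units of 10 kHz, so the value must additionally be divided by 10. For example, an entry of 8100 encodes 8100 * 200 kHz = 1.62 GHz and is stored as 162000.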
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 5abda1d..fbcc7df 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
static const struct dmi_system_id intel_dual_link_lvds[] = {
{
.callback = intel_dual_link_lvds_callback,
- .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+ .ident = "Apple MacBook Pro 15\" (2010)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+ },
+ },
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2011)",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
},
},
+ {
+ .callback = intel_dual_link_lvds_callback,
+ .ident = "Apple MacBook Pro 15\" (2012)",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+ },
+ },
{ } /* terminating entry */
};
@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
if (i915.lvds_channel_mode > 0)
return i915.lvds_channel_mode == 2;
+ /* single channel LVDS is limited to 112 MHz */
+ if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
+ > 112999)
+ return true;
+
if (dmi_check_system(intel_dual_link_lvds))
return true;
@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
out:
mutex_unlock(&dev->mode_config.mutex);
+ intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1125,7 +1148,6 @@ out:
}
drm_connector_register(connector);
- intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_panel_setup_backlight(connector, INVALID_PIPE);
return;
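
A note on the new dual-link check above: the fixed mode's clock is kept in kHz by drm, so the "> 112999" test fires for any panel clock above the 112 MHz single-channel LVDS limit named in the comment, forcing dual-link before the DMI table is even consulted.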
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index ab5cc94..ff2a746 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -360,6 +360,14 @@ static void __intel_uncore_early_sanitize(struct drm_device *dev,
__raw_i915_write32(dev_priv, GTFIFODBG,
__raw_i915_read32(dev_priv, GTFIFODBG));
+ /* WaDisableShadowRegForCpd:chv */
+ if (IS_CHERRYVIEW(dev)) {
+ __raw_i915_write32(dev_priv, GTFIFOCTL,
+ __raw_i915_read32(dev_priv, GTFIFOCTL) |
+ GT_FIFO_CTL_BLOCK_ALL_POLICY_STALL |
+ GT_FIFO_CTL_RC6_POLICY_STALL);
+ }
+
intel_uncore_forcewake_reset(dev, restore_forcewake);
}
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dac78ad..42b2ea3 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -580,6 +580,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
else
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
+ /* if there is no audio, set MINM_OVER_MAXP */
+ if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
+ radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
if (rdev->family < CHIP_RV770)
radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
/* use frac fb div on APUs */
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index f57c1ab..dd39f43 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1761,17 +1761,15 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
int encoder_mode = atombios_get_encoder_mode(encoder);
DRM_DEBUG_KMS("encoder dpms %d to mode %d, devices %08x, active_devices %08x\n",
radeon_encoder->encoder_id, mode, radeon_encoder->devices,
radeon_encoder->active_device);
- if (connector && (radeon_audio != 0) &&
+ if ((radeon_audio != 0) &&
((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
- (ENCODER_MODE_IS_DP(encoder_mode) &&
- drm_detect_monitor_audio(radeon_connector_edid(connector)))))
+ ENCODER_MODE_IS_DP(encoder_mode)))
radeon_audio_dpms(encoder, mode);
switch (radeon_encoder->encoder_id) {
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 3adc2af..68fd9fc 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -295,28 +295,3 @@ void dce6_dp_audio_set_dto(struct radeon_device *rdev,
WREG32(DCCG_AUDIO_DTO1_MODULE, clock);
}
}
-
-void dce6_dp_enable(struct drm_encoder *encoder, bool enable)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_device *rdev = dev->dev_private;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
-
- if (!dig || !dig->afmt)
- return;
-
- if (enable) {
- WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
- EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset,
- EVERGREEN_DP_SEC_ASP_ENABLE | /* Audio packet transmission */
- EVERGREEN_DP_SEC_ATP_ENABLE | /* Audio timestamp packet transmission */
- EVERGREEN_DP_SEC_AIP_ENABLE | /* Audio infoframe packet transmission */
- EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
- } else {
- WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
- }
-
- dig->afmt->enabled = enable;
-}
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index c18d4ec..0926739 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -219,13 +219,9 @@ void evergreen_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(AFMT_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
- WREG32_OR(HDMI_INFOFRAME_CONTROL0 + offset,
- HDMI_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
-
WREG32_P(HDMI_INFOFRAME_CONTROL1 + offset,
- HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
- ~HDMI_AVI_INFO_LINE_MASK);
+ HDMI_AVI_INFO_LINE(2), /* anything other than 0 */
+ ~HDMI_AVI_INFO_LINE_MASK);
}
void dce4_hdmi_audio_set_dto(struct radeon_device *rdev,
@@ -370,9 +366,13 @@ void dce4_set_audio_packet(struct drm_encoder *encoder, u32 offset)
WREG32(AFMT_AUDIO_PACKET_CONTROL2 + offset,
AFMT_AUDIO_CHANNEL_ENABLE(0xff));
+ WREG32(HDMI_AUDIO_PACKET_CONTROL + offset,
+ HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
+ HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be sufficient for all audio modes and small enough for all hblanks */
+
/* allow 60958 channel status and send audio packets fields to be updated */
- WREG32(AFMT_AUDIO_PACKET_CONTROL + offset,
- AFMT_AUDIO_SAMPLE_SEND | AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + offset,
+ AFMT_RESET_FIFO_WHEN_AUDIO_DIS | AFMT_60958_CS_UPDATE);
}
@@ -398,17 +398,26 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
return;
if (enable) {
- WREG32(HDMI_INFOFRAME_CONTROL1 + dig->afmt->offset,
- HDMI_AUDIO_INFO_LINE(2)); /* anything other than 0 */
-
- WREG32(HDMI_AUDIO_PACKET_CONTROL + dig->afmt->offset,
- HDMI_AUDIO_DELAY_EN(1) | /* set the default audio delay */
- HDMI_AUDIO_PACKETS_PER_LINE(3)); /* should be suffient for all audio modes and small enough for all hblanks */
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
- WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
- HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
- HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
+ HDMI_AUDIO_INFO_SEND | /* enable audio info frames (frames won't be set until audio is enabled) */
+ HDMI_AUDIO_INFO_CONT); /* required for audio info values to be updated */
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SAMPLE_SEND);
+ } else {
+ WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
+ HDMI_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI_AVI_INFO_CONT); /* required for audio info values to be updated */
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
+ }
} else {
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset, 0);
}
@@ -424,20 +433,24 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
if (!dig || !dig->afmt)
return;
- if (enable) {
+ if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
struct radeon_connector *radeon_connector = to_radeon_connector(connector);
struct radeon_connector_atom_dig *dig_connector;
uint32_t val;
+ WREG32_OR(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ AFMT_AUDIO_SAMPLE_SEND);
+
WREG32(EVERGREEN_DP_SEC_TIMESTAMP + dig->afmt->offset,
EVERGREEN_DP_SEC_TIMESTAMP_MODE(1));
- if (radeon_connector->con_priv) {
+ if (!ASIC_IS_DCE6(rdev) && radeon_connector->con_priv) {
dig_connector = radeon_connector->con_priv;
val = RREG32(EVERGREEN_DP_SEC_AUD_N + dig->afmt->offset);
val &= ~EVERGREEN_DP_SEC_N_BASE_MULTIPLE(0xf);
@@ -457,6 +470,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
EVERGREEN_DP_SEC_STREAM_ENABLE); /* Master enable for secondary stream engine */
} else {
WREG32(EVERGREEN_DP_SEC_CNTL + dig->afmt->offset, 0);
+ WREG32_AND(AFMT_AUDIO_PACKET_CONTROL + dig->afmt->offset,
+ ~AFMT_AUDIO_SAMPLE_SEND);
}
dig->afmt->enabled = enable;
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index dd6606b..e85894a 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -228,12 +228,13 @@ void r600_set_avi_packet(struct radeon_device *rdev, u32 offset,
WREG32(HDMI0_AVI_INFO3 + offset,
frame[0xC] | (frame[0xD] << 8) | (buffer[1] << 24));
+ WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
+ HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
+
WREG32_OR(HDMI0_INFOFRAME_CONTROL0 + offset,
- HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
- HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
+ HDMI0_AVI_INFO_SEND | /* enable AVI info frames */
+ HDMI0_AVI_INFO_CONT); /* send AVI info frames every frame/field */
- WREG32_OR(HDMI0_INFOFRAME_CONTROL1 + offset,
- HDMI0_AVI_INFO_LINE(2)); /* anything other than 0 */
}
/*
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index d2abe48..46eb0fa 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1673,7 +1673,6 @@ struct radeon_uvd {
struct radeon_bo *vcpu_bo;
void *cpu_addr;
uint64_t gpu_addr;
- void *saved_bo;
atomic_t handles[RADEON_MAX_UVD_HANDLES];
struct drm_file *filp[RADEON_MAX_UVD_HANDLES];
unsigned img_size[RADEON_MAX_UVD_HANDLES];
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index fafd8ce..8dbf508 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
static struct radeon_asic_ring rv770_uvd_ring = {
.ib_execute = &uvd_v1_0_ib_execute,
.emit_fence = &uvd_v2_2_fence_emit,
- .emit_semaphore = &uvd_v1_0_semaphore_emit,
+ .emit_semaphore = &uvd_v2_2_semaphore_emit,
.cs_parse = &radeon_uvd_cs_parse,
.ring_test = &uvd_v1_0_ring_test,
.ib_test = &uvd_v1_0_ib_test,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index cf0a90b..a3ca8cd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int uvd_v2_2_resume(struct radeon_device *rdev);
void uvd_v2_2_fence_emit(struct radeon_device *rdev,
struct radeon_fence *fence);
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait);
/* uvd v3.1 */
bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index 48d49e6..dcb7796 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -102,7 +102,6 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
void r600_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable);
void evergreen_dp_enable(struct drm_encoder *encoder, bool enable);
-void dce6_dp_enable(struct drm_encoder *encoder, bool enable);
static const u32 pin_offsets[7] =
{
@@ -240,7 +239,7 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
.set_avi_packet = evergreen_set_avi_packet,
.set_audio_packet = dce4_set_audio_packet,
.mode_set = radeon_audio_dp_mode_set,
- .dpms = dce6_dp_enable,
+ .dpms = evergreen_dp_enable,
};
static void radeon_audio_interface_init(struct radeon_device *rdev)
@@ -461,30 +460,37 @@ void radeon_audio_detect(struct drm_connector *connector,
if (!connector || !connector->encoder)
return;
+ if (!radeon_encoder_is_digital(connector->encoder))
+ return;
+
rdev = connector->encoder->dev->dev_private;
+
+ if (!radeon_audio_chipset_supported(rdev))
+ return;
+
radeon_encoder = to_radeon_encoder(connector->encoder);
dig = radeon_encoder->enc_priv;
- if (status == connector_status_connected) {
- struct radeon_connector *radeon_connector;
- int sink_type;
-
- if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
- radeon_encoder->audio = NULL;
- return;
- }
+ if (!dig->afmt)
+ return;
- radeon_connector = to_radeon_connector(connector);
- sink_type = radeon_dp_getsinktype(radeon_connector);
+ if (status == connector_status_connected) {
+ struct radeon_connector *radeon_connector = to_radeon_connector(connector);
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
- sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ radeon_dp_getsinktype(radeon_connector) ==
+ CONNECTOR_OBJECT_ID_DISPLAYPORT)
radeon_encoder->audio = rdev->audio.dp_funcs;
else
radeon_encoder->audio = rdev->audio.hdmi_funcs;
dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
- radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
+ } else {
+ radeon_audio_enable(rdev, dig->afmt->pin, 0);
+ dig->afmt->pin = NULL;
+ }
} else {
radeon_audio_enable(rdev, dig->afmt->pin, 0);
dig->afmt->pin = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e..d17d251 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,10 @@ out:
/* updated in get modes as well since we need to know if it's analog or digital */
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
+ radeon_connector_get_edid(connector);
radeon_audio_detect(connector, ret);
+ }
exit:
pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1719,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
radeon_connector_update_scratch_regs(connector, ret);
- if (radeon_audio != 0)
+ if (radeon_audio != 0) {
+ radeon_connector_get_edid(connector);
radeon_audio_detect(connector, ret);
+ }
out:
pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 4d0f96c..ab39b85 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -88,7 +88,7 @@ static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
p->dma_reloc_idx = 0;
/* FIXME: we assume that each relocs use 4 dwords */
p->nrelocs = chunk->length_dw / 4;
- p->relocs = kcalloc(p->nrelocs, sizeof(struct radeon_bo_list), GFP_KERNEL);
+ p->relocs = drm_calloc_large(p->nrelocs, sizeof(struct radeon_bo_list));
if (p->relocs == NULL) {
return -ENOMEM;
}
@@ -428,7 +428,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
}
}
kfree(parser->track);
- kfree(parser->relocs);
+ drm_free_large(parser->relocs);
drm_free_large(parser->vm_bos);
for (i = 0; i < parser->nchunks; i++)
drm_free_large(parser->chunks[i].kdata);
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index 0170137..eef006c 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -135,28 +135,31 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
while (it) {
struct radeon_mn_node *node;
struct radeon_bo *bo;
- int r;
+ long r;
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
list_for_each_entry(bo, &node->bos, mn_list) {
+ if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ continue;
+
r = radeon_bo_reserve(bo, true);
if (r) {
- DRM_ERROR("(%d) failed to reserve user bo\n", r);
+ DRM_ERROR("(%ld) failed to reserve user bo\n", r);
continue;
}
r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
true, false, MAX_SCHEDULE_TIMEOUT);
- if (r)
- DRM_ERROR("(%d) failed to wait for user bo\n", r);
+ if (r <= 0)
+ DRM_ERROR("(%ld) failed to wait for user bo\n", r);
radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
if (r)
- DRM_ERROR("(%d) failed to validate user bo\n", r);
+ DRM_ERROR("(%ld) failed to validate user bo\n", r);
radeon_bo_unreserve(bo);
}
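The switch from int to long above matters because reservation_object_wait_timeout_rcu() returns the remaining timeout: a positive value means the wait succeeded, 0 means it timed out, and a negative value is an error, hence the new "r <= 0" check. A stand-alone sketch of that return convention (function and values invented for illustration):

#include <stdio.h>

/* Returns time left on success, 0 on timeout, negative errno on failure. */
static long wait_with_timeout(int became_ready, long timeout)
{
	if (timeout < 0)
		return -22;                  /* -EINVAL: bad argument */
	return became_ready ? timeout : 0;   /* time remaining, or timed out */
}

int main(void)
{
	long r = wait_with_timeout(0, 100);

	if (r <= 0)                          /* both timeout and error are failures */
		fprintf(stderr, "(%ld) failed to wait for user bo\n", r);
	return 0;
}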
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index b292aca..edafd3c 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
{
struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
struct radeon_ttm_tt *gtt = (void *)ttm;
- struct scatterlist *sg;
- int i;
+ struct sg_page_iter sg_iter;
int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
/* free the sg table and pages again */
dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
- for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
- struct page *page = sg_page(sg);
-
+ for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+ struct page *page = sg_page_iter_page(&sg_iter);
if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
set_page_dirty(page);
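The for_each_sg_page() conversion above visits every page backing the table, whereas the old for_each_sg() loop saw one struct scatterlist entry per iteration and therefore only its first page. The underlying idea, iterate pages rather than segments, in a self-contained sketch (segment layout invented):

struct seg {
	unsigned long first_page;  /* index of the first page in this segment */
	int npages;                /* pages covered by the segment */
};

/* Mark every page of every segment dirty, not just each segment's head page. */
static void dirty_all_pages(const struct seg *segs, int nsegs,
			    void (*mark_dirty)(unsigned long page))
{
	for (int i = 0; i < nsegs; i++)
		for (int j = 0; j < segs[i].npages; j++)
			mark_dirty(segs[i].first_page + j);
}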
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index c10b2ae..6edcb54 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
int radeon_uvd_suspend(struct radeon_device *rdev)
{
- unsigned size;
- void *ptr;
- int i;
+ int i, r;
if (rdev->uvd.vcpu_bo == NULL)
return 0;
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
- if (atomic_read(&rdev->uvd.handles[i]))
- break;
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+ if (handle != 0) {
+ struct radeon_fence *fence;
- if (i == RADEON_MAX_UVD_HANDLES)
- return 0;
+ radeon_uvd_note_usage(rdev);
- size = radeon_bo_size(rdev->uvd.vcpu_bo);
- size -= rdev->uvd_fw->size;
+ r = radeon_uvd_get_destroy_msg(rdev,
+ R600_RING_TYPE_UVD_INDEX, handle, &fence);
+ if (r) {
+ DRM_ERROR("Error destroying UVD (%d)!\n", r);
+ continue;
+ }
- ptr = rdev->uvd.cpu_addr;
- ptr += rdev->uvd_fw->size;
+ radeon_fence_wait(fence, false);
+ radeon_fence_unref(&fence);
- rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
- memcpy(rdev->uvd.saved_bo, ptr, size);
+ rdev->uvd.filp[i] = NULL;
+ atomic_set(&rdev->uvd.handles[i], 0);
+ }
+ }
return 0;
}
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
ptr = rdev->uvd.cpu_addr;
ptr += rdev->uvd_fw->size;
- if (rdev->uvd.saved_bo != NULL) {
- memcpy(ptr, rdev->uvd.saved_bo, size);
- kfree(rdev->uvd.saved_bo);
- rdev->uvd.saved_bo = NULL;
- } else
- memset(ptr, 0, size);
+ memset(ptr, 0, size);
return 0;
}
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
return 0;
}
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
+ unsigned stream_type)
+{
+ switch (stream_type) {
+ case 0: /* H264 */
+ case 1: /* VC1 */
+ /* always supported */
+ return 0;
+
+ case 3: /* MPEG2 */
+ case 4: /* MPEG4 */
+ /* only since UVD 3 */
+ if (p->rdev->family >= CHIP_PALM)
+ return 0;
+
+ /* fall through */
+ default:
+ DRM_ERROR("UVD codec not supported by hardware %d!\n",
+ stream_type);
+ return -EINVAL;
+ }
+}
+
static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
unsigned offset, unsigned buf_sizes[])
{
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
return -EINVAL;
}
- if (msg_type == 1) {
- /* it's a decode msg, calc buffer sizes */
- r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
- /* calc image size (width * height) */
- img_size = msg[6] * msg[7];
+ switch (msg_type) {
+ case 0:
+ /* it's a create msg, calc image size (width * height) */
+ img_size = msg[7] * msg[8];
+
+ r = radeon_uvd_validate_codec(p, msg[4]);
radeon_bo_kunmap(bo);
if (r)
return r;
- } else if (msg_type == 2) {
+ /* try to alloc a new handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+ DRM_ERROR("Handle 0x%x already in use!\n", handle);
+ return -EINVAL;
+ }
+
+ if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+ p->rdev->uvd.filp[i] = p->filp;
+ p->rdev->uvd.img_size[i] = img_size;
+ return 0;
+ }
+ }
+
+ DRM_ERROR("No more free UVD handles!\n");
+ return -EINVAL;
+
+ case 1:
+ /* it's a decode msg, validate codec and calc buffer sizes */
+ r = radeon_uvd_validate_codec(p, msg[4]);
+ if (!r)
+ r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+ radeon_bo_kunmap(bo);
+ if (r)
+ return r;
+
+ /* validate the handle */
+ for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+ if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+ if (p->rdev->uvd.filp[i] != p->filp) {
+ DRM_ERROR("UVD handle collision detected!\n");
+ return -EINVAL;
+ }
+ return 0;
+ }
+ }
+
+ DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+ return -ENOENT;
+
+ case 2:
/* it's a destroy msg, free the handle */
for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
radeon_bo_kunmap(bo);
return 0;
- } else {
- /* it's a create msg, calc image size (width * height) */
- img_size = msg[7] * msg[8];
- radeon_bo_kunmap(bo);
- if (msg_type != 0) {
- DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
- return -EINVAL;
- }
-
- /* it's a create msg, no special handling needed */
- }
-
- /* create or decode, validate the handle */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
- return 0;
- }
+ default:
- /* handle not found try to alloc a new one */
- for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
- if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
- p->rdev->uvd.filp[i] = p->filp;
- p->rdev->uvd.img_size[i] = img_size;
- return 0;
- }
+ DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+ return -EINVAL;
}
- DRM_ERROR("No more free UVD handles!\n");
+ BUG();
return -EINVAL;
}
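The create path above claims a UVD handle slot with atomic_cmpxchg(&handles[i], 0, handle): only one submitter can move a slot from 0 to a non-zero handle, so no lock is needed and a duplicate handle is caught before allocation. A userspace analogue of the same idiom using C11 atomics (array size and names are illustrative):

#include <stdatomic.h>
#include <stdint.h>

#define MAX_HANDLES 10

static _Atomic uint32_t handles[MAX_HANDLES];   /* 0 means "slot free" */

/* Returns the claimed slot index, or -1 if the handle exists or no slot is free. */
static int alloc_handle(uint32_t handle)
{
	for (int i = 0; i < MAX_HANDLES; ++i) {
		uint32_t expected = 0;

		if (atomic_load(&handles[i]) == handle)
			return -1;                        /* already in use */
		if (atomic_compare_exchange_strong(&handles[i], &expected, handle))
			return i;                         /* we own slot i now */
	}
	return -1;                                        /* no more free handles */
}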
diff --git a/drivers/gpu/drm/radeon/radeon_vce.c b/drivers/gpu/drm/radeon/radeon_vce.c
index 24f849f..0de5711 100644
--- a/drivers/gpu/drm/radeon/radeon_vce.c
+++ b/drivers/gpu/drm/radeon/radeon_vce.c
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
*
* @p: parser context
* @handle: handle to validate
+ * @allocated: allocated a new handle?
*
* Validates the handle and returns the found session index or -EINVAL
* if we don't have another free session index.
*/
-int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
+ uint32_t handle, bool *allocated)
{
unsigned i;
+ *allocated = false;
+
/* validate the handle */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
- if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+ if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
+ if (p->rdev->vce.filp[i] != p->filp) {
+ DRM_ERROR("VCE handle collision detected!\n");
+ return -EINVAL;
+ }
return i;
+ }
}
/* handle not found try to alloc a new one */
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
p->rdev->vce.filp[i] = p->filp;
p->rdev->vce.img_size[i] = 0;
+ *allocated = true;
return i;
}
}
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
int radeon_vce_cs_parse(struct radeon_cs_parser *p)
{
int session_idx = -1;
- bool destroyed = false;
+ bool destroyed = false, created = false, allocated = false;
uint32_t tmp, handle = 0;
uint32_t *size = &tmp;
- int i, r;
+ int i, r = 0;
while (p->idx < p->chunk_ib->length_dw) {
uint32_t len = radeon_get_ib_value(p, p->idx);
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
if ((len < 8) || (len & 3)) {
DRM_ERROR("invalid VCE command length (%d)!\n", len);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
if (destroyed) {
DRM_ERROR("No other command allowed after destroy!\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
switch (cmd) {
case 0x00000001: // session
handle = radeon_get_ib_value(p, p->idx + 2);
- session_idx = radeon_vce_validate_handle(p, handle);
+ session_idx = radeon_vce_validate_handle(p, handle,
+ &allocated);
if (session_idx < 0)
return session_idx;
size = &p->rdev->vce.img_size[session_idx];
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
break;
case 0x01000001: // create
+ created = true;
+ if (!allocated) {
+ DRM_ERROR("Handle already in use!\n");
+ r = -EINVAL;
+ goto out;
+ }
+
*size = radeon_get_ib_value(p, p->idx + 8) *
radeon_get_ib_value(p, p->idx + 10) *
8 * 3 / 2;
@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
*size);
if (r)
- return r;
+ goto out;
r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
*size / 3);
if (r)
- return r;
+ goto out;
break;
case 0x02000001: // destroy
@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
*size * 2);
if (r)
- return r;
+ goto out;
break;
case 0x05000004: // video bitstream buffer
@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
tmp);
if (r)
- return r;
+ goto out;
break;
case 0x05000005: // feedback buffer
r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
4096);
if (r)
- return r;
+ goto out;
break;
default:
DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
if (session_idx == -1) {
DRM_ERROR("no session command at start of IB\n");
- return -EINVAL;
+ r = -EINVAL;
+ goto out;
}
p->idx += len / 4;
}
- if (destroyed) {
- /* IB contains a destroy msg, free the handle */
+ if (allocated && !created) {
+ DRM_ERROR("New session without create command!\n");
+ r = -ENOENT;
+ }
+
+out:
+ if ((!r && destroyed) || (r && allocated)) {
+ /*
+ * IB contains a destroy msg or we have allocated a
+ * handle and got an error; either way, free the handle
+ */
for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
}
- return 0;
+ return r;
}
/**
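radeon_vce_cs_parse() now funnels every failure through the single "out:" label, so a handle that was allocated while parsing the IB is released both when the IB legitimately ends in a destroy message and when an error strikes after allocation. A minimal sketch of that single-exit cleanup idiom (resource type and names invented):

#include <stdlib.h>

static char *sessions[4];               /* registry that outlives the call */

/* slot is assumed valid; fail_late simulates an error after allocation. */
static int parse(int slot, int fail_late)
{
	int r = 0;
	char *s = malloc(64);           /* resource claimed early in parsing */

	if (!s)
		return -12;             /* -ENOMEM */

	if (fail_late) {
		r = -22;                /* -EINVAL */
		goto out;
	}
	sessions[slot] = s;             /* success: the session stays registered */
out:
	if (r)                          /* error path: undo what we claimed */
		free(s);
	return r;
}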
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 2a5a4a9..de42fc4 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -473,6 +473,23 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
}
mutex_lock(&vm->mutex);
+ soffset /= RADEON_GPU_PAGE_SIZE;
+ eoffset /= RADEON_GPU_PAGE_SIZE;
+ if (soffset || eoffset) {
+ struct interval_tree_node *it;
+ it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
+ if (it && it != &bo_va->it) {
+ struct radeon_bo_va *tmp;
+ tmp = container_of(it, struct radeon_bo_va, it);
+ /* bo and tmp overlap, invalid offset */
+ dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
+ "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
+ soffset, tmp->bo, tmp->it.start, tmp->it.last);
+ mutex_unlock(&vm->mutex);
+ return -EINVAL;
+ }
+ }
+
if (bo_va->it.start || bo_va->it.last) {
if (bo_va->addr) {
/* add a clone of the bo_va to clear the old address */
@@ -490,6 +507,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
spin_lock(&vm->status_lock);
list_add(&tmp->vm_status, &vm->freed);
spin_unlock(&vm->status_lock);
+
+ bo_va->addr = 0;
}
interval_tree_remove(&bo_va->it, &vm->va);
@@ -497,21 +516,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
bo_va->it.last = 0;
}
- soffset /= RADEON_GPU_PAGE_SIZE;
- eoffset /= RADEON_GPU_PAGE_SIZE;
if (soffset || eoffset) {
- struct interval_tree_node *it;
- it = interval_tree_iter_first(&vm->va, soffset, eoffset - 1);
- if (it) {
- struct radeon_bo_va *tmp;
- tmp = container_of(it, struct radeon_bo_va, it);
- /* bo and tmp overlap, invalid offset */
- dev_err(rdev->dev, "bo %p va 0x%010Lx conflict with "
- "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
- soffset, tmp->bo, tmp->it.start, tmp->it.last);
- mutex_unlock(&vm->mutex);
- return -EINVAL;
- }
bo_va->it.start = soffset;
bo_va->it.last = eoffset - 1;
interval_tree_insert(&bo_va->it, &vm->va);
@@ -1107,7 +1112,8 @@ void radeon_vm_bo_rmv(struct radeon_device *rdev,
list_del(&bo_va->bo_list);
mutex_lock(&vm->mutex);
- interval_tree_remove(&bo_va->it, &vm->va);
+ if (bo_va->it.start || bo_va->it.last)
+ interval_tree_remove(&bo_va->it, &vm->va);
spin_lock(&vm->status_lock);
list_del(&bo_va->vm_status);
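The radeon_vm.c change above checks the requested VA range for conflicts before removing or inserting anything, so a rejected mapping leaves the interval tree untouched. The conflict test itself is the classic closed-interval overlap check on [soffset, eoffset - 1]; a sketch with the tree walk replaced by a linear scan (types invented):

struct range {
	unsigned long start, last;      /* inclusive bounds, as in the interval tree */
};

static int ranges_overlap(const struct range *a, const struct range *b)
{
	return a->start <= b->last && b->start <= a->last;
}

/* Returns 1 if [soffset, eoffset - 1] collides with any existing mapping. */
static int va_conflicts(const struct range *existing, int n,
			unsigned long soffset, unsigned long eoffset)
{
	struct range want = { soffset, eoffset - 1 };

	for (int i = 0; i < n; ++i)
		if (ranges_overlap(&existing[i], &want))
			return 1;       /* caller would return -EINVAL */
	return 0;
}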
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 3cf1e29..9ef2064 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -989,6 +989,9 @@
((n) & 0x3FFF) << 16)
/* UVD */
+#define UVD_SEMA_ADDR_LOW 0xef00
+#define UVD_SEMA_ADDR_HIGH 0xef04
+#define UVD_SEMA_CMD 0xef08
#define UVD_GPCOM_VCPU_CMD 0xef0c
#define UVD_GPCOM_VCPU_DATA0 0xef10
#define UVD_GPCOM_VCPU_DATA1 0xef14
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index b35bccf..ff8b83f 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2924,6 +2924,7 @@ struct si_dpm_quirk {
static struct si_dpm_quirk si_dpm_quirk_list[] = {
/* PITCAIRN - https://bugs.freedesktop.org/show_bug.cgi?id=76490 */
{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
+ { PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
{ 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/radeon/uvd_v1_0.c b/drivers/gpu/drm/radeon/uvd_v1_0.c
index e72b3cb..c6b1cbc 100644
--- a/drivers/gpu/drm/radeon/uvd_v1_0.c
+++ b/drivers/gpu/drm/radeon/uvd_v1_0.c
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
struct radeon_semaphore *semaphore,
bool emit_wait)
{
- uint64_t addr = semaphore->gpu_addr;
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
- radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
- radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
- radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
- radeon_ring_write(ring, emit_wait ? 1 : 0);
-
- return true;
+ /* disable semaphores for UVD V1 hardware */
+ return false;
}
/**
diff --git a/drivers/gpu/drm/radeon/uvd_v2_2.c b/drivers/gpu/drm/radeon/uvd_v2_2.c
index 8919351..7ed778c 100644
--- a/drivers/gpu/drm/radeon/uvd_v2_2.c
+++ b/drivers/gpu/drm/radeon/uvd_v2_2.c
@@ -60,6 +60,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
}
/**
+ * uvd_v2_2_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+ struct radeon_ring *ring,
+ struct radeon_semaphore *semaphore,
+ bool emit_wait)
+{
+ uint64_t addr = semaphore->gpu_addr;
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+ radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+ radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+ radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+ radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+ return true;
+}
+
+/**
* uvd_v2_2_resume - memory controller programming
*
* @rdev: radeon_device pointer
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index ccb0ce0..4557f33 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -1409,7 +1409,7 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
struct vop *vop;
struct resource *res;
size_t alloc_size;
- int ret;
+ int ret, irq;
of_id = of_match_device(vop_driver_dt_match, dev);
vop_data = of_id->data;
@@ -1445,11 +1445,12 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
return ret;
}
- vop->irq = platform_get_irq(pdev, 0);
- if (vop->irq < 0) {
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
dev_err(dev, "cannot find irq for vop\n");
- return vop->irq;
+ return irq;
}
+ vop->irq = (unsigned int)irq;
spin_lock_init(&vop->reg_lock);
spin_lock_init(&vop->irq_lock);
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 1833abd..bfad15a 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
drm->irq_enabled = true;
/* syncpoints are used for full 32-bit hardware VBLANK counters */
- drm->vblank_disable_immediate = true;
drm->max_vblank_count = 0xffffffff;
err = drm_vblank_init(drm, drm->mode_config.num_crtc);
diff --git a/drivers/infiniband/core/addr.c b/drivers/infiniband/core/addr.c
index f80da50..38339d2 100644
--- a/drivers/infiniband/core/addr.c
+++ b/drivers/infiniband/core/addr.c
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
} sgid_addr, dgid_addr;
- ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
- if (ret)
- return ret;
-
- ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
- if (ret)
- return ret;
+ rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+ rdma_gid2ip(&dgid_addr._sockaddr, dgid);
memset(&dev_addr, 0, sizeof(dev_addr));
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
struct sockaddr_in6 _sockaddr_in6;
} gid_addr;
- ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+ rdma_gid2ip(&gid_addr._sockaddr, sgid);
- if (ret)
- return ret;
memset(&dev_addr, 0, sizeof(dev_addr));
ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
if (ret)
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index e28a494..0c14191 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
return cm_id_priv;
}
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
{
int i;
- for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
- ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
- ((unsigned long *) mask)[i];
+ for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+ dst[i] = src[i] & mask[i];
}
static int cm_compare_data(struct ib_cm_compare_data *src_data,
struct ib_cm_compare_data *dst_data)
{
- u8 src[IB_CM_COMPARE_SIZE];
- u8 dst[IB_CM_COMPARE_SIZE];
+ u32 src[IB_CM_COMPARE_SIZE];
+ u32 dst[IB_CM_COMPARE_SIZE];
if (!src_data || !dst_data)
return 0;
cm_mask_copy(src, src_data->data, dst_data->mask);
cm_mask_copy(dst, dst_data->data, src_data->mask);
- return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+ return memcmp(src, dst, sizeof(src));
}
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
struct ib_cm_compare_data *dst_data)
{
- u8 src[IB_CM_COMPARE_SIZE];
+ u32 src[IB_CM_COMPARE_SIZE];
if (!dst_data)
return 0;
cm_mask_copy(src, private_data, dst_data->mask);
- return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+ return memcmp(src, dst_data->data, sizeof(src));
}
/*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
static struct cm_id_private * cm_find_listen(struct ib_device *device,
__be64 service_id,
- u8 *private_data)
+ u32 *private_data)
{
struct rb_node *node = cm.listen_service_table.rb_node;
struct cm_id_private *cm_id_priv;
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
cm_mask_copy(cm_id_priv->compare_data->data,
compare_data->data, compare_data->mask);
memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
- IB_CM_COMPARE_SIZE);
+ sizeof(compare_data->mask));
}
cm_id->state = IB_CM_LISTEN;
diff --git a/drivers/infiniband/core/cm_msgs.h b/drivers/infiniband/core/cm_msgs.h
index be068f4..8b76f0e 100644
--- a/drivers/infiniband/core/cm_msgs.h
+++ b/drivers/infiniband/core/cm_msgs.h
@@ -103,7 +103,7 @@ struct cm_req_msg {
/* local ACK timeout:5, rsvd:3 */
u8 alt_offset139;
- u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+ u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
__be16 rsvd;
__be64 service_id;
- u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+ u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
} __attribute__ ((packed));
struct cm_sidr_rep_msg {
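The cm.c/cm_msgs.h changes above store private data as u32 arrays and mask them element-wise, which sidesteps the old casts of u8 buffers to unsigned long and behaves the same on 32- and 64-bit builds. The masked-compare operation itself is short; a userspace sketch (COMPARE_SIZE here is an illustrative element count, not the real IB_CM_COMPARE_SIZE):

#include <stdint.h>
#include <string.h>

#define COMPARE_SIZE 16                 /* number of u32 words, illustrative */

static void mask_copy(uint32_t *dst, const uint32_t *src, const uint32_t *mask)
{
	for (int i = 0; i < COMPARE_SIZE; i++)
		dst[i] = src[i] & mask[i];
}

/* Two blobs match if each, masked with the other's mask, compares equal. */
static int compare_data(const uint32_t *a, const uint32_t *a_mask,
			const uint32_t *b, const uint32_t *b_mask)
{
	uint32_t ma[COMPARE_SIZE], mb[COMPARE_SIZE];

	mask_copy(ma, a, b_mask);
	mask_copy(mb, b, a_mask);
	return memcmp(ma, mb, sizeof(ma));
}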
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index d570030..06441a4 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -859,19 +859,27 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
memcpy(&ib->sib_addr, &path->dgid, 16);
}
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+ if (ss->ss_family == AF_INET)
+ return ((struct sockaddr_in *)ss)->sin_port;
+ else if (ss->ss_family == AF_INET6)
+ return ((struct sockaddr_in6 *)ss)->sin6_port;
+ BUG();
+}
+
static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
struct cma_hdr *hdr)
{
- struct sockaddr_in *listen4, *ip4;
+ struct sockaddr_in *ip4;
- listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
- ip4->sin_family = listen4->sin_family;
+ ip4->sin_family = AF_INET;
ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
- ip4->sin_port = listen4->sin_port;
+ ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
- ip4->sin_family = listen4->sin_family;
+ ip4->sin_family = AF_INET;
ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
ip4->sin_port = hdr->port;
}
@@ -879,16 +887,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
struct cma_hdr *hdr)
{
- struct sockaddr_in6 *listen6, *ip6;
+ struct sockaddr_in6 *ip6;
- listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
- ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_family = AF_INET6;
ip6->sin6_addr = hdr->dst_addr.ip6;
- ip6->sin6_port = listen6->sin6_port;
+ ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
- ip6->sin6_family = listen6->sin6_family;
+ ip6->sin6_family = AF_INET6;
ip6->sin6_addr = hdr->src_addr.ip6;
ip6->sin6_port = hdr->port;
}
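ss_get_port() above replaces the old assumption that the listener's address family matches the incoming header with an explicit family switch, and both cma_save_ip4_info() and cma_save_ip6_info() now hard-code their family. A compilable userspace version of the same helper (abort() stands in for the kernel's BUG()):

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdlib.h>

static in_port_t ss_get_port(const struct sockaddr_storage *ss)
{
	if (ss->ss_family == AF_INET)
		return ((const struct sockaddr_in *)ss)->sin_port;
	if (ss->ss_family == AF_INET6)
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	abort();        /* callers only pass INET/INET6 addresses */
}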
diff --git a/drivers/infiniband/core/iwpm_msg.c b/drivers/infiniband/core/iwpm_msg.c
index b85ddbc..ab08170 100644
--- a/drivers/infiniband/core/iwpm_msg.c
+++ b/drivers/infiniband/core/iwpm_msg.c
@@ -468,7 +468,8 @@ add_mapping_response_exit:
}
EXPORT_SYMBOL(iwpm_add_mapping_cb);
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping request
+ * and response with remote address info */
static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
[IWPM_NLA_QUERY_MAPPING_SEQ] = { .type = NLA_U32 },
[IWPM_NLA_QUERY_LOCAL_ADDR] = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
}
EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
+/*
+ * iwpm_remote_info_cb - Process a port mapper message, containing
+ * the remote connecting peer address info
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+ struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+ struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+ struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+ struct iwpm_remote_info *rem_info;
+ const char *msg_type;
+ u8 nl_client;
+ int ret = -EINVAL;
+
+ msg_type = "Remote Mapping info";
+ if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+ resp_query_policy, nltb, msg_type))
+ return ret;
+
+ nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid port mapper client = %d\n",
+ __func__, nl_client);
+ return ret;
+ }
+ atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+ local_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+ remote_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+ mapped_loc_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+ mapped_rem_sockaddr = (struct sockaddr_storage *)
+ nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+ if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+ mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+ pr_info("%s: Sockaddr family doesn't match the requested one\n",
+ __func__);
+ return ret;
+ }
+ rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+ if (!rem_info) {
+ pr_err("%s: Unable to allocate a remote info\n", __func__);
+ ret = -ENOMEM;
+ return ret;
+ }
+ memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+ sizeof(struct sockaddr_storage));
+ memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+ sizeof(struct sockaddr_storage));
+ rem_info->nl_client = nl_client;
+
+ iwpm_add_remote_info(rem_info);
+
+ iwpm_print_sockaddr(local_sockaddr,
+ "remote_info: Local sockaddr:");
+ iwpm_print_sockaddr(mapped_loc_sockaddr,
+ "remote_info: Mapped local sockaddr:");
+ iwpm_print_sockaddr(remote_sockaddr,
+ "remote_info: Remote sockaddr:");
+ iwpm_print_sockaddr(mapped_rem_sockaddr,
+ "remote_info: Mapped remote sockaddr:");
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
/* netlink attribute policy for the received request for mapping info */
static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
[IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
diff --git a/drivers/infiniband/core/iwpm_util.c b/drivers/infiniband/core/iwpm_util.c
index 69e9f84..a626795 100644
--- a/drivers/infiniband/core/iwpm_util.c
+++ b/drivers/infiniband/core/iwpm_util.c
@@ -33,8 +33,10 @@
#include "iwpm_util.h"
-#define IWPM_HASH_BUCKET_SIZE 512
-#define IWPM_HASH_BUCKET_MASK (IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE 512
+#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE 64
+#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
static struct hlist_head *iwpm_hash_bucket;
static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
int iwpm_init(u8 nl_client)
{
+ int ret = 0;
if (iwpm_valid_client(nl_client))
return -EINVAL;
mutex_lock(&iwpm_admin_lock);
if (atomic_read(&iwpm_admin.refcount) == 0) {
- iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+ iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
sizeof(struct hlist_head), GFP_KERNEL);
if (!iwpm_hash_bucket) {
- mutex_unlock(&iwpm_admin_lock);
+ ret = -ENOMEM;
pr_err("%s Unable to create mapinfo hash table\n", __func__);
- return -ENOMEM;
+ goto init_exit;
+ }
+ iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+ sizeof(struct hlist_head), GFP_KERNEL);
+ if (!iwpm_reminfo_bucket) {
+ kfree(iwpm_hash_bucket);
+ ret = -ENOMEM;
+ pr_err("%s Unable to create reminfo hash table\n", __func__);
+ goto init_exit;
}
}
atomic_inc(&iwpm_admin.refcount);
+init_exit:
mutex_unlock(&iwpm_admin_lock);
- iwpm_set_valid(nl_client, 1);
- return 0;
+ if (!ret) {
+ iwpm_set_valid(nl_client, 1);
+ pr_debug("%s: Mapinfo and reminfo tables are created\n",
+ __func__);
+ }
+ return ret;
}
EXPORT_SYMBOL(iwpm_init);
static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
int iwpm_exit(u8 nl_client)
{
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
}
if (atomic_dec_and_test(&iwpm_admin.refcount)) {
free_hash_bucket();
- pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+ free_reminfo_bucket();
+ pr_debug("%s: Resources are destroyed\n", __func__);
}
mutex_unlock(&iwpm_admin_lock);
iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
}
EXPORT_SYMBOL(iwpm_exit);
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
struct sockaddr_storage *);
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
struct hlist_head *hash_bucket_head;
struct iwpm_mapping_info *map_info;
unsigned long flags;
+ int ret = -EINVAL;
if (!iwpm_valid_client(nl_client))
- return -EINVAL;
+ return ret;
map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
if (!map_info) {
pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- hash_bucket_head = get_hash_bucket_head(
+ hash_bucket_head = get_mapinfo_hash_bucket(
&map_info->local_sockaddr,
&map_info->mapped_sockaddr);
- hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+ if (hash_bucket_head) {
+ hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+ ret = 0;
+ }
}
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
- return 0;
+ return ret;
}
EXPORT_SYMBOL(iwpm_create_mapinfo);
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- hash_bucket_head = get_hash_bucket_head(
+ hash_bucket_head = get_mapinfo_hash_bucket(
local_sockaddr,
mapped_local_addr);
+ if (!hash_bucket_head)
+ goto remove_mapinfo_exit;
+
hlist_for_each_entry_safe(map_info, tmp_hlist_node,
hash_bucket_head, hlist_node) {
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
}
}
}
+remove_mapinfo_exit:
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
return ret;
}
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
/* remove all the mapinfo data from the list */
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry_safe(map_info, tmp_hlist_node,
&iwpm_hash_bucket[i], hlist_node) {
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
}
+static void free_reminfo_bucket(void)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct iwpm_remote_info *rem_info;
+ unsigned long flags;
+ int i;
+
+ /* remove all the remote info from the list */
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+ &iwpm_reminfo_bucket[i], hlist_node) {
+
+ hlist_del_init(&rem_info->hlist_node);
+ kfree(rem_info);
+ }
+ }
+ /* free the hash list */
+ kfree(iwpm_reminfo_bucket);
+ iwpm_reminfo_bucket = NULL;
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+ struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+ struct hlist_head *hash_bucket_head;
+ unsigned long flags;
+
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ if (iwpm_reminfo_bucket) {
+ hash_bucket_head = get_reminfo_hash_bucket(
+ &rem_info->mapped_loc_sockaddr,
+ &rem_info->mapped_rem_sockaddr);
+ if (hash_bucket_head)
+ hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+ }
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr,
+ u8 nl_client)
+{
+ struct hlist_node *tmp_hlist_node;
+ struct hlist_head *hash_bucket_head;
+ struct iwpm_remote_info *rem_info = NULL;
+ unsigned long flags;
+ int ret = -EINVAL;
+
+ if (!iwpm_valid_client(nl_client)) {
+ pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+ return ret;
+ }
+ spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+ if (iwpm_reminfo_bucket) {
+ hash_bucket_head = get_reminfo_hash_bucket(
+ mapped_loc_addr,
+ mapped_rem_addr);
+ if (!hash_bucket_head)
+ goto get_remote_info_exit;
+ hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+ hash_bucket_head, hlist_node) {
+
+ if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+ mapped_loc_addr) &&
+ !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+ mapped_rem_addr)) {
+
+ memcpy(remote_addr, &rem_info->remote_sockaddr,
+ sizeof(struct sockaddr_storage));
+ iwpm_print_sockaddr(remote_addr,
+ "get_remote_info: Remote sockaddr:");
+
+ hlist_del_init(&rem_info->hlist_node);
+ kfree(rem_info);
+ ret = 0;
+ break;
+ }
+ }
+ }
+get_remote_info_exit:
+ spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+ return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
u8 nl_client, gfp_t gfp)
{
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
return hash;
}
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
- *local_sockaddr,
- struct sockaddr_storage
- *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+ struct sockaddr_storage *b_sockaddr, u32 *hash)
{
- u32 local_hash, mapped_hash, hash;
+ u32 a_hash, b_hash;
- if (local_sockaddr->ss_family == AF_INET) {
- local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
- mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+ if (a_sockaddr->ss_family == AF_INET) {
+ a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+ b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
- } else if (local_sockaddr->ss_family == AF_INET6) {
- local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
- mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+ } else if (a_sockaddr->ss_family == AF_INET6) {
+ a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+ b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
} else {
pr_err("%s: Invalid sockaddr family\n", __func__);
- return NULL;
+ return -EINVAL;
}
- if (local_hash == mapped_hash) /* if port mapper isn't available */
- hash = local_hash;
+ if (a_hash == b_hash) /* if port mapper isn't available */
+ *hash = a_hash;
else
- hash = jhash_2words(local_hash, mapped_hash, 0);
+ *hash = jhash_2words(a_hash, b_hash, 0);
+ return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+ *local_sockaddr, struct sockaddr_storage
+ *mapped_sockaddr)
+{
+ u32 hash;
+ int ret;
- return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+ ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+ if (ret)
+ return NULL;
+ return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+ *mapped_loc_sockaddr, struct sockaddr_storage
+ *mapped_rem_sockaddr)
+{
+ u32 hash;
+ int ret;
+
+ ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+ if (ret)
+ return NULL;
+ return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
}
static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
}
skb_num++;
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
hlist_node) {
if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
if (iwpm_hash_bucket) {
- for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+ for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
if (!hlist_empty(&iwpm_hash_bucket[i])) {
full_bucket = 1;
break;
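get_hash_bucket() above hashes the two sockaddrs separately and, when the hashes differ, mixes them into one bucket index; when they are identical (no port mapper rewrote the address) a single hash is used as-is. The selection logic in isolation, with a stand-in mix function where the kernel code uses jhash_2words():

#include <stdint.h>

#define HASH_SIZE 64                    /* illustrative bucket count */
#define HASH_MASK (HASH_SIZE - 1)

static uint32_t mix2(uint32_t a, uint32_t b)
{
	return (a * 2654435761u) ^ (b + 0x9e3779b9u);   /* toy mix, not jhash */
}

static uint32_t bucket_index(uint32_t a_hash, uint32_t b_hash)
{
	uint32_t hash;

	if (a_hash == b_hash)           /* mapped address equals the original */
		hash = a_hash;
	else
		hash = mix2(a_hash, b_hash);
	return hash & HASH_MASK;
}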
diff --git a/drivers/infiniband/core/iwpm_util.h b/drivers/infiniband/core/iwpm_util.h
index 9777c86..ee2d9ff 100644
--- a/drivers/infiniband/core/iwpm_util.h
+++ b/drivers/infiniband/core/iwpm_util.h
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
u8 nl_client;
};
+struct iwpm_remote_info {
+ struct hlist_node hlist_node;
+ struct sockaddr_storage remote_sockaddr;
+ struct sockaddr_storage mapped_loc_sockaddr;
+ struct sockaddr_storage mapped_rem_sockaddr;
+ u8 nl_client;
+};
+
struct iwpm_admin_data {
atomic_t refcount;
atomic_t nlmsg_seq;
@@ -128,6 +136,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
int iwpm_get_nlmsg_seq(void);
/**
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
+ * to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
+/**
* iwpm_valid_client - Check if the port mapper client is valid
* @nl_client: The index of the netlink client
*
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index 8b8cc6f..40becdb 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
int remove_existing_mapping = 0;
int ret = 0;
- mutex_lock(&umem->odp_data->umem_mutex);
/*
* Note: we avoid writing if seq is different from the initial seq, to
* handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
}
out:
- mutex_unlock(&umem->odp_data->umem_mutex);
-
/* On Demand Paging - avoid pinning the page */
if (umem->context->invalidate_range || !stored_page)
put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
user_virt += npages << PAGE_SHIFT;
+ mutex_lock(&umem->odp_data->umem_mutex);
for (j = 0; j < npages; ++j) {
ret = ib_umem_odp_map_dma_single_page(
umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
break;
k++;
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
if (ret < 0) {
/* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* faults from completion. We might be racing with other
* invalidations, so we must make sure we free each page only
* once. */
+ mutex_lock(&umem->odp_data->umem_mutex);
for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
- mutex_lock(&umem->odp_data->umem_mutex);
if (umem->odp_data->page_list[idx]) {
struct page *page = umem->odp_data->page_list[idx];
- struct page *head_page = compound_head(page);
dma_addr_t dma = umem->odp_data->dma_list[idx];
dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
DMA_BIDIRECTIONAL);
- if (dma & ODP_WRITE_ALLOWED_BIT)
+ if (dma & ODP_WRITE_ALLOWED_BIT) {
+ struct page *head_page = compound_head(page);
/*
* set_page_dirty prefers being called with
* the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
* be removed.
*/
set_page_dirty(head_page);
+ }
/* on demand pinning support */
if (!umem->context->invalidate_range)
put_page(page);
umem->odp_data->page_list[idx] = NULL;
umem->odp_data->dma_list[idx] = 0;
}
- mutex_unlock(&umem->odp_data->umem_mutex);
}
+ mutex_unlock(&umem->odp_data->umem_mutex);
}
EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
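The umem_odp changes above hoist umem_mutex out of the per-page loops: the lock is taken once around the whole range instead of once per page, in both the map and unmap paths. The shape of that change in a plain pthread sketch (page bookkeeping invented):

#include <pthread.h>

static pthread_mutex_t range_lock = PTHREAD_MUTEX_INITIALIZER;
static int pages[512];

static void unmap_range(int start, int end)
{
	pthread_mutex_lock(&range_lock);        /* one lock for the whole range */
	for (int i = start; i < end; i++)
		pages[i] = 0;                   /* per-page work under the lock */
	pthread_mutex_unlock(&range_lock);
}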
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 57176dd..bb95a6c 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
sizeof(ep->com.mapped_remote_addr));
}
+static int get_remote_addr(struct c4iw_ep *ep)
+{
+ int ret;
+
+ print_addr(&ep->com, __func__, "get_remote_addr");
+
+ ret = iwpm_get_remote_info(&ep->com.mapped_local_addr,
+ &ep->com.mapped_remote_addr,
+ &ep->com.remote_addr, RDMA_NL_C4IW);
+ if (ret)
+ pr_info(MOD "Unable to find remote peer addr info - err %d\n",
+ ret);
+
+ return ret;
+}
+
static void best_mtu(const unsigned short *mtus, unsigned short mtu,
unsigned int *idx, int use_ts, int ipv6)
{
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
- opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ opt2 |= T5_ISS_F;
}
t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
status, status2errno(status));
if (is_neg_adv(status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Connection problems for atid %u status %u (%s)\n",
- atid, status, neg_adv_str(status));
+ PDBG("%s Connection problems for atid %u status %u (%s)\n",
+ __func__, atid, status, neg_adv_str(status));
+ ep->stats.connect_neg_adv++;
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.neg_adv++;
+ mutex_unlock(&dev->rdev.stats.lock);
return 0;
}
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
u32 isn = (prandom_u32() & ~7UL) - 1;
opt2 |= T5_OPT_2_VALID_F;
opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
- opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+ opt2 |= T5_ISS_F;
rpl5 = (void *)rpl;
memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
state_set(&child_ep->com, CONNECTING);
child_ep->com.dev = dev;
child_ep->com.cm_id = NULL;
+
+ /*
+ * The mapped_local and mapped_remote addresses get set up with
+ * the actual 4-tuple. The local address will be based on the
+ * actual local address of the connection, but on the port number
+ * of the parent listening endpoint. The remote address is
+ * set up based on a query to the IWPM since we don't know what it
+ * originally was before mapping. If no mapping was done, then
+ * mapped_remote == remote, and mapped_local == local.
+ */
if (iptype == 4) {
struct sockaddr_in *sin = (struct sockaddr_in *)
- &child_ep->com.local_addr;
+ &child_ep->com.mapped_local_addr;
+
sin->sin_family = PF_INET;
sin->sin_port = local_port;
sin->sin_addr.s_addr = *(__be32 *)local_ip;
- sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+ sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+ sin->sin_family = PF_INET;
+ sin->sin_port = ((struct sockaddr_in *)
+ &parent_ep->com.local_addr)->sin_port;
+ sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+ sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
sin->sin_family = PF_INET;
sin->sin_port = peer_port;
sin->sin_addr.s_addr = *(__be32 *)peer_ip;
} else {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
- &child_ep->com.local_addr;
+ &child_ep->com.mapped_local_addr;
+
sin6->sin6_family = PF_INET6;
sin6->sin6_port = local_port;
memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
- sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+ sin6->sin6_family = PF_INET6;
+ sin6->sin6_port = ((struct sockaddr_in6 *)
+ &parent_ep->com.local_addr)->sin6_port;
+ memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+ sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
sin6->sin6_family = PF_INET6;
sin6->sin6_port = peer_port;
memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
}
+ memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+ sizeof(child_ep->com.remote_addr));
+ get_remote_addr(child_ep);
+
c4iw_get_ep(&parent_ep->com);
child_ep->parent_ep = parent_ep;
child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
ep = lookup_tid(t, tid);
if (is_neg_adv(req->status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Negative advice on abort - tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
+ ep->stats.abort_neg_adv++;
+ mutex_lock(&dev->rdev.stats.lock);
+ dev->rdev.stats.neg_adv++;
+ mutex_unlock(&dev->rdev.stats.lock);
return 0;
}
PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
* TP will ignore any value > 0 for MSS index.
*/
req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
- req->cookie = (unsigned long)skb;
+ req->cookie = (uintptr_t)skb;
set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
return 0;
}
if (is_neg_adv(req->status)) {
- dev_warn(&dev->rdev.lldi.pdev->dev,
- "Negative advice on abort - tid %u status %d (%s)\n",
- ep->hwtid, req->status, neg_adv_str(req->status));
+ PDBG("%s Negative advice on abort- tid %u status %d (%s)\n",
+ __func__, ep->hwtid, req->status,
+ neg_adv_str(req->status));
+ ep->stats.abort_neg_adv++;
+ dev->rdev.stats.neg_adv++;
kfree_skb(skb);
return 0;
}
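In pass_accept_req() above, mapped_local_addr/mapped_remote_addr now carry the wire 4-tuple while local_addr takes the parent listener's port, and the true remote address is then recovered via get_remote_addr(). Populating a sockaddr_in from network-byte-order fields, as those hunks do, looks like this in a userspace sketch (helper name and values are illustrative):

#include <netinet/in.h>
#include <stdint.h>
#include <string.h>

static void fill_sin(struct sockaddr_in *sin, uint32_t be_addr, uint16_t be_port)
{
	memset(sin, 0, sizeof(*sin));
	sin->sin_family = AF_INET;
	sin->sin_port = be_port;           /* already network byte order */
	sin->sin_addr.s_addr = be_addr;    /* already network byte order */
}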
diff --git a/drivers/infiniband/hw/cxgb4/cq.c b/drivers/infiniband/hw/cxgb4/cq.c
index ab7692a..68ddb37 100644
--- a/drivers/infiniband/hw/cxgb4/cq.c
+++ b/drivers/infiniband/hw/cxgb4/cq.c
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
FW_RI_RES_WR_NRES_V(1) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.cq.restype = FW_RI_RES_TYPE_CQ;
res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
goto err4;
cq->gen = 1;
- cq->gts = rdev->lldi.gts_reg;
cq->rdev = rdev;
if (user) {
- cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
- (cq->cqid << rdev->cqshift);
- cq->ugts &= PAGE_MASK;
+ u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+ cq->ugts = (u64)rdev->bar2_pa + off;
+ } else if (is_t4(rdev->lldi.adapter_type)) {
+ cq->gts = rdev->lldi.gts_reg;
+ cq->qid_mask = -1U;
+ } else {
+ u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+ cq->gts = rdev->bar2_kva + off;
+ cq->qid_mask = rdev->qpmask;
}
return 0;
err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
}
PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
__func__, chp->cq.cqid, chp, chp->cq.size,
- chp->cq.memsize,
- (unsigned long long) chp->cq.dma_addr);
+ chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
return &chp->ibcq;
err5:
kfree(mm2);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index 8fb295e..cf54d69 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
};
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
int prev_ts_set = 0;
int idx, end;
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
idx = atomic_read(&dev->rdev.wr_log_idx) &
(dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
dev->rdev.stats.act_ofld_conn_fails);
seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
dev->rdev.stats.pas_ofld_conn_fails);
+ seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
return 0;
}
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
+ "conn_na %u abort_na %u "
"%pI4:%d/%d <-> %pI4:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
+ ep->stats.connect_neg_adv,
+ ep->stats.abort_neg_adv,
&lsin->sin_addr, ntohs(lsin->sin_port),
ntohs(mapped_lsin->sin_port),
&rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
cc = snprintf(epd->buf + epd->pos, space,
"ep %p cm_id %p qp %p state %d flags 0x%lx "
"history 0x%lx hwtid %d atid %d "
+ "conn_na %u abort_na %u "
"%pI6:%d/%d <-> %pI6:%d/%d\n",
ep, ep->com.cm_id, ep->com.qp,
(int)ep->com.state, ep->com.flags,
ep->com.history, ep->hwtid, ep->atid,
+ ep->stats.connect_neg_adv,
+ ep->stats.abort_neg_adv,
&lsin6->sin6_addr, ntohs(lsin6->sin6_port),
ntohs(mapped_lsin6->sin6_port),
&rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -765,6 +773,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
c4iw_init_dev_ucontext(rdev, &rdev->uctx);
/*
+ * This implementation assumes udb_density == ucq_density! Eventually
+ * we might need to support this but for now fail the open. Also the
+ * cqid and qpid range must match for now.
+ */
+ if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+ pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+ rdev->lldi.ucq_density);
+ err = -EINVAL;
+ goto err1;
+ }
+ if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+ rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+ pr_err(MOD "%s: unsupported qp and cq id ranges "
+ "qp start %u size %u cq start %u size %u\n",
+ pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+ rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
+ rdev->lldi.vr->cq.size);
+ err = -EINVAL;
+ goto err1;
+ }
+
+ /*
* qpshift is the number of bits to shift the qpid left in order
* to get the correct address of the doorbell for that qp.
*/
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
rdev->lldi.vr->qp.size,
rdev->lldi.vr->cq.start,
rdev->lldi.vr->cq.size);
- PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+ PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
"qpmask 0x%x cqshift %lu cqmask 0x%x\n",
(unsigned)pci_resource_len(rdev->lldi.pdev, 2),
- (u64)pci_resource_start(rdev->lldi.pdev, 2),
+ (void *)pci_resource_start(rdev->lldi.pdev, 2),
rdev->lldi.db_reg,
rdev->lldi.gts_reg,
rdev->qpshift, rdev->qpmask,
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index d87e165..97bb555 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -137,6 +137,7 @@ struct c4iw_stats {
u64 tcam_full;
u64 act_ofld_conn_fails;
u64 pas_ofld_conn_fails;
+ u64 neg_adv;
};
struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
int backlog;
};
+struct c4iw_ep_stats {
+ unsigned connect_neg_adv;
+ unsigned abort_neg_adv;
+};
+
struct c4iw_ep {
struct c4iw_ep_common com;
struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
unsigned int retry_count;
int snd_win;
int rcv_win;
+ struct c4iw_ep_stats stats;
};
static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
index 3ef0cf9..cff815b 100644
--- a/drivers/infiniband/hw/cxgb4/mem.c
+++ b/drivers/infiniband/hw/cxgb4/mem.c
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
if (i == (num_wqe-1)) {
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
FW_WR_COMPL_F);
- req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+ req->wr.wr_lo = (__force __be64)&wr_wait;
} else
req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
mhp->attr.zbva = 0;
mhp->attr.va_fbo = 0;
mhp->attr.page_size = 0;
- mhp->attr.len = ~0UL;
+ mhp->attr.len = ~0ULL;
mhp->attr.pbl_size = 0;
ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
FW_RI_STAG_NSMR, mhp->attr.perms,
- mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+ mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
if (ret)
goto err1;
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index 15cae5a..389ced3 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
FW_RI_RES_WR_NRES_V(2) |
FW_WR_COMPL_F);
res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
- res_wr->cookie = (unsigned long) &wr_wait;
+ res_wr->cookie = (uintptr_t)&wr_wait;
res = res_wr->res;
res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
wqe->flowid_len16 = cpu_to_be32(
FW_WR_FLOWID_V(ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (unsigned long) &ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)&ep->com.wr_wait;
wqe->u.fini.type = FW_RI_TYPE_FINI;
ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
FW_WR_FLOWID_V(qhp->ep->hwtid) |
FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
- wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
+ wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
wqe->u.init.type = FW_RI_TYPE_INIT;
wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
insert_mmap(ucontext, mm2);
mm3->key = uresp.sq_db_gts_key;
- mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
+ mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
mm3->len = PAGE_SIZE;
insert_mmap(ucontext, mm3);
mm4->key = uresp.rq_db_gts_key;
- mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
+ mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
mm4->len = PAGE_SIZE;
insert_mmap(ucontext, mm4);
if (mm5) {
diff --git a/drivers/infiniband/hw/cxgb4/t4.h b/drivers/infiniband/hw/cxgb4/t4.h
index 871cdca..7f2a6c2 100644
--- a/drivers/infiniband/hw/cxgb4/t4.h
+++ b/drivers/infiniband/hw/cxgb4/t4.h
@@ -539,6 +539,7 @@ struct t4_cq {
size_t memsize;
__be64 bits_type_ts;
u32 cqid;
+ u32 qid_mask;
int vector;
u16 size; /* including status page */
u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
set_bit(CQ_ARMED, &cq->flags);
while (cq->cidx_inc > CIDXINC_M) {
val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc -= CIDXINC_M;
}
val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc = 0;
return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
u32 val;
val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
- INGRESSQID_V(cq->cqid);
+ INGRESSQID_V(cq->cqid & cq->qid_mask);
writel(val, cq->gts);
cq->cidx_inc = 0;
}
diff --git a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
index 5e53327..343e8daf 100644
--- a/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
+++ b/drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
@@ -848,6 +848,8 @@ enum { /* TCP congestion control algorithms */
#define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
#define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
-#define CONG_CNTRL_VALID (1 << 18)
+#define T5_ISS_S 18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F T5_ISS_V(1U)
#endif /* _T4FW_RI_API_H_ */
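The CONG_CNTRL_VALID bit is replaced here by the T5_ISS_* trio, following the register-field macro convention used throughout the cxgb4 headers (the same convention behind FW_WR_OP_V() and FW_WR_COMPL_F seen earlier in this series). A minimal sketch of that convention, with a made-up field name for illustration only:

        /* <FIELD>_S     bit offset of the field within the word
         * <FIELD>_M     right-justified mask of the field
         * <FIELD>_V(x)  shift a value into position: ((x) << <FIELD>_S)
         * <FIELD>_G(x)  extract the field: (((x) >> <FIELD>_S) & <FIELD>_M)
         * <FIELD>_F     single-bit flag, i.e. <FIELD>_V(1U)
         */
        #define EXAMPLE_S    18
        #define EXAMPLE_M    0x1
        #define EXAMPLE_V(x) ((x) << EXAMPLE_S)
        #define EXAMPLE_G(x) (((x) >> EXAMPLE_S) & EXAMPLE_M)
        #define EXAMPLE_F    EXAMPLE_V(1U)   /* (1U << 18) */

So T5_ISS_F covers the same bit 18 that the old CONG_CNTRL_VALID define did, just expressed through the shared naming scheme.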
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
index 3b2a6dc..9f9d5c5 100644
--- a/drivers/infiniband/hw/nes/nes.c
+++ b/drivers/infiniband/hw/nes/nes.c
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
[RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
[RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
[RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+ [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
[RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
[RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
[RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
index 6f09a72..72b4341 100644
--- a/drivers/infiniband/hw/nes/nes_cm.c
+++ b/drivers/infiniband/hw/nes/nes_cm.c
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
}
+static void record_sockaddr_info(struct sockaddr_storage *addr_info,
+ nes_addr_t *ip_addr, u16 *port_num)
+{
+ struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
+
+ if (in_addr->sin_family == AF_INET) {
+ *ip_addr = ntohl(in_addr->sin_addr.s_addr);
+ *port_num = ntohs(in_addr->sin_port);
+ }
+}
+
/*
* nes_record_pm_msg - Save the received mapping info
*/
static void nes_record_pm_msg(struct nes_cm_info *cm_info,
struct iwpm_sa_data *pm_msg)
{
- struct sockaddr_in *mapped_loc_addr =
- (struct sockaddr_in *)&pm_msg->mapped_loc_addr;
- struct sockaddr_in *mapped_rem_addr =
- (struct sockaddr_in *)&pm_msg->mapped_rem_addr;
-
- if (mapped_loc_addr->sin_family == AF_INET) {
- cm_info->mapped_loc_addr =
- ntohl(mapped_loc_addr->sin_addr.s_addr);
- cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
- }
- if (mapped_rem_addr->sin_family == AF_INET) {
- cm_info->mapped_rem_addr =
- ntohl(mapped_rem_addr->sin_addr.s_addr);
- cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
- }
+ record_sockaddr_info(&pm_msg->mapped_loc_addr,
+ &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
+
+ record_sockaddr_info(&pm_msg->mapped_rem_addr,
+ &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
+}
+
+/*
+ * nes_get_remote_addr - Get the address info of the remote connecting peer
+ */
+static int nes_get_remote_addr(struct nes_cm_node *cm_node)
+{
+ struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
+ struct sockaddr_storage remote_addr;
+ int ret;
+
+ nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
+ htons(cm_node->mapped_loc_port), &mapped_loc_addr);
+ nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
+ htons(cm_node->mapped_rem_port), &mapped_rem_addr);
+
+ ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
+ &remote_addr, RDMA_NL_NES);
+ if (ret)
+ nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
+ else
+ record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
+ &cm_node->rem_port);
+ return ret;
}
/**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
return NULL;
/* set our node specific transport info */
- cm_node->loc_addr = cm_info->loc_addr;
+ if (listener) {
+ cm_node->loc_addr = listener->loc_addr;
+ cm_node->loc_port = listener->loc_port;
+ } else {
+ cm_node->loc_addr = cm_info->loc_addr;
+ cm_node->loc_port = cm_info->loc_port;
+ }
cm_node->rem_addr = cm_info->rem_addr;
- cm_node->loc_port = cm_info->loc_port;
cm_node->rem_port = cm_info->rem_port;
cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
cm_node->state = NES_CM_STATE_ESTABLISHED;
if (datasize) {
cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+ nes_get_remote_addr(cm_node);
handle_rcv_mpa(cm_node, skb);
} else { /* rcvd ACK only */
dev_kfree_skb_any(skb);
diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
index ffd48bf..ba5173e2 100644
--- a/drivers/infiniband/hw/qib/qib.h
+++ b/drivers/infiniband/hw/qib/qib.h
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
extern u32 qib_cpulist_count;
extern unsigned long *qib_cpulist;
-extern unsigned qib_wc_pat;
extern unsigned qib_cc_table_size;
int qib_init(struct qib_devdata *, int);
int init_chip_wc_pat(struct qib_devdata *dd, u32);
diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c
index 9ea6c44..7258818 100644
--- a/drivers/infiniband/hw/qib/qib_file_ops.c
+++ b/drivers/infiniband/hw/qib/qib_file_ops.c
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
vma->vm_flags &= ~VM_MAYREAD;
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
- if (qib_wc_pat)
+ /* We used PAT if wc_cookie == 0 */
+ if (!dd->wc_cookie)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
diff --git a/drivers/infiniband/hw/qib/qib_iba6120.c b/drivers/infiniband/hw/qib/qib_iba6120.c
index 0d2ba59..4b927809 100644
--- a/drivers/infiniband/hw/qib/qib_iba6120.c
+++ b/drivers/infiniband/hw/qib/qib_iba6120.c
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
qib_6120_config_ctxts(dd);
qib_set_ctxtcnt(dd);
- if (qib_wc_pat) {
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- }
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
set_6120_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7220.c b/drivers/infiniband/hw/qib/qib_iba7220.c
index 22affda..00b2af2 100644
--- a/drivers/infiniband/hw/qib/qib_iba7220.c
+++ b/drivers/infiniband/hw/qib/qib_iba7220.c
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
qib_7220_config_ctxts(dd);
qib_set_ctxtcnt(dd); /* needed for PAT setup */
- if (qib_wc_pat) {
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
- }
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
set_7220_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_iba7322.c b/drivers/infiniband/hw/qib/qib_iba7322.c
index ef97b71..f32b462 100644
--- a/drivers/infiniband/hw/qib/qib_iba7322.c
+++ b/drivers/infiniband/hw/qib/qib_iba7322.c
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
unsigned features, pidx, sbufcnt;
int ret, mtu;
u32 sbufs, updthresh;
+ resource_size_t vl15off;
/* pport structs are contiguous, allocated after devdata */
ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
qib_7322_config_ctxts(dd);
qib_set_ctxtcnt(dd);
- if (qib_wc_pat) {
- resource_size_t vl15off;
- /*
- * We do not set WC on the VL15 buffers to avoid
- * a rare problem with unaligned writes from
- * interrupt-flushed store buffers, so we need
- * to map those separately here. We can't solve
- * this for the rarely used mtrr case.
- */
- ret = init_chip_wc_pat(dd, 0);
- if (ret)
- goto bail;
+ /*
+ * We do not set WC on the VL15 buffers to avoid
+ * a rare problem with unaligned writes from
+ * interrupt-flushed store buffers, so we need
+ * to map those separately here. We can't solve
+ * this for the rarely used mtrr case.
+ */
+ ret = init_chip_wc_pat(dd, 0);
+ if (ret)
+ goto bail;
- /* vl15 buffers start just after the 4k buffers */
- vl15off = dd->physaddr + (dd->piobufbase >> 32) +
- dd->piobcnt4k * dd->align4k;
- dd->piovl15base = ioremap_nocache(vl15off,
- NUM_VL15_BUFS * dd->align4k);
- if (!dd->piovl15base) {
- ret = -ENOMEM;
- goto bail;
- }
+ /* vl15 buffers start just after the 4k buffers */
+ vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+ dd->piobcnt4k * dd->align4k;
+ dd->piovl15base = ioremap_nocache(vl15off,
+ NUM_VL15_BUFS * dd->align4k);
+ if (!dd->piovl15base) {
+ ret = -ENOMEM;
+ goto bail;
}
+
qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
ret = 0;
diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c
index 2ee3695..7e00470 100644
--- a/drivers/infiniband/hw/qib/qib_init.c
+++ b/drivers/infiniband/hw/qib/qib_init.c
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
unsigned qib_cc_table_size;
module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-/*
- * qib_wc_pat parameter:
- * 0 is WC via MTRR
- * 1 is WC via PAT
- * If PAT initialization fails, code reverts back to MTRR
- */
-unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
-module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
-MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
static void verify_interrupt(unsigned long);
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
spin_unlock(&dd->pport[pidx].cc_shadow_lock);
}
- if (!qib_wc_pat)
- qib_disable_wc(dd);
+ qib_disable_wc(dd);
if (dd->pioavailregs_dma) {
dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto bail;
}
- if (!qib_wc_pat) {
- ret = qib_enable_wc(dd);
- if (ret) {
- qib_dev_err(dd,
- "Write combining not enabled (err %d): performance may be poor\n",
- -ret);
- ret = 0;
- }
+ ret = qib_enable_wc(dd);
+ if (ret) {
+ qib_dev_err(dd,
+ "Write combining not enabled (err %d): performance may be poor\n",
+ -ret);
+ ret = 0;
}
qib_verify_pioperf(dd);
diff --git a/drivers/infiniband/hw/qib/qib_wc_x86_64.c b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
index 81b225f..6d61ef9 100644
--- a/drivers/infiniband/hw/qib/qib_wc_x86_64.c
+++ b/drivers/infiniband/hw/qib/qib_wc_x86_64.c
@@ -116,21 +116,9 @@ int qib_enable_wc(struct qib_devdata *dd)
}
if (!ret) {
- int cookie;
-
- cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
- if (cookie < 0) {
- {
- qib_devinfo(dd->pcidev,
- "mtrr_add() WC for PIO bufs failed (%d)\n",
- cookie);
- ret = -EINVAL;
- }
- } else {
- dd->wc_cookie = cookie;
- dd->wc_base = (unsigned long) pioaddr;
- dd->wc_len = (unsigned long) piolen;
- }
+ dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+ if (dd->wc_cookie < 0)
+ ret = -EINVAL;
}
return ret;
@@ -142,18 +130,7 @@ int qib_enable_wc(struct qib_devdata *dd)
*/
void qib_disable_wc(struct qib_devdata *dd)
{
- if (dd->wc_cookie) {
- int r;
-
- r = mtrr_del(dd->wc_cookie, dd->wc_base,
- dd->wc_len);
- if (r < 0)
- qib_devinfo(dd->pcidev,
- "mtrr_del(%lx, %lx, %lx) failed: %d\n",
- dd->wc_cookie, dd->wc_base,
- dd->wc_len, r);
- dd->wc_cookie = 0; /* even on failure */
- }
+ arch_phys_wc_del(dd->wc_cookie);
}
/**
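The qib conversion above drops the driver's wc_pat module parameter and its open-coded mtrr_add()/mtrr_del() handling in favour of arch_phys_wc_add()/arch_phys_wc_del(), which pick PAT or MTRR internally. A minimal sketch of the usual calling pattern (drv, bar_start, bar_len and dev are hypothetical names, not taken from the driver):

        /* Request write-combining for a PIO aperture.  The helper returns
         * 0 when PAT already provides WC (nothing to undo later), a
         * positive cookie when an MTRR slot was added, or a negative value
         * when write-combining could not be set up at all.
         */
        drv->wc_cookie = arch_phys_wc_add(bar_start, bar_len);
        if (drv->wc_cookie < 0)
                dev_warn(dev, "no write combining; performance may be poor\n");

        /* map the aperture with ioremap_wc()/pgprot_writecombine() so the
         * PAT case actually gets WC mappings */

        /* teardown: safe to call with a zero or negative cookie */
        arch_phys_wc_del(drv->wc_cookie);

This is why the mmap path above only checks dd->wc_cookie == 0 to decide whether PAT-based write-combining is in effect.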
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 56959ad..cf32a778 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
rx->rx_ring[i].mapping,
GFP_KERNEL)) {
ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
- ret = -ENOMEM;
- goto err_count;
+ ret = -ENOMEM;
+ goto err_count;
}
ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
if (ret) {
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7b315e3..01999d7 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -82,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
-/*
- * Supported arch specific GIC irq extension.
- * Default make them NULL.
- */
-struct irq_chip gic_arch_extn = {
- .irq_eoi = NULL,
- .irq_mask = NULL,
- .irq_unmask = NULL,
- .irq_retrigger = NULL,
- .irq_set_type = NULL,
- .irq_set_wake = NULL,
-};
-
#ifndef MAX_GIC_NR
#define MAX_GIC_NR 1
#endif
@@ -167,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
static void gic_mask_irq(struct irq_data *d)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
- if (gic_arch_extn.irq_mask)
- gic_arch_extn.irq_mask(d);
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_unmask_irq(struct irq_data *d)
{
- unsigned long flags;
-
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
- if (gic_arch_extn.irq_unmask)
- gic_arch_extn.irq_unmask(d);
gic_poke_irq(d, GIC_DIST_ENABLE_SET);
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
static void gic_eoi_irq(struct irq_data *d)
{
- if (gic_arch_extn.irq_eoi) {
- raw_spin_lock(&irq_controller_lock);
- gic_arch_extn.irq_eoi(d);
- raw_spin_unlock(&irq_controller_lock);
- }
-
writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
@@ -251,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
{
void __iomem *base = gic_dist_base(d);
unsigned int gicirq = gic_irq(d);
- unsigned long flags;
- int ret;
/* Interrupt configuration for SGIs can't be changed */
if (gicirq < 16)
@@ -263,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
type != IRQ_TYPE_EDGE_RISING)
return -EINVAL;
- raw_spin_lock_irqsave(&irq_controller_lock, flags);
-
- if (gic_arch_extn.irq_set_type)
- gic_arch_extn.irq_set_type(d, type);
-
- ret = gic_configure_irq(gicirq, type, base, NULL);
-
- raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-
- return ret;
-}
-
-static int gic_retrigger(struct irq_data *d)
-{
- if (gic_arch_extn.irq_retrigger)
- return gic_arch_extn.irq_retrigger(d);
-
- /* the genirq layer expects 0 if we can't retrigger in hardware */
- return 0;
+ return gic_configure_irq(gicirq, type, base, NULL);
}
#ifdef CONFIG_SMP
@@ -312,21 +261,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
}
#endif
-#ifdef CONFIG_PM
-static int gic_set_wake(struct irq_data *d, unsigned int on)
-{
- int ret = -ENXIO;
-
- if (gic_arch_extn.irq_set_wake)
- ret = gic_arch_extn.irq_set_wake(d, on);
-
- return ret;
-}
-
-#else
-#define gic_set_wake NULL
-#endif
-
static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
u32 irqstat, irqnr;
@@ -385,11 +319,9 @@ static struct irq_chip gic_chip = {
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
- .irq_retrigger = gic_retrigger,
#ifdef CONFIG_SMP
.irq_set_affinity = gic_set_affinity,
#endif
- .irq_set_wake = gic_set_wake,
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
};
@@ -1055,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
set_handle_irq(gic_handle_irq);
}
- gic_chip.flags |= gic_arch_extn.flags;
gic_dist_init(gic);
gic_cpu_init(gic);
gic_pm_init(gic);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 9eeea19..5503e43 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
switch (r) {
/* async */
- case -EINPROGRESS:
case -EBUSY:
wait_for_completion(&ctx->restart);
reinit_completion(&ctx->restart);
+ /* fall through */
+ case -EINPROGRESS:
ctx->req = NULL;
ctx->cc_sector++;
continue;
@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
struct crypt_config *cc = io->cc;
- if (error == -EINPROGRESS)
+ if (error == -EINPROGRESS) {
+ complete(&ctx->restart);
return;
+ }
if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
if (!atomic_dec_and_test(&ctx->cc_pending))
- goto done;
+ return;
if (bio_data_dir(io->base_bio) == READ)
kcryptd_crypt_read_done(io);
else
kcryptd_crypt_write_io_submit(io, 1);
-done:
- if (!completion_done(&ctx->restart))
- complete(&ctx->restart);
}
static void kcryptd_crypt(struct work_struct *work)
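The dm-crypt change above restores the standard async-crypto backlog handshake: a request submitted with CRYPTO_TFM_REQ_MAY_BACKLOG returns -EBUSY when the queue is full, the completion callback is first called with -EINPROGRESS once that backlogged request actually enters the queue, and only the second invocation carries the final status. A minimal sketch of the handshake outside dm-crypt (struct demo_ctx and the demo_* names are illustrative, not from the patch):

        struct demo_ctx {
                struct completion restart;
        };

        static void demo_done(struct crypto_async_request *req, int err)
        {
                struct demo_ctx *ctx = req->data;

                if (err == -EINPROGRESS) {
                        /* backlogged request has entered the queue */
                        complete(&ctx->restart);
                        return;
                }
                /* err is now the final status of the request */
        }

        /* submit side */
        err = crypto_ablkcipher_encrypt(req);
        if (err == -EBUSY) {
                /* backlogged: wait until it is queued, then treat it
                 * exactly like -EINPROGRESS */
                wait_for_completion(&ctx->restart);
                reinit_completion(&ctx->restart);
        }

This is the reason the switch now lets -EBUSY fall through into the -EINPROGRESS case and kcryptd_async_done() completes ctx->restart only for -EINPROGRESS.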
diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
index c8a18e4..720ceeb 100644
--- a/drivers/md/dm-ioctl.c
+++ b/drivers/md/dm-ioctl.c
@@ -1298,21 +1298,22 @@ static int table_load(struct dm_ioctl *param, size_t param_size)
goto err_unlock_md_type;
}
- if (dm_get_md_type(md) == DM_TYPE_NONE)
+ if (dm_get_md_type(md) == DM_TYPE_NONE) {
/* Initial table load: acquire type of table. */
dm_set_md_type(md, dm_table_get_type(t));
- else if (dm_get_md_type(md) != dm_table_get_type(t)) {
+
+ /* setup md->queue to reflect md's type (may block) */
+ r = dm_setup_md_queue(md);
+ if (r) {
+ DMWARN("unable to set up device queue for new table.");
+ goto err_unlock_md_type;
+ }
+ } else if (dm_get_md_type(md) != dm_table_get_type(t)) {
DMWARN("can't change device type after initial table load.");
r = -EINVAL;
goto err_unlock_md_type;
}
- /* setup md->queue to reflect md's type (may block) */
- r = dm_setup_md_queue(md);
- if (r) {
- DMWARN("unable to set up device queue for new table.");
- goto err_unlock_md_type;
- }
dm_unlock_md_type(md);
/* stage inactive table */
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index f8c7ca3..a930b72 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1082,18 +1082,26 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
dm_put(md);
}
-static void free_rq_clone(struct request *clone)
+static void free_rq_clone(struct request *clone, bool must_be_mapped)
{
struct dm_rq_target_io *tio = clone->end_io_data;
struct mapped_device *md = tio->md;
+ WARN_ON_ONCE(must_be_mapped && !clone->q);
+
blk_rq_unprep_clone(clone);
- if (clone->q->mq_ops)
+ if (md->type == DM_TYPE_MQ_REQUEST_BASED)
+ /* stacked on blk-mq queue(s) */
tio->ti->type->release_clone_rq(clone);
else if (!md->queue->mq_ops)
/* request_fn queue stacked on request_fn queue(s) */
free_clone_request(md, clone);
+ /*
+ * NOTE: for the blk-mq queue stacked on request_fn queue(s) case:
+ * no need to call free_clone_request() because we leverage blk-mq by
+ * allocating the clone at the end of the blk-mq pdu (see: clone_rq)
+ */
if (!md->queue->mq_ops)
free_rq_tio(tio);
@@ -1124,7 +1132,7 @@ static void dm_end_request(struct request *clone, int error)
rq->sense_len = clone->sense_len;
}
- free_rq_clone(clone);
+ free_rq_clone(clone, true);
if (!rq->q->mq_ops)
blk_end_request_all(rq, error);
else
@@ -1143,7 +1151,7 @@ static void dm_unprep_request(struct request *rq)
}
if (clone)
- free_rq_clone(clone);
+ free_rq_clone(clone, false);
}
/*
@@ -2662,9 +2670,6 @@ static int dm_init_request_based_queue(struct mapped_device *md)
{
struct request_queue *q = NULL;
- if (md->queue->elevator)
- return 0;
-
/* Fully initialize the queue */
q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
if (!q)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d4f31e1..593a024 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -4818,12 +4818,12 @@ static void md_free(struct kobject *ko)
if (mddev->sysfs_state)
sysfs_put(mddev->sysfs_state);
+ if (mddev->queue)
+ blk_cleanup_queue(mddev->queue);
if (mddev->gendisk) {
del_gendisk(mddev->gendisk);
put_disk(mddev->gendisk);
}
- if (mddev->queue)
- blk_cleanup_queue(mddev->queue);
kfree(mddev);
}
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.c b/drivers/media/platform/marvell-ccic/mcam-core.c
index 9c64b5d..110fd70 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.c
+++ b/drivers/media/platform/marvell-ccic/mcam-core.c
@@ -116,8 +116,8 @@ static struct mcam_format_struct {
.planar = false,
},
{
- .desc = "UYVY 4:2:2",
- .pixelformat = V4L2_PIX_FMT_UYVY,
+ .desc = "YVYU 4:2:2",
+ .pixelformat = V4L2_PIX_FMT_YVYU,
.mbus_code = MEDIA_BUS_FMT_YUYV8_2X8,
.bpp = 2,
.planar = false,
@@ -748,7 +748,7 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
switch (fmt->pixelformat) {
case V4L2_PIX_FMT_YUYV:
- case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
widthy = fmt->width * 2;
widthuv = 0;
break;
@@ -784,15 +784,15 @@ static void mcam_ctlr_image(struct mcam_camera *cam)
case V4L2_PIX_FMT_YUV420:
case V4L2_PIX_FMT_YVU420:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_420PL | C0_YUVE_YVYU, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_420PL | C0_YUVE_VYUY, C0_DF_MASK);
break;
case V4L2_PIX_FMT_YUYV:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_UYVY, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_NOSWAP, C0_DF_MASK);
break;
- case V4L2_PIX_FMT_UYVY:
+ case V4L2_PIX_FMT_YVYU:
mcam_reg_write_mask(cam, REG_CTRL0,
- C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_YUYV, C0_DF_MASK);
+ C0_DF_YUV | C0_YUV_PACKED | C0_YUVE_SWAP24, C0_DF_MASK);
break;
case V4L2_PIX_FMT_JPEG:
mcam_reg_write_mask(cam, REG_CTRL0,
diff --git a/drivers/media/platform/marvell-ccic/mcam-core.h b/drivers/media/platform/marvell-ccic/mcam-core.h
index aa0c6ea..7ffdf4d 100644
--- a/drivers/media/platform/marvell-ccic/mcam-core.h
+++ b/drivers/media/platform/marvell-ccic/mcam-core.h
@@ -330,10 +330,10 @@ int mccic_resume(struct mcam_camera *cam);
#define C0_YUVE_YVYU 0x00010000 /* Y1CrY0Cb */
#define C0_YUVE_VYUY 0x00020000 /* CrY1CbY0 */
#define C0_YUVE_UYVY 0x00030000 /* CbY1CrY0 */
-#define C0_YUVE_XYUV 0x00000000 /* 420: .YUV */
-#define C0_YUVE_XYVU 0x00010000 /* 420: .YVU */
-#define C0_YUVE_XUVY 0x00020000 /* 420: .UVY */
-#define C0_YUVE_XVUY 0x00030000 /* 420: .VUY */
+#define C0_YUVE_NOSWAP 0x00000000 /* no bytes swapping */
+#define C0_YUVE_SWAP13 0x00010000 /* swap byte 1 and 3 */
+#define C0_YUVE_SWAP24 0x00020000 /* swap byte 2 and 4 */
+#define C0_YUVE_SWAP1324 0x00030000 /* swap bytes 1&3 and 2&4 */
/* Bayer bits 18,19 if needed */
#define C0_EOF_VSYNC 0x00400000 /* Generate EOF by VSYNC */
#define C0_VEDGE_CTRL 0x00800000 /* Detect falling edge of VSYNC */
diff --git a/drivers/media/platform/soc_camera/rcar_vin.c b/drivers/media/platform/soc_camera/rcar_vin.c
index 9351f64..6460f8e 100644
--- a/drivers/media/platform/soc_camera/rcar_vin.c
+++ b/drivers/media/platform/soc_camera/rcar_vin.c
@@ -135,6 +135,8 @@
#define VIN_MAX_WIDTH 2048
#define VIN_MAX_HEIGHT 2048
+#define TIMEOUT_MS 100
+
enum chip_id {
RCAR_GEN2,
RCAR_H1,
@@ -820,7 +822,10 @@ static void rcar_vin_wait_stop_streaming(struct rcar_vin_priv *priv)
if (priv->state == STOPPING) {
priv->request_to_stop = true;
spin_unlock_irq(&priv->lock);
- wait_for_completion(&priv->capture_stop);
+ if (!wait_for_completion_timeout(
+ &priv->capture_stop,
+ msecs_to_jiffies(TIMEOUT_MS)))
+ priv->state = STOPPED;
spin_lock_irq(&priv->lock);
}
}
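The stop path now waits with a bound: wait_for_completion_timeout() returns 0 when the timeout expires and the number of remaining jiffies otherwise, which is what lets the driver force its state machine to STOPPED instead of blocking forever on a completion that may never arrive. A tiny usage sketch of the same pattern (illustrative only):

        DECLARE_COMPLETION_ONSTACK(done);

        /* ... start the operation that will eventually call complete(&done) ... */

        if (!wait_for_completion_timeout(&done, msecs_to_jiffies(TIMEOUT_MS)))
                pr_warn("operation did not finish within %d ms, recovering\n",
                        TIMEOUT_MS);    /* returned 0: timed out */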
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 2c25271..60f7141 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
md->reset_done &= ~type;
}
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+ struct mmc_blk_data *md = mq->data;
+ /*
+ * If this is an RPMB partition access, return true
+ */
+ if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+ return true;
+
+ return false;
+}
+
static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
{
struct mmc_blk_data *md = mq->data;
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 236d194..8efa368 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
return BLKPREP_KILL;
}
- if (mq && mmc_card_removed(mq->card))
+ if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
return BLKPREP_KILL;
req->cmd_flags |= REQ_DONTPREP;
diff --git a/drivers/mmc/card/queue.h b/drivers/mmc/card/queue.h
index 5752d50..99e6521 100644
--- a/drivers/mmc/card/queue.h
+++ b/drivers/mmc/card/queue.h
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
extern void mmc_packed_clean(struct mmc_queue *);
+extern int mmc_access_rpmb(struct mmc_queue *);
+
#endif
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index c296bc0..92e7671 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
switch (mode) {
case PM_HIBERNATION_PREPARE:
case PM_SUSPEND_PREPARE:
+ case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 38b2926..5f5adaf 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host)
host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
/* Forward link the descriptor list */
- for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+ for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
p->des3 = cpu_to_le32(host->sg_dma +
(sizeof(struct idmac_desc) * (i + 1)));
+ p->des1 = 0;
+ }
/* Set the last descriptor as the end-of-ring descriptor */
p->des3 = cpu_to_le32(host->sg_dma);
@@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
int gpio_cd = mmc_gpio_get_cd(mmc);
/* Use platform get_cd function, else try onboard card detect */
- if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+ if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+ (mmc->caps & MMC_CAP_NONREMOVABLE))
present = 1;
else if (!IS_ERR_VALUE(gpio_cd))
present = gpio_cd;
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 2b6ef6b..7eff087 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -1408,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
host = mmc_priv(mmc);
host->mmc = mmc;
host->addr = reg;
- host->timeout = msecs_to_jiffies(1000);
+ host->timeout = msecs_to_jiffies(10000);
host->ccs_enable = !pd || !pd->ccs_unsupported;
host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 78dde56..d5fe5d5 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -82,6 +82,8 @@
#include <net/bond_3ad.h>
#include <net/bond_alb.h>
+#include "bonding_priv.h"
+
/*---------------------------- Module parameters ----------------------------*/
/* monitor all links that often (in milliseconds). <=0 disables monitoring */
@@ -4542,6 +4544,8 @@ unsigned int bond_get_num_tx_queues(void)
int bond_create(struct net *net, const char *name)
{
struct net_device *bond_dev;
+ struct bonding *bond;
+ struct alb_bond_info *bond_info;
int res;
rtnl_lock();
@@ -4555,6 +4559,14 @@ int bond_create(struct net *net, const char *name)
return -ENOMEM;
}
+ /*
+ * Initialize rx_hashtbl_used_head to RLB_NULL_INDEX.
+ * It is set to 0 by default, which is wrong.
+ */
+ bond = netdev_priv(bond_dev);
+ bond_info = &(BOND_ALB_INFO(bond));
+ bond_info->rx_hashtbl_used_head = RLB_NULL_INDEX;
+
dev_net_set(bond_dev, net);
bond_dev->rtnl_link_ops = &bond_link_ops;
diff --git a/drivers/net/bonding/bond_procfs.c b/drivers/net/bonding/bond_procfs.c
index 62694cf..b20b35ac 100644
--- a/drivers/net/bonding/bond_procfs.c
+++ b/drivers/net/bonding/bond_procfs.c
@@ -4,6 +4,7 @@
#include <net/netns/generic.h>
#include <net/bonding.h>
+#include "bonding_priv.h"
static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
diff --git a/drivers/net/bonding/bonding_priv.h b/drivers/net/bonding/bonding_priv.h
new file mode 100644
index 0000000..5a4d81a
--- /dev/null
+++ b/drivers/net/bonding/bonding_priv.h
@@ -0,0 +1,25 @@
+/*
+ * Bond several ethernet interfaces into a Cisco, running 'Etherchannel'.
+ *
+ * Portions are (c) Copyright 1995 Simon "Guru Aleph-Null" Janes
+ * NCM: Network and Communications Management, Inc.
+ *
+ * BUT, I'm the one who modified it for ethernet, so:
+ * (c) Copyright 1999, Thomas Davis, tadavis@lbl.gov
+ *
+ * This software may be used and distributed according to the terms
+ * of the GNU Public License, incorporated herein by reference.
+ *
+ */
+
+#ifndef _BONDING_PRIV_H
+#define _BONDING_PRIV_H
+
+#define DRV_VERSION "3.7.1"
+#define DRV_RELDATE "April 27, 2011"
+#define DRV_NAME "bonding"
+#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
+
+#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
+
+#endif
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 58808f6..e8c96b8 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -112,7 +112,7 @@ config PCH_CAN
config CAN_GRCAN
tristate "Aeroflex Gaisler GRCAN and GRHCAN CAN devices"
- depends on OF
+ depends on OF && HAS_DMA
---help---
Say Y here if you want to use Aeroflex Gaisler GRCAN or GRHCAN.
Note that the driver supports little endian, even though little
diff --git a/drivers/net/can/usb/kvaser_usb.c b/drivers/net/can/usb/kvaser_usb.c
index 4643914..8b17a90 100644
--- a/drivers/net/can/usb/kvaser_usb.c
+++ b/drivers/net/can/usb/kvaser_usb.c
@@ -1102,7 +1102,7 @@ static void kvaser_usb_rx_can_err(const struct kvaser_usb_net_priv *priv,
if (msg->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME |
MSG_FLAG_NERR)) {
- netdev_err(priv->netdev, "Unknow error (flags: 0x%02x)\n",
+ netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n",
msg->u.rx_can_header.flag);
stats->rx_errors++;
diff --git a/drivers/net/ethernet/8390/etherh.c b/drivers/net/ethernet/8390/etherh.c
index b36ee9e..d686b9c 100644
--- a/drivers/net/ethernet/8390/etherh.c
+++ b/drivers/net/ethernet/8390/etherh.c
@@ -523,7 +523,7 @@ static int etherh_addr(char *addr, struct expansion_card *ec)
char *s;
if (!ecard_readchunk(&cd, ec, 0xf5, 0)) {
- printk(KERN_ERR "%s: unable to read podule description string\n",
+ printk(KERN_ERR "%s: unable to read module description string\n",
dev_name(&ec->dev));
goto no_addr;
}
diff --git a/drivers/net/ethernet/altera/altera_msgdmahw.h b/drivers/net/ethernet/altera/altera_msgdmahw.h
index eba070f..89cd11d 100644
--- a/drivers/net/ethernet/altera/altera_msgdmahw.h
+++ b/drivers/net/ethernet/altera/altera_msgdmahw.h
@@ -58,15 +58,12 @@ struct msgdma_extended_desc {
/* Tx buffer control flags
*/
#define MSGDMA_DESC_CTL_TX_FIRST (MSGDMA_DESC_CTL_GEN_SOP | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
-#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_TR_ERR_IRQ | \
- MSGDMA_DESC_CTL_GO)
+#define MSGDMA_DESC_CTL_TX_MIDDLE (MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_LAST (MSGDMA_DESC_CTL_GEN_EOP | \
MSGDMA_DESC_CTL_TR_COMP_IRQ | \
- MSGDMA_DESC_CTL_TR_ERR_IRQ | \
MSGDMA_DESC_CTL_GO)
#define MSGDMA_DESC_CTL_TX_SINGLE (MSGDMA_DESC_CTL_GEN_SOP | \
diff --git a/drivers/net/ethernet/altera/altera_tse_main.c b/drivers/net/ethernet/altera/altera_tse_main.c
index 90a7630..da48e66 100644
--- a/drivers/net/ethernet/altera/altera_tse_main.c
+++ b/drivers/net/ethernet/altera/altera_tse_main.c
@@ -391,6 +391,12 @@ static int tse_rx(struct altera_tse_private *priv, int limit)
"RCV pktstatus %08X pktlength %08X\n",
pktstatus, pktlength);
+ /* DMA transfer from TSE starts with 2 additional bytes for
+ * IP payload alignment. Status returned by get_rx_status()
+ * contains DMA transfer length. Packet is 2 bytes shorter.
+ */
+ pktlength -= 2;
+
count++;
next_entry = (++priv->rx_cons) % priv->rx_ring_size;
@@ -777,6 +783,8 @@ static int init_phy(struct net_device *dev)
struct altera_tse_private *priv = netdev_priv(dev);
struct phy_device *phydev;
struct device_node *phynode;
+ bool fixed_link = false;
+ int rc = 0;
/* Avoid init phy in case of no phy present */
if (!priv->phy_iface)
@@ -789,13 +797,32 @@ static int init_phy(struct net_device *dev)
phynode = of_parse_phandle(priv->device->of_node, "phy-handle", 0);
if (!phynode) {
- netdev_dbg(dev, "no phy-handle found\n");
- if (!priv->mdio) {
- netdev_err(dev,
- "No phy-handle nor local mdio specified\n");
- return -ENODEV;
+ /* check if a fixed-link is defined in device-tree */
+ if (of_phy_is_fixed_link(priv->device->of_node)) {
+ rc = of_phy_register_fixed_link(priv->device->of_node);
+ if (rc < 0) {
+ netdev_err(dev, "cannot register fixed PHY\n");
+ return rc;
+ }
+
+ /* In the case of a fixed PHY, the DT node associated
+ * with the PHY is the Ethernet MAC DT node.
+ */
+ phynode = of_node_get(priv->device->of_node);
+ fixed_link = true;
+
+ netdev_dbg(dev, "fixed-link detected\n");
+ phydev = of_phy_connect(dev, phynode,
+ &altera_tse_adjust_link,
+ 0, priv->phy_iface);
+ } else {
+ netdev_dbg(dev, "no phy-handle found\n");
+ if (!priv->mdio) {
+ netdev_err(dev, "No phy-handle nor local mdio specified\n");
+ return -ENODEV;
+ }
+ phydev = connect_local_phy(dev);
}
- phydev = connect_local_phy(dev);
} else {
netdev_dbg(dev, "phy-handle found\n");
phydev = of_phy_connect(dev, phynode,
@@ -819,10 +846,10 @@ static int init_phy(struct net_device *dev)
/* Broken HW is sometimes missing the pull-up resistor on the
* MDIO line, which results in reads to non-existent devices returning
* 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
- * device as well.
+ * device as well. If a fixed-link is used the phy_id is always 0.
* Note: phydev->phy_id is the result of reading the UID PHY registers.
*/
- if (phydev->phy_id == 0) {
+ if ((phydev->phy_id == 0) && !fixed_link) {
netdev_err(dev, "Bad PHY UID 0x%08x\n", phydev->phy_id);
phy_disconnect(phydev);
return -ENODEV;
diff --git a/drivers/net/ethernet/amd/Kconfig b/drivers/net/ethernet/amd/Kconfig
index c638c85..089c269 100644
--- a/drivers/net/ethernet/amd/Kconfig
+++ b/drivers/net/ethernet/amd/Kconfig
@@ -179,7 +179,7 @@ config SUNLANCE
config AMD_XGBE
tristate "AMD 10GbE Ethernet driver"
- depends on (OF_NET || ACPI) && HAS_IOMEM
+ depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
select PHYLIB
select AMD_XGBE_PHY
select BITREVERSE
diff --git a/drivers/net/ethernet/arc/Kconfig b/drivers/net/ethernet/arc/Kconfig
index 8e262e2..dea29ee 100644
--- a/drivers/net/ethernet/arc/Kconfig
+++ b/drivers/net/ethernet/arc/Kconfig
@@ -25,8 +25,7 @@ config ARC_EMAC_CORE
config ARC_EMAC
tristate "ARC EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ
- depends on OF_NET
+ depends on OF_IRQ && OF_NET && HAS_DMA
---help---
On some legacy ARC (Synopsys) FPGA boards such as ARCAngel4/ML50x
non-standard on-chip ethernet device ARC EMAC 10/100 is used.
@@ -35,7 +34,7 @@ config ARC_EMAC
config EMAC_ROCKCHIP
tristate "Rockchip EMAC support"
select ARC_EMAC_CORE
- depends on OF_IRQ && OF_NET && REGULATOR
+ depends on OF_IRQ && OF_NET && REGULATOR && HAS_DMA
---help---
Support for Rockchip RK3066/RK3188 EMAC ethernet controllers.
This selects Rockchip SoC glue layer support for the
diff --git a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
index 74df16a..88a6271 100644
--- a/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
+++ b/drivers/net/ethernet/atheros/atl1e/atl1e_hw.h
@@ -129,7 +129,7 @@ s32 atl1e_restart_autoneg(struct atl1e_hw *hw);
#define TWSI_CTRL_LD_SLV_ADDR_SHIFT 8
#define TWSI_CTRL_SW_LDSTART 0x800
#define TWSI_CTRL_HW_LDSTART 0x1000
-#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x0x7F
+#define TWSI_CTRL_SMB_SLV_ADDR_MASK 0x7F
#define TWSI_CTRL_SMB_SLV_ADDR_SHIFT 15
#define TWSI_CTRL_LD_EXIST 0x400000
#define TWSI_CTRL_READ_FREQ_SEL_MASK 0x3
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.h b/drivers/net/ethernet/broadcom/bcmsysport.h
index 7e3d87a..e2c043e 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.h
+++ b/drivers/net/ethernet/broadcom/bcmsysport.h
@@ -543,7 +543,7 @@ struct bcm_sysport_tx_counters {
u32 jbr; /* RO # of xmited jabber count*/
u32 bytes; /* RO # of xmited byte count */
u32 pok; /* RO # of xmited good pkt */
- u32 uc; /* RO (0x0x4f0)# of xmited unitcast pkt */
+ u32 uc; /* RO (0x4f0) # of xmited unicast pkt */
};
struct bcm_sysport_mib {
diff --git a/drivers/net/ethernet/broadcom/bgmac.c b/drivers/net/ethernet/broadcom/bgmac.c
index de77d3a..21e3c38 100644
--- a/drivers/net/ethernet/broadcom/bgmac.c
+++ b/drivers/net/ethernet/broadcom/bgmac.c
@@ -1260,7 +1260,7 @@ static int bgmac_poll(struct napi_struct *napi, int weight)
/* Poll again if more events arrived in the meantime */
if (bgmac_read(bgmac, BGMAC_INT_STATUS) & (BGMAC_IS_TX0 | BGMAC_IS_RX))
- return handled;
+ return weight;
if (handled < weight) {
napi_complete(napi);
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index 355d5fe..a3b0f7a 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -521,6 +521,7 @@ struct bnx2x_fp_txdata {
};
enum bnx2x_tpa_mode_t {
+ TPA_MODE_DISABLED,
TPA_MODE_LRO,
TPA_MODE_GRO
};
@@ -589,7 +590,6 @@ struct bnx2x_fastpath {
/* TPA related */
struct bnx2x_agg_info *tpa_info;
- u8 disable_tpa;
#ifdef BNX2X_STOP_ON_ERROR
u64 tpa_queue_used;
#endif
@@ -1545,9 +1545,7 @@ struct bnx2x {
#define USING_MSIX_FLAG (1 << 5)
#define USING_MSI_FLAG (1 << 6)
#define DISABLE_MSI_FLAG (1 << 7)
-#define TPA_ENABLE_FLAG (1 << 8)
#define NO_MCP_FLAG (1 << 9)
-#define GRO_ENABLE_FLAG (1 << 10)
#define MF_FUNC_DIS (1 << 11)
#define OWN_CNIC_IRQ (1 << 12)
#define NO_ISCSI_OOO_FLAG (1 << 13)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 2f63467..a8bb8f6 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -947,10 +947,10 @@ static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
u16 frag_size, pages;
#ifdef BNX2X_STOP_ON_ERROR
/* sanity check */
- if (fp->disable_tpa &&
+ if (fp->mode == TPA_MODE_DISABLED &&
(CQE_TYPE_START(cqe_fp_type) ||
CQE_TYPE_STOP(cqe_fp_type)))
- BNX2X_ERR("START/STOP packet while disable_tpa type %x\n",
+ BNX2X_ERR("START/STOP packet while TPA disabled, type %x\n",
CQE_TYPE(cqe_fp_type));
#endif
@@ -1396,7 +1396,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
DP(NETIF_MSG_IFUP,
"mtu %d rx_buf_size %d\n", bp->dev->mtu, fp->rx_buf_size);
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
/* Fill the per-aggregation pool */
for (i = 0; i < MAX_AGG_QS(bp); i++) {
struct bnx2x_agg_info *tpa_info =
@@ -1410,7 +1410,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
BNX2X_ERR("Failed to allocate TPA skb pool for queue[%d] - disabling TPA on this queue!\n",
j);
bnx2x_free_tpa_pool(bp, fp, i);
- fp->disable_tpa = 1;
+ fp->mode = TPA_MODE_DISABLED;
break;
}
dma_unmap_addr_set(first_buf, mapping, 0);
@@ -1438,7 +1438,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
ring_prod);
bnx2x_free_tpa_pool(bp, fp,
MAX_AGG_QS(bp));
- fp->disable_tpa = 1;
+ fp->mode = TPA_MODE_DISABLED;
ring_prod = 0;
break;
}
@@ -1560,7 +1560,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
bnx2x_free_rx_bds(fp);
- if (!fp->disable_tpa)
+ if (fp->mode != TPA_MODE_DISABLED)
bnx2x_free_tpa_pool(bp, fp, MAX_AGG_QS(bp));
}
}
@@ -2477,17 +2477,19 @@ static void bnx2x_bz_fp(struct bnx2x *bp, int index)
/* set the tpa flag for each queue. The tpa flag determines the queue
* minimal size so it must be set prior to queue memory allocation
*/
- fp->disable_tpa = !(bp->flags & TPA_ENABLE_FLAG ||
- (bp->flags & GRO_ENABLE_FLAG &&
- bnx2x_mtu_allows_gro(bp->dev->mtu)));
- if (bp->flags & TPA_ENABLE_FLAG)
+ if (bp->dev->features & NETIF_F_LRO)
fp->mode = TPA_MODE_LRO;
- else if (bp->flags & GRO_ENABLE_FLAG)
+ else if (bp->dev->features & NETIF_F_GRO &&
+ bnx2x_mtu_allows_gro(bp->dev->mtu))
fp->mode = TPA_MODE_GRO;
+ else
+ fp->mode = TPA_MODE_DISABLED;
- /* We don't want TPA on an FCoE L2 ring */
- if (IS_FCOE_FP(fp))
- fp->disable_tpa = 1;
+ /* We don't want TPA if it's disabled in bp
+ * or if this is an FCoE L2 ring.
+ */
+ if (bp->disable_tpa || IS_FCOE_FP(fp))
+ fp->mode = TPA_MODE_DISABLED;
}
int bnx2x_load_cnic(struct bnx2x *bp)
@@ -2608,7 +2610,7 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
/*
* Zero fastpath structures preserving invariants like napi, which are
* allocated only once, fp index, max_cos, bp pointer.
- * Also set fp->disable_tpa and txdata_ptr.
+ * Also set fp->mode and txdata_ptr.
*/
DP(NETIF_MSG_IFUP, "num queues: %d", bp->num_queues);
for_each_queue(bp, i)
@@ -3247,7 +3249,7 @@ int bnx2x_low_latency_recv(struct napi_struct *napi)
if ((bp->state == BNX2X_STATE_CLOSED) ||
(bp->state == BNX2X_STATE_ERROR) ||
- (bp->flags & (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG)))
+ (bp->dev->features & (NETIF_F_LRO | NETIF_F_GRO)))
return LL_FLUSH_FAILED;
if (!bnx2x_fp_lock_poll(fp))
@@ -4543,7 +4545,7 @@ alloc_mem_err:
* In these cases we disable the queue
* Min size is different for OOO, TPA and non-TPA queues
*/
- if (ring_size < (fp->disable_tpa ?
+ if (ring_size < (fp->mode == TPA_MODE_DISABLED ?
MIN_RX_SIZE_NONTPA : MIN_RX_SIZE_TPA)) {
/* release memory allocated for this queue */
bnx2x_free_fp_mem_at(bp, index);
@@ -4809,66 +4811,71 @@ netdev_features_t bnx2x_fix_features(struct net_device *dev,
{
struct bnx2x *bp = netdev_priv(dev);
+ if (pci_num_vf(bp->pdev)) {
+ netdev_features_t changed = dev->features ^ features;
+
+ /* Revert the requested changes in features if they
+ * would require internal reload of PF in bnx2x_set_features().
+ */
+ if (!(features & NETIF_F_RXCSUM) && !bp->disable_tpa) {
+ features &= ~NETIF_F_RXCSUM;
+ features |= dev->features & NETIF_F_RXCSUM;
+ }
+
+ if (changed & NETIF_F_LOOPBACK) {
+ features &= ~NETIF_F_LOOPBACK;
+ features |= dev->features & NETIF_F_LOOPBACK;
+ }
+ }
+
/* TPA requires Rx CSUM offloading */
if (!(features & NETIF_F_RXCSUM)) {
features &= ~NETIF_F_LRO;
features &= ~NETIF_F_GRO;
}
- /* Note: do not disable SW GRO in kernel when HW GRO is off */
- if (bp->disable_tpa)
- features &= ~NETIF_F_LRO;
-
return features;
}
int bnx2x_set_features(struct net_device *dev, netdev_features_t features)
{
struct bnx2x *bp = netdev_priv(dev);
- u32 flags = bp->flags;
- u32 changes;
+ netdev_features_t changes = features ^ dev->features;
bool bnx2x_reload = false;
+ int rc;
- if (features & NETIF_F_LRO)
- flags |= TPA_ENABLE_FLAG;
- else
- flags &= ~TPA_ENABLE_FLAG;
-
- if (features & NETIF_F_GRO)
- flags |= GRO_ENABLE_FLAG;
- else
- flags &= ~GRO_ENABLE_FLAG;
-
- if (features & NETIF_F_LOOPBACK) {
- if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
- bp->link_params.loopback_mode = LOOPBACK_BMAC;
- bnx2x_reload = true;
- }
- } else {
- if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
- bp->link_params.loopback_mode = LOOPBACK_NONE;
- bnx2x_reload = true;
+ /* VFs or non-SRIOV PFs should be able to change loopback feature */
+ if (!pci_num_vf(bp->pdev)) {
+ if (features & NETIF_F_LOOPBACK) {
+ if (bp->link_params.loopback_mode != LOOPBACK_BMAC) {
+ bp->link_params.loopback_mode = LOOPBACK_BMAC;
+ bnx2x_reload = true;
+ }
+ } else {
+ if (bp->link_params.loopback_mode != LOOPBACK_NONE) {
+ bp->link_params.loopback_mode = LOOPBACK_NONE;
+ bnx2x_reload = true;
+ }
}
}
- changes = flags ^ bp->flags;
-
/* if GRO is changed while LRO is enabled, don't force a reload */
- if ((changes & GRO_ENABLE_FLAG) && (flags & TPA_ENABLE_FLAG))
- changes &= ~GRO_ENABLE_FLAG;
+ if ((changes & NETIF_F_GRO) && (features & NETIF_F_LRO))
+ changes &= ~NETIF_F_GRO;
/* if GRO is changed while HW TPA is off, don't force a reload */
- if ((changes & GRO_ENABLE_FLAG) && bp->disable_tpa)
- changes &= ~GRO_ENABLE_FLAG;
+ if ((changes & NETIF_F_GRO) && bp->disable_tpa)
+ changes &= ~NETIF_F_GRO;
if (changes)
bnx2x_reload = true;
- bp->flags = flags;
-
if (bnx2x_reload) {
- if (bp->recovery_state == BNX2X_RECOVERY_DONE)
- return bnx2x_reload_if_running(dev);
+ if (bp->recovery_state == BNX2X_RECOVERY_DONE) {
+ dev->features = features;
+ rc = bnx2x_reload_if_running(dev);
+ return rc ? rc : 1;
+ }
/* else: bnx2x_nic_load() will be called at end of recovery */
}
@@ -4931,6 +4938,11 @@ int bnx2x_resume(struct pci_dev *pdev)
}
bp = netdev_priv(dev);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+ return -EPERM;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
BNX2X_ERR("Handling parity error recovery. Try again later\n");
return -EAGAIN;
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
index adcacda..d7a7175 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
@@ -969,7 +969,7 @@ static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
{
int i;
- if (fp->disable_tpa)
+ if (fp->mode == TPA_MODE_DISABLED)
return;
for (i = 0; i < last; i++)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index e3d853c..48ed005 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1843,6 +1843,12 @@ static int bnx2x_set_ringparam(struct net_device *dev,
"set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
ering->rx_pending, ering->tx_pending);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "VFs are enabled, can not change ring parameters\n");
+ return -EPERM;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
DP(BNX2X_MSG_ETHTOOL,
"Handling parity error recovery. Try again later\n");
@@ -2899,6 +2905,12 @@ static void bnx2x_self_test(struct net_device *dev,
u8 is_serdes, link_up;
int rc, cnt = 0;
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV,
+ "VFs are enabled, can not perform self test\n");
+ return;
+ }
+
if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
netdev_err(bp->dev,
"Handling parity error recovery. Try again later\n");
@@ -3468,6 +3480,11 @@ static int bnx2x_set_channels(struct net_device *dev,
channels->rx_count, channels->tx_count, channels->other_count,
channels->combined_count);
+ if (pci_num_vf(bp->pdev)) {
+ DP(BNX2X_MSG_IOV, "VFs are enabled, can not set channels\n");
+ return -EPERM;
+ }
+
/* We don't support separate rx / tx channels.
* We don't allow setting 'other' channels.
*/
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index b9f85fcc..556dcc1 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -3128,7 +3128,7 @@ static unsigned long bnx2x_get_q_flags(struct bnx2x *bp,
__set_bit(BNX2X_Q_FLG_FORCE_DEFAULT_PRI, &flags);
}
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
__set_bit(BNX2X_Q_FLG_TPA, &flags);
__set_bit(BNX2X_Q_FLG_TPA_IPV6, &flags);
if (fp->mode == TPA_MODE_GRO)
@@ -3176,7 +3176,7 @@ static void bnx2x_pf_rx_q_prep(struct bnx2x *bp,
u16 sge_sz = 0;
u16 tpa_agg_size = 0;
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
pause->sge_th_lo = SGE_TH_LO(bp);
pause->sge_th_hi = SGE_TH_HI(bp);
@@ -3304,7 +3304,7 @@ static void bnx2x_pf_init(struct bnx2x *bp)
/* This flag is relevant for E1x only.
* E2 doesn't have a TPA configuration in a function level.
*/
- flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
+ flags |= (bp->dev->features & NETIF_F_LRO) ? FUNC_FLG_TPA : 0;
func_init.func_flgs = flags;
func_init.pf_id = BP_FUNC(bp);
@@ -12107,11 +12107,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
/* Set TPA flags */
if (bp->disable_tpa) {
- bp->flags &= ~(TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
+ bp->dev->hw_features &= ~NETIF_F_LRO;
bp->dev->features &= ~NETIF_F_LRO;
- } else {
- bp->flags |= (TPA_ENABLE_FLAG | GRO_ENABLE_FLAG);
- bp->dev->features |= NETIF_F_LRO;
}
if (CHIP_IS_E1(bp))
@@ -13371,6 +13368,12 @@ static int bnx2x_init_one(struct pci_dev *pdev,
bool is_vf;
int cnic_cnt;
+ /* Management FW 'remembers' living interfaces. Allow it some time
+ * to forget previously living interfaces, allowing a proper re-load.
+ */
+ if (is_kdump_kernel())
+ msleep(5000);
+
/* An estimated maximum supported CoS number according to the chip
* version.
* We will try to roughly estimate the maximum number of CoSes this chip
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
index 15b2d16..06b8c0d 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
@@ -594,7 +594,7 @@ int bnx2x_vfpf_setup_q(struct bnx2x *bp, struct bnx2x_fastpath *fp,
bnx2x_vfpf_prep(bp, &req->first_tlv, CHANNEL_TLV_SETUP_Q, sizeof(*req));
/* select tpa mode to request */
- if (!fp->disable_tpa) {
+ if (fp->mode != TPA_MODE_DISABLED) {
flags |= VFPF_QUEUE_FLG_TPA;
flags |= VFPF_QUEUE_FLG_TPA_IPV6;
if (fp->mode == TPA_MODE_GRO)
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 1270b18..069952f 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -18129,7 +18129,9 @@ static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
rtnl_lock();
- tp->pcierr_recovery = true;
+ /* We needn't recover from permanent error */
+ if (state == pci_channel_io_frozen)
+ tp->pcierr_recovery = true;
/* We probably don't have netdev yet */
if (!netdev || !netif_running(netdev))
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index 9f53872..4104d49 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -707,6 +707,9 @@ static void gem_rx_refill(struct macb *bp)
/* properly align Ethernet header */
skb_reserve(skb, NET_IP_ALIGN);
+ } else {
+ bp->rx_ring[entry].addr &= ~MACB_BIT(RX_USED);
+ bp->rx_ring[entry].ctrl = 0;
}
}
@@ -1473,9 +1476,9 @@ static void macb_init_rings(struct macb *bp)
for (i = 0; i < TX_RING_SIZE; i++) {
bp->queues[0].tx_ring[i].addr = 0;
bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
- bp->queues[0].tx_head = 0;
- bp->queues[0].tx_tail = 0;
}
+ bp->queues[0].tx_head = 0;
+ bp->queues[0].tx_tail = 0;
bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
bp->rx_tail = 0;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index 5959e3a..e8578a7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -492,7 +492,7 @@ int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
memoffset = (mtype * (edc_size * 1024 * 1024));
else {
mc_size = EXT_MEM0_SIZE_G(t4_read_reg(adap,
- MA_EXT_MEMORY1_BAR_A));
+ MA_EXT_MEMORY0_BAR_A));
memoffset = (MEM_MC0 * edc_size + mc_size) * 1024 * 1024;
}
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index fb0bc3c..a6dcbf8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4846,7 +4846,8 @@ err:
}
static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
+ struct net_device *dev, u32 filter_mask,
+ int nlflags)
{
struct be_adapter *adapter = netdev_priv(dev);
int status = 0;
@@ -4868,7 +4869,7 @@ static int be_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
hsw_mode == PORT_FWD_TYPE_VEPA ?
BRIDGE_MODE_VEPA : BRIDGE_MODE_VEB,
- 0, 0);
+ 0, 0, nlflags);
}
#ifdef CONFIG_BE2NET_VXLAN
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index f6a3a7a..66d47e4 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -988,7 +988,10 @@ fec_restart(struct net_device *ndev)
rcntl |= 0x40000000 | 0x00000020;
/* RGMII, RMII or MII */
- if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII)
+ if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+ fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
rcntl |= (1 << 6);
else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
rcntl |= (1 << 8);
diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c
index 291c870..2a0dc12 100644
--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c
+++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c
@@ -3347,7 +3347,7 @@ static int ehea_register_memory_hooks(void)
{
int ret = 0;
- if (atomic_inc_and_test(&ehea_memory_hooks_registered))
+ if (atomic_inc_return(&ehea_memory_hooks_registered) > 1)
return 0;
ret = ehea_create_busmap();
@@ -3381,12 +3381,14 @@ out3:
out2:
unregister_reboot_notifier(&ehea_reboot_nb);
out:
+ atomic_dec(&ehea_memory_hooks_registered);
return ret;
}
static void ehea_unregister_memory_hooks(void)
{
- if (atomic_read(&ehea_memory_hooks_registered))
+ /* Only remove the hooks if we've registered them */
+ if (atomic_read(&ehea_memory_hooks_registered) == 0)
return;
unregister_reboot_notifier(&ehea_reboot_nb);
diff --git a/drivers/net/ethernet/ibm/ibmveth.c b/drivers/net/ethernet/ibm/ibmveth.c
index cd7675a..1813476 100644
--- a/drivers/net/ethernet/ibm/ibmveth.c
+++ b/drivers/net/ethernet/ibm/ibmveth.c
@@ -1238,7 +1238,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
return -EINVAL;
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
- if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size)
+ if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size)
break;
if (i == IBMVETH_NUM_BUFF_POOLS)
@@ -1257,7 +1257,7 @@ static int ibmveth_change_mtu(struct net_device *dev, int new_mtu)
for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
adapter->rx_buff_pool[i].active = 1;
- if (new_mtu_oh < adapter->rx_buff_pool[i].buff_size) {
+ if (new_mtu_oh <= adapter->rx_buff_pool[i].buff_size) {
dev->mtu = new_mtu;
vio_cmo_set_dev_desired(viodev,
ibmveth_get_desired_dma
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 24481cd..a54c144 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -8053,10 +8053,10 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
#ifdef HAVE_BRIDGE_FILTER
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 __always_unused filter_mask)
+ u32 __always_unused filter_mask, int nlflags)
#else
static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev)
+ struct net_device *dev, int nlflags)
#endif /* HAVE_BRIDGE_FILTER */
{
struct i40e_netdev_priv *np = netdev_priv(dev);
@@ -8078,7 +8078,8 @@ static int i40e_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
if (!veb)
return 0;
- return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode);
+ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, veb->bridge_mode,
+ nlflags);
}
#endif /* HAVE_BRIDGE_ATTRIBS */
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index d3f4b0c..5be12a0 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8044,7 +8044,7 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask)
+ u32 filter_mask, int nlflags)
{
struct ixgbe_adapter *adapter = netdev_priv(dev);
@@ -8052,7 +8052,7 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
return 0;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev,
- adapter->bridge_mode, 0, 0);
+ adapter->bridge_mode, 0, 0, nlflags);
}
static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev)
diff --git a/drivers/net/ethernet/marvell/pxa168_eth.c b/drivers/net/ethernet/marvell/pxa168_eth.c
index af829c5..7ace07d 100644
--- a/drivers/net/ethernet/marvell/pxa168_eth.c
+++ b/drivers/net/ethernet/marvell/pxa168_eth.c
@@ -1508,7 +1508,8 @@ static int pxa168_eth_probe(struct platform_device *pdev)
np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
if (!np) {
dev_err(&pdev->dev, "missing phy-handle\n");
- return -EINVAL;
+ err = -EINVAL;
+ goto err_netdev;
}
of_property_read_u32(np, "reg", &pep->phy_addr);
pep->phy_intf = of_get_phy_mode(pdev->dev.of_node);
@@ -1526,7 +1527,7 @@ static int pxa168_eth_probe(struct platform_device *pdev)
pep->smi_bus = mdiobus_alloc();
if (pep->smi_bus == NULL) {
err = -ENOMEM;
- goto err_base;
+ goto err_netdev;
}
pep->smi_bus->priv = pep;
pep->smi_bus->name = "pxa168_eth smi";
@@ -1551,13 +1552,10 @@ err_mdiobus:
mdiobus_unregister(pep->smi_bus);
err_free_mdio:
mdiobus_free(pep->smi_bus);
-err_base:
- iounmap(pep->base);
err_netdev:
free_netdev(dev);
err_clk:
- clk_disable(clk);
- clk_put(clk);
+ clk_disable_unprepare(clk);
return err;
}
@@ -1574,13 +1572,9 @@ static int pxa168_eth_remove(struct platform_device *pdev)
if (pep->phy)
phy_disconnect(pep->phy);
if (pep->clk) {
- clk_disable(pep->clk);
- clk_put(pep->clk);
- pep->clk = NULL;
+ clk_disable_unprepare(pep->clk);
}
- iounmap(pep->base);
- pep->base = NULL;
mdiobus_unregister(pep->smi_bus);
mdiobus_free(pep->smi_bus);
unregister_netdev(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index 3f44e2b..a2ddf3d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -1102,20 +1102,21 @@ static int mlx4_en_check_rxfh_func(struct net_device *dev, u8 hfunc)
struct mlx4_en_priv *priv = netdev_priv(dev);
/* check if requested function is supported by the device */
- if ((hfunc == ETH_RSS_HASH_TOP &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP)) ||
- (hfunc == ETH_RSS_HASH_XOR &&
- !(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR)))
- return -EINVAL;
+ if (hfunc == ETH_RSS_HASH_TOP) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_TOP))
+ return -EINVAL;
+ if (!(dev->features & NETIF_F_RXHASH))
+ en_warn(priv, "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
+ return 0;
+ } else if (hfunc == ETH_RSS_HASH_XOR) {
+ if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_RSS_XOR))
+ return -EINVAL;
+ if (dev->features & NETIF_F_RXHASH)
+ en_warn(priv, "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
+ return 0;
+ }
- priv->rss_hash_fn = hfunc;
- if (hfunc == ETH_RSS_HASH_TOP && !(dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Toeplitz hash function should be used in conjunction with RX hashing for optimal performance\n");
- if (hfunc == ETH_RSS_HASH_XOR && (dev->features & NETIF_F_RXHASH))
- en_warn(priv,
- "Enabling both XOR Hash function and RX Hashing can limit RPS functionality\n");
- return 0;
+ return -EINVAL;
}
static int mlx4_en_get_rxfh(struct net_device *dev, u32 *ring_index, u8 *key,
@@ -1189,6 +1190,8 @@ static int mlx4_en_set_rxfh(struct net_device *dev, const u32 *ring_index,
priv->prof->rss_rings = rss_rings;
if (key)
memcpy(priv->rss_key, key, MLX4_EN_RSS_KEY_SIZE);
+ if (hfunc != ETH_RSS_HASH_NO_CHANGE)
+ priv->rss_hash_fn = hfunc;
if (port_up) {
err = mlx4_en_start_port(dev);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0f1afc0..32f5ec7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1467,6 +1467,7 @@ static void mlx4_en_service_task(struct work_struct *work)
if (mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_TS)
mlx4_en_ptp_overflow_check(mdev);
+ mlx4_en_recover_from_oom(priv);
queue_delayed_work(mdev->workqueue, &priv->service_task,
SERVICE_TASK_DELAY);
}
@@ -1721,7 +1722,7 @@ mac_err:
cq_err:
while (rx_index--) {
mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
- mlx4_en_free_affinity_hint(priv, i);
+ mlx4_en_free_affinity_hint(priv, rx_index);
}
for (i = 0; i < priv->rx_ring_num; i++)
mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 4fdd3c3..2a77a6b 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -244,6 +244,12 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
return mlx4_en_alloc_frags(priv, rx_desc, frags, ring->page_alloc, gfp);
}
+static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
+{
+ BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
+ return ring->prod == ring->cons;
+}
+
static inline void mlx4_en_update_rx_prod_db(struct mlx4_en_rx_ring *ring)
{
*ring->wqres.db.db = cpu_to_be32(ring->prod & 0xffff);
@@ -315,8 +321,7 @@ static void mlx4_en_free_rx_buf(struct mlx4_en_priv *priv,
ring->cons, ring->prod);
/* Unmap and free Rx buffers */
- BUG_ON((u32) (ring->prod - ring->cons) > ring->actual_size);
- while (ring->cons != ring->prod) {
+ while (!mlx4_en_is_ring_empty(ring)) {
index = ring->cons & ring->size_mask;
en_dbg(DRV, priv, "Processing descriptor:%d\n", index);
mlx4_en_free_rx_desc(priv, ring, index);
@@ -491,6 +496,23 @@ err_allocator:
return err;
}
+/* We recover from an out-of-memory condition by scheduling our NAPI
+ * poll function (mlx4_en_process_cq), which tries to allocate
+ * all missing RX buffers (call to mlx4_en_refill_rx_buffers).
+ */
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv)
+{
+ int ring;
+
+ if (!priv->port_up)
+ return;
+
+ for (ring = 0; ring < priv->rx_ring_num; ring++) {
+ if (mlx4_en_is_ring_empty(priv->rx_ring[ring]))
+ napi_reschedule(&priv->rx_cq[ring]->napi);
+ }
+}
+
void mlx4_en_destroy_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride)
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 1783705..f7bf312 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -143,8 +143,10 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
ring->queue_index = queue_index;
- if (queue_index < priv->num_tx_rings_p_up && cpu_online(queue_index))
- cpumask_set_cpu(queue_index, &ring->affinity_mask);
+ if (queue_index < priv->num_tx_rings_p_up)
+ cpumask_set_cpu_local_first(queue_index,
+ priv->mdev->dev->numa_node,
+ &ring->affinity_mask);
*pring = ring;
return 0;
@@ -213,7 +215,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
- if (!user_prio && cpu_online(ring->queue_index))
+ if (!cpumask_empty(&ring->affinity_mask))
netif_set_xps_queue(priv->dev, &ring->affinity_mask,
ring->queue_index);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index a407981..e30bf57 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -56,11 +56,13 @@ MODULE_PARM_DESC(enable_qos, "Enable Enhanced QoS support (default: on)");
#define MLX4_GET(dest, source, offset) \
do { \
void *__p = (char *) (source) + (offset); \
+ u64 val; \
switch (sizeof (dest)) { \
case 1: (dest) = *(u8 *) __p; break; \
case 2: (dest) = be16_to_cpup(__p); break; \
case 4: (dest) = be32_to_cpup(__p); break; \
- case 8: (dest) = be64_to_cpup(__p); break; \
+ case 8: val = get_unaligned((u64 *)__p); \
+ (dest) = be64_to_cpu(val); break; \
default: __buggy_use_of_MLX4_GET(); \
} \
} while (0)
@@ -1605,9 +1607,17 @@ static void get_board_id(void *vsd, char *board_id)
* swaps each 4-byte word before passing it back to
* us. Therefore we need to swab it before printing.
*/
- for (i = 0; i < 4; ++i)
- ((u32 *) board_id)[i] =
- swab32(*(u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4));
+ u32 *bid_u32 = (u32 *)board_id;
+
+ for (i = 0; i < 4; ++i) {
+ u32 *addr;
+ u32 val;
+
+ addr = (u32 *) (vsd + VSD_OFFSET_MLX_BOARD_ID + i * 4);
+ val = get_unaligned(addr);
+ val = swab32(val);
+ put_unaligned(val, &bid_u32[i]);
+ }
}
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 9de3021..d021f07 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -774,6 +774,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
void mlx4_en_deactivate_tx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_tx_ring *ring);
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev);
+void mlx4_en_recover_from_oom(struct mlx4_en_priv *priv);
int mlx4_en_create_rx_ring(struct mlx4_en_priv *priv,
struct mlx4_en_rx_ring **pring,
u32 size, u16 stride, int node);
diff --git a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
index 1412f5a..2bae502 100644
--- a/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
+++ b/drivers/net/ethernet/myricom/myri10ge/myri10ge.c
@@ -69,11 +69,7 @@
#include <net/ip.h>
#include <net/tcp.h>
#include <asm/byteorder.h>
-#include <asm/io.h>
#include <asm/processor.h>
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
-#endif
#include <net/busy_poll.h>
#include "myri10ge_mcp.h"
@@ -242,8 +238,7 @@ struct myri10ge_priv {
unsigned int rdma_tags_available;
int intr_coal_delay;
__be32 __iomem *intr_coal_delay_ptr;
- int mtrr;
- int wc_enabled;
+ int wc_cookie;
int down_cnt;
wait_queue_head_t down_wq;
struct work_struct watchdog_work;
@@ -1905,7 +1900,7 @@ static const char myri10ge_gstrings_main_stats[][ETH_GSTRING_LEN] = {
"tx_aborted_errors", "tx_carrier_errors", "tx_fifo_errors",
"tx_heartbeat_errors", "tx_window_errors",
/* device-specific stats */
- "tx_boundary", "WC", "irq", "MSI", "MSIX",
+ "tx_boundary", "irq", "MSI", "MSIX",
"read_dma_bw_MBs", "write_dma_bw_MBs", "read_write_dma_bw_MBs",
"serial_number", "watchdog_resets",
#ifdef CONFIG_MYRI10GE_DCA
@@ -1984,7 +1979,6 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
data[i] = ((u64 *)&link_stats)[i];
data[i++] = (unsigned int)mgp->tx_boundary;
- data[i++] = (unsigned int)mgp->wc_enabled;
data[i++] = (unsigned int)mgp->pdev->irq;
data[i++] = (unsigned int)mgp->msi_enabled;
data[i++] = (unsigned int)mgp->msix_enabled;
@@ -4040,14 +4034,7 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
mgp->board_span = pci_resource_len(pdev, 0);
mgp->iomem_base = pci_resource_start(pdev, 0);
- mgp->mtrr = -1;
- mgp->wc_enabled = 0;
-#ifdef CONFIG_MTRR
- mgp->mtrr = mtrr_add(mgp->iomem_base, mgp->board_span,
- MTRR_TYPE_WRCOMB, 1);
- if (mgp->mtrr >= 0)
- mgp->wc_enabled = 1;
-#endif
+ mgp->wc_cookie = arch_phys_wc_add(mgp->iomem_base, mgp->board_span);
mgp->sram = ioremap_wc(mgp->iomem_base, mgp->board_span);
if (mgp->sram == NULL) {
dev_err(&pdev->dev, "ioremap failed for %ld bytes at 0x%lx\n",
@@ -4146,14 +4133,14 @@ static int myri10ge_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
goto abort_with_state;
}
if (mgp->msix_enabled)
- dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, WC %s\n",
+ dev_info(dev, "%d MSI-X IRQs, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->num_slices, mgp->tx_boundary, mgp->fw_name,
- (mgp->wc_enabled ? "Enabled" : "Disabled"));
+ (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
else
- dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, WC %s\n",
+ dev_info(dev, "%s IRQ %d, tx bndry %d, fw %s, MTRR %s, WC Enabled\n",
mgp->msi_enabled ? "MSI" : "xPIC",
pdev->irq, mgp->tx_boundary, mgp->fw_name,
- (mgp->wc_enabled ? "Enabled" : "Disabled"));
+ (mgp->wc_cookie > 0 ? "Enabled" : "Disabled"));
board_number++;
return 0;
@@ -4175,10 +4162,7 @@ abort_with_ioremap:
iounmap(mgp->sram);
abort_with_mtrr:
-#ifdef CONFIG_MTRR
- if (mgp->mtrr >= 0)
- mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+ arch_phys_wc_del(mgp->wc_cookie);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
mgp->cmd, mgp->cmd_bus);
@@ -4220,11 +4204,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
pci_restore_state(pdev);
iounmap(mgp->sram);
-
-#ifdef CONFIG_MTRR
- if (mgp->mtrr >= 0)
- mtrr_del(mgp->mtrr, mgp->iomem_base, mgp->board_span);
-#endif
+ arch_phys_wc_del(mgp->wc_cookie);
myri10ge_free_slices(mgp);
kfree(mgp->msix_vectors);
dma_free_coherent(&pdev->dev, sizeof(*mgp->cmd),
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
index 5c40683..8da7c3f 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
@@ -135,7 +135,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
int i, j;
struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
- spin_lock(&adapter->tx_clean_lock);
+ spin_lock_bh(&adapter->tx_clean_lock);
cmd_buf = tx_ring->cmd_buf_arr;
for (i = 0; i < tx_ring->num_desc; i++) {
buffrag = cmd_buf->frag_array;
@@ -159,7 +159,7 @@ void netxen_release_tx_buffers(struct netxen_adapter *adapter)
}
cmd_buf++;
}
- spin_unlock(&adapter->tx_clean_lock);
+ spin_unlock_bh(&adapter->tx_clean_lock);
}
void netxen_free_sw_resources(struct netxen_adapter *adapter)
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index a570a60..ec25153 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4176,14 +4176,15 @@ static int rocker_port_bridge_setlink(struct net_device *dev,
static int rocker_port_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask)
+ u32 filter_mask, int nlflags)
{
struct rocker_port *rocker_port = netdev_priv(dev);
u16 mode = BRIDGE_MODE_UNDEF;
u32 mask = BR_LEARNING | BR_LEARNING_SYNC;
return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode,
- rocker_port->brport_flags, mask);
+ rocker_port->brport_flags, mask,
+ nlflags);
}
static int rocker_port_get_phys_port_name(struct net_device *dev,
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 2bef655..9b7e0a3 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1765,7 +1765,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
ALE_PORT_STATE,
ALE_PORT_STATE_FORWARD);
- if (ndev && slave->open)
+ if (ndev && slave->open &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_on(ndev);
} else {
writel(mac_control, GBE_REG_ADDR(slave, emac_regs,
@@ -1773,7 +1775,9 @@ static void netcp_ethss_link_state_action(struct gbe_priv *gbe_dev,
cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
ALE_PORT_STATE,
ALE_PORT_STATE_DISABLE);
- if (ndev)
+ if (ndev &&
+ slave->link_interface != SGMII_LINK_MAC_PHY &&
+ slave->link_interface != XGMII_LINK_MAC_PHY)
netif_carrier_off(ndev);
}
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index a10b316..41071d3 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -128,7 +128,6 @@ struct ndis_tcp_ip_checksum_info;
struct hv_netvsc_packet {
/* Bookkeeping stuff */
u32 status;
- bool part_of_skb;
bool is_data_pkt;
bool xmit_more; /* from skb */
@@ -612,6 +611,15 @@ struct multi_send_data {
u32 count; /* counter of batched packets */
};
+/* The context of the netvsc device */
+struct net_device_context {
+ /* point back to our device context */
+ struct hv_device *device_ctx;
+ struct delayed_work dwork;
+ struct work_struct work;
+ u32 msg_enable; /* debug level */
+};
+
/* Per netvsc device */
struct netvsc_device {
struct hv_device *dev;
@@ -667,6 +675,9 @@ struct netvsc_device {
struct multi_send_data msd[NR_CPUS];
u32 max_pkt; /* max number of pkt in one send, e.g. 8 */
u32 pkt_align; /* alignment bytes, e.g. 8 */
+
+ /* The net device context */
+ struct net_device_context *nd_ctx;
};
/* NdisInitialize message */
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 2e8ad06..2d9ef53 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -889,11 +889,6 @@ int netvsc_send(struct hv_device *device,
} else {
packet->page_buf_cnt = 0;
packet->total_data_buflen += msd_len;
- if (!packet->part_of_skb) {
- skb = (struct sk_buff *)(unsigned long)packet->
- send_completion_tid;
- packet->send_completion_tid = 0;
- }
}
if (msdp->pkt)
@@ -1197,6 +1192,9 @@ int netvsc_device_add(struct hv_device *device, void *additional_info)
*/
ndev = net_device->ndev;
+ /* Add netvsc_device context to netvsc_device */
+ net_device->nd_ctx = netdev_priv(ndev);
+
/* Initialize the NetVSC channel extension */
init_completion(&net_device->channel_init_wait);
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index a3a9d38..5993c7e 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -40,18 +40,21 @@
#include "hyperv_net.h"
-struct net_device_context {
- /* point back to our device context */
- struct hv_device *device_ctx;
- struct delayed_work dwork;
- struct work_struct work;
-};
#define RING_SIZE_MIN 64
static int ring_size = 128;
module_param(ring_size, int, S_IRUGO);
MODULE_PARM_DESC(ring_size, "Ring buffer size (# of pages)");
+static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE |
+ NETIF_MSG_LINK | NETIF_MSG_IFUP |
+ NETIF_MSG_IFDOWN | NETIF_MSG_RX_ERR |
+ NETIF_MSG_TX_ERR;
+
+static int debug = -1;
+module_param(debug, int, S_IRUGO);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
static void do_set_multicast(struct work_struct *w)
{
struct net_device_context *ndevctx =
@@ -235,9 +238,6 @@ void netvsc_xmit_completion(void *context)
struct sk_buff *skb = (struct sk_buff *)
(unsigned long)packet->send_completion_tid;
- if (!packet->part_of_skb)
- kfree(packet);
-
if (skb)
dev_kfree_skb_any(skb);
}
@@ -389,7 +389,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
u32 net_trans_info;
u32 hash;
u32 skb_length;
- u32 head_room;
u32 pkt_sz;
struct hv_page_buffer page_buf[MAX_PAGE_BUFFER_COUNT];
@@ -402,7 +401,6 @@ static int netvsc_start_xmit(struct sk_buff *skb, struct net_device *net)
check_size:
skb_length = skb->len;
- head_room = skb_headroom(skb);
num_data_pgs = netvsc_get_slots(skb) + 2;
if (num_data_pgs > MAX_PAGE_BUFFER_COUNT && linear) {
net_alert_ratelimited("packet too big: %u pages (%u bytes)\n",
@@ -421,20 +419,14 @@ check_size:
pkt_sz = sizeof(struct hv_netvsc_packet) + RNDIS_AND_PPI_SIZE;
- if (head_room < pkt_sz) {
- packet = kmalloc(pkt_sz, GFP_ATOMIC);
- if (!packet) {
- /* out of memory, drop packet */
- netdev_err(net, "unable to alloc hv_netvsc_packet\n");
- ret = -ENOMEM;
- goto drop;
- }
- packet->part_of_skb = false;
- } else {
- /* Use the headroom for building up the packet */
- packet = (struct hv_netvsc_packet *)skb->head;
- packet->part_of_skb = true;
+ ret = skb_cow_head(skb, pkt_sz);
+ if (ret) {
+ netdev_err(net, "unable to alloc hv_netvsc_packet\n");
+ ret = -ENOMEM;
+ goto drop;
}
+ /* Use the headroom for building up the packet */
+ packet = (struct hv_netvsc_packet *)skb->head;
packet->status = 0;
packet->xmit_more = skb->xmit_more;
@@ -591,8 +583,6 @@ drop:
net->stats.tx_bytes += skb_length;
net->stats.tx_packets++;
} else {
- if (packet && !packet->part_of_skb)
- kfree(packet);
if (ret != -EAGAIN) {
dev_kfree_skb_any(skb);
net->stats.tx_dropped++;
@@ -888,6 +878,11 @@ static int netvsc_probe(struct hv_device *dev,
net_device_ctx = netdev_priv(net);
net_device_ctx->device_ctx = dev;
+ net_device_ctx->msg_enable = netif_msg_init(debug, default_msg);
+ if (netif_msg_probe(net_device_ctx))
+ netdev_dbg(net, "netvsc msg_enable: %d\n",
+ net_device_ctx->msg_enable);
+
hv_set_drvdata(dev, net);
INIT_DELAYED_WORK(&net_device_ctx->dwork, netvsc_link_change);
INIT_WORK(&net_device_ctx->work, do_set_multicast);
diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
index 0d92efe..9118cea 100644
--- a/drivers/net/hyperv/rndis_filter.c
+++ b/drivers/net/hyperv/rndis_filter.c
@@ -429,7 +429,8 @@ int rndis_filter_receive(struct hv_device *dev,
rndis_msg = pkt->data;
- dump_rndis_message(dev, rndis_msg);
+ if (netif_msg_rx_err(net_dev->nd_ctx))
+ dump_rndis_message(dev, rndis_msg);
switch (rndis_msg->ndis_msg_type) {
case RNDIS_MSG_PACKET:
diff --git a/drivers/net/phy/mdio-gpio.c b/drivers/net/phy/mdio-gpio.c
index 49ce7ec..c9cb486c 100644
--- a/drivers/net/phy/mdio-gpio.c
+++ b/drivers/net/phy/mdio-gpio.c
@@ -80,7 +80,8 @@ static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
* assume the pin serves as pull-up. If direction is
* output, the default value is high.
*/
- gpio_set_value(bitbang->mdo, 1 ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ 1 ^ bitbang->mdo_active_low);
return;
}
@@ -96,7 +97,8 @@ static int mdio_get(struct mdiobb_ctrl *ctrl)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- return gpio_get_value(bitbang->mdio) ^ bitbang->mdio_active_low;
+ return gpio_get_value_cansleep(bitbang->mdio) ^
+ bitbang->mdio_active_low;
}
static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
@@ -105,9 +107,11 @@ static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
container_of(ctrl, struct mdio_gpio_info, ctrl);
if (bitbang->mdo)
- gpio_set_value(bitbang->mdo, what ^ bitbang->mdo_active_low);
+ gpio_set_value_cansleep(bitbang->mdo,
+ what ^ bitbang->mdo_active_low);
else
- gpio_set_value(bitbang->mdio, what ^ bitbang->mdio_active_low);
+ gpio_set_value_cansleep(bitbang->mdio,
+ what ^ bitbang->mdio_active_low);
}
static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
@@ -115,7 +119,7 @@ static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
struct mdio_gpio_info *bitbang =
container_of(ctrl, struct mdio_gpio_info, ctrl);
- gpio_set_value(bitbang->mdc, what ^ bitbang->mdc_active_low);
+ gpio_set_value_cansleep(bitbang->mdc, what ^ bitbang->mdc_active_low);
}
static struct mdiobb_ops mdio_gpio_ops = {
diff --git a/drivers/net/phy/mdio-mux-gpio.c b/drivers/net/phy/mdio-mux-gpio.c
index 1a87a58..66edd99 100644
--- a/drivers/net/phy/mdio-mux-gpio.c
+++ b/drivers/net/phy/mdio-mux-gpio.c
@@ -12,33 +12,30 @@
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/mdio-mux.h>
-#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"
-#define MDIO_MUX_GPIO_MAX_BITS 8
-
struct mdio_mux_gpio_state {
- struct gpio_desc *gpio[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int num_gpios;
+ struct gpio_descs *gpios;
void *mux_handle;
};
static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
void *data)
{
- int values[MDIO_MUX_GPIO_MAX_BITS];
- unsigned int n;
struct mdio_mux_gpio_state *s = data;
+ int values[s->gpios->ndescs];
+ unsigned int n;
if (current_child == desired_child)
return 0;
- for (n = 0; n < s->num_gpios; n++) {
+ for (n = 0; n < s->gpios->ndescs; n++)
values[n] = (desired_child >> n) & 1;
- }
- gpiod_set_array_cansleep(s->num_gpios, s->gpio, values);
+
+ gpiod_set_array_cansleep(s->gpios->ndescs, s->gpios->desc, values);
return 0;
}
@@ -46,56 +43,33 @@ static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
struct mdio_mux_gpio_state *s;
- int num_gpios;
- unsigned int n;
int r;
- if (!pdev->dev.of_node)
- return -ENODEV;
-
- num_gpios = of_gpio_count(pdev->dev.of_node);
- if (num_gpios <= 0 || num_gpios > MDIO_MUX_GPIO_MAX_BITS)
- return -ENODEV;
-
s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
- s->num_gpios = num_gpios;
-
- for (n = 0; n < num_gpios; ) {
- struct gpio_desc *gpio = gpiod_get_index(&pdev->dev, NULL, n,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio)) {
- r = PTR_ERR(gpio);
- goto err;
- }
- s->gpio[n] = gpio;
- n++;
- }
+ s->gpios = gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
+ if (IS_ERR(s->gpios))
+ return PTR_ERR(s->gpios);
r = mdio_mux_init(&pdev->dev,
mdio_mux_gpio_switch_fn, &s->mux_handle, s);
- if (r == 0) {
- pdev->dev.platform_data = s;
- return 0;
- }
-err:
- while (n) {
- n--;
- gpiod_put(s->gpio[n]);
+ if (r != 0) {
+ gpiod_put_array(s->gpios);
+ return r;
}
- return r;
+
+ pdev->dev.platform_data = s;
+ return 0;
}
static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
- unsigned int n;
struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);
mdio_mux_uninit(s->mux_handle);
- for (n = 0; n < s->num_gpios; n++)
- gpiod_put(s->gpio[n]);
+ gpiod_put_array(s->gpios);
return 0;
}
diff --git a/drivers/net/ppp/ppp_mppe.c b/drivers/net/ppp/ppp_mppe.c
index 911b216..05005c6 100644
--- a/drivers/net/ppp/ppp_mppe.c
+++ b/drivers/net/ppp/ppp_mppe.c
@@ -478,7 +478,6 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
struct blkcipher_desc desc = { .tfm = state->arc4 };
unsigned ccount;
int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
- int sanity = 0;
struct scatterlist sg_in[1], sg_out[1];
if (isize <= PPP_HDRLEN + MPPE_OVHD) {
@@ -514,31 +513,19 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
"mppe_decompress[%d]: ENCRYPTED bit not set!\n",
state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (!state->stateful && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
"stateless mode!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
+ goto sanity_error;
}
if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
"flag packet!\n", state->unit);
state->sanity_errors += 100;
- sanity = 1;
- }
-
- if (sanity) {
- if (state->sanity_errors < SANITY_MAX)
- return DECOMP_ERROR;
- else
- /*
- * Take LCP down if the peer is sending too many bogons.
- * We don't want to do this for a single or just a few
- * instances since it could just be due to packet corruption.
- */
- return DECOMP_FATALERROR;
+ goto sanity_error;
}
/*
@@ -546,6 +533,13 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
*/
if (!state->stateful) {
+ /* Discard late packet */
+ if ((ccount - state->ccount) % MPPE_CCOUNT_SPACE
+ > MPPE_CCOUNT_SPACE / 2) {
+ state->sanity_errors++;
+ goto sanity_error;
+ }
+
/* RFC 3078, sec 8.1. Rekey for every packet. */
while (state->ccount != ccount) {
mppe_rekey(state, 0);
@@ -649,6 +643,16 @@ mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
state->sanity_errors >>= 1;
return osize;
+
+sanity_error:
+ if (state->sanity_errors < SANITY_MAX)
+ return DECOMP_ERROR;
+ else
+ /* Take LCP down if the peer is sending too many bogons.
+ * We don't want to do this for a single or just a few
+ * instances since it could just be due to packet corruption.
+ */
+ return DECOMP_FATALERROR;
}
/*
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 154116a..27a5f95 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -730,12 +730,8 @@ static int vxlan_fdb_create(struct vxlan_dev *vxlan,
/* Only change unicasts */
if (!(is_multicast_ether_addr(f->eth_addr) ||
is_zero_ether_addr(f->eth_addr))) {
- int rc = vxlan_fdb_replace(f, ip, port, vni,
+ notify |= vxlan_fdb_replace(f, ip, port, vni,
ifindex);
-
- if (rc < 0)
- return rc;
- notify |= rc;
} else
return -EOPNOTSUPP;
}
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index 89dca77..18ee208 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
EXPORT_SYMBOL_GPL(devm_pinctrl_put);
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked)
+ bool dup)
{
int i, ret;
struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
maps_node->maps = maps;
}
- if (!locked)
- mutex_lock(&pinctrl_maps_mutex);
+ mutex_lock(&pinctrl_maps_mutex);
list_add_tail(&maps_node->node, &pinctrl_maps);
- if (!locked)
- mutex_unlock(&pinctrl_maps_mutex);
+ mutex_unlock(&pinctrl_maps_mutex);
return 0;
}
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
int pinctrl_register_mappings(struct pinctrl_map const *maps,
unsigned num_maps)
{
- return pinctrl_register_map(maps, num_maps, true, false);
+ return pinctrl_register_map(maps, num_maps, true);
}
void pinctrl_unregister_map(struct pinctrl_map const *map)
diff --git a/drivers/pinctrl/core.h b/drivers/pinctrl/core.h
index 75476b3..b24ea84 100644
--- a/drivers/pinctrl/core.h
+++ b/drivers/pinctrl/core.h
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
}
int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
- bool dup, bool locked);
+ bool dup);
void pinctrl_unregister_map(struct pinctrl_map const *map);
extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index eda13de..0bbf7d7 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
dt_map->num_maps = num_maps;
list_add_tail(&dt_map->node, &p->dt_maps);
- return pinctrl_register_map(map, num_maps, false, true);
+ return pinctrl_register_map(map, num_maps, false);
}
struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 493294c..474812e 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
if (!mtk_eint_get_mask(pctl, eint_num)) {
mtk_eint_mask(d);
unmask = 1;
+ } else {
+ unmask = 0;
}
clr_bit = 0xff << eint_offset;
diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-370.c b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
index 42f930f..03aa58c 100644
--- a/drivers/pinctrl/mvebu/pinctrl-armada-370.c
+++ b/drivers/pinctrl/mvebu/pinctrl-armada-370.c
@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
MPP_FUNCTION(0x5, "audio", "mclk"),
MPP_FUNCTION(0x6, "uart0", "cts")),
MPP_MODE(63,
- MPP_FUNCTION(0x0, "gpo", NULL),
+ MPP_FUNCTION(0x0, "gpio", NULL),
MPP_FUNCTION(0x1, "spi0", "sck"),
MPP_FUNCTION(0x2, "tclk", NULL)),
MPP_MODE(64,
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
index b2d2221..ae4115e 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
val = 1;
}
+ val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
@@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
return ret;
val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
- val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
+ val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
if (ret < 0)
@@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
seq_puts(s, " ---");
} else {
- if (!pad->input_enabled) {
+ if (pad->input_enabled) {
ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
- if (!ret) {
- ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
- pad->out_value = ret;
- }
+ if (ret < 0)
+ return;
+
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
}
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
diff --git a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
index 8f36c5f..211b942 100644
--- a/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
+++ b/drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
}
}
+ val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
if (pad->input_enabled) {
ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
- if (!ret) {
- ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
- pad->out_value = ret;
- }
+ if (ret < 0)
+ return;
+
+ ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+ pad->out_value = ret;
}
seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index b3d419a..b496db8 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -830,6 +830,13 @@ static void ideapad_acpi_notify(acpi_handle handle, u32 event, void *data)
*/
static const struct dmi_system_id no_hw_rfkill_list[] = {
{
+ .ident = "Lenovo G40-30",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo G40-30"),
+ },
+ },
+ {
.ident = "Lenovo Yoga 2 11 / 13 / Pro",
.matches = {
DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index 7769575..9bb9ad6 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -2115,7 +2115,7 @@ static int hotkey_mask_get(void)
return 0;
}
-void static hotkey_mask_warn_incomplete_mask(void)
+static void hotkey_mask_warn_incomplete_mask(void)
{
/* log only what the user can fix... */
const u32 wantedmask = hotkey_driver_mask &
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 6149ae0..0fe4ad8 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -164,6 +164,16 @@ config RTC_DRV_ABB5ZES3
This driver can also be built as a module. If so, the module
will be called rtc-ab-b5ze-s3.
+config RTC_DRV_ABX80X
+ tristate "Abracon ABx80x"
+ help
+ If you say yes here you get support for Abracon AB080X and AB180X
+ families of ultra-low-power battery- and capacitor-backed real-time
+ clock chips.
+
+ This driver can also be built as a module. If so, the module
+ will be called rtc-abx80x.
+
config RTC_DRV_AS3722
tristate "ams AS3722 RTC driver"
depends on MFD_AS3722
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index c31731c..2b82e2b 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_RTC_DRV_88PM80X) += rtc-88pm80x.o
obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o
obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o
obj-$(CONFIG_RTC_DRV_ABB5ZES3) += rtc-ab-b5ze-s3.o
+obj-$(CONFIG_RTC_DRV_ABX80X) += rtc-abx80x.o
obj-$(CONFIG_RTC_DRV_ARMADA38X) += rtc-armada38x.o
obj-$(CONFIG_RTC_DRV_AS3722) += rtc-as3722.o
obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o
diff --git a/drivers/rtc/rtc-abx80x.c b/drivers/rtc/rtc-abx80x.c
new file mode 100644
index 0000000..4337c3b
--- /dev/null
+++ b/drivers/rtc/rtc-abx80x.c
@@ -0,0 +1,307 @@
+/*
+ * A driver for the I2C members of the Abracon AB x8xx RTC family,
+ * and compatible: AB 1805 and AB 0805
+ *
+ * Copyright 2014-2015 Macq S.A.
+ *
+ * Author: Philippe De Muyter <phdm@macqel.be>
+ * Author: Alexandre Belloni <alexandre.belloni@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bcd.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/rtc.h>
+
+#define ABX8XX_REG_HTH 0x00
+#define ABX8XX_REG_SC 0x01
+#define ABX8XX_REG_MN 0x02
+#define ABX8XX_REG_HR 0x03
+#define ABX8XX_REG_DA 0x04
+#define ABX8XX_REG_MO 0x05
+#define ABX8XX_REG_YR 0x06
+#define ABX8XX_REG_WD 0x07
+
+#define ABX8XX_REG_CTRL1 0x10
+#define ABX8XX_CTRL_WRITE BIT(1)
+#define ABX8XX_CTRL_12_24 BIT(6)
+
+#define ABX8XX_REG_CFG_KEY 0x1f
+#define ABX8XX_CFG_KEY_MISC 0x9d
+
+#define ABX8XX_REG_ID0 0x28
+
+#define ABX8XX_REG_TRICKLE 0x20
+#define ABX8XX_TRICKLE_CHARGE_ENABLE 0xa0
+#define ABX8XX_TRICKLE_STANDARD_DIODE 0x8
+#define ABX8XX_TRICKLE_SCHOTTKY_DIODE 0x4
+
+static u8 trickle_resistors[] = {0, 3, 6, 11};
+
+enum abx80x_chip {AB0801, AB0803, AB0804, AB0805,
+ AB1801, AB1803, AB1804, AB1805, ABX80X};
+
+struct abx80x_cap {
+ u16 pn;
+ bool has_tc;
+};
+
+static struct abx80x_cap abx80x_caps[] = {
+ [AB0801] = {.pn = 0x0801},
+ [AB0803] = {.pn = 0x0803},
+ [AB0804] = {.pn = 0x0804, .has_tc = true},
+ [AB0805] = {.pn = 0x0805, .has_tc = true},
+ [AB1801] = {.pn = 0x1801},
+ [AB1803] = {.pn = 0x1803},
+ [AB1804] = {.pn = 0x1804, .has_tc = true},
+ [AB1805] = {.pn = 0x1805, .has_tc = true},
+ [ABX80X] = {.pn = 0}
+};
+
+static struct i2c_driver abx80x_driver;
+
+static int abx80x_enable_trickle_charger(struct i2c_client *client,
+ u8 trickle_cfg)
+{
+ int err;
+
+ /*
+ * Write the configuration key register to enable access to the Trickle
+ * register
+ */
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CFG_KEY,
+ ABX8XX_CFG_KEY_MISC);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write configuration key\n");
+ return -EIO;
+ }
+
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_TRICKLE,
+ ABX8XX_TRICKLE_CHARGE_ENABLE |
+ trickle_cfg);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write trickle register\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int abx80x_rtc_read_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[8];
+ int err;
+
+ err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_HTH,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to read date\n");
+ return -EIO;
+ }
+
+ tm->tm_sec = bcd2bin(buf[ABX8XX_REG_SC] & 0x7F);
+ tm->tm_min = bcd2bin(buf[ABX8XX_REG_MN] & 0x7F);
+ tm->tm_hour = bcd2bin(buf[ABX8XX_REG_HR] & 0x3F);
+ tm->tm_wday = buf[ABX8XX_REG_WD] & 0x7;
+ tm->tm_mday = bcd2bin(buf[ABX8XX_REG_DA] & 0x3F);
+ tm->tm_mon = bcd2bin(buf[ABX8XX_REG_MO] & 0x1F) - 1;
+ tm->tm_year = bcd2bin(buf[ABX8XX_REG_YR]) + 100;
+
+ err = rtc_valid_tm(tm);
+ if (err < 0)
+ dev_err(&client->dev, "retrieved date/time is not valid.\n");
+
+ return err;
+}
+
+static int abx80x_rtc_set_time(struct device *dev, struct rtc_time *tm)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ unsigned char buf[8];
+ int err;
+
+ if (tm->tm_year < 100)
+ return -EINVAL;
+
+ buf[ABX8XX_REG_HTH] = 0;
+ buf[ABX8XX_REG_SC] = bin2bcd(tm->tm_sec);
+ buf[ABX8XX_REG_MN] = bin2bcd(tm->tm_min);
+ buf[ABX8XX_REG_HR] = bin2bcd(tm->tm_hour);
+ buf[ABX8XX_REG_DA] = bin2bcd(tm->tm_mday);
+ buf[ABX8XX_REG_MO] = bin2bcd(tm->tm_mon + 1);
+ buf[ABX8XX_REG_YR] = bin2bcd(tm->tm_year - 100);
+ buf[ABX8XX_REG_WD] = tm->tm_wday;
+
+ err = i2c_smbus_write_i2c_block_data(client, ABX8XX_REG_HTH,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write to date registers\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct rtc_class_ops abx80x_rtc_ops = {
+ .read_time = abx80x_rtc_read_time,
+ .set_time = abx80x_rtc_set_time,
+};
+
+static int abx80x_dt_trickle_cfg(struct device_node *np)
+{
+ const char *diode;
+ int trickle_cfg = 0;
+ int i, ret;
+ u32 tmp;
+
+ ret = of_property_read_string(np, "abracon,tc-diode", &diode);
+ if (ret)
+ return ret;
+
+ if (!strcmp(diode, "standard"))
+ trickle_cfg |= ABX8XX_TRICKLE_STANDARD_DIODE;
+ else if (!strcmp(diode, "schottky"))
+ trickle_cfg |= ABX8XX_TRICKLE_SCHOTTKY_DIODE;
+ else
+ return -EINVAL;
+
+ ret = of_property_read_u32(np, "abracon,tc-resistor", &tmp);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < sizeof(trickle_resistors); i++)
+ if (trickle_resistors[i] == tmp)
+ break;
+
+ if (i == sizeof(trickle_resistors))
+ return -EINVAL;
+
+ return (trickle_cfg | i);
+}
+
+static int abx80x_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device_node *np = client->dev.of_node;
+ struct rtc_device *rtc;
+ int i, data, err, trickle_cfg = -EINVAL;
+ char buf[7];
+ unsigned int part = id->driver_data;
+ unsigned int partnumber;
+ unsigned int majrev, minrev;
+ unsigned int lot;
+ unsigned int wafer;
+ unsigned int uid;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C))
+ return -ENODEV;
+
+ err = i2c_smbus_read_i2c_block_data(client, ABX8XX_REG_ID0,
+ sizeof(buf), buf);
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to read partnumber\n");
+ return -EIO;
+ }
+
+ partnumber = (buf[0] << 8) | buf[1];
+ majrev = buf[2] >> 3;
+ minrev = buf[2] & 0x7;
+ lot = ((buf[4] & 0x80) << 2) | ((buf[6] & 0x80) << 1) | buf[3];
+ uid = ((buf[4] & 0x7f) << 8) | buf[5];
+ wafer = (buf[6] & 0x7c) >> 2;
+ dev_info(&client->dev, "model %04x, revision %u.%u, lot %x, wafer %x, uid %x\n",
+ partnumber, majrev, minrev, lot, wafer, uid);
+
+ data = i2c_smbus_read_byte_data(client, ABX8XX_REG_CTRL1);
+ if (data < 0) {
+ dev_err(&client->dev, "Unable to read control register\n");
+ return -EIO;
+ }
+
+ err = i2c_smbus_write_byte_data(client, ABX8XX_REG_CTRL1,
+ ((data & ~ABX8XX_CTRL_12_24) |
+ ABX8XX_CTRL_WRITE));
+ if (err < 0) {
+ dev_err(&client->dev, "Unable to write control register\n");
+ return -EIO;
+ }
+
+ /* part autodetection */
+ if (part == ABX80X) {
+ for (i = 0; abx80x_caps[i].pn; i++)
+ if (partnumber == abx80x_caps[i].pn)
+ break;
+ if (abx80x_caps[i].pn == 0) {
+ dev_err(&client->dev, "Unknown part: %04x\n",
+ partnumber);
+ return -EINVAL;
+ }
+ part = i;
+ }
+
+ if (partnumber != abx80x_caps[part].pn) {
+ dev_err(&client->dev, "partnumber mismatch %04x != %04x\n",
+ partnumber, abx80x_caps[part].pn);
+ return -EINVAL;
+ }
+
+ if (np && abx80x_caps[part].has_tc)
+ trickle_cfg = abx80x_dt_trickle_cfg(np);
+
+ if (trickle_cfg > 0) {
+ dev_info(&client->dev, "Enabling trickle charger: %02x\n",
+ trickle_cfg);
+ abx80x_enable_trickle_charger(client, trickle_cfg);
+ }
+
+ rtc = devm_rtc_device_register(&client->dev, abx80x_driver.driver.name,
+ &abx80x_rtc_ops, THIS_MODULE);
+
+ if (IS_ERR(rtc))
+ return PTR_ERR(rtc);
+
+ i2c_set_clientdata(client, rtc);
+
+ return 0;
+}
+
+static int abx80x_remove(struct i2c_client *client)
+{
+ return 0;
+}
+
+static const struct i2c_device_id abx80x_id[] = {
+ { "abx80x", ABX80X },
+ { "ab0801", AB0801 },
+ { "ab0803", AB0803 },
+ { "ab0804", AB0804 },
+ { "ab0805", AB0805 },
+ { "ab1801", AB1801 },
+ { "ab1803", AB1803 },
+ { "ab1804", AB1804 },
+ { "ab1805", AB1805 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, abx80x_id);
+
+static struct i2c_driver abx80x_driver = {
+ .driver = {
+ .name = "rtc-abx80x",
+ },
+ .probe = abx80x_probe,
+ .remove = abx80x_remove,
+ .id_table = abx80x_id,
+};
+
+module_i2c_driver(abx80x_driver);
+
+MODULE_AUTHOR("Philippe De Muyter <phdm@macqel.be>");
+MODULE_AUTHOR("Alexandre Belloni <alexandre.belloni@free-electrons.com>");
+MODULE_DESCRIPTION("Abracon ABX80X RTC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/rtc/rtc-armada38x.c b/drivers/rtc/rtc-armada38x.c
index 43e04af..cb70ced 100644
--- a/drivers/rtc/rtc-armada38x.c
+++ b/drivers/rtc/rtc-armada38x.c
@@ -40,6 +40,13 @@ struct armada38x_rtc {
void __iomem *regs;
void __iomem *regs_soc;
spinlock_t lock;
+ /*
+ * While setting the time, the RTC TIME register should not be
+ * accessed. Setting the RTC time involves sleeping for
+ * 100 ms, so a mutex instead of a spinlock is used to protect
+ * it.
+ */
+ struct mutex mutex_time;
int irq;
};
@@ -59,8 +66,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
struct armada38x_rtc *rtc = dev_get_drvdata(dev);
unsigned long time, time_check, flags;
- spin_lock_irqsave(&rtc->lock, flags);
-
+ mutex_lock(&rtc->mutex_time);
time = readl(rtc->regs + RTC_TIME);
/*
* WA for failing time set attempts. As stated in HW ERRATA if
@@ -71,7 +77,7 @@ static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
if ((time_check - time) > 1)
time_check = readl(rtc->regs + RTC_TIME);
- spin_unlock_irqrestore(&rtc->lock, flags);
+ mutex_unlock(&rtc->mutex_time);
rtc_time_to_tm(time_check, tm);
@@ -94,19 +100,12 @@ static int armada38x_rtc_set_time(struct device *dev, struct rtc_time *tm)
* then wait for 100ms before writing to the time register to be
* sure that the data will be taken into account.
*/
- spin_lock_irqsave(&rtc->lock, flags);
-
+ mutex_lock(&rtc->mutex_time);
rtc_delayed_write(0, rtc, RTC_STATUS);
-
- spin_unlock_irqrestore(&rtc->lock, flags);
-
msleep(100);
-
- spin_lock_irqsave(&rtc->lock, flags);
-
rtc_delayed_write(time, rtc, RTC_TIME);
+ mutex_unlock(&rtc->mutex_time);
- spin_unlock_irqrestore(&rtc->lock, flags);
out:
return ret;
}
@@ -230,6 +229,7 @@ static __init int armada38x_rtc_probe(struct platform_device *pdev)
return -ENOMEM;
spin_lock_init(&rtc->lock);
+ mutex_init(&rtc->mutex_time);
res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "rtc");
rtc->regs = devm_ioremap_resource(&pdev->dev, res);
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index c43aca6..0fc3fe5 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -667,6 +667,8 @@ static struct raw3215_info *raw3215_alloc_info(void)
info->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
info->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
if (!info->buffer || !info->inbuf) {
+ kfree(info->inbuf);
+ kfree(info->buffer);
kfree(info);
return NULL;
}
diff --git a/drivers/scsi/3w-9xxx.c b/drivers/scsi/3w-9xxx.c
index 7600639..add419d 100644
--- a/drivers/scsi/3w-9xxx.c
+++ b/drivers/scsi/3w-9xxx.c
@@ -149,7 +149,6 @@ static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
/* Functions */
@@ -1340,11 +1339,11 @@ static irqreturn_t twa_interrupt(int irq, void *dev_instance)
}
/* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twa_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
- twa_unmap_scsi_data(tw_dev, request_id);
}
/* Check for valid status after each drain */
@@ -1402,26 +1401,6 @@ static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_comm
}
} /* End twa_load_sgl() */
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
- int use_sg;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- use_sg = scsi_dma_map(cmd);
- if (!use_sg)
- return 0;
- else if (use_sg < 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End twa_map_scsi_sg_data() */
-
/* This function will poll for a response interrupt of a request */
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
{
@@ -1600,9 +1579,11 @@ static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
if (tw_dev->srb[i]) {
- tw_dev->srb[i]->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- twa_unmap_scsi_data(tw_dev, i);
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
}
}
}
@@ -1781,21 +1762,18 @@ static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
switch (retval) {
case SCSI_MLQUEUE_HOST_BUSY:
+ scsi_dma_unmap(SCpnt);
twa_free_request_id(tw_dev, request_id);
- twa_unmap_scsi_data(tw_dev, request_id);
break;
case 1:
- tw_dev->state[request_id] = TW_S_COMPLETED;
- twa_free_request_id(tw_dev, request_id);
- twa_unmap_scsi_data(tw_dev, request_id);
SCpnt->result = (DID_ERROR << 16);
+ scsi_dma_unmap(SCpnt);
done(SCpnt);
+ tw_dev->state[request_id] = TW_S_COMPLETED;
+ twa_free_request_id(tw_dev, request_id);
retval = 0;
}
out:
@@ -1863,8 +1841,8 @@ static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
} else {
- sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
- if (sg_count == 0)
+ sg_count = scsi_dma_map(srb);
+ if (sg_count < 0)
goto out;
scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1979,15 +1957,6 @@ static char *twa_string_lookup(twa_message_type *table, unsigned int code)
return(table[index].text);
} /* End twa_string_lookup() */
-/* This function will perform a pci-dma unmap */
-static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End twa_unmap_scsi_data() */
-
/* This function gets called when a disk is coming on-line */
static int twa_slave_configure(struct scsi_device *sdev)
{
diff --git a/drivers/scsi/3w-9xxx.h b/drivers/scsi/3w-9xxx.h
index 040f721..0fdc83c 100644
--- a/drivers/scsi/3w-9xxx.h
+++ b/drivers/scsi/3w-9xxx.h
@@ -324,11 +324,6 @@ static twa_message_type twa_error_table[] = {
#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE 1
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_9550SX_DRAIN_COMPLETED 0xFFFF
#define TW_SECTOR_SIZE 512
diff --git a/drivers/scsi/3w-sas.c b/drivers/scsi/3w-sas.c
index 2361772..f837485 100644
--- a/drivers/scsi/3w-sas.c
+++ b/drivers/scsi/3w-sas.c
@@ -290,26 +290,6 @@ static int twl_post_command_packet(TW_Device_Extension *tw_dev, int request_id)
return 0;
} /* End twl_post_command_packet() */
-/* This function will perform a pci-dma mapping for a scatter gather list */
-static int twl_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
-{
- int use_sg;
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- use_sg = scsi_dma_map(cmd);
- if (!use_sg)
- return 0;
- else if (use_sg < 0) {
- TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Failed to map scatter gather list");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End twl_map_scsi_sg_data() */
-
/* This function hands scsi cdb's to the firmware */
static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry_ISO *sglistarg)
{
@@ -357,8 +337,8 @@ static int twl_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
if (!sglistarg) {
/* Map sglist from scsi layer to cmd packet */
if (scsi_sg_count(srb)) {
- sg_count = twl_map_scsi_sg_data(tw_dev, request_id);
- if (sg_count == 0)
+ sg_count = scsi_dma_map(srb);
+ if (sg_count <= 0)
goto out;
scsi_for_each_sg(srb, sg, sg_count, i) {
@@ -1102,15 +1082,6 @@ out:
return retval;
} /* End twl_initialize_device_extension() */
-/* This function will perform a pci-dma unmap */
-static void twl_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
-{
- struct scsi_cmnd *cmd = tw_dev->srb[request_id];
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End twl_unmap_scsi_data() */
-
/* This function will handle attention interrupts */
static int twl_handle_attention_interrupt(TW_Device_Extension *tw_dev)
{
@@ -1251,11 +1222,11 @@ static irqreturn_t twl_interrupt(int irq, void *dev_instance)
}
/* Now complete the io */
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
tw_dev->state[request_id] = TW_S_COMPLETED;
twl_free_request_id(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
- twl_unmap_scsi_data(tw_dev, request_id);
}
/* Check for another response interrupt */
@@ -1400,10 +1371,12 @@ static int twl_reset_device_extension(TW_Device_Extension *tw_dev, int ioctl_res
if ((tw_dev->state[i] != TW_S_FINISHED) &&
(tw_dev->state[i] != TW_S_INITIAL) &&
(tw_dev->state[i] != TW_S_COMPLETED)) {
- if (tw_dev->srb[i]) {
- tw_dev->srb[i]->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- twl_unmap_scsi_data(tw_dev, i);
+ struct scsi_cmnd *cmd = tw_dev->srb[i];
+
+ if (cmd) {
+ cmd->result = (DID_RESET << 16);
+ scsi_dma_unmap(cmd);
+ cmd->scsi_done(cmd);
}
}
}
@@ -1507,9 +1480,6 @@ static int twl_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
retval = twl_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
if (retval) {
tw_dev->state[request_id] = TW_S_COMPLETED;
diff --git a/drivers/scsi/3w-sas.h b/drivers/scsi/3w-sas.h
index d474892..fec6449 100644
--- a/drivers/scsi/3w-sas.h
+++ b/drivers/scsi/3w-sas.h
@@ -103,10 +103,6 @@ static char *twl_aen_severity_table[] =
#define TW_CURRENT_DRIVER_BUILD 0
#define TW_CURRENT_DRIVER_BRANCH 0
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_SECTOR_SIZE 512
#define TW_MAX_UNITS 32
diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
index c75f204..2940bd7 100644
--- a/drivers/scsi/3w-xxxx.c
+++ b/drivers/scsi/3w-xxxx.c
@@ -1271,32 +1271,6 @@ static int tw_initialize_device_extension(TW_Device_Extension *tw_dev)
return 0;
} /* End tw_initialize_device_extension() */
-static int tw_map_scsi_sg_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- int use_sg;
-
- dprintk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data()\n");
-
- use_sg = scsi_dma_map(cmd);
- if (use_sg < 0) {
- printk(KERN_WARNING "3w-xxxx: tw_map_scsi_sg_data(): pci_map_sg() failed.\n");
- return 0;
- }
-
- cmd->SCp.phase = TW_PHASE_SGLIST;
- cmd->SCp.have_data_in = use_sg;
-
- return use_sg;
-} /* End tw_map_scsi_sg_data() */
-
-static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
-{
- dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
-
- if (cmd->SCp.phase == TW_PHASE_SGLIST)
- scsi_dma_unmap(cmd);
-} /* End tw_unmap_scsi_data() */
-
/* This function will reset a device extension */
static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
{
@@ -1319,8 +1293,8 @@ static int tw_reset_device_extension(TW_Device_Extension *tw_dev)
srb = tw_dev->srb[i];
if (srb != NULL) {
srb->result = (DID_RESET << 16);
- tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[i]);
+ scsi_dma_unmap(srb);
+ srb->scsi_done(srb);
}
}
}
@@ -1767,8 +1741,8 @@ static int tw_scsiop_read_write(TW_Device_Extension *tw_dev, int request_id)
command_packet->byte8.io.lba = lba;
command_packet->byte6.block_count = num_sectors;
- use_sg = tw_map_scsi_sg_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
- if (!use_sg)
+ use_sg = scsi_dma_map(srb);
+ if (use_sg <= 0)
return 1;
scsi_for_each_sg(tw_dev->srb[request_id], sg, use_sg, i) {
@@ -1955,9 +1929,6 @@ static int tw_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_c
/* Save the scsi command for use by the ISR */
tw_dev->srb[request_id] = SCpnt;
- /* Initialize phase to zero */
- SCpnt->SCp.phase = TW_PHASE_INITIAL;
-
switch (*command) {
case READ_10:
case READ_6:
@@ -2185,12 +2156,11 @@ static irqreturn_t tw_interrupt(int irq, void *dev_instance)
/* Now complete the io */
if ((error != TW_ISR_DONT_COMPLETE)) {
+ scsi_dma_unmap(tw_dev->srb[request_id]);
+ tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
tw_dev->state[request_id] = TW_S_COMPLETED;
tw_state_request_finish(tw_dev, request_id);
tw_dev->posted_request_count--;
- tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
-
- tw_unmap_scsi_data(tw_dev->tw_pci_dev, tw_dev->srb[request_id]);
}
}
diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
index 29b0b84e..6f65e66 100644
--- a/drivers/scsi/3w-xxxx.h
+++ b/drivers/scsi/3w-xxxx.h
@@ -195,11 +195,6 @@ static unsigned char tw_sense_table[][4] =
#define TW_AEN_SMART_FAIL 0x000F
#define TW_AEN_SBUF_FAIL 0x0024
-/* Phase defines */
-#define TW_PHASE_INITIAL 0
-#define TW_PHASE_SINGLE 1
-#define TW_PHASE_SGLIST 2
-
/* Misc defines */
#define TW_ALIGNMENT_6000 64 /* 64 bytes */
#define TW_ALIGNMENT_7000 4 /* 4 bytes */
diff --git a/drivers/scsi/aha1542.c b/drivers/scsi/aha1542.c
index ec43276..b95d277 100644
--- a/drivers/scsi/aha1542.c
+++ b/drivers/scsi/aha1542.c
@@ -375,9 +375,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
u8 lun = cmd->device->lun;
unsigned long flags;
int bufflen = scsi_bufflen(cmd);
- int mbo;
+ int mbo, sg_count;
struct mailbox *mb = aha1542->mb;
struct ccb *ccb = aha1542->ccb;
+ struct chain *cptr;
if (*cmd->cmnd == REQUEST_SENSE) {
/* Don't do the command - we have the sense data already */
@@ -397,6 +398,13 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len);
}
#endif
+ if (bufflen) { /* allocate memory before taking host_lock */
+ sg_count = scsi_sg_count(cmd);
+ cptr = kmalloc(sizeof(*cptr) * sg_count, GFP_KERNEL | GFP_DMA);
+ if (!cptr)
+ return SCSI_MLQUEUE_HOST_BUSY;
+ }
+
/* Use the outgoing mailboxes in a round-robin fashion, because this
is how the host adapter will scan for them */
@@ -441,19 +449,10 @@ static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd)
if (bufflen) {
struct scatterlist *sg;
- struct chain *cptr;
- int i, sg_count = scsi_sg_count(cmd);
+ int i;
ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */
- cmd->host_scribble = kmalloc(sizeof(*cptr)*sg_count,
- GFP_KERNEL | GFP_DMA);
- cptr = (struct chain *) cmd->host_scribble;
- if (cptr == NULL) {
- /* free the claimed mailbox slot */
- aha1542->int_cmds[mbo] = NULL;
- spin_unlock_irqrestore(sh->host_lock, flags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
+ cmd->host_scribble = (void *)cptr;
scsi_for_each_sg(cmd, sg, sg_count, i) {
any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg))
+ sg->offset);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 262ab83..9f77d23 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -226,6 +226,7 @@ static struct {
{"PIONEER", "CD-ROM DRM-624X", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"Promise", "VTrak E610f", NULL, BLIST_SPARSELUN | BLIST_NO_RSOC},
{"Promise", "", NULL, BLIST_SPARSELUN},
+ {"QNAP", "iSCSI Storage", NULL, BLIST_MAX_1024},
{"QUANTUM", "XP34301", "1071", BLIST_NOTQ},
{"REGAL", "CDC-4X", NULL, BLIST_MAX5LUN | BLIST_SINGLELUN},
{"SanDisk", "ImageMate CF-SD1", NULL, BLIST_FORCELUN},
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 60aae01..6efab1c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -897,6 +897,12 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
*/
if (*bflags & BLIST_MAX_512)
blk_queue_max_hw_sectors(sdev->request_queue, 512);
+ /*
+ * Max 1024 sector transfer length for targets that report incorrect
+ * max/optimal lengths and relied on the old block layer safe default
+ */
+ else if (*bflags & BLIST_MAX_1024)
+ blk_queue_max_hw_sectors(sdev->request_queue, 1024);
/*
* Some devices may not want to have a start command automatically
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index cd4c293..fe8875f 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -80,9 +80,10 @@ static int __init sh_pm_runtime_init(void)
if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
if (!of_machine_is_compatible("renesas,emev2") &&
!of_machine_is_compatible("renesas,r7s72100") &&
- !of_machine_is_compatible("renesas,r8a73a4") &&
#ifndef CONFIG_PM_GENERIC_DOMAINS_OF
+ !of_machine_is_compatible("renesas,r8a73a4") &&
!of_machine_is_compatible("renesas,r8a7740") &&
+ !of_machine_is_compatible("renesas,sh73a0") &&
#endif
!of_machine_is_compatible("renesas,r8a7778") &&
!of_machine_is_compatible("renesas,r8a7779") &&
@@ -90,9 +91,7 @@ static int __init sh_pm_runtime_init(void)
!of_machine_is_compatible("renesas,r8a7791") &&
!of_machine_is_compatible("renesas,r8a7792") &&
!of_machine_is_compatible("renesas,r8a7793") &&
- !of_machine_is_compatible("renesas,r8a7794") &&
- !of_machine_is_compatible("renesas,sh7372") &&
- !of_machine_is_compatible("renesas,sh73a0"))
+ !of_machine_is_compatible("renesas,r8a7794"))
return 0;
}
diff --git a/drivers/staging/media/omap4iss/Kconfig b/drivers/staging/media/omap4iss/Kconfig
index b78643f..072dac0 100644
--- a/drivers/staging/media/omap4iss/Kconfig
+++ b/drivers/staging/media/omap4iss/Kconfig
@@ -2,6 +2,7 @@ config VIDEO_OMAP4
bool "OMAP 4 Camera support"
depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
depends on HAS_DMA
+ select MFD_SYSCON
select VIDEOBUF2_DMA_CONTIG
---help---
Driver for an OMAP 4 ISS controller.
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index e0ad5e5..7ced940 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -17,6 +17,7 @@
#include <linux/dma-mapping.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
+#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
@@ -1386,6 +1387,16 @@ static int iss_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, iss);
+ /*
+ * TODO: When implementing DT support switch to syscon regmap lookup by
+ * phandle.
+ */
+ iss->syscon = syscon_regmap_lookup_by_compatible("syscon");
+ if (IS_ERR(iss->syscon)) {
+ ret = PTR_ERR(iss->syscon);
+ goto error;
+ }
+
/* Clocks */
ret = iss_map_mem_resource(pdev, iss, OMAP4_ISS_MEM_TOP);
if (ret < 0)
diff --git a/drivers/staging/media/omap4iss/iss.h b/drivers/staging/media/omap4iss/iss.h
index 734cfee..35df8b4 100644
--- a/drivers/staging/media/omap4iss/iss.h
+++ b/drivers/staging/media/omap4iss/iss.h
@@ -29,6 +29,8 @@
#include "iss_ipipe.h"
#include "iss_resizer.h"
+struct regmap;
+
#define to_iss_device(ptr_module) \
container_of(ptr_module, struct iss_device, ptr_module)
#define to_device(ptr_module) \
@@ -79,6 +81,7 @@ struct iss_reg {
/*
* struct iss_device - ISS device structure.
+ * @syscon: Regmap for the syscon register space
* @crashed: Bitmask of crashed entities (indexed by entity ID)
*/
struct iss_device {
@@ -93,6 +96,7 @@ struct iss_device {
struct resource *res[OMAP4_ISS_MEM_LAST];
void __iomem *regs[OMAP4_ISS_MEM_LAST];
+ struct regmap *syscon;
u64 raw_dmamask;
diff --git a/drivers/staging/media/omap4iss/iss_csiphy.c b/drivers/staging/media/omap4iss/iss_csiphy.c
index 7c3d55d..748607f 100644
--- a/drivers/staging/media/omap4iss/iss_csiphy.c
+++ b/drivers/staging/media/omap4iss/iss_csiphy.c
@@ -13,6 +13,7 @@
#include <linux/delay.h>
#include <linux/device.h>
+#include <linux/regmap.h>
#include "../../../../arch/arm/mach-omap2/control.h"
@@ -140,9 +141,11 @@ int omap4iss_csiphy_config(struct iss_device *iss,
* - bit [18] : CSIPHY1 CTRLCLK enable
* - bit [17:16] : CSIPHY1 config: 00 d-phy, 01/10 ccp2
*/
- cam_rx_ctrl = omap4_ctrl_pad_readl(
- OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
-
+ /*
+ * TODO: When implementing DT support specify the CONTROL_CAMERA_RX
+ * register offset in the syscon property instead of hardcoding it.
+ */
+ regmap_read(iss->syscon, 0x68, &cam_rx_ctrl);
if (subdevs->interface == ISS_INTERFACE_CSI2A_PHY1) {
cam_rx_ctrl &= ~(OMAP4_CAMERARX_CSI21_LANEENABLE_MASK |
@@ -166,8 +169,7 @@ int omap4iss_csiphy_config(struct iss_device *iss,
cam_rx_ctrl |= OMAP4_CAMERARX_CSI22_CTRLCLKEN_MASK;
}
- omap4_ctrl_pad_writel(cam_rx_ctrl,
- OMAP4_CTRL_MODULE_PAD_CORE_CONTROL_CAMERA_RX);
+ regmap_write(iss->syscon, 0x68, cam_rx_ctrl);
/* Reset used lane count */
csi2->phy->used_data_lanes = 0;
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index f1e5742..5bab1c6 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -299,11 +299,27 @@ static int xen_initial_domain_console_init(void)
return 0;
}
+static void xen_console_update_evtchn(struct xencons_info *info)
+{
+ if (xen_hvm_domain()) {
+ uint64_t v;
+ int err;
+
+ err = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
+ if (!err && v)
+ info->evtchn = v;
+ } else
+ info->evtchn = xen_start_info->console.domU.evtchn;
+}
+
void xen_console_resume(void)
{
struct xencons_info *info = vtermno_to_xencons(HVC_COOKIE);
- if (info != NULL && info->irq)
+ if (info != NULL && info->irq) {
+ if (!xen_initial_domain())
+ xen_console_update_evtchn(info);
rebind_evtchn_irq(info->evtchn, info->irq);
+ }
}
static void xencons_disconnect_backend(struct xencons_info *info)
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index 08da4d3..46bcebb 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1998,6 +1998,8 @@ pci_wch_ch38x_setup(struct serial_private *priv,
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCI_DEVICE_ID_EXAR_XR17V8358 0x8358
+
/* Unknown vendors/cards - this should not be in linux/pci_ids.h */
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1584 0x1584
#define PCI_SUBDEVICE_ID_UNKNOWN_0x1588 0x1588
@@ -2520,6 +2522,13 @@ static struct pci_serial_quirk pci_serial_quirks[] __refdata = {
.subdevice = PCI_ANY_ID,
.setup = pci_xr17v35x_setup,
},
+ {
+ .vendor = PCI_VENDOR_ID_EXAR,
+ .device = PCI_DEVICE_ID_EXAR_XR17V8358,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_xr17v35x_setup,
+ },
/*
* Xircom cards
*/
@@ -2999,6 +3008,7 @@ enum pci_board_num_t {
pbn_exar_XR17V352,
pbn_exar_XR17V354,
pbn_exar_XR17V358,
+ pbn_exar_XR17V8358,
pbn_exar_ibm_saturn,
pbn_pasemi_1682M,
pbn_ni8430_2,
@@ -3685,6 +3695,14 @@ static struct pciserial_board pci_boards[] = {
.reg_shift = 0,
.first_offset = 0,
},
+ [pbn_exar_XR17V8358] = {
+ .flags = FL_BASE0,
+ .num_ports = 16,
+ .base_baud = 7812500,
+ .uart_offset = 0x400,
+ .reg_shift = 0,
+ .first_offset = 0,
+ },
[pbn_exar_ibm_saturn] = {
.flags = FL_BASE0,
.num_ports = 1,
@@ -5080,7 +5098,7 @@ static struct pci_device_id serial_pci_tbl[] = {
0,
0, pbn_exar_XR17C158 },
/*
- * Exar Corp. XR17V35[248] Dual/Quad/Octal PCIe UARTs
+ * Exar Corp. XR17V[48]35[248] Dual/Quad/Octal/Hexa PCIe UARTs
*/
{ PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V352,
PCI_ANY_ID, PCI_ANY_ID,
@@ -5094,7 +5112,10 @@ static struct pci_device_id serial_pci_tbl[] = {
PCI_ANY_ID, PCI_ANY_ID,
0,
0, pbn_exar_XR17V358 },
-
+ { PCI_VENDOR_ID_EXAR, PCI_DEVICE_ID_EXAR_XR17V8358,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0,
+ 0, pbn_exar_XR17V8358 },
/*
* Topic TP560 Data/Fax/Voice 56k modem (reported by Evan Clarke)
*/
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index d58fe47..27dade2 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -880,6 +880,7 @@ static int atmel_prepare_tx_dma(struct uart_port *port)
config.direction = DMA_MEM_TO_DEV;
config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.dst_addr = port->mapbase + ATMEL_US_THR;
+ config.dst_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_tx,
&config);
@@ -1059,6 +1060,7 @@ static int atmel_prepare_rx_dma(struct uart_port *port)
config.direction = DMA_DEV_TO_MEM;
config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
config.src_addr = port->mapbase + ATMEL_US_RHR;
+ config.src_maxburst = 1;
ret = dmaengine_slave_config(atmel_port->chan_rx,
&config);
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 5b73afb..137381e 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -346,7 +346,6 @@ static const struct of_device_id of_platform_serial_table[] = {
{ .compatible = "ibm,qpace-nwp-serial",
.data = (void *)PORT_NWPSERIAL, },
#endif
- { .type = "serial", .data = (void *)PORT_UNKNOWN, },
{ /* end of list */ },
};
diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
index cf08876..a0ae942 100644
--- a/drivers/tty/serial/samsung.c
+++ b/drivers/tty/serial/samsung.c
@@ -1068,8 +1068,9 @@ static int s3c64xx_serial_startup(struct uart_port *port)
spin_lock_irqsave(&port->lock, flags);
ufcon = rd_regl(port, S3C2410_UFCON);
- ufcon |= S3C2410_UFCON_RESETRX | S3C2410_UFCON_RESETTX |
- S5PV210_UFCON_RXTRIG8;
+ ufcon |= S3C2410_UFCON_RESETRX | S5PV210_UFCON_RXTRIG8;
+ if (!uart_console(port))
+ ufcon |= S3C2410_UFCON_RESETTX;
wr_regl(port, S3C2410_UFCON, ufcon);
enable_rx_pio(ourport);
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index eb5b03b..0b7bb12 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1770,7 +1770,7 @@ static const struct file_operations uart_proc_fops = {
* @port: the port to write the message
* @s: array of characters
* @count: number of characters in string to write
- * @write: function to write character to port
+ * @putchar: function to write character to port
*/
void uart_console_write(struct uart_port *port, const char *s,
unsigned int count,
diff --git a/drivers/tty/serial/uartlite.c b/drivers/tty/serial/uartlite.c
index 708eead..b1c6bd3 100644
--- a/drivers/tty/serial/uartlite.c
+++ b/drivers/tty/serial/uartlite.c
@@ -632,7 +632,8 @@ MODULE_DEVICE_TABLE(of, ulite_of_match);
static int ulite_probe(struct platform_device *pdev)
{
- struct resource *res, *res2;
+ struct resource *res;
+ int irq;
int id = pdev->id;
#ifdef CONFIG_OF
const __be32 *prop;
@@ -646,11 +647,11 @@ static int ulite_probe(struct platform_device *pdev)
if (!res)
return -ENODEV;
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2)
- return -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -ENXIO;
- return ulite_assign(&pdev->dev, id, res->start, res2->start);
+ return ulite_assign(&pdev->dev, id, res->start, irq);
}
static int ulite_remove(struct platform_device *pdev)
diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
index f218ec6..3ddbac7 100644
--- a/drivers/tty/serial/xilinx_uartps.c
+++ b/drivers/tty/serial/xilinx_uartps.c
@@ -1331,9 +1331,9 @@ static SIMPLE_DEV_PM_OPS(cdns_uart_dev_pm_ops, cdns_uart_suspend,
*/
static int cdns_uart_probe(struct platform_device *pdev)
{
- int rc, id;
+ int rc, id, irq;
struct uart_port *port;
- struct resource *res, *res2;
+ struct resource *res;
struct cdns_uart *cdns_uart_data;
cdns_uart_data = devm_kzalloc(&pdev->dev, sizeof(*cdns_uart_data),
@@ -1380,9 +1380,9 @@ static int cdns_uart_probe(struct platform_device *pdev)
goto err_out_clk_disable;
}
- res2 = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (!res2) {
- rc = -ENODEV;
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ rc = -ENXIO;
goto err_out_clk_disable;
}
@@ -1411,7 +1411,7 @@ static int cdns_uart_probe(struct platform_device *pdev)
* and triggers invocation of the config_port() entry point.
*/
port->mapbase = res->start;
- port->irq = res2->start;
+ port->irq = irq;
port->dev = &pdev->dev;
port->uartclk = clk_get_rate(cdns_uart_data->uartclk);
port->private_data = cdns_uart_data;
diff --git a/drivers/tty/tty_ioctl.c b/drivers/tty/tty_ioctl.c
index 632fc81..8e53fe4 100644
--- a/drivers/tty/tty_ioctl.c
+++ b/drivers/tty/tty_ioctl.c
@@ -536,7 +536,7 @@ EXPORT_SYMBOL(tty_termios_hw_change);
* Locking: termios_rwsem
*/
-static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
+int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
{
struct ktermios old_termios;
struct tty_ldisc *ld;
@@ -569,6 +569,7 @@ static int tty_set_termios(struct tty_struct *tty, struct ktermios *new_termios)
up_write(&tty->termios_rwsem);
return 0;
}
+EXPORT_SYMBOL_GPL(tty_set_termios);
/**
* set_termios - set termios values for a tty
diff --git a/drivers/usb/chipidea/otg_fsm.c b/drivers/usb/chipidea/otg_fsm.c
index 083acf4..19d655a 100644
--- a/drivers/usb/chipidea/otg_fsm.c
+++ b/drivers/usb/chipidea/otg_fsm.c
@@ -520,7 +520,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
{
struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
- mutex_unlock(&fsm->lock);
if (on) {
ci_role_stop(ci);
ci_role_start(ci, CI_ROLE_HOST);
@@ -529,7 +528,6 @@ static int ci_otg_start_host(struct otg_fsm *fsm, int on)
hw_device_reset(ci);
ci_role_start(ci, CI_ROLE_GADGET);
}
- mutex_lock(&fsm->lock);
return 0;
}
@@ -537,12 +535,10 @@ static int ci_otg_start_gadget(struct otg_fsm *fsm, int on)
{
struct ci_hdrc *ci = container_of(fsm, struct ci_hdrc, fsm);
- mutex_unlock(&fsm->lock);
if (on)
usb_gadget_vbus_connect(&ci->gadget);
else
usb_gadget_vbus_disconnect(&ci->gadget);
- mutex_lock(&fsm->lock);
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 3e15add..5c8f581 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1142,11 +1142,16 @@ static int acm_probe(struct usb_interface *intf,
}
while (buflen > 0) {
+ elength = buffer[0];
+ if (!elength) {
+ dev_err(&intf->dev, "skipping garbage byte\n");
+ elength = 1;
+ goto next_desc;
+ }
if (buffer[1] != USB_DT_CS_INTERFACE) {
dev_err(&intf->dev, "skipping garbage\n");
goto next_desc;
}
- elength = buffer[0];
switch (buffer[2]) {
case USB_CDC_UNION_TYPE: /* we've found it */
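
The cdc-acm hunk above reorders the extra-descriptor walk so the length byte is validated before anything else: a zero-length descriptor would otherwise never advance the buffer and the while (buflen > 0) loop would spin forever. A small user-space sketch of the same walk, assuming a plain (length, type, payload...) record layout instead of the real USB class descriptors; walk_records() and the sample buffer below are illustrative names, not driver code:

#include <stdio.h>

/* Walk a buffer of (length, type, payload...) records the way the probe
 * loop above walks class-specific descriptors.  A zero length byte is
 * consumed as a single byte of garbage so the loop always makes progress. */
static void walk_records(const unsigned char *buf, int buflen)
{
	int elength;

	while (buflen > 0) {
		elength = buf[0];
		if (!elength) {
			fprintf(stderr, "skipping garbage byte\n");
			elength = 1;
			goto next;
		}
		if (elength < 2 || elength > buflen) {
			fprintf(stderr, "truncated record, stopping\n");
			break;
		}
		printf("record type 0x%02x, %d bytes\n", buf[1], elength);
next:
		buf += elength;
		buflen -= elength;
	}
}

int main(void)
{
	/* two well-formed records with a stray zero byte between them */
	const unsigned char demo[] = { 3, 0x24, 0xaa, 0x00, 4, 0x24, 0x01, 0x02 };

	walk_records(demo, sizeof(demo));
	return 0;
}
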
diff --git a/drivers/usb/host/ehci-msm.c b/drivers/usb/host/ehci-msm.c
index 9db74ca..275c92e 100644
--- a/drivers/usb/host/ehci-msm.c
+++ b/drivers/usb/host/ehci-msm.c
@@ -88,13 +88,20 @@ static int ehci_msm_probe(struct platform_device *pdev)
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- ret = PTR_ERR(hcd->regs);
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to get memory resource\n");
+ ret = -ENODEV;
goto put_hcd;
}
+
hcd->rsrc_start = res->start;
hcd->rsrc_len = resource_size(res);
+ hcd->regs = devm_ioremap(&pdev->dev, hcd->rsrc_start, hcd->rsrc_len);
+ if (!hcd->regs) {
+ dev_err(&pdev->dev, "ioremap failed\n");
+ ret = -ENOMEM;
+ goto put_hcd;
+ }
/*
* OTG driver takes care of PHY initialization, clock management,
diff --git a/drivers/usb/storage/uas-detect.h b/drivers/usb/storage/uas-detect.h
index 9893d69..f58caa9 100644
--- a/drivers/usb/storage/uas-detect.h
+++ b/drivers/usb/storage/uas-detect.h
@@ -51,7 +51,8 @@ static int uas_find_endpoints(struct usb_host_interface *alt,
}
static int uas_use_uas_driver(struct usb_interface *intf,
- const struct usb_device_id *id)
+ const struct usb_device_id *id,
+ unsigned long *flags_ret)
{
struct usb_host_endpoint *eps[4] = { };
struct usb_device *udev = interface_to_usbdev(intf);
@@ -73,7 +74,7 @@ static int uas_use_uas_driver(struct usb_interface *intf,
* this writing the following versions exist:
* ASM1051 - no uas support version
* ASM1051 - with broken (*) uas support
- * ASM1053 - with working uas support
+ * ASM1053 - with working uas support, but problems with large xfers
* ASM1153 - with working uas support
*
* Devices with these chips re-use a number of device-ids over the
@@ -103,6 +104,9 @@ static int uas_use_uas_driver(struct usb_interface *intf,
} else if (usb_ss_max_streams(&eps[1]->ss_ep_comp) == 32) {
/* Possibly an ASM1051, disable uas */
flags |= US_FL_IGNORE_UAS;
+ } else {
+ /* ASM1053, these have issues with large transfers */
+ flags |= US_FL_MAX_SECTORS_240;
}
}
@@ -132,5 +136,8 @@ static int uas_use_uas_driver(struct usb_interface *intf,
return 0;
}
+ if (flags_ret)
+ *flags_ret = flags;
+
return 1;
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 6cdabdc..6d3122a 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -759,7 +759,10 @@ static int uas_eh_bus_reset_handler(struct scsi_cmnd *cmnd)
static int uas_slave_alloc(struct scsi_device *sdev)
{
- sdev->hostdata = (void *)sdev->host->hostdata;
+ struct uas_dev_info *devinfo =
+ (struct uas_dev_info *)sdev->host->hostdata;
+
+ sdev->hostdata = devinfo;
/* USB has unusual DMA-alignment requirements: Although the
* starting address of each scatter-gather element doesn't matter,
@@ -778,6 +781,11 @@ static int uas_slave_alloc(struct scsi_device *sdev)
*/
blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1));
+ if (devinfo->flags & US_FL_MAX_SECTORS_64)
+ blk_queue_max_hw_sectors(sdev->request_queue, 64);
+ else if (devinfo->flags & US_FL_MAX_SECTORS_240)
+ blk_queue_max_hw_sectors(sdev->request_queue, 240);
+
return 0;
}
@@ -887,8 +895,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
struct Scsi_Host *shost = NULL;
struct uas_dev_info *devinfo;
struct usb_device *udev = interface_to_usbdev(intf);
+ unsigned long dev_flags;
- if (!uas_use_uas_driver(intf, id))
+ if (!uas_use_uas_driver(intf, id, &dev_flags))
return -ENODEV;
if (uas_switch_interface(udev, intf))
@@ -910,8 +919,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
devinfo->udev = udev;
devinfo->resetting = 0;
devinfo->shutdown = 0;
- devinfo->flags = id->driver_info;
- usb_stor_adjust_quirks(udev, &devinfo->flags);
+ devinfo->flags = dev_flags;
init_usb_anchor(&devinfo->cmd_urbs);
init_usb_anchor(&devinfo->sense_urbs);
init_usb_anchor(&devinfo->data_urbs);
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 5600c33..6c10c88 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -479,7 +479,8 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
US_FL_SINGLE_LUN | US_FL_NO_WP_DETECT |
US_FL_NO_READ_DISC_INFO | US_FL_NO_READ_CAPACITY_16 |
US_FL_INITIAL_READ10 | US_FL_WRITE_CACHE |
- US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES);
+ US_FL_NO_ATA_1X | US_FL_NO_REPORT_OPCODES |
+ US_FL_MAX_SECTORS_240);
p = quirks;
while (*p) {
@@ -520,6 +521,9 @@ void usb_stor_adjust_quirks(struct usb_device *udev, unsigned long *fflags)
case 'f':
f |= US_FL_NO_REPORT_OPCODES;
break;
+ case 'g':
+ f |= US_FL_MAX_SECTORS_240;
+ break;
case 'h':
f |= US_FL_CAPACITY_HEURISTICS;
break;
@@ -1080,7 +1084,7 @@ static int storage_probe(struct usb_interface *intf,
/* If uas is enabled and this device can do uas then ignore it. */
#if IS_ENABLED(CONFIG_USB_UAS)
- if (uas_use_uas_driver(intf, id))
+ if (uas_use_uas_driver(intf, id, NULL))
return -ENXIO;
#endif
diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
index 69fab0f..e9851ad 100644
--- a/drivers/vfio/pci/vfio_pci.c
+++ b/drivers/vfio/pci/vfio_pci.c
@@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count)
mutex_lock(&vdev->igate);
if (vdev->req_trigger) {
- dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+ if (!(count % 10))
+ dev_notice_ratelimited(&vdev->pdev->dev,
+ "Relaying device request to user (#%u)\n",
+ count);
eventfd_signal(vdev->req_trigger, 1);
+ } else if (count == 0) {
+ dev_warn(&vdev->pdev->dev,
+ "No device request channel registered, blocked until released by user\n");
}
mutex_unlock(&vdev->igate);
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 0d33662..e1278fe 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev)
void *device_data = device->device_data;
struct vfio_unbound_dev *unbound;
unsigned int i = 0;
+ long ret;
+ bool interrupted = false;
/*
* The group exists so long as we have a device reference. Get
@@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev)
vfio_device_put(device);
- } while (wait_event_interruptible_timeout(vfio.release_q,
- !vfio_dev_present(group, dev),
- HZ * 10) <= 0);
+ if (interrupted) {
+ ret = wait_event_timeout(vfio.release_q,
+ !vfio_dev_present(group, dev), HZ * 10);
+ } else {
+ ret = wait_event_interruptible_timeout(vfio.release_q,
+ !vfio_dev_present(group, dev), HZ * 10);
+ if (ret == -ERESTARTSYS) {
+ interrupted = true;
+ dev_warn(dev,
+ "Device is currently in use, task"
+ " \"%s\" (%d) "
+ "blocked until device is released",
+ current->comm, task_pid_nr(current));
+ }
+ }
+ } while (ret <= 0);
vfio_group_put(group);
diff --git a/drivers/xen/events/events_2l.c b/drivers/xen/events/events_2l.c
index 5db43fc..7dd4631 100644
--- a/drivers/xen/events/events_2l.c
+++ b/drivers/xen/events/events_2l.c
@@ -345,6 +345,15 @@ irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
+static void evtchn_2l_resume(void)
+{
+ int i;
+
+ for_each_online_cpu(i)
+ memset(per_cpu(cpu_evtchn_mask, i), 0, sizeof(xen_ulong_t) *
+ EVTCHN_2L_NR_CHANNELS/BITS_PER_EVTCHN_WORD);
+}
+
static const struct evtchn_ops evtchn_ops_2l = {
.max_channels = evtchn_2l_max_channels,
.nr_channels = evtchn_2l_max_channels,
@@ -356,6 +365,7 @@ static const struct evtchn_ops evtchn_ops_2l = {
.mask = evtchn_2l_mask,
.unmask = evtchn_2l_unmask,
.handle_events = evtchn_2l_handle_events,
+ .resume = evtchn_2l_resume,
};
void __init xen_evtchn_2l_init(void)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 70fba97..2b8553b 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -529,8 +529,8 @@ static unsigned int __startup_pirq(unsigned int irq)
if (rc)
goto err;
- bind_evtchn_to_cpu(evtchn, 0);
info->evtchn = evtchn;
+ bind_evtchn_to_cpu(evtchn, 0);
rc = xen_evtchn_port_setup(info);
if (rc)
@@ -1279,8 +1279,9 @@ void rebind_evtchn_irq(int evtchn, int irq)
mutex_unlock(&irq_mapping_update_lock);
- /* new event channels are always bound to cpu 0 */
- irq_set_affinity(irq, cpumask_of(0));
+ bind_evtchn_to_cpu(evtchn, info->cpu);
+ /* This will be deferred until interrupt is processed */
+ irq_set_affinity(irq, cpumask_of(info->cpu));
/* Unmask the event channel. */
enable_irq(irq);
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index d5bb1a3..8927485 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -327,30 +327,10 @@ static int map_grant_pages(struct grant_map *map)
return err;
}
-struct unmap_grant_pages_callback_data
-{
- struct completion completion;
- int result;
-};
-
-static void unmap_grant_callback(int result,
- struct gntab_unmap_queue_data *data)
-{
- struct unmap_grant_pages_callback_data* d = data->data;
-
- d->result = result;
- complete(&d->completion);
-}
-
static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
{
int i, err = 0;
struct gntab_unmap_queue_data unmap_data;
- struct unmap_grant_pages_callback_data data;
-
- init_completion(&data.completion);
- unmap_data.data = &data;
- unmap_data.done= &unmap_grant_callback;
if (map->notify.flags & UNMAP_NOTIFY_CLEAR_BYTE) {
int pgno = (map->notify.addr >> PAGE_SHIFT);
@@ -367,11 +347,9 @@ static int __unmap_grant_pages(struct grant_map *map, int offset, int pages)
unmap_data.pages = map->pages + offset;
unmap_data.count = pages;
- gnttab_unmap_refs_async(&unmap_data);
-
- wait_for_completion(&data.completion);
- if (data.result)
- return data.result;
+ err = gnttab_unmap_refs_sync(&unmap_data);
+ if (err)
+ return err;
for (i = 0; i < pages; i++) {
if (map->unmap_ops[offset+i].status)
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 17972fb..b1c7170 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -123,6 +123,11 @@ struct gnttab_ops {
int (*query_foreign_access)(grant_ref_t ref);
};
+struct unmap_refs_callback_data {
+ struct completion completion;
+ int result;
+};
+
static struct gnttab_ops *gnttab_interface;
static int grant_table_version;
@@ -863,6 +868,29 @@ void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item)
}
EXPORT_SYMBOL_GPL(gnttab_unmap_refs_async);
+static void unmap_refs_callback(int result,
+ struct gntab_unmap_queue_data *data)
+{
+ struct unmap_refs_callback_data *d = data->data;
+
+ d->result = result;
+ complete(&d->completion);
+}
+
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item)
+{
+ struct unmap_refs_callback_data data;
+
+ init_completion(&data.completion);
+ item->data = &data;
+ item->done = &unmap_refs_callback;
+ gnttab_unmap_refs_async(item);
+ wait_for_completion(&data.completion);
+
+ return data.result;
+}
+EXPORT_SYMBOL_GPL(gnttab_unmap_refs_sync);
+
static int gnttab_map_frames_v1(xen_pfn_t *frames, unsigned int nr_gframes)
{
int rc;
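
gnttab_unmap_refs_sync() above is the familiar synchronous wrapper over a callback-driven operation: a stack-allocated completion and result slot are handed to the async path through the done/data hooks, and the caller sleeps until the callback fills them in. A rough user-space analogy of the same shape, with a pthread condition variable standing in for the kernel completion; every name in it (struct request, submit_async, submit_sync) is made up for the sketch and is not kernel API. Build with cc -pthread.

#include <pthread.h>
#include <stdio.h>

/* A user-space stand-in for the kernel completion used above. */
struct completion {
	pthread_mutex_t lock;
	pthread_cond_t cond;
	int done;
};

static void init_completion(struct completion *c)
{
	pthread_mutex_init(&c->lock, NULL);
	pthread_cond_init(&c->cond, NULL);
	c->done = 0;
}

static void complete(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	c->done = 1;
	pthread_cond_signal(&c->cond);
	pthread_mutex_unlock(&c->lock);
}

static void wait_for_completion(struct completion *c)
{
	pthread_mutex_lock(&c->lock);
	while (!c->done)
		pthread_cond_wait(&c->cond, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

/* The "async" request: a callback plus a context pointer, like the
 * done/data pair on gntab_unmap_queue_data. */
struct request {
	void (*done)(int result, struct request *req);
	void *data;
};

static void *worker(void *arg)
{
	struct request *req = arg;

	req->done(0, req);		/* pretend the work succeeded */
	return NULL;
}

static void submit_async(struct request *req)
{
	pthread_t t;

	pthread_create(&t, NULL, worker, req);
	pthread_detach(t);
}

struct sync_data {
	struct completion completion;
	int result;
};

/* Callback: stash the result and wake the sleeping caller. */
static void sync_callback(int result, struct request *req)
{
	struct sync_data *d = req->data;

	d->result = result;
	complete(&d->completion);
}

/* Synchronous wrapper, same shape as gnttab_unmap_refs_sync(). */
static int submit_sync(struct request *req)
{
	struct sync_data data;

	init_completion(&data.completion);
	req->data = &data;
	req->done = sync_callback;
	submit_async(req);
	wait_for_completion(&data.completion);

	return data.result;
}

int main(void)
{
	struct request req = { 0 };

	printf("submit_sync() returned %d\n", submit_sync(&req));
	return 0;
}
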
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index bf19407..9e6a851 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -131,6 +131,8 @@ static void do_suspend(void)
goto out_resume;
}
+ xen_arch_suspend();
+
si.cancelled = 1;
err = stop_machine(xen_suspend, &si, cpumask_of(0));
@@ -148,11 +150,12 @@ static void do_suspend(void)
si.cancelled = 1;
}
+ xen_arch_resume();
+
out_resume:
- if (!si.cancelled) {
- xen_arch_resume();
+ if (!si.cancelled)
xs_resume();
- } else
+ else
xs_suspend_cancel();
dpm_resume_end(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index 810ad41..4c54932 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -235,7 +235,7 @@ retry:
#define SLABS_PER_PAGE (1 << (PAGE_SHIFT - IO_TLB_SHIFT))
#define IO_TLB_MIN_SLABS ((1<<20) >> IO_TLB_SHIFT)
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- xen_io_tlb_start = (void *)__get_free_pages(__GFP_NOWARN, order);
+ xen_io_tlb_start = (void *)xen_get_swiotlb_free_pages(order);
if (xen_io_tlb_start)
break;
order--;
diff --git a/drivers/xen/xen-pciback/conf_space.c b/drivers/xen/xen-pciback/conf_space.c
index 75fe3d4..9c23420 100644
--- a/drivers/xen/xen-pciback/conf_space.c
+++ b/drivers/xen/xen-pciback/conf_space.c
@@ -16,8 +16,8 @@
#include "conf_space.h"
#include "conf_space_quirks.h"
-bool permissive;
-module_param(permissive, bool, 0644);
+bool xen_pcibk_permissive;
+module_param_named(permissive, xen_pcibk_permissive, bool, 0644);
/* This is where xen_pcibk_read_config_byte, xen_pcibk_read_config_word,
* xen_pcibk_write_config_word, and xen_pcibk_write_config_byte are created. */
@@ -262,7 +262,7 @@ int xen_pcibk_config_write(struct pci_dev *dev, int offset, int size, u32 value)
* This means that some fields may still be read-only because
* they have entries in the config_field list that intercept
* the write and do nothing. */
- if (dev_data->permissive || permissive) {
+ if (dev_data->permissive || xen_pcibk_permissive) {
switch (size) {
case 1:
err = pci_write_config_byte(dev, offset,
diff --git a/drivers/xen/xen-pciback/conf_space.h b/drivers/xen/xen-pciback/conf_space.h
index 2e1d73d..62461a8 100644
--- a/drivers/xen/xen-pciback/conf_space.h
+++ b/drivers/xen/xen-pciback/conf_space.h
@@ -64,7 +64,7 @@ struct config_field_entry {
void *data;
};
-extern bool permissive;
+extern bool xen_pcibk_permissive;
#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
diff --git a/drivers/xen/xen-pciback/conf_space_header.c b/drivers/xen/xen-pciback/conf_space_header.c
index c2260a0..ad3d17d 100644
--- a/drivers/xen/xen-pciback/conf_space_header.c
+++ b/drivers/xen/xen-pciback/conf_space_header.c
@@ -118,7 +118,7 @@ static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
cmd->val = value;
- if (!permissive && (!dev_data || !dev_data->permissive))
+ if (!xen_pcibk_permissive && (!dev_data || !dev_data->permissive))
return 0;
/* Only allow the guest to control certain bits. */
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 564b315..5390a67 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -57,6 +57,7 @@
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
+#include <xen/xen-ops.h>
#include <xen/page.h>
#include <xen/hvm.h>
@@ -735,6 +736,30 @@ static int __init xenstored_local_init(void)
return err;
}
+static int xenbus_resume_cb(struct notifier_block *nb,
+ unsigned long action, void *data)
+{
+ int err = 0;
+
+ if (xen_hvm_domain()) {
+ uint64_t v;
+
+ err = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN, &v);
+ if (!err && v)
+ xen_store_evtchn = v;
+ else
+ pr_warn("Cannot update xenstore event channel: %d\n",
+ err);
+ } else
+ xen_store_evtchn = xen_start_info->store_evtchn;
+
+ return err;
+}
+
+static struct notifier_block xenbus_resume_nb = {
+ .notifier_call = xenbus_resume_cb,
+};
+
static int __init xenbus_init(void)
{
int err = 0;
@@ -793,6 +818,10 @@ static int __init xenbus_init(void)
goto out_error;
}
+ if ((xen_store_domain_type != XS_LOCAL) &&
+ (xen_store_domain_type != XS_UNKNOWN))
+ xen_resume_notifier_register(&xenbus_resume_nb);
+
#ifdef CONFIG_XEN_COMPAT_XENFS
/*
* Create xenfs mountpoint in /proc for compatibility with
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index cde698a..a2ae427 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1802,6 +1802,8 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
+ BTRFS_I(inode)->last_trans = btrfs_stack_inode_transid(inode_item);
+
inode->i_version = btrfs_stack_inode_sequence(inode_item);
inode->i_rdev = 0;
*rdev = btrfs_stack_inode_rdev(inode_item);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 1eef4ee..0ec8e22 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3178,8 +3178,8 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
btrfs_mark_buffer_dirty(leaf);
- btrfs_release_path(path);
fail:
+ btrfs_release_path(path);
if (ret)
btrfs_abort_transaction(trans, root, ret);
return ret;
@@ -3305,8 +3305,7 @@ again:
spin_lock(&block_group->lock);
if (block_group->cached != BTRFS_CACHE_FINISHED ||
- !btrfs_test_opt(root, SPACE_CACHE) ||
- block_group->delalloc_bytes) {
+ !btrfs_test_opt(root, SPACE_CACHE)) {
/*
* don't bother trying to write stuff out _if_
* a) we're not cached,
@@ -3408,17 +3407,14 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
int loops = 0;
spin_lock(&cur_trans->dirty_bgs_lock);
- if (!list_empty(&cur_trans->dirty_bgs)) {
- list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ if (list_empty(&cur_trans->dirty_bgs)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ return 0;
}
+ list_splice_init(&cur_trans->dirty_bgs, &dirty);
spin_unlock(&cur_trans->dirty_bgs_lock);
again:
- if (list_empty(&dirty)) {
- btrfs_free_path(path);
- return 0;
- }
-
/*
* make sure all the block groups on our dirty list actually
* exist
@@ -3431,18 +3427,16 @@ again:
return -ENOMEM;
}
+ /*
+ * cache_write_mutex is here only to save us from balance or automatic
+ * removal of empty block groups deleting this block group while we are
+ * writing out the cache
+ */
+ mutex_lock(&trans->transaction->cache_write_mutex);
while (!list_empty(&dirty)) {
cache = list_first_entry(&dirty,
struct btrfs_block_group_cache,
dirty_list);
-
- /*
- * cache_write_mutex is here only to save us from balance
- * deleting this block group while we are writing out the
- * cache
- */
- mutex_lock(&trans->transaction->cache_write_mutex);
-
/*
* this can happen if something re-dirties a block
* group that is already under IO. Just wait for it to
@@ -3495,7 +3489,6 @@ again:
}
if (!ret)
ret = write_one_cache_group(trans, root, path, cache);
- mutex_unlock(&trans->transaction->cache_write_mutex);
/* if its not on the io list, we need to put the block group */
if (should_put)
@@ -3503,7 +3496,16 @@ again:
if (ret)
break;
+
+ /*
+ * Avoid blocking other tasks for too long. It might even save
+ * us from writing caches for block groups that are going to be
+ * removed.
+ */
+ mutex_unlock(&trans->transaction->cache_write_mutex);
+ mutex_lock(&trans->transaction->cache_write_mutex);
}
+ mutex_unlock(&trans->transaction->cache_write_mutex);
/*
* go through delayed refs for all the stuff we've just kicked off
@@ -3514,8 +3516,15 @@ again:
loops++;
spin_lock(&cur_trans->dirty_bgs_lock);
list_splice_init(&cur_trans->dirty_bgs, &dirty);
+ /*
+ * dirty_bgs_lock protects us from concurrent block group
+ * deletes too (not just cache_write_mutex).
+ */
+ if (!list_empty(&dirty)) {
+ spin_unlock(&cur_trans->dirty_bgs_lock);
+ goto again;
+ }
spin_unlock(&cur_trans->dirty_bgs_lock);
- goto again;
}
btrfs_free_path(path);
@@ -7537,7 +7546,7 @@ static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
* returns the key for the extent through ins, and a tree buffer for
* the first block of the extent through buf.
*
- * returns the tree buffer or NULL.
+ * returns the tree buffer or an ERR_PTR on error.
*/
struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
@@ -7548,6 +7557,7 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
struct btrfs_key ins;
struct btrfs_block_rsv *block_rsv;
struct extent_buffer *buf;
+ struct btrfs_delayed_extent_op *extent_op;
u64 flags = 0;
int ret;
u32 blocksize = root->nodesize;
@@ -7568,13 +7578,14 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
ret = btrfs_reserve_extent(root, blocksize, blocksize,
empty_size, hint, &ins, 0, 0);
- if (ret) {
- unuse_block_rsv(root->fs_info, block_rsv, blocksize);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_unuse;
buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
- BUG_ON(IS_ERR(buf)); /* -ENOMEM */
+ if (IS_ERR(buf)) {
+ ret = PTR_ERR(buf);
+ goto out_free_reserved;
+ }
if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
if (parent == 0)
@@ -7584,9 +7595,11 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
BUG_ON(parent > 0);
if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
- struct btrfs_delayed_extent_op *extent_op;
extent_op = btrfs_alloc_delayed_extent_op();
- BUG_ON(!extent_op); /* -ENOMEM */
+ if (!extent_op) {
+ ret = -ENOMEM;
+ goto out_free_buf;
+ }
if (key)
memcpy(&extent_op->key, key, sizeof(extent_op->key));
else
@@ -7601,13 +7614,24 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
extent_op->level = level;
ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
- ins.objectid,
- ins.offset, parent, root_objectid,
- level, BTRFS_ADD_DELAYED_EXTENT,
- extent_op, 0);
- BUG_ON(ret); /* -ENOMEM */
+ ins.objectid, ins.offset,
+ parent, root_objectid, level,
+ BTRFS_ADD_DELAYED_EXTENT,
+ extent_op, 0);
+ if (ret)
+ goto out_free_delayed;
}
return buf;
+
+out_free_delayed:
+ btrfs_free_delayed_extent_op(extent_op);
+out_free_buf:
+ free_extent_buffer(buf);
+out_free_reserved:
+ btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
+out_unuse:
+ unuse_block_rsv(root->fs_info, block_rsv, blocksize);
+ return ERR_PTR(ret);
}
struct walk_control {
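
The btrfs_alloc_tree_block() hunks above swap BUG_ON() on allocation failure for the usual goto unwind ladder: each label releases only what was acquired before the failing step, and the labels run in reverse acquisition order. A compile-and-run sketch of that idiom, with malloc() standing in for the block reservation, tree buffer and delayed-op allocations; the function and label names below are illustrative only:

#include <stdio.h>
#include <stdlib.h>

/* Acquire three resources in order; on failure, unwind only what is
 * already held, mirroring the out_free_buf / out_unuse labels above. */
static int alloc_block_like(void)
{
	void *reservation, *buffer, *delayed_op;
	int ret = 0;

	reservation = malloc(64);	/* like the block reservation */
	if (!reservation)
		return -1;

	buffer = malloc(4096);		/* like the new tree buffer */
	if (!buffer) {
		ret = -1;
		goto out_unuse;
	}

	delayed_op = malloc(32);	/* like the delayed extent op */
	if (!delayed_op) {
		ret = -1;
		goto out_free_buf;
	}

	/* success: in real code these would be handed to the caller */
	free(delayed_op);
	free(buffer);
	free(reservation);
	return 0;

out_free_buf:
	free(buffer);
out_unuse:
	free(reservation);
	return ret;
}

int main(void)
{
	printf("alloc_block_like() = %d\n", alloc_block_like());
	return 0;
}
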
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 782f3bc..43af5a6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -4560,36 +4560,37 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb)
do {
index--;
page = eb->pages[index];
- if (page && mapped) {
+ if (!page)
+ continue;
+ if (mapped)
spin_lock(&page->mapping->private_lock);
+ /*
+ * We do this since we'll remove the pages after we've
+ * removed the eb from the radix tree, so we could race
+ * and have this page now attached to the new eb. So
+ * only clear page_private if it's still connected to
+ * this eb.
+ */
+ if (PagePrivate(page) &&
+ page->private == (unsigned long)eb) {
+ BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
+ BUG_ON(PageDirty(page));
+ BUG_ON(PageWriteback(page));
/*
- * We do this since we'll remove the pages after we've
- * removed the eb from the radix tree, so we could race
- * and have this page now attached to the new eb. So
- * only clear page_private if it's still connected to
- * this eb.
+ * We need to make sure we haven't be attached
+ * to a new eb.
*/
- if (PagePrivate(page) &&
- page->private == (unsigned long)eb) {
- BUG_ON(test_bit(EXTENT_BUFFER_DIRTY, &eb->bflags));
- BUG_ON(PageDirty(page));
- BUG_ON(PageWriteback(page));
- /*
- * We need to make sure we haven't be attached
- * to a new eb.
- */
- ClearPagePrivate(page);
- set_page_private(page, 0);
- /* One for the page private */
- page_cache_release(page);
- }
- spin_unlock(&page->mapping->private_lock);
-
- }
- if (page) {
- /* One for when we alloced the page */
+ ClearPagePrivate(page);
+ set_page_private(page, 0);
+ /* One for the page private */
page_cache_release(page);
}
+
+ if (mapped)
+ spin_unlock(&page->mapping->private_lock);
+
+ /* One for when we alloced the page */
+ page_cache_release(page);
} while (index != 0);
}
@@ -4870,6 +4871,7 @@ struct extent_buffer *alloc_extent_buffer(struct btrfs_fs_info *fs_info,
mark_extent_buffer_accessed(exists, p);
goto free_eb;
}
+ exists = NULL;
/*
* Do this so attach doesn't complain and we need to
@@ -4933,12 +4935,12 @@ again:
return eb;
free_eb:
+ WARN_ON(!atomic_dec_and_test(&eb->refs));
for (i = 0; i < num_pages; i++) {
if (eb->pages[i])
unlock_page(eb->pages[i]);
}
- WARN_ON(!atomic_dec_and_test(&eb->refs));
btrfs_release_extent_buffer(eb);
return exists;
}
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 81fa75a..5e020d7 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -86,7 +86,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
mapping_set_gfp_mask(inode->i_mapping,
mapping_gfp_mask(inode->i_mapping) &
- ~(GFP_NOFS & ~__GFP_HIGHMEM));
+ ~(__GFP_FS | __GFP_HIGHMEM));
return inode;
}
@@ -1218,7 +1218,7 @@ out:
*
* This function writes out a free space cache struct to disk for quick recovery
* on mount. This will return 0 if it was successfull in writing the cache out,
- * and -1 if it was not.
+ * or an errno if it was not.
*/
static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
struct btrfs_free_space_ctl *ctl,
@@ -1235,12 +1235,12 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
int must_iput = 0;
if (!i_size_read(inode))
- return -1;
+ return -EIO;
WARN_ON(io_ctl->pages);
ret = io_ctl_init(io_ctl, inode, root, 1);
if (ret)
- return -1;
+ return ret;
if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
down_write(&block_group->data_rwsem);
@@ -1258,7 +1258,9 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
}
/* Lock all pages first so we can lock the extent safely. */
- io_ctl_prepare_pages(io_ctl, inode, 0);
+ ret = io_ctl_prepare_pages(io_ctl, inode, 0);
+ if (ret)
+ goto out;
lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
0, &cached_state);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ada4d24..8bb01367 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3632,25 +3632,28 @@ static void btrfs_read_locked_inode(struct inode *inode)
BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
BTRFS_I(inode)->last_trans = btrfs_inode_transid(leaf, inode_item);
+ inode->i_version = btrfs_inode_sequence(leaf, inode_item);
+ inode->i_generation = BTRFS_I(inode)->generation;
+ inode->i_rdev = 0;
+ rdev = btrfs_inode_rdev(leaf, inode_item);
+
+ BTRFS_I(inode)->index_cnt = (u64)-1;
+ BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
+
+cache_index:
/*
* If we were modified in the current generation and evicted from memory
* and then re-read we need to do a full sync since we don't have any
* idea about which extents were modified before we were evicted from
* cache.
+ *
+ * This is required for both inode re-read from disk and delayed inode
+ * in delayed_nodes_tree.
*/
if (BTRFS_I(inode)->last_trans == root->fs_info->generation)
set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
&BTRFS_I(inode)->runtime_flags);
- inode->i_version = btrfs_inode_sequence(leaf, inode_item);
- inode->i_generation = BTRFS_I(inode)->generation;
- inode->i_rdev = 0;
- rdev = btrfs_inode_rdev(leaf, inode_item);
-
- BTRFS_I(inode)->index_cnt = (u64)-1;
- BTRFS_I(inode)->flags = btrfs_inode_flags(leaf, inode_item);
-
-cache_index:
path->slots[0]++;
if (inode->i_nlink != 1 ||
path->slots[0] >= btrfs_header_nritems(leaf))
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index b05653f..1c22c65 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2410,7 +2410,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file,
"Attempt to delete subvolume %llu during send",
dest->root_key.objectid);
err = -EPERM;
- goto out_dput;
+ goto out_unlock_inode;
}
d_invalidate(dentry);
@@ -2505,6 +2505,7 @@ out_up_write:
root_flags & ~BTRFS_ROOT_SUBVOL_DEAD);
spin_unlock(&dest->root_item_lock);
}
+out_unlock_inode:
mutex_unlock(&inode->i_mutex);
if (!err) {
shrink_dcache_sb(root->fs_info->sb);
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 8bcd2a0..96aebf3 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -1058,6 +1058,7 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
struct extent_map *em;
struct list_head *search_list = &trans->transaction->pending_chunks;
int ret = 0;
+ u64 physical_start = *start;
again:
list_for_each_entry(em, search_list, list) {
@@ -1068,9 +1069,9 @@ again:
for (i = 0; i < map->num_stripes; i++) {
if (map->stripes[i].dev != device)
continue;
- if (map->stripes[i].physical >= *start + len ||
+ if (map->stripes[i].physical >= physical_start + len ||
map->stripes[i].physical + em->orig_block_len <=
- *start)
+ physical_start)
continue;
*start = map->stripes[i].physical +
em->orig_block_len;
@@ -1193,8 +1194,14 @@ again:
*/
if (contains_pending_extent(trans, device,
&search_start,
- hole_size))
- hole_size = 0;
+ hole_size)) {
+ if (key.offset >= search_start) {
+ hole_size = key.offset - search_start;
+ } else {
+ WARN_ON_ONCE(1);
+ hole_size = 0;
+ }
+ }
if (hole_size > max_hole_size) {
max_hole_start = search_start;
diff --git a/fs/configfs/mount.c b/fs/configfs/mount.c
index da94e41..5373567 100644
--- a/fs/configfs/mount.c
+++ b/fs/configfs/mount.c
@@ -173,5 +173,5 @@ MODULE_LICENSE("GPL");
MODULE_VERSION("0.0.2");
MODULE_DESCRIPTION("Simple RAM filesystem for user driven kernel subsystem configuration.");
-module_init(configfs_init);
+core_initcall(configfs_init);
module_exit(configfs_exit);
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index 59fedbc..86a2121 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -121,7 +121,7 @@ static int efivarfs_callback(efi_char16_t *name16, efi_guid_t vendor,
int len, i;
int err = -ENOMEM;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
return err;
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 18228c2..024f228 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -64,8 +64,8 @@ config EXT4_FS_SECURITY
If you are not using a security module that requires using
extended attributes for file security labels, say N.
-config EXT4_FS_ENCRYPTION
- bool "Ext4 Encryption"
+config EXT4_ENCRYPTION
+ tristate "Ext4 Encryption"
depends on EXT4_FS
select CRYPTO_AES
select CRYPTO_CBC
@@ -81,6 +81,11 @@ config EXT4_FS_ENCRYPTION
efficient since it avoids caching the encrypted and
decrypted pages in the page cache.
+config EXT4_FS_ENCRYPTION
+ bool
+ default y
+ depends on EXT4_ENCRYPTION
+
config EXT4_DEBUG
bool "EXT4 debugging support"
depends on EXT4_FS
diff --git a/fs/ext4/crypto_fname.c b/fs/ext4/crypto_fname.c
index ca2f594..fded02f 100644
--- a/fs/ext4/crypto_fname.c
+++ b/fs/ext4/crypto_fname.c
@@ -66,6 +66,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
int res = 0;
char iv[EXT4_CRYPTO_BLOCK_SIZE];
struct scatterlist sg[1];
+ int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
char *workbuf;
if (iname->len <= 0 || iname->len > ctx->lim)
@@ -73,6 +74,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
ciphertext_len = (iname->len < EXT4_CRYPTO_BLOCK_SIZE) ?
EXT4_CRYPTO_BLOCK_SIZE : iname->len;
+ ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
ciphertext_len = (ciphertext_len > ctx->lim)
? ctx->lim : ciphertext_len;
@@ -101,7 +103,7 @@ static int ext4_fname_encrypt(struct ext4_fname_crypto_ctx *ctx,
/* Create encryption request */
sg_init_table(sg, 1);
sg_set_page(sg, ctx->workpage, PAGE_SIZE, 0);
- ablkcipher_request_set_crypt(req, sg, sg, iname->len, iv);
+ ablkcipher_request_set_crypt(req, sg, sg, ciphertext_len, iv);
res = crypto_ablkcipher_encrypt(req);
if (res == -EINPROGRESS || res == -EBUSY) {
BUG_ON(req->base.data != &ecr);
@@ -198,106 +200,57 @@ static int ext4_fname_decrypt(struct ext4_fname_crypto_ctx *ctx,
return oname->len;
}
+static const char *lookup_table =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+,";
+
/**
* ext4_fname_encode_digest() -
*
* Encodes the input digest using characters from the set [a-zA-Z0-9_+].
* The encoded string is roughly 4/3 times the size of the input string.
*/
-int ext4_fname_encode_digest(char *dst, char *src, u32 len)
+static int digest_encode(const char *src, int len, char *dst)
{
- static const char *lookup_table =
- "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_+";
- u32 current_chunk, num_chunks, i;
- char tmp_buf[3];
- u32 c0, c1, c2, c3;
-
- current_chunk = 0;
- num_chunks = len/3;
- for (i = 0; i < num_chunks; i++) {
- c0 = src[3*i] & 0x3f;
- c1 = (((src[3*i]>>6)&0x3) | ((src[3*i+1] & 0xf)<<2)) & 0x3f;
- c2 = (((src[3*i+1]>>4)&0xf) | ((src[3*i+2] & 0x3)<<4)) & 0x3f;
- c3 = (src[3*i+2]>>2) & 0x3f;
- dst[4*i] = lookup_table[c0];
- dst[4*i+1] = lookup_table[c1];
- dst[4*i+2] = lookup_table[c2];
- dst[4*i+3] = lookup_table[c3];
- }
- if (i*3 < len) {
- memset(tmp_buf, 0, 3);
- memcpy(tmp_buf, &src[3*i], len-3*i);
- c0 = tmp_buf[0] & 0x3f;
- c1 = (((tmp_buf[0]>>6)&0x3) | ((tmp_buf[1] & 0xf)<<2)) & 0x3f;
- c2 = (((tmp_buf[1]>>4)&0xf) | ((tmp_buf[2] & 0x3)<<4)) & 0x3f;
- c3 = (tmp_buf[2]>>2) & 0x3f;
- dst[4*i] = lookup_table[c0];
- dst[4*i+1] = lookup_table[c1];
- dst[4*i+2] = lookup_table[c2];
- dst[4*i+3] = lookup_table[c3];
+ int i = 0, bits = 0, ac = 0;
+ char *cp = dst;
+
+ while (i < len) {
+ ac += (((unsigned char) src[i]) << bits);
+ bits += 8;
+ do {
+ *cp++ = lookup_table[ac & 0x3f];
+ ac >>= 6;
+ bits -= 6;
+ } while (bits >= 6);
i++;
}
- return (i * 4);
+ if (bits)
+ *cp++ = lookup_table[ac & 0x3f];
+ return cp - dst;
}
-/**
- * ext4_fname_hash() -
- *
- * This function computes the hash of the input filename, and sets the output
- * buffer to the *encoded* digest. It returns the length of the digest as its
- * return value. Errors are returned as negative numbers. We trust the caller
- * to allocate sufficient memory to oname string.
- */
-static int ext4_fname_hash(struct ext4_fname_crypto_ctx *ctx,
- const struct ext4_str *iname,
- struct ext4_str *oname)
+static int digest_decode(const char *src, int len, char *dst)
{
- struct scatterlist sg;
- struct hash_desc desc = {
- .tfm = (struct crypto_hash *)ctx->htfm,
- .flags = CRYPTO_TFM_REQ_MAY_SLEEP
- };
- int res = 0;
-
- if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
- res = ext4_fname_encode_digest(oname->name, iname->name,
- iname->len);
- oname->len = res;
- return res;
- }
-
- sg_init_one(&sg, iname->name, iname->len);
- res = crypto_hash_init(&desc);
- if (res) {
- printk(KERN_ERR
- "%s: Error initializing crypto hash; res = [%d]\n",
- __func__, res);
- goto out;
- }
- res = crypto_hash_update(&desc, &sg, iname->len);
- if (res) {
- printk(KERN_ERR
- "%s: Error updating crypto hash; res = [%d]\n",
- __func__, res);
- goto out;
- }
- res = crypto_hash_final(&desc,
- &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE]);
- if (res) {
- printk(KERN_ERR
- "%s: Error finalizing crypto hash; res = [%d]\n",
- __func__, res);
- goto out;
+ int i = 0, bits = 0, ac = 0;
+ const char *p;
+ char *cp = dst;
+
+ while (i < len) {
+ p = strchr(lookup_table, src[i]);
+ if (p == NULL || src[i] == 0)
+ return -2;
+ ac += (p - lookup_table) << bits;
+ bits += 6;
+ if (bits >= 8) {
+ *cp++ = ac & 0xff;
+ ac >>= 8;
+ bits -= 8;
+ }
+ i++;
}
- /* Encode the digest as a printable string--this will increase the
- * size of the digest */
- oname->name[0] = 'I';
- res = ext4_fname_encode_digest(oname->name+1,
- &oname->name[EXT4_FNAME_CRYPTO_DIGEST_SIZE],
- EXT4_FNAME_CRYPTO_DIGEST_SIZE) + 1;
- oname->len = res;
-out:
- return res;
+ if (ac)
+ return -1;
+ return cp - dst;
}
/**
@@ -405,6 +358,7 @@ struct ext4_fname_crypto_ctx *ext4_get_fname_crypto_ctx(
if (IS_ERR(ctx))
return ctx;
+ ctx->flags = ei->i_crypt_policy_flags;
if (ctx->has_valid_key) {
if (ctx->key.mode != EXT4_ENCRYPTION_MODE_AES_256_CTS) {
printk_once(KERN_WARNING
@@ -517,6 +471,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
u32 namelen)
{
u32 ciphertext_len;
+ int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
if (ctx == NULL)
return -EIO;
@@ -524,6 +479,7 @@ int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
return -EACCES;
ciphertext_len = (namelen < EXT4_CRYPTO_BLOCK_SIZE) ?
EXT4_CRYPTO_BLOCK_SIZE : namelen;
+ ciphertext_len = ext4_fname_crypto_round_up(ciphertext_len, padding);
ciphertext_len = (ciphertext_len > ctx->lim)
? ctx->lim : ciphertext_len;
return (int) ciphertext_len;
@@ -539,10 +495,13 @@ int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
u32 ilen, struct ext4_str *crypto_str)
{
unsigned int olen;
+ int padding = 4 << (ctx->flags & EXT4_POLICY_FLAGS_PAD_MASK);
if (!ctx)
return -EIO;
- olen = ext4_fname_crypto_round_up(ilen, EXT4_CRYPTO_BLOCK_SIZE);
+ if (padding < EXT4_CRYPTO_BLOCK_SIZE)
+ padding = EXT4_CRYPTO_BLOCK_SIZE;
+ olen = ext4_fname_crypto_round_up(ilen, padding);
crypto_str->len = olen;
if (olen < EXT4_FNAME_CRYPTO_DIGEST_SIZE*2)
olen = EXT4_FNAME_CRYPTO_DIGEST_SIZE*2;
@@ -571,9 +530,13 @@ void ext4_fname_crypto_free_buffer(struct ext4_str *crypto_str)
* ext4_fname_disk_to_usr() - converts a filename from disk space to user space
*/
int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
- const struct ext4_str *iname,
- struct ext4_str *oname)
+ struct dx_hash_info *hinfo,
+ const struct ext4_str *iname,
+ struct ext4_str *oname)
{
+ char buf[24];
+ int ret;
+
if (ctx == NULL)
return -EIO;
if (iname->len < 3) {
@@ -587,18 +550,33 @@ int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
}
if (ctx->has_valid_key)
return ext4_fname_decrypt(ctx, iname, oname);
- else
- return ext4_fname_hash(ctx, iname, oname);
+
+ if (iname->len <= EXT4_FNAME_CRYPTO_DIGEST_SIZE) {
+ ret = digest_encode(iname->name, iname->len, oname->name);
+ oname->len = ret;
+ return ret;
+ }
+ if (hinfo) {
+ memcpy(buf, &hinfo->hash, 4);
+ memcpy(buf+4, &hinfo->minor_hash, 4);
+ } else
+ memset(buf, 0, 8);
+ memcpy(buf + 8, iname->name + iname->len - 16, 16);
+ oname->name[0] = '_';
+ ret = digest_encode(buf, 24, oname->name+1);
+ oname->len = ret + 1;
+ return ret + 1;
}
int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
const struct ext4_dir_entry_2 *de,
struct ext4_str *oname)
{
struct ext4_str iname = {.name = (unsigned char *) de->name,
.len = de->name_len };
- return _ext4_fname_disk_to_usr(ctx, &iname, oname);
+ return _ext4_fname_disk_to_usr(ctx, hinfo, &iname, oname);
}
@@ -640,10 +618,11 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct dx_hash_info *hinfo)
{
- struct ext4_str tmp, tmp2;
+ struct ext4_str tmp;
int ret = 0;
+ char buf[EXT4_FNAME_CRYPTO_DIGEST_SIZE+1];
- if (!ctx || !ctx->has_valid_key ||
+ if (!ctx ||
((iname->name[0] == '.') &&
((iname->len == 1) ||
((iname->name[1] == '.') && (iname->len == 2))))) {
@@ -651,59 +630,90 @@ int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
return 0;
}
+ if (!ctx->has_valid_key && iname->name[0] == '_') {
+ if (iname->len != 33)
+ return -ENOENT;
+ ret = digest_decode(iname->name+1, iname->len, buf);
+ if (ret != 24)
+ return -ENOENT;
+ memcpy(&hinfo->hash, buf, 4);
+ memcpy(&hinfo->minor_hash, buf + 4, 4);
+ return 0;
+ }
+
+ if (!ctx->has_valid_key && iname->name[0] != '_') {
+ if (iname->len > 43)
+ return -ENOENT;
+ ret = digest_decode(iname->name, iname->len, buf);
+ ext4fs_dirhash(buf, ret, hinfo);
+ return 0;
+ }
+
/* First encrypt the plaintext name */
ret = ext4_fname_crypto_alloc_buffer(ctx, iname->len, &tmp);
if (ret < 0)
return ret;
ret = ext4_fname_encrypt(ctx, iname, &tmp);
- if (ret < 0)
- goto out;
-
- tmp2.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
- tmp2.name = kmalloc(tmp2.len + 1, GFP_KERNEL);
- if (tmp2.name == NULL) {
- ret = -ENOMEM;
- goto out;
+ if (ret >= 0) {
+ ext4fs_dirhash(tmp.name, tmp.len, hinfo);
+ ret = 0;
}
- ret = ext4_fname_hash(ctx, &tmp, &tmp2);
- if (ret > 0)
- ext4fs_dirhash(tmp2.name, tmp2.len, hinfo);
- ext4_fname_crypto_free_buffer(&tmp2);
-out:
ext4_fname_crypto_free_buffer(&tmp);
return ret;
}
-/**
- * ext4_fname_disk_to_htree() - converts a filename from disk space to htree-access string
- */
-int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
- const struct ext4_dir_entry_2 *de,
- struct dx_hash_info *hinfo)
+int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de)
{
- struct ext4_str iname = {.name = (unsigned char *) de->name,
- .len = de->name_len};
- struct ext4_str tmp;
- int ret;
+ int ret = -ENOENT;
+ int bigname = (*name == '_');
- if (!ctx ||
- ((iname.name[0] == '.') &&
- ((iname.len == 1) ||
- ((iname.name[1] == '.') && (iname.len == 2))))) {
- ext4fs_dirhash(iname.name, iname.len, hinfo);
- return 0;
+ if (ctx->has_valid_key) {
+ if (cstr->name == NULL) {
+ struct qstr istr;
+
+ ret = ext4_fname_crypto_alloc_buffer(ctx, len, cstr);
+ if (ret < 0)
+ goto errout;
+ istr.name = name;
+ istr.len = len;
+ ret = ext4_fname_encrypt(ctx, &istr, cstr);
+ if (ret < 0)
+ goto errout;
+ }
+ } else {
+ if (cstr->name == NULL) {
+ cstr->name = kmalloc(32, GFP_KERNEL);
+ if (cstr->name == NULL)
+ return -ENOMEM;
+ if ((bigname && (len != 33)) ||
+ (!bigname && (len > 43)))
+ goto errout;
+ ret = digest_decode(name+bigname, len-bigname,
+ cstr->name);
+ if (ret < 0) {
+ ret = -ENOENT;
+ goto errout;
+ }
+ cstr->len = ret;
+ }
+ if (bigname) {
+ if (de->name_len < 16)
+ return 0;
+ ret = memcmp(de->name + de->name_len - 16,
+ cstr->name + 8, 16);
+ return (ret == 0) ? 1 : 0;
+ }
}
-
- tmp.len = (4 * ((EXT4_FNAME_CRYPTO_DIGEST_SIZE + 2) / 3)) + 1;
- tmp.name = kmalloc(tmp.len + 1, GFP_KERNEL);
- if (tmp.name == NULL)
- return -ENOMEM;
-
- ret = ext4_fname_hash(ctx, &iname, &tmp);
- if (ret > 0)
- ext4fs_dirhash(tmp.name, tmp.len, hinfo);
- ext4_fname_crypto_free_buffer(&tmp);
+ if (de->name_len != cstr->len)
+ return 0;
+ ret = memcmp(de->name, cstr->name, cstr->len);
+ return (ret == 0) ? 1 : 0;
+errout:
+ kfree(cstr->name);
+ cstr->name = NULL;
return ret;
}
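For readers skimming the hunk above: when no encryption key is available and the on-disk (encrypted) name is longer than EXT4_FNAME_CRYPTO_DIGEST_SIZE, the patch presents a "_"-prefixed fallback name built from the directory hash and the tail of the ciphertext. A minimal sketch of that assembly follows; build_nokey_name() is a hypothetical helper, not part of the patch, and digest_encode() is assumed to be the base64-style encoder used elsewhere in crypto_fname.c.

	/* Hypothetical illustration of the "_"-prefixed fallback name that
	 * _ext4_fname_disk_to_usr() emits when ctx->has_valid_key is false. */
	static int build_nokey_name(const struct dx_hash_info *hinfo,
				    const struct ext4_str *iname,
				    struct ext4_str *oname)
	{
		char buf[24];

		if (hinfo) {
			memcpy(buf, &hinfo->hash, 4);		/* major hash */
			memcpy(buf + 4, &hinfo->minor_hash, 4);	/* minor hash */
		} else {
			memset(buf, 0, 8);			/* no hash known */
		}
		/* last 16 bytes of the encrypted on-disk name */
		memcpy(buf + 8, iname->name + iname->len - 16, 16);
		oname->name[0] = '_';
		oname->len = digest_encode(buf, 24, oname->name + 1) + 1;
		return oname->len;
	}

ext4_fname_match() on the lookup side reverses this: it decodes the "_"-prefixed string back into the 24-byte buffer and compares only the 16-byte ciphertext tail.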
diff --git a/fs/ext4/crypto_key.c b/fs/ext4/crypto_key.c
index c8392af..52170d0 100644
--- a/fs/ext4/crypto_key.c
+++ b/fs/ext4/crypto_key.c
@@ -110,6 +110,7 @@ int ext4_generate_encryption_key(struct inode *inode)
}
res = 0;
+ ei->i_crypt_policy_flags = ctx.flags;
if (S_ISREG(inode->i_mode))
crypt_key->mode = ctx.contents_encryption_mode;
else if (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode))
diff --git a/fs/ext4/crypto_policy.c b/fs/ext4/crypto_policy.c
index 30eaf9e..a6d6291 100644
--- a/fs/ext4/crypto_policy.c
+++ b/fs/ext4/crypto_policy.c
@@ -37,6 +37,8 @@ static int ext4_is_encryption_context_consistent_with_policy(
return 0;
return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE) == 0 &&
+ (ctx.flags ==
+ policy->flags) &&
(ctx.contents_encryption_mode ==
policy->contents_encryption_mode) &&
(ctx.filenames_encryption_mode ==
@@ -56,25 +58,25 @@ static int ext4_create_encryption_context_from_policy(
printk(KERN_WARNING
"%s: Invalid contents encryption mode %d\n", __func__,
policy->contents_encryption_mode);
- res = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) {
printk(KERN_WARNING
"%s: Invalid filenames encryption mode %d\n", __func__,
policy->filenames_encryption_mode);
- res = -EINVAL;
- goto out;
+ return -EINVAL;
}
+ if (policy->flags & ~EXT4_POLICY_FLAGS_VALID)
+ return -EINVAL;
ctx.contents_encryption_mode = policy->contents_encryption_mode;
ctx.filenames_encryption_mode = policy->filenames_encryption_mode;
+ ctx.flags = policy->flags;
BUILD_BUG_ON(sizeof(ctx.nonce) != EXT4_KEY_DERIVATION_NONCE_SIZE);
get_random_bytes(ctx.nonce, EXT4_KEY_DERIVATION_NONCE_SIZE);
res = ext4_xattr_set(inode, EXT4_XATTR_INDEX_ENCRYPTION,
EXT4_XATTR_NAME_ENCRYPTION_CONTEXT, &ctx,
sizeof(ctx), 0);
-out:
if (!res)
ext4_set_inode_flag(inode, EXT4_INODE_ENCRYPT);
return res;
@@ -115,6 +117,7 @@ int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy)
policy->version = 0;
policy->contents_encryption_mode = ctx.contents_encryption_mode;
policy->filenames_encryption_mode = ctx.filenames_encryption_mode;
+ policy->flags = ctx.flags;
memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor,
EXT4_KEY_DESCRIPTOR_SIZE);
return 0;
@@ -176,6 +179,7 @@ int ext4_inherit_context(struct inode *parent, struct inode *child)
EXT4_ENCRYPTION_MODE_AES_256_XTS;
ctx.filenames_encryption_mode =
EXT4_ENCRYPTION_MODE_AES_256_CTS;
+ ctx.flags = 0;
memset(ctx.master_key_descriptor, 0x42,
EXT4_KEY_DESCRIPTOR_SIZE);
res = 0;
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 61db51a5..5665d82 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -249,7 +249,7 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
} else {
/* Directory is encrypted */
err = ext4_fname_disk_to_usr(enc_ctx,
- de, &fname_crypto_str);
+ NULL, de, &fname_crypto_str);
if (err < 0)
goto errout;
if (!dir_emit(ctx,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index ef267ad..009a059 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -911,6 +911,7 @@ struct ext4_inode_info {
/* on-disk additional length */
__u16 i_extra_isize;
+ char i_crypt_policy_flags;
/* Indicate the inline data space. */
u16 i_inline_off;
@@ -1066,12 +1067,6 @@ extern void ext4_set_bits(void *bm, int cur, int len);
/* Metadata checksum algorithm codes */
#define EXT4_CRC32C_CHKSUM 1
-/* Encryption algorithms */
-#define EXT4_ENCRYPTION_MODE_INVALID 0
-#define EXT4_ENCRYPTION_MODE_AES_256_XTS 1
-#define EXT4_ENCRYPTION_MODE_AES_256_GCM 2
-#define EXT4_ENCRYPTION_MODE_AES_256_CBC 3
-
/*
* Structure of the super block
*/
@@ -2093,9 +2088,11 @@ u32 ext4_fname_crypto_round_up(u32 size, u32 blksize);
int ext4_fname_crypto_alloc_buffer(struct ext4_fname_crypto_ctx *ctx,
u32 ilen, struct ext4_str *crypto_str);
int _ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
const struct ext4_str *iname,
struct ext4_str *oname);
int ext4_fname_disk_to_usr(struct ext4_fname_crypto_ctx *ctx,
+ struct dx_hash_info *hinfo,
const struct ext4_dir_entry_2 *de,
struct ext4_str *oname);
int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
@@ -2104,11 +2101,12 @@ int ext4_fname_usr_to_disk(struct ext4_fname_crypto_ctx *ctx,
int ext4_fname_usr_to_hash(struct ext4_fname_crypto_ctx *ctx,
const struct qstr *iname,
struct dx_hash_info *hinfo);
-int ext4_fname_disk_to_hash(struct ext4_fname_crypto_ctx *ctx,
- const struct ext4_dir_entry_2 *de,
- struct dx_hash_info *hinfo);
int ext4_fname_crypto_namelen_on_disk(struct ext4_fname_crypto_ctx *ctx,
u32 namelen);
+int ext4_fname_match(struct ext4_fname_crypto_ctx *ctx, struct ext4_str *cstr,
+ int len, const char * const name,
+ struct ext4_dir_entry_2 *de);
+
#ifdef CONFIG_EXT4_FS_ENCRYPTION
void ext4_put_fname_crypto_ctx(struct ext4_fname_crypto_ctx **ctx);
diff --git a/fs/ext4/ext4_crypto.h b/fs/ext4/ext4_crypto.h
index c2ba35a..d75159c 100644
--- a/fs/ext4/ext4_crypto.h
+++ b/fs/ext4/ext4_crypto.h
@@ -20,12 +20,20 @@ struct ext4_encryption_policy {
char version;
char contents_encryption_mode;
char filenames_encryption_mode;
+ char flags;
char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
} __attribute__((__packed__));
#define EXT4_ENCRYPTION_CONTEXT_FORMAT_V1 1
#define EXT4_KEY_DERIVATION_NONCE_SIZE 16
+#define EXT4_POLICY_FLAGS_PAD_4 0x00
+#define EXT4_POLICY_FLAGS_PAD_8 0x01
+#define EXT4_POLICY_FLAGS_PAD_16 0x02
+#define EXT4_POLICY_FLAGS_PAD_32 0x03
+#define EXT4_POLICY_FLAGS_PAD_MASK 0x03
+#define EXT4_POLICY_FLAGS_VALID 0x03
+
/**
* Encryption context for inode
*
@@ -41,7 +49,7 @@ struct ext4_encryption_context {
char format;
char contents_encryption_mode;
char filenames_encryption_mode;
- char reserved;
+ char flags;
char master_key_descriptor[EXT4_KEY_DESCRIPTOR_SIZE];
char nonce[EXT4_KEY_DERIVATION_NONCE_SIZE];
} __attribute__((__packed__));
@@ -120,6 +128,7 @@ struct ext4_fname_crypto_ctx {
struct crypto_hash *htfm;
struct page *workpage;
struct ext4_encryption_key key;
+ unsigned flags : 8;
unsigned has_valid_key : 1;
unsigned ctfm_key_is_ready : 1;
};
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 973816b..d74e0802 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -4927,13 +4927,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
if (ret)
return ret;
- /*
- * currently supporting (pre)allocate mode for extent-based
- * files _only_
- */
- if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
- return -EOPNOTSUPP;
-
if (mode & FALLOC_FL_COLLAPSE_RANGE)
return ext4_collapse_range(inode, offset, len);
@@ -4955,6 +4948,14 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len)
mutex_lock(&inode->i_mutex);
+ /*
+ * We only support preallocation for extent-based files
+ */
+ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
+ ret = -EOPNOTSUPP;
+ goto out;
+ }
+
if (!(mode & FALLOC_FL_KEEP_SIZE) &&
offset + len > i_size_read(inode)) {
new_size = offset + len;
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index d33d5a68..26724ae 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -703,6 +703,14 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
BUG_ON(end < lblk);
+ if ((status & EXTENT_STATUS_DELAYED) &&
+ (status & EXTENT_STATUS_WRITTEN)) {
+ ext4_warning(inode->i_sb, "Inserting extent [%u/%u] as "
+ "delayed and written, which can potentially "
+ "cause data loss.\n", lblk, len);
+ WARN_ON(1);
+ }
+
newes.es_lblk = lblk;
newes.es_len = len;
ext4_es_store_pblock_status(&newes, pblk, status);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index cbd0654..55b187c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -531,6 +531,7 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ !(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
@@ -635,6 +636,7 @@ found:
status = map->m_flags & EXT4_MAP_UNWRITTEN ?
EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
+ !(status & EXTENT_STATUS_WRITTEN) &&
ext4_find_delalloc_range(inode, map->m_lblk,
map->m_lblk + map->m_len - 1))
status |= EXTENT_STATUS_DELAYED;
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 7223b0b..814f3beb 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -640,7 +640,7 @@ static struct stats dx_show_leaf(struct inode *dir,
ext4_put_fname_crypto_ctx(&ctx);
ctx = NULL;
}
- res = ext4_fname_disk_to_usr(ctx, de,
+ res = ext4_fname_disk_to_usr(ctx, NULL, de,
&fname_crypto_str);
if (res < 0) {
printk(KERN_WARNING "Error "
@@ -653,15 +653,8 @@ static struct stats dx_show_leaf(struct inode *dir,
name = fname_crypto_str.name;
len = fname_crypto_str.len;
}
- res = ext4_fname_disk_to_hash(ctx, de,
- &h);
- if (res < 0) {
- printk(KERN_WARNING "Error "
- "converting filename "
- "from disk to htree"
- "\n");
- h.hash = 0xDEADBEEF;
- }
+ ext4fs_dirhash(de->name, de->name_len,
+ &h);
printk("%*.s:(E)%x.%u ", len, name,
h.hash, (unsigned) ((char *) de
- base));
@@ -1008,15 +1001,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
/* silently ignore the rest of the block */
break;
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- err = ext4_fname_disk_to_hash(ctx, de, hinfo);
- if (err < 0) {
- count = err;
- goto errout;
- }
-#else
ext4fs_dirhash(de->name, de->name_len, hinfo);
-#endif
if ((hinfo->hash < start_hash) ||
((hinfo->hash == start_hash) &&
(hinfo->minor_hash < start_minor_hash)))
@@ -1032,7 +1017,7 @@ static int htree_dirblock_to_tree(struct file *dir_file,
&tmp_str);
} else {
/* Directory is encrypted */
- err = ext4_fname_disk_to_usr(ctx, de,
+ err = ext4_fname_disk_to_usr(ctx, hinfo, de,
&fname_crypto_str);
if (err < 0) {
count = err;
@@ -1193,26 +1178,10 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
int count = 0;
char *base = (char *) de;
struct dx_hash_info h = *hinfo;
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- struct ext4_fname_crypto_ctx *ctx = NULL;
- int err;
-
- ctx = ext4_get_fname_crypto_ctx(dir, EXT4_NAME_LEN);
- if (IS_ERR(ctx))
- return PTR_ERR(ctx);
-#endif
while ((char *) de < base + blocksize) {
if (de->name_len && de->inode) {
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- err = ext4_fname_disk_to_hash(ctx, de, &h);
- if (err < 0) {
- ext4_put_fname_crypto_ctx(&ctx);
- return err;
- }
-#else
ext4fs_dirhash(de->name, de->name_len, &h);
-#endif
map_tail--;
map_tail->hash = h.hash;
map_tail->offs = ((char *) de - base)>>2;
@@ -1223,9 +1192,6 @@ static int dx_make_map(struct inode *dir, struct ext4_dir_entry_2 *de,
/* XXX: do we need to check rec_len == 0 case? -Chris */
de = ext4_next_entry(de, blocksize);
}
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
- ext4_put_fname_crypto_ctx(&ctx);
-#endif
return count;
}
@@ -1287,16 +1253,8 @@ static inline int ext4_match(struct ext4_fname_crypto_ctx *ctx,
return 0;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
- if (ctx) {
- /* Directory is encrypted */
- res = ext4_fname_disk_to_usr(ctx, de, fname_crypto_str);
- if (res < 0)
- return res;
- if (len != res)
- return 0;
- res = memcmp(name, fname_crypto_str->name, len);
- return (res == 0) ? 1 : 0;
- }
+ if (ctx)
+ return ext4_fname_match(ctx, fname_crypto_str, len, name, de);
#endif
if (len != de->name_len)
return 0;
@@ -1324,16 +1282,6 @@ int search_dir(struct buffer_head *bh, char *search_buf, int buf_size,
if (IS_ERR(ctx))
return -1;
- if (ctx != NULL) {
- /* Allocate buffer to hold maximum name length */
- res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
- &fname_crypto_str);
- if (res < 0) {
- ext4_put_fname_crypto_ctx(&ctx);
- return -1;
- }
- }
-
de = (struct ext4_dir_entry_2 *)search_buf;
dlimit = search_buf + buf_size;
while ((char *) de < dlimit) {
@@ -1872,14 +1820,6 @@ int ext4_find_dest_de(struct inode *dir, struct inode *inode,
return res;
}
reclen = EXT4_DIR_REC_LEN(res);
-
- /* Allocate buffer to hold maximum name length */
- res = ext4_fname_crypto_alloc_buffer(ctx, EXT4_NAME_LEN,
- &fname_crypto_str);
- if (res < 0) {
- ext4_put_fname_crypto_ctx(&ctx);
- return -1;
- }
}
de = (struct ext4_dir_entry_2 *)buf;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 8a8ec62..cf0c472 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1432,12 +1432,15 @@ static int ext4_flex_group_add(struct super_block *sb,
goto exit;
/*
* We will always be modifying at least the superblock and GDT
- * block. If we are adding a group past the last current GDT block,
+ * blocks. If we are adding a group past the last current GDT block,
* we will also modify the inode and the dindirect block. If we
* are adding a group with superblock/GDT backups we will also
* modify each of the reserved GDT dindirect blocks.
*/
- credit = flex_gd->count * 4 + reserved_gdb;
+ credit = 3; /* sb, resize inode, resize inode dindirect */
+ /* GDT blocks */
+ credit += 1 + DIV_ROUND_UP(flex_gd->count, EXT4_DESC_PER_BLOCK(sb));
+ credit += reserved_gdb; /* Reserved GDT dindirect blocks */
handle = ext4_journal_start_sb(sb, EXT4_HT_RESIZE, credit);
if (IS_ERR(handle)) {
err = PTR_ERR(handle);
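To make the new credit estimate above concrete (numbers chosen purely for illustration, not taken from the patch): adding a flex group of 64 block groups on a filesystem with 128 group descriptors per block and 2 reserved GDT blocks gives credit = 3 + (1 + DIV_ROUND_UP(64, 128)) + 2 = 3 + 2 + 2 = 7 journal credits, whereas the old estimate flex_gd->count * 4 + reserved_gdb would have requested 64 * 4 + 2 = 258.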
diff --git a/fs/ext4/symlink.c b/fs/ext4/symlink.c
index 19f78f2..187b789 100644
--- a/fs/ext4/symlink.c
+++ b/fs/ext4/symlink.c
@@ -74,7 +74,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
goto errout;
}
pstr.name = paddr;
- res = _ext4_fname_disk_to_usr(ctx, &cstr, &pstr);
+ res = _ext4_fname_disk_to_usr(ctx, NULL, &cstr, &pstr);
if (res < 0)
goto errout;
/* Null-terminate the name */
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index b91b0e1..1e1aae6 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
{
struct inode *inode = mapping->host;
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ bool locked = false;
int ret;
long diff;
@@ -1533,7 +1534,13 @@ static int f2fs_write_data_pages(struct address_space *mapping,
diff = nr_pages_to_write(sbi, DATA, wbc);
+ if (!S_ISDIR(inode->i_mode)) {
+ mutex_lock(&sbi->writepages);
+ locked = true;
+ }
ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+ if (locked)
+ mutex_unlock(&sbi->writepages);
f2fs_submit_merged_bio(sbi, DATA, WRITE);
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index d8921cf..8de34ab 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -625,6 +625,7 @@ struct f2fs_sb_info {
struct mutex cp_mutex; /* checkpoint procedure lock */
struct rw_semaphore cp_rwsem; /* blocking FS operations */
struct rw_semaphore node_write; /* locking node writes */
+ struct mutex writepages; /* mutex for writepages() */
wait_queue_head_t cp_wait;
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 7e3794e..658e807 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -298,16 +298,14 @@ fail:
static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct page *page;
+ struct page *page = page_follow_link_light(dentry, nd);
- page = page_follow_link_light(dentry, nd);
- if (IS_ERR(page))
+ if (IS_ERR_OR_NULL(page))
return page;
/* this is broken symlink case */
if (*nd_get_link(nd) == 0) {
- kunmap(page);
- page_cache_release(page);
+ page_put_link(dentry, nd, page);
return ERR_PTR(-ENOENT);
}
return page;
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 160b883..b2dd1b0 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -1035,6 +1035,7 @@ try_onemore:
sbi->raw_super = raw_super;
sbi->raw_super_buf = raw_super_buf;
mutex_init(&sbi->gc_mutex);
+ mutex_init(&sbi->writepages);
mutex_init(&sbi->cp_mutex);
init_rwsem(&sbi->node_write);
clear_sbi_flag(sbi, SBI_POR_DOING);
diff --git a/fs/namei.c b/fs/namei.c
index 4a8d998b..fe30d3b 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1415,6 +1415,7 @@ static int lookup_fast(struct nameidata *nd,
*/
if (nd->flags & LOOKUP_RCU) {
unsigned seq;
+ bool negative;
dentry = __d_lookup_rcu(parent, &nd->last, &seq);
if (!dentry)
goto unlazy;
@@ -1424,8 +1425,11 @@ static int lookup_fast(struct nameidata *nd,
* the dentry name information from lookup.
*/
*inode = dentry->d_inode;
+ negative = d_is_negative(dentry);
if (read_seqcount_retry(&dentry->d_seq, seq))
return -ECHILD;
+ if (negative)
+ return -ENOENT;
/*
* This sequence count validates that the parent had no
@@ -1472,6 +1476,10 @@ unlazy:
goto need_lookup;
}
+ if (unlikely(d_is_negative(dentry))) {
+ dput(dentry);
+ return -ENOENT;
+ }
path->mnt = mnt;
path->dentry = dentry;
err = follow_managed(path, nd->flags);
@@ -1583,10 +1591,10 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
goto out_err;
inode = path->dentry->d_inode;
+ err = -ENOENT;
+ if (d_is_negative(path->dentry))
+ goto out_path_put;
}
- err = -ENOENT;
- if (d_is_negative(path->dentry))
- goto out_path_put;
if (should_follow_link(path->dentry, follow)) {
if (nd->flags & LOOKUP_RCU) {
@@ -3036,14 +3044,13 @@ retry_lookup:
BUG_ON(nd->flags & LOOKUP_RCU);
inode = path->dentry->d_inode;
-finish_lookup:
- /* we _can_ be in RCU mode here */
error = -ENOENT;
if (d_is_negative(path->dentry)) {
path_to_nameidata(path, nd);
goto out;
}
-
+finish_lookup:
+ /* we _can_ be in RCU mode here */
if (should_follow_link(path->dentry, !symlink_ok)) {
if (nd->flags & LOOKUP_RCU) {
if (unlikely(nd->path.mnt != path->mnt ||
@@ -3226,7 +3233,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
if (unlikely(file->f_flags & __O_TMPFILE)) {
error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
- goto out;
+ goto out2;
}
error = path_init(dfd, pathname, flags, nd);
@@ -3256,6 +3263,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
}
out:
path_cleanup(nd);
+out2:
if (!(opened & FILE_OPENED)) {
BUG_ON(!error);
put_filp(file);
diff --git a/fs/namespace.c b/fs/namespace.c
index 1f4f9da..1b9e111 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -3179,6 +3179,12 @@ bool fs_fully_visible(struct file_system_type *type)
if (mnt->mnt.mnt_sb->s_type != type)
continue;
+ /* This mount is not fully visible if its root directory
+ * is not the root directory of the filesystem.
+ */
+ if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+ continue;
+
/* This mount is not fully visible if there are any child mounts
* that cover anything except for empty directories.
*/
diff --git a/fs/nilfs2/btree.c b/fs/nilfs2/btree.c
index 059f371..919fd5b 100644
--- a/fs/nilfs2/btree.c
+++ b/fs/nilfs2/btree.c
@@ -388,7 +388,7 @@ static int nilfs_btree_root_broken(const struct nilfs_btree_node *node,
nchildren = nilfs_btree_node_get_nchildren(node);
if (unlikely(level < NILFS_BTREE_LEVEL_NODE_MIN ||
- level > NILFS_BTREE_LEVEL_MAX ||
+ level >= NILFS_BTREE_LEVEL_MAX ||
nchildren < 0 ||
nchildren > NILFS_BTREE_ROOT_NCHILDREN_MAX)) {
pr_crit("NILFS: bad btree root (inode number=%lu): level = %d, flags = 0x%x, nchildren = %d\n",
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a6944b2..fdf4b41 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -757,6 +757,19 @@ lookup:
if (tmpres) {
spin_unlock(&dlm->spinlock);
spin_lock(&tmpres->spinlock);
+
+ /*
+ * Right after dlm spinlock was released, dlm_thread could have
+ * purged the lockres. Check if the lockres got unhashed. If so,
+ * start over.
+ */
+ if (hlist_unhashed(&tmpres->hash_node)) {
+ spin_unlock(&tmpres->spinlock);
+ dlm_lockres_put(tmpres);
+ tmpres = NULL;
+ goto lookup;
+ }
+
/* Wait on the thread that is mastering the resource */
if (tmpres->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
__dlm_wait_on_lockres(tmpres);
diff --git a/fs/splice.c b/fs/splice.c
index 476024b..bfe62ae 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
long ret, bytes;
umode_t i_mode;
size_t len;
- int i, flags;
+ int i, flags, more;
/*
* We require the input being a regular file, as we don't want to
@@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* Don't block on output, we have to drain the direct pipe.
*/
sd->flags &= ~SPLICE_F_NONBLOCK;
+ more = sd->flags & SPLICE_F_MORE;
while (len) {
size_t read_len;
@@ -1217,6 +1218,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
sd->total_len = read_len;
/*
+ * If more data is pending, set SPLICE_F_MORE.
+ * If this is the last chunk of data and SPLICE_F_MORE was not set
+ * initially, clear it.
+ */
+ if (read_len < len)
+ sd->flags |= SPLICE_F_MORE;
+ else if (!more)
+ sd->flags &= ~SPLICE_F_MORE;
+ /*
* NOTE: nonblocking mode only applies to the input. We
* must not do the output in nonblocking mode as then we
* could get stuck data in the internal pipe:
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index f5ca0e9..1c3002e 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -124,7 +124,6 @@
#ifndef ACPI_USE_SYSTEM_INTTYPES
typedef unsigned char u8;
-typedef unsigned char u8;
typedef unsigned short u16;
typedef short s16;
typedef COMPILER_DEPENDENT_UINT64 u64;
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a1b25e3..b7299fe 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -220,7 +220,7 @@ enum rq_flag_bits {
/* This mask is used for both bio and request merge checking */
#define REQ_NOMERGE_FLAGS \
- (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+ (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_THROTTLED (1ULL << __REQ_THROTTLED)
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index cdf13ca..371e560 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -9,10 +9,24 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
-
/* Optimization barrier */
+
/* The "volatile" is due to gcc bugs */
#define barrier() __asm__ __volatile__("": : :"memory")
+/*
+ * This version is here e.g. to prevent dead-store elimination on @ptr,
+ * where gcc and llvm may behave differently when otherwise using a
+ * normal barrier(): while gcc gets along with a plain barrier(), llvm
+ * needs an explicit input variable to be assumed clobbered. The issue
+ * is as follows: while the inline asm might access any memory it wants,
+ * the compiler could have fit all of @ptr into registers instead, and
+ * since @ptr never escaped from there, it proved that the inline asm
+ * wasn't touching any of it. This version works well with both
+ * compilers, i.e. we're telling the compiler that the inline asm
+ * absolutely may see the contents of @ptr.
+ * See also: https://llvm.org/bugs/show_bug.cgi?id=15495
+ */
+#define barrier_data(ptr) __asm__ __volatile__("": :"r"(ptr) :"memory")
/*
* This macro obfuscates arithmetic on a variable address so that gcc
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index ba147a1..0c9a2f2 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -13,9 +13,12 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
+#undef barrier_data
#undef RELOC_HIDE
#undef OPTIMIZER_HIDE_VAR
+#define barrier_data(ptr) barrier()
+
#define RELOC_HIDE(ptr, off) \
({ unsigned long __ptr; \
__ptr = (unsigned long) (ptr); \
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 0e41ca0..8677225 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -169,6 +169,10 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
# define barrier() __memory_barrier()
#endif
+#ifndef barrier_data
+# define barrier_data(ptr) barrier()
+#endif
+
/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
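As a usage illustration of the new barrier_data() helper (a sketch only; scrub_secret() is a made-up example rather than part of this series -- the real in-tree user is the memzero_explicit() change to lib/string.c further down in this diff):

	#include <linux/compiler.h>
	#include <linux/string.h>

	/* Clear a sensitive buffer and keep the compiler from proving the
	 * cleared memory is dead and eliding the memset(). */
	static void scrub_secret(void *buf, size_t len)
	{
		memset(buf, 0, len);
		barrier_data(buf);	/* the asm may "read" *buf, so the stores stay */
	}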
diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h
index 46e83c2..f9ecf63 100644
--- a/include/linux/ftrace_event.h
+++ b/include/linux/ftrace_event.h
@@ -46,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len);
const char *ftrace_print_array_seq(struct trace_seq *p,
- const void *buf, int buf_len,
+ const void *buf, int count,
size_t el_size);
struct trace_iterator;
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 36ec4ae..9de976b 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -95,8 +95,6 @@
struct device_node;
-extern struct irq_chip gic_arch_extn;
-
void gic_set_irqchip_flags(unsigned long flags);
void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
u32 offset, struct device_node *);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index e60a745..e804306 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -40,6 +40,10 @@
#error KEXEC_CONTROL_MEMORY_LIMIT not defined
#endif
+#ifndef KEXEC_CONTROL_MEMORY_GFP
+#define KEXEC_CONTROL_MEMORY_GFP GFP_KERNEL
+#endif
+
#ifndef KEXEC_CONTROL_PAGE_SIZE
#error KEXEC_CONTROL_PAGE_SIZE not defined
#endif
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index bcbde79..1899c74 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -60,6 +60,7 @@ struct phy_device;
struct wireless_dev;
/* 802.15.4 specific */
struct wpan_dev;
+struct mpls_dev;
void netdev_set_default_ethtool_ops(struct net_device *dev,
const struct ethtool_ops *ops);
@@ -976,7 +977,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
* int (*ndo_bridge_setlink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags)
* int (*ndo_bridge_getlink)(struct sk_buff *skb, u32 pid, u32 seq,
- * struct net_device *dev, u32 filter_mask)
+ * struct net_device *dev, u32 filter_mask,
+ * int nlflags)
* int (*ndo_bridge_dellink)(struct net_device *dev, struct nlmsghdr *nlh,
* u16 flags);
*
@@ -1172,7 +1174,8 @@ struct net_device_ops {
int (*ndo_bridge_getlink)(struct sk_buff *skb,
u32 pid, u32 seq,
struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask,
+ int nlflags);
int (*ndo_bridge_dellink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags);
@@ -1627,6 +1630,9 @@ struct net_device {
void *ax25_ptr;
struct wireless_dev *ieee80211_ptr;
struct wpan_dev *ieee802154_ptr;
+#if IS_ENABLED(CONFIG_MPLS_ROUTING)
+ struct mpls_dev __rcu *mpls_ptr;
+#endif
/*
* Cache lines mostly used on receive path (including eth_type_trans())
@@ -2021,10 +2027,10 @@ struct pcpu_sw_netstats {
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
if (pcpu_stats) { \
- int i; \
- for_each_possible_cpu(i) { \
+ int __cpu; \
+ for_each_possible_cpu(__cpu) { \
typeof(type) *stat; \
- stat = per_cpu_ptr(pcpu_stats, i); \
+ stat = per_cpu_ptr(pcpu_stats, __cpu); \
u64_stats_init(&stat->syncp); \
} \
} \
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index ab8f76d..f2fdb5a 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -39,12 +39,24 @@ static inline void br_drop_fake_rtable(struct sk_buff *skb)
static inline int nf_bridge_get_physinif(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physindev->ifindex : 0;
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physindev ? nf_bridge->physindev->ifindex : 0;
}
static inline int nf_bridge_get_physoutif(const struct sk_buff *skb)
{
- return skb->nf_bridge ? skb->nf_bridge->physoutdev->ifindex : 0;
+ struct nf_bridge_info *nf_bridge;
+
+ if (skb->nf_bridge == NULL)
+ return 0;
+
+ nf_bridge = skb->nf_bridge;
+ return nf_bridge->physoutdev ? nf_bridge->physoutdev->ifindex : 0;
}
static inline struct net_device *
diff --git a/include/linux/nilfs2_fs.h b/include/linux/nilfs2_fs.h
index ff3fea3..9abb763 100644
--- a/include/linux/nilfs2_fs.h
+++ b/include/linux/nilfs2_fs.h
@@ -460,7 +460,7 @@ struct nilfs_btree_node {
/* level */
#define NILFS_BTREE_LEVEL_DATA 0
#define NILFS_BTREE_LEVEL_NODE_MIN (NILFS_BTREE_LEVEL_DATA + 1)
-#define NILFS_BTREE_LEVEL_MAX 14
+#define NILFS_BTREE_LEVEL_MAX 14 /* Max level (exclusive) */
/**
* struct nilfs_palloc_group_desc - block group descriptor
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 38cff8f..2f7b9a4 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2541,10 +2541,6 @@
#define PCI_VENDOR_ID_INTEL 0x8086
#define PCI_DEVICE_ID_INTEL_EESSC 0x0008
-#define PCI_DEVICE_ID_INTEL_SNB_IMC 0x0100
-#define PCI_DEVICE_ID_INTEL_IVB_IMC 0x0154
-#define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
-#define PCI_DEVICE_ID_INTEL_HSW_IMC 0x0c00
#define PCI_DEVICE_ID_INTEL_PXHD_0 0x0320
#define PCI_DEVICE_ID_INTEL_PXHD_1 0x0321
#define PCI_DEVICE_ID_INTEL_PXH_0 0x0329
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index e23d242..dbcbcc5 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -282,7 +282,8 @@ static inline bool rht_shrink_below_30(const struct rhashtable *ht,
static inline bool rht_grow_above_100(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
- return atomic_read(&ht->nelems) > tbl->size;
+ return atomic_read(&ht->nelems) > tbl->size &&
+ (!ht->p.max_size || tbl->size < ht->p.max_size);
}
/* The bucket lock is selected based on the hash and protects mutations
diff --git a/include/linux/rtnetlink.h b/include/linux/rtnetlink.h
index 2da5d10..7b8e260 100644
--- a/include/linux/rtnetlink.h
+++ b/include/linux/rtnetlink.h
@@ -122,5 +122,5 @@ extern int ndo_dflt_fdb_del(struct ndmsg *ndm,
extern int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask);
+ u32 flags, u32 mask, int nlflags);
#endif /* __LINUX_RTNETLINK_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 8222ae4..26a2e61 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -175,14 +175,6 @@ extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
extern void calc_global_load(unsigned long ticks);
extern void update_cpu_load_nohz(void);
-/* Notifier for when a task gets migrated to a new CPU */
-struct task_migration_notifier {
- struct task_struct *task;
- int from_cpu;
- int to_cpu;
-};
-extern void register_task_migration_notifier(struct notifier_block *n);
-
extern unsigned long get_parent_ip(unsigned long addr);
extern void dump_cpu_task(int cpu);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 06793b5..66e374d 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -773,6 +773,7 @@ bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
struct sk_buff *__alloc_skb(unsigned int size, gfp_t priority, int flags,
int node);
+struct sk_buff *__build_skb(void *data, unsigned int frag_size);
struct sk_buff *build_skb(void *data, unsigned int frag_size);
static inline struct sk_buff *alloc_skb(unsigned int size,
gfp_t priority)
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 358a337..fe5623c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -491,6 +491,7 @@ static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
extern void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old);
extern int tty_termios_hw_change(struct ktermios *a, struct ktermios *b);
+extern int tty_set_termios(struct tty_struct *tty, struct ktermios *kt);
extern struct tty_ldisc *tty_ldisc_ref(struct tty_struct *);
extern void tty_ldisc_deref(struct tty_ldisc *);
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index a7f2604..7f5f78b 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -77,6 +77,8 @@
/* Cannot handle ATA_12 or ATA_16 CDBs */ \
US_FLAG(NO_REPORT_OPCODES, 0x04000000) \
/* Cannot handle MI_REPORT_SUPPORTED_OPERATION_CODES */ \
+ US_FLAG(MAX_SECTORS_240, 0x08000000) \
+ /* Sets max_sectors to 240 */ \
#define US_FLAG(name, value) US_FL_##name = value ,
enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/util_macros.h b/include/linux/util_macros.h
index d5f4fb6..f9b2ce5 100644
--- a/include/linux/util_macros.h
+++ b/include/linux/util_macros.h
@@ -5,7 +5,7 @@
({ \
typeof(as) __fc_i, __fc_as = (as) - 1; \
typeof(x) __fc_x = (x); \
- typeof(*a) *__fc_a = (a); \
+ typeof(*a) const *__fc_a = (a); \
for (__fc_i = 0; __fc_i < __fc_as; __fc_i++) { \
if (__fc_x op DIV_ROUND_CLOSEST(__fc_a[__fc_i] + \
__fc_a[__fc_i + 1], 2)) \
diff --git a/include/net/bonding.h b/include/net/bonding.h
index fda6fee..78ed135 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -30,13 +30,6 @@
#include <net/bond_alb.h>
#include <net/bond_options.h>
-#define DRV_VERSION "3.7.1"
-#define DRV_RELDATE "April 27, 2011"
-#define DRV_NAME "bonding"
-#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
-
-#define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
-
#define BOND_MAX_ARP_TARGETS 16
#define BOND_DEFAULT_MIIMON 100
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 7b5887c..48a81582 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -279,12 +279,6 @@ static inline void inet_csk_reqsk_queue_add(struct sock *sk,
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
-static inline void inet_csk_reqsk_queue_removed(struct sock *sk,
- struct request_sock *req)
-{
- reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
-}
-
static inline void inet_csk_reqsk_queue_added(struct sock *sk,
const unsigned long timeout)
{
@@ -306,19 +300,7 @@ static inline int inet_csk_reqsk_queue_is_full(const struct sock *sk)
return reqsk_queue_is_full(&inet_csk(sk)->icsk_accept_queue);
}
-static inline void inet_csk_reqsk_queue_unlink(struct sock *sk,
- struct request_sock *req)
-{
- reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req);
-}
-
-static inline void inet_csk_reqsk_queue_drop(struct sock *sk,
- struct request_sock *req)
-{
- inet_csk_reqsk_queue_unlink(sk, req);
- inet_csk_reqsk_queue_removed(sk, req);
- reqsk_put(req);
-}
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req);
void inet_csk_destroy_sock(struct sock *sk);
void inet_csk_prepare_forced_close(struct sock *sk);
diff --git a/include/net/request_sock.h b/include/net/request_sock.h
index fe41f3c..9f4265c 100644
--- a/include/net/request_sock.h
+++ b/include/net/request_sock.h
@@ -212,24 +212,6 @@ static inline int reqsk_queue_empty(struct request_sock_queue *queue)
return queue->rskq_accept_head == NULL;
}
-static inline void reqsk_queue_unlink(struct request_sock_queue *queue,
- struct request_sock *req)
-{
- struct listen_sock *lopt = queue->listen_opt;
- struct request_sock **prev;
-
- spin_lock(&queue->syn_wait_lock);
-
- prev = &lopt->syn_table[req->rsk_hash];
- while (*prev != req)
- prev = &(*prev)->dl_next;
- *prev = req->dl_next;
-
- spin_unlock(&queue->syn_wait_lock);
- if (del_timer(&req->rsk_timer))
- reqsk_put(req);
-}
-
static inline void reqsk_queue_add(struct request_sock_queue *queue,
struct request_sock *req,
struct sock *parent,
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index ce55906..ac54c27 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
}
/* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
{
if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
out_in->sin6_family = AF_INET6;
memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
}
- return 0;
}
static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 0e3ff30..39ed2d2 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
IB_CM_SIDR_REP_INFO_LENGTH = 72,
- IB_CM_COMPARE_SIZE = 64
+ /* compare done u32 at a time */
+ IB_CM_COMPARE_SIZE = (64 / sizeof(u32))
};
struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
#define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
struct ib_cm_compare_data {
- u8 data[IB_CM_COMPARE_SIZE];
- u8 mask[IB_CM_COMPARE_SIZE];
+ u32 data[IB_CM_COMPARE_SIZE];
+ u32 mask[IB_CM_COMPARE_SIZE];
};
/**
diff --git a/include/rdma/iw_portmap.h b/include/rdma/iw_portmap.h
index 928b277..fda3167 100644
--- a/include/rdma/iw_portmap.h
+++ b/include/rdma/iw_portmap.h
@@ -148,6 +148,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ * the port mapper has received from the connecting peer
+ *
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
+ */
+int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
+
+/**
* iwpm_mapping_error_cb - Process port mapper notification for error
*
* @skb:
@@ -175,6 +185,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * the remote_addr. After that, it is removed from the hash table.
+ */
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+ struct sockaddr_storage *mapped_rem_addr,
+ struct sockaddr_storage *remote_addr, u8 nl_client);
+
+/**
* iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
* info in a hash table
* @local_addr: Local ip/tcp address
diff --git a/include/scsi/scsi_devinfo.h b/include/scsi/scsi_devinfo.h
index 183eaab..96e3f56 100644
--- a/include/scsi/scsi_devinfo.h
+++ b/include/scsi/scsi_devinfo.h
@@ -36,5 +36,6 @@
for sequential scan */
#define BLIST_TRY_VPD_PAGES 0x10000000 /* Attempt to read VPD pages */
#define BLIST_NO_RSOC 0x20000000 /* don't try to issue RSOC */
+#define BLIST_MAX_1024 0x40000000 /* maximum 1024 sector cdb length */
#endif
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index a3318f3..915980a 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -155,7 +155,7 @@ static inline unsigned vring_size(unsigned int num, unsigned long align)
}
/* The following is used with USED_EVENT_IDX and AVAIL_EVENT_IDX */
-/* Assuming a given event_idx value from the other size, if
+/* Assuming a given event_idx value from the other side, if
* we have just incremented index from old to new_idx,
* should we trigger an event? */
static inline int vring_need_event(__u16 event_idx, __u16 new_idx, __u16 old)
diff --git a/include/uapi/rdma/rdma_netlink.h b/include/uapi/rdma/rdma_netlink.h
index de69170..6e4bb42 100644
--- a/include/uapi/rdma/rdma_netlink.h
+++ b/include/uapi/rdma/rdma_netlink.h
@@ -37,6 +37,7 @@ enum {
RDMA_NL_IWPM_ADD_MAPPING,
RDMA_NL_IWPM_QUERY_MAPPING,
RDMA_NL_IWPM_REMOVE_MAPPING,
+ RDMA_NL_IWPM_REMOTE_INFO,
RDMA_NL_IWPM_HANDLE_ERR,
RDMA_NL_IWPM_MAPINFO,
RDMA_NL_IWPM_MAPINFO_NUM,
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 143ca5f..4478f4b 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -191,6 +191,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
struct gnttab_unmap_grant_ref *kunmap_ops,
struct page **pages, unsigned int count);
void gnttab_unmap_refs_async(struct gntab_unmap_queue_data* item);
+int gnttab_unmap_refs_sync(struct gntab_unmap_queue_data *item);
/* Perform a batch of grant map/copy operations. Retry every batch slot
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index c643e6a..0ce4f32 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -13,6 +13,7 @@ void xen_arch_post_suspend(int suspend_cancelled);
void xen_timer_resume(void);
void xen_arch_resume(void);
+void xen_arch_suspend(void);
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 8369ffa..a95bbdb 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -225,10 +225,11 @@ dev_t name_to_dev_t(const char *name)
#endif
if (strncmp(name, "/dev/", 5) != 0) {
- unsigned maj, min;
+ unsigned maj, min, offset;
char dummy;
- if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) {
+ if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) ||
+ (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) {
res = MKDEV(maj, min);
if (maj != MAJOR(res) || min != MINOR(res))
goto fail;
diff --git a/kernel/Makefile b/kernel/Makefile
index 0f8f8b0..60c302c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -197,9 +197,9 @@ x509.genkey:
@echo >>x509.genkey "x509_extensions = myexts"
@echo >>x509.genkey
@echo >>x509.genkey "[ req_distinguished_name ]"
- @echo >>x509.genkey "O = Magrathea"
- @echo >>x509.genkey "CN = Glacier signing key"
- @echo >>x509.genkey "emailAddress = slartibartfast@magrathea.h2g2"
+ @echo >>x509.genkey "#O = Unspecified company"
+ @echo >>x509.genkey "CN = Build time autogenerated kernel key"
+ @echo >>x509.genkey "#emailAddress = unspecified.user@unspecified.company"
@echo >>x509.genkey
@echo >>x509.genkey "[ myexts ]"
@echo >>x509.genkey "basicConstraints=critical,CA:FALSE"
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 4139a0f..54f0e7f 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -357,8 +357,8 @@ select_insn:
ALU64_MOD_X:
if (unlikely(SRC == 0))
return 0;
- tmp = DST;
- DST = do_div(tmp, SRC);
+ div64_u64_rem(DST, SRC, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_X:
if (unlikely(SRC == 0))
@@ -367,8 +367,8 @@ select_insn:
DST = do_div(tmp, (u32) SRC);
CONT;
ALU64_MOD_K:
- tmp = DST;
- DST = do_div(tmp, IMM);
+ div64_u64_rem(DST, IMM, &tmp);
+ DST = tmp;
CONT;
ALU_MOD_K:
tmp = (u32) DST;
@@ -377,7 +377,7 @@ select_insn:
ALU64_DIV_X:
if (unlikely(SRC == 0))
return 0;
- do_div(DST, SRC);
+ DST = div64_u64(DST, SRC);
CONT;
ALU_DIV_X:
if (unlikely(SRC == 0))
@@ -387,7 +387,7 @@ select_insn:
DST = (u32) tmp;
CONT;
ALU64_DIV_K:
- do_div(DST, IMM);
+ DST = div64_u64(DST, IMM);
CONT;
ALU_DIV_K:
tmp = (u32) DST;
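A brief note on why the hunk above switches helpers (illustrative sketch with assumed example values): do_div() divides a 64-bit dividend by a 32-bit divisor in place, so a genuinely 64-bit SRC or IMM would be truncated before the division, while div64_u64() and div64_u64_rem() accept full 64-bit divisors.

	#include <linux/math64.h>

	static void div64_example(void)
	{
		u64 dividend = 0x100000000ULL;	/* 2^32 */
		u64 divisor  = 0x100000000ULL;	/* truncates to 0 as a u32 */
		u64 quot, rem;

		quot = div64_u64(dividend, divisor);	/* quot == 1 */
		div64_u64_rem(dividend, divisor, &rem);	/* rem == 0 */
		/* do_div(dividend, (u32)divisor) would divide by zero after
		 * the divisor is truncated to 32 bits. */
		(void)quot;
		(void)rem;
	}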
diff --git a/kernel/irq/dummychip.c b/kernel/irq/dummychip.c
index 988dc58..2feb6fe 100644
--- a/kernel/irq/dummychip.c
+++ b/kernel/irq/dummychip.c
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = {
.irq_ack = noop,
.irq_mask = noop,
.irq_unmask = noop,
+ .flags = IRQCHIP_SKIP_SET_WAKE,
};
EXPORT_SYMBOL_GPL(dummy_irq_chip);
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 38c25b1..7a36fdc 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -707,7 +707,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
do {
unsigned long pfn, epfn, addr, eaddr;
- pages = kimage_alloc_pages(GFP_KERNEL, order);
+ pages = kimage_alloc_pages(KEXEC_CONTROL_MEMORY_GFP, order);
if (!pages)
break;
pfn = page_to_pfn(pages);
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 233165d..8cf7304 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -162,11 +162,14 @@ static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
module_param(kthread_prio, int, 0644);
-/* Delay in jiffies for grace-period initialization delays. */
-static int gp_init_delay = IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT)
- ? CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY
- : 0;
+/* Delay in jiffies injected during grace-period initialization, debug only. */
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
+static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
module_param(gp_init_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+static const int gp_init_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
+#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
/*
* Track the rcutorture test sequence number and the update version
@@ -1843,9 +1846,8 @@ static int rcu_gp_init(struct rcu_state *rsp)
raw_spin_unlock_irq(&rnp->lock);
cond_resched_rcu_qs();
ACCESS_ONCE(rsp->gp_activity) = jiffies;
- if (IS_ENABLED(CONFIG_RCU_TORTURE_TEST_SLOW_INIT) &&
- gp_init_delay > 0 &&
- !(rsp->gpnum % (rcu_num_nodes * 10)))
+ if (gp_init_delay > 0 &&
+ !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
schedule_timeout_uninterruptible(gp_init_delay);
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f9123a8..fe22f75 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1016,13 +1016,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
rq_clock_skip_update(rq, true);
}
-static ATOMIC_NOTIFIER_HEAD(task_migration_notifier);
-
-void register_task_migration_notifier(struct notifier_block *n)
-{
- atomic_notifier_chain_register(&task_migration_notifier, n);
-}
-
#ifdef CONFIG_SMP
void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
{
@@ -1053,18 +1046,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
trace_sched_migrate_task(p, new_cpu);
if (task_cpu(p) != new_cpu) {
- struct task_migration_notifier tmn;
-
if (p->sched_class->migrate_task_rq)
p->sched_class->migrate_task_rq(p, new_cpu);
p->se.nr_migrations++;
perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
-
- tmn.task = p;
- tmn.from_cpu = task_cpu(p);
- tmn.to_cpu = new_cpu;
-
- atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn);
}
__set_task_cpu(p, new_cpu);
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index deef1ca..fefcb1f 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -81,7 +81,6 @@ static void cpuidle_idle_call(void)
struct cpuidle_device *dev = __this_cpu_read(cpuidle_devices);
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
int next_state, entered_state;
- unsigned int broadcast;
bool reflect;
/*
@@ -150,17 +149,6 @@ static void cpuidle_idle_call(void)
goto exit_idle;
}
- broadcast = drv->states[next_state].flags & CPUIDLE_FLAG_TIMER_STOP;
-
- /*
- * Tell the time framework to switch to a broadcast timer
- * because our local timer will be shutdown. If a local timer
- * is used from another cpu as a broadcast timer, this call may
- * fail if it is not available
- */
- if (broadcast && tick_broadcast_enter())
- goto use_default;
-
/* Take note of the planned idle state. */
idle_set_state(this_rq(), &drv->states[next_state]);
@@ -174,8 +162,8 @@ static void cpuidle_idle_call(void)
/* The cpu is no longer idle or about to enter idle. */
idle_set_state(this_rq(), NULL);
- if (broadcast)
- tick_broadcast_exit();
+ if (entered_state == -EBUSY)
+ goto use_default;
/*
* Give the governor an opportunity to reflect on the outcome
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 11dc22a..637a094 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
/* Transition with new state-specific callbacks */
switch (state) {
case CLOCK_EVT_STATE_DETACHED:
- /*
- * This is an internal state, which is guaranteed to go from
- * SHUTDOWN to DETACHED. No driver interaction required.
- */
- return 0;
+ /* The clockevent device is getting replaced. Shut it down. */
case CLOCK_EVT_STATE_SHUTDOWN:
return dev->set_state_shutdown(dev);
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 692bf71..25a086b 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
EXPORT_SYMBOL(ftrace_print_hex_seq);
const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
+ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
size_t el_size)
{
const char *ret = trace_seq_buffer_ptr(p);
const char *prefix = "";
void *ptr = (void *)buf;
+ size_t buf_len = count * el_size;
trace_seq_putc(p, '{');
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 1767057..ba2b0c8 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1281,6 +1281,7 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
int "How much to slow down RCU grace-period initialization"
range 0 5
default 3
+ depends on RCU_TORTURE_TEST_SLOW_INIT
help
This option specifies the number of jiffies to wait between
each rcu_node structure initialization.
diff --git a/lib/Kconfig.kasan b/lib/Kconfig.kasan
index 4fecaedc..777eda7 100644
--- a/lib/Kconfig.kasan
+++ b/lib/Kconfig.kasan
@@ -10,8 +10,11 @@ config KASAN
help
Enables kernel address sanitizer - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
- This is strictly debugging feature. It consumes about 1/8
- of available memory and brings about ~x3 performance slowdown.
+ This is strictly a debugging feature and it requires a gcc version
+ of 4.9.2 or later. Detection of out-of-bounds accesses to stack or
+ global variables requires gcc 5.0 or later.
+ This feature consumes about 1/8 of available memory and brings about
+ a ~3x performance slowdown.
For better error detection enable CONFIG_STACKTRACE,
and add slub_debug=U to boot cmdline.
@@ -40,6 +43,7 @@ config KASAN_INLINE
memory accesses. This is faster than outline (in some workloads
it gives about x2 boost over outline instrumentation), but
make kernel's .text size much bigger.
+ This requires a gcc version of 5.0 or later.
endchoice
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c
deleted file mode 100644
index 3e3be40..0000000
--- a/lib/find_last_bit.c
+++ /dev/null
@@ -1,41 +0,0 @@
-/* find_last_bit.c: fallback find next bit implementation
- *
- * Copyright (C) 2008 IBM Corporation
- * Written by Rusty Russell <rusty@rustcorp.com.au>
- * (Inspired by David Howell's find_next_bit implementation)
- *
- * Rewritten by Yury Norov <yury.norov@gmail.com> to decrease
- * size and improve performance, 2015.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/bitops.h>
-#include <linux/bitmap.h>
-#include <linux/export.h>
-#include <linux/kernel.h>
-
-#ifndef find_last_bit
-
-unsigned long find_last_bit(const unsigned long *addr, unsigned long size)
-{
- if (size) {
- unsigned long val = BITMAP_LAST_WORD_MASK(size);
- unsigned long idx = (size-1) / BITS_PER_LONG;
-
- do {
- val &= addr[idx];
- if (val)
- return idx * BITS_PER_LONG + __fls(val);
-
- val = ~0ul;
- } while (idx--);
- }
- return size;
-}
-EXPORT_SYMBOL(find_last_bit);
-
-#endif
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 4898442..b28df40 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -405,13 +405,18 @@ int rhashtable_insert_rehash(struct rhashtable *ht)
if (rht_grow_above_75(ht, tbl))
size *= 2;
- /* More than two rehashes (not resizes) detected. */
- else if (WARN_ON(old_tbl != tbl && old_tbl->size == size))
+ /* Do not schedule more than one rehash */
+ else if (old_tbl != tbl)
return -EBUSY;
new_tbl = bucket_table_alloc(ht, size, GFP_ATOMIC);
- if (new_tbl == NULL)
+ if (new_tbl == NULL) {
+ /* Schedule an async resize/rehash to retry the allocation
+ * in non-atomic context.
+ */
+ schedule_work(&ht->run_work);
return -ENOMEM;
+ }
err = rhashtable_rehash_attach(ht, tbl, new_tbl);
if (err) {
diff --git a/lib/string.c b/lib/string.c
index a579201..bb3d4b6 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -607,7 +607,7 @@ EXPORT_SYMBOL(memset);
void memzero_explicit(void *s, size_t count)
{
memset(s, 0, count);
- barrier();
+ barrier_data(s);
}
EXPORT_SYMBOL(memzero_explicit);
diff --git a/mm/hwpoison-inject.c b/mm/hwpoison-inject.c
index 329caf5..4ca5fe0 100644
--- a/mm/hwpoison-inject.c
+++ b/mm/hwpoison-inject.c
@@ -34,13 +34,13 @@ static int hwpoison_inject(void *data, u64 val)
if (!hwpoison_filter_enable)
goto inject;
- if (!PageLRU(p) && !PageHuge(p))
- shake_page(p, 0);
+ if (!PageLRU(hpage) && !PageHuge(p))
+ shake_page(hpage, 0);
/*
* This implies unable to support non-LRU pages.
*/
- if (!PageLRU(p) && !PageHuge(p))
- return 0;
+ if (!PageLRU(hpage) && !PageHuge(p))
+ goto put_out;
/*
* do a racy check with elevated page count, to make sure PG_hwpoison
@@ -52,11 +52,14 @@ static int hwpoison_inject(void *data, u64 val)
err = hwpoison_filter(hpage);
unlock_page(hpage);
if (err)
- return 0;
+ goto put_out;
inject:
pr_info("Injecting memory failure at pfn %#lx\n", pfn);
return memory_failure(pfn, 18, MF_COUNT_INCREASED);
+put_out:
+ put_page(hpage);
+ return 0;
}
static int hwpoison_unpoison(void *data, u64 val)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index d9359b7..501820c 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1187,10 +1187,10 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
* The check (unnecessarily) ignores LRU pages being isolated and
* walked by the page reclaim code, however that's not a big loss.
*/
- if (!PageHuge(p) && !PageTransTail(p)) {
- if (!PageLRU(p))
- shake_page(p, 0);
- if (!PageLRU(p)) {
+ if (!PageHuge(p)) {
+ if (!PageLRU(hpage))
+ shake_page(hpage, 0);
+ if (!PageLRU(hpage)) {
/*
* shake_page could have turned it free.
*/
@@ -1777,12 +1777,12 @@ int soft_offline_page(struct page *page, int flags)
} else if (ret == 0) { /* for free pages */
if (PageHuge(page)) {
set_page_hwpoison_huge_page(hpage);
- dequeue_hwpoisoned_huge_page(hpage);
- atomic_long_add(1 << compound_order(hpage),
+ if (!dequeue_hwpoisoned_huge_page(hpage))
+ atomic_long_add(1 << compound_order(hpage),
&num_poisoned_pages);
} else {
- SetPageHWPoison(page);
- atomic_long_inc(&num_poisoned_pages);
+ if (!TestSetPageHWPoison(page))
+ atomic_long_inc(&num_poisoned_pages);
}
}
unset_migratetype_isolate(page, MIGRATE_MOVABLE);
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 5daf556..eb59f7e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
long x;
x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
- limit - setpoint + 1);
+ (limit - setpoint) | 1);
pos_ratio = x;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
* scale global setpoint to bdi's:
* bdi_setpoint = setpoint * bdi_thresh / thresh
*/
- x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+ x = div_u64((u64)bdi_thresh << 16, thresh | 1);
bdi_setpoint = setpoint * (u64)x >> 16;
/*
* Use span=(8*write_bw) in single bdi case as indicated by
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
if (bdi_dirty < x_intercept - span / 4) {
pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
- x_intercept - bdi_setpoint + 1);
+ (x_intercept - bdi_setpoint) | 1);
} else
pos_ratio /= 4;
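All three page-writeback hunks replace "+ 1" with "| 1" when forming a divisor that must never be zero. Both avoid a zero divisor in the common case, but when the unsigned difference has already wrapped to the type's maximum (setpoint one past limit, for instance), adding 1 wraps it right back to zero and the division faults, whereas OR-ing in the low bit leaves it large and nonzero and does not disturb odd values at all. A standalone illustration with made-up numbers:
#include <stdio.h>

int main(void)
{
	unsigned long limit = 100, setpoint = 101;	/* setpoint one past limit */
	unsigned long diff = limit - setpoint;		/* wraps to ULONG_MAX */

	printf("plus-one divisor: %lu\n", diff + 1);	/* wraps again, to 0 */
	printf("or-one divisor:   %lu\n", diff | 1);	/* stays ULONG_MAX   */
	return 0;
}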
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 4096089..e29ad70 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -170,7 +170,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,
struct br_port_msg *bpm;
struct nlattr *nest, *nest2;
- nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, type, sizeof(*bpm), 0);
if (!nlh)
return -EMSGSIZE;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 0e4ddb8..4b5c236 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -394,7 +394,7 @@ errout:
* Dump information about all ports, in response to GETLINK
*/
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
- struct net_device *dev, u32 filter_mask)
+ struct net_device *dev, u32 filter_mask, int nlflags)
{
struct net_bridge_port *port = br_port_get_rtnl(dev);
@@ -402,7 +402,7 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
!(filter_mask & RTEXT_FILTER_BRVLAN_COMPRESSED))
return 0;
- return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, NLM_F_MULTI,
+ return br_fill_ifinfo(skb, port, pid, seq, RTM_NEWLINK, nlflags,
filter_mask, dev);
}
diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h
index 6ca0251..3362c29 100644
--- a/net/bridge/br_private.h
+++ b/net/bridge/br_private.h
@@ -828,7 +828,7 @@ void br_ifinfo_notify(int event, struct net_bridge_port *port);
int br_setlink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_dellink(struct net_device *dev, struct nlmsghdr *nlmsg, u16 flags);
int br_getlink(struct sk_buff *skb, u32 pid, u32 seq, struct net_device *dev,
- u32 filter_mask);
+ u32 filter_mask, int nlflags);
#ifdef CONFIG_SYSFS
/* br_sysfs_if.c */
diff --git a/net/core/dev.c b/net/core/dev.c
index 1796cef5..c7ba038 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3079,7 +3079,7 @@ static struct rps_dev_flow *
set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
struct rps_dev_flow *rflow, u16 next_cpu)
{
- if (next_cpu != RPS_NO_CPU) {
+ if (next_cpu < nr_cpu_ids) {
#ifdef CONFIG_RFS_ACCEL
struct netdev_rx_queue *rxqueue;
struct rps_dev_flow_table *flow_table;
@@ -3184,7 +3184,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* If the desired CPU (where last recvmsg was done) is
* different from current CPU (one in the rx-queue flow
* table entry), switch if one of the following holds:
- * - Current CPU is unset (equal to RPS_NO_CPU).
+ * - Current CPU is unset (>= nr_cpu_ids).
* - Current CPU is offline.
* - The current CPU's queue tail has advanced beyond the
* last packet that was enqueued using this table entry.
@@ -3192,14 +3192,14 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
* have been dequeued, thus preserving in order delivery.
*/
if (unlikely(tcpu != next_cpu) &&
- (tcpu == RPS_NO_CPU || !cpu_online(tcpu) ||
+ (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
((int)(per_cpu(softnet_data, tcpu).input_queue_head -
rflow->last_qtail)) >= 0)) {
tcpu = next_cpu;
rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
}
- if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) {
+ if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
*rflowp = rflow;
cpu = tcpu;
goto done;
@@ -3240,14 +3240,14 @@ bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
struct rps_dev_flow_table *flow_table;
struct rps_dev_flow *rflow;
bool expire = true;
- int cpu;
+ unsigned int cpu;
rcu_read_lock();
flow_table = rcu_dereference(rxqueue->rps_flow_table);
if (flow_table && flow_id <= flow_table->mask) {
rflow = &flow_table->flows[flow_id];
cpu = ACCESS_ONCE(rflow->cpu);
- if (rflow->filter == filter_id && cpu != RPS_NO_CPU &&
+ if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
((int)(per_cpu(softnet_data, cpu).input_queue_head -
rflow->last_qtail) <
(int)(10 * flow_table->mask)))
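The RPS/RFS hunks above stop comparing flow CPUs against the RPS_NO_CPU sentinel and instead treat any value at or above nr_cpu_ids as "no CPU set". That covers the old 0xffff sentinel as well as any stale entry, so an out-of-range id can never be used to index per-CPU data, and rps_may_expire_flow() makes cpu unsigned so the bound check is well defined. A hedged sketch of the validity test these paths now share (helper name is illustrative):
static inline bool rps_cpu_usable(unsigned int cpu)
{
	return cpu < nr_cpu_ids && cpu_online(cpu);
}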
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 358d52a..666e092 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2854,7 +2854,7 @@ static int brport_nla_put_flag(struct sk_buff *skb, u32 flags, u32 mask,
int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
struct net_device *dev, u16 mode,
- u32 flags, u32 mask)
+ u32 flags, u32 mask, int nlflags)
{
struct nlmsghdr *nlh;
struct ifinfomsg *ifm;
@@ -2863,7 +2863,7 @@ int ndo_dflt_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq,
u8 operstate = netif_running(dev) ? dev->operstate : IF_OPER_DOWN;
struct net_device *br_dev = netdev_master_upper_dev_get(dev);
- nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), NLM_F_MULTI);
+ nlh = nlmsg_put(skb, pid, seq, RTM_NEWLINK, sizeof(*ifm), nlflags);
if (nlh == NULL)
return -EMSGSIZE;
@@ -2969,7 +2969,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
if (br_dev && br_dev->netdev_ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
br_dev->netdev_ops->ndo_bridge_getlink(
- skb, portid, seq, dev, filter_mask) < 0)
+ skb, portid, seq, dev, filter_mask,
+ NLM_F_MULTI) < 0)
break;
idx++;
}
@@ -2977,7 +2978,8 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
if (ops->ndo_bridge_getlink) {
if (idx >= cb->args[0] &&
ops->ndo_bridge_getlink(skb, portid, seq, dev,
- filter_mask) < 0)
+ filter_mask,
+ NLM_F_MULTI) < 0)
break;
idx++;
}
@@ -3018,7 +3020,7 @@ static int rtnl_bridge_notify(struct net_device *dev)
goto errout;
}
- err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0);
+ err = dev->netdev_ops->ndo_bridge_getlink(skb, 0, 0, dev, 0, 0);
if (err < 0)
goto errout;
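The br_mdb.c, br_netlink.c, br_private.h and rtnetlink.c hunks all plumb an explicit nlflags argument into the bridge getlink paths for the same reason: NLM_F_MULTI tells userspace the message is one part of a multipart dump that will be terminated by NLMSG_DONE, so it is only correct in the dump callbacks; single replies and the rtnl_bridge_notify() path now pass 0. A hedged one-liner capturing the rule (illustrative helper, not a real API):
static inline int bridge_reply_nlflags(bool part_of_dump)
{
	return part_of_dump ? NLM_F_MULTI : 0;
}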
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index d1967da..3cfff2a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -280,13 +280,14 @@ nodata:
EXPORT_SYMBOL(__alloc_skb);
/**
- * build_skb - build a network buffer
+ * __build_skb - build a network buffer
* @data: data buffer provided by caller
- * @frag_size: size of fragment, or 0 if head was kmalloced
+ * @frag_size: size of data, or 0 if head was kmalloced
*
* Allocate a new &sk_buff. Caller provides space holding head and
* skb_shared_info. @data must have been allocated by kmalloc() only if
- * @frag_size is 0, otherwise data should come from the page allocator.
+ * @frag_size is 0, otherwise data should come from the page allocator
+ * or vmalloc()
* The return is the new skb buffer.
* On a failure the return is %NULL, and @data is not freed.
* Notes :
@@ -297,7 +298,7 @@ EXPORT_SYMBOL(__alloc_skb);
* before giving packet to stack.
* RX rings only contains data buffers, not full skbs.
*/
-struct sk_buff *build_skb(void *data, unsigned int frag_size)
+struct sk_buff *__build_skb(void *data, unsigned int frag_size)
{
struct skb_shared_info *shinfo;
struct sk_buff *skb;
@@ -311,7 +312,6 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
memset(skb, 0, offsetof(struct sk_buff, tail));
skb->truesize = SKB_TRUESIZE(size);
- skb->head_frag = frag_size != 0;
atomic_set(&skb->users, 1);
skb->head = data;
skb->data = data;
@@ -328,6 +328,23 @@ struct sk_buff *build_skb(void *data, unsigned int frag_size)
return skb;
}
+
+/* build_skb() is wrapper over __build_skb(), that specifically
+ * takes care of skb->head and skb->pfmemalloc
+ * This means that if @frag_size is not zero, then @data must be backed
+ * by a page fragment, not kmalloc() or vmalloc()
+ */
+struct sk_buff *build_skb(void *data, unsigned int frag_size)
+{
+ struct sk_buff *skb = __build_skb(data, frag_size);
+
+ if (skb && frag_size) {
+ skb->head_frag = 1;
+ if (virt_to_head_page(data)->pfmemalloc)
+ skb->pfmemalloc = 1;
+ }
+ return skb;
+}
EXPORT_SYMBOL(build_skb);
struct netdev_alloc_cache {
@@ -348,7 +365,8 @@ static struct page *__page_frag_refill(struct netdev_alloc_cache *nc,
gfp_t gfp = gfp_mask;
if (order) {
- gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY;
+ gfp_mask |= __GFP_COMP | __GFP_NOWARN | __GFP_NORETRY |
+ __GFP_NOMEMALLOC;
page = alloc_pages_node(NUMA_NO_NODE, gfp_mask, order);
nc->frag.size = PAGE_SIZE << (page ? order : 0);
}
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 2b4f21d..ccf4c56 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -453,7 +453,8 @@ static struct sock *dccp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
iph->saddr, iph->daddr);
if (req) {
nsk = dccp_check_req(sk, skb, req);
- reqsk_put(req);
+ if (!nsk)
+ reqsk_put(req);
return nsk;
}
nsk = inet_lookup_established(sock_net(sk), &dccp_hashinfo,
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9d05510..5165571 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -301,7 +301,8 @@ static struct sock *dccp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
&iph->daddr, inet6_iif(skb));
if (req) {
nsk = dccp_check_req(sk, skb, req);
- reqsk_put(req);
+ if (!nsk)
+ reqsk_put(req);
return nsk;
}
nsk = __inet6_lookup_established(sock_net(sk), &dccp_hashinfo,
diff --git a/net/dccp/minisocks.c b/net/dccp/minisocks.c
index 5f56666..30addee 100644
--- a/net/dccp/minisocks.c
+++ b/net/dccp/minisocks.c
@@ -186,8 +186,7 @@ struct sock *dccp_check_req(struct sock *sk, struct sk_buff *skb,
if (child == NULL)
goto listen_overflow;
- inet_csk_reqsk_queue_unlink(sk, req);
- inet_csk_reqsk_queue_removed(sk, req);
+ inet_csk_reqsk_queue_drop(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
out:
return child;
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 079a224..e6f6cc3 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -633,7 +633,7 @@ static int dsa_of_probe(struct device *dev)
if (cd->sw_addr > PHY_MAX_ADDR)
continue;
- if (!of_property_read_u32(np, "eeprom-length", &eeprom_len))
+ if (!of_property_read_u32(child, "eeprom-length", &eeprom_len))
cd->eeprom_len = eeprom_len;
for_each_available_child_of_node(child, port) {
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 5c3dd62..8976ca4 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -564,6 +564,40 @@ int inet_rtx_syn_ack(struct sock *parent, struct request_sock *req)
}
EXPORT_SYMBOL(inet_rtx_syn_ack);
+/* return true if req was found in the syn_table[] */
+static bool reqsk_queue_unlink(struct request_sock_queue *queue,
+ struct request_sock *req)
+{
+ struct listen_sock *lopt = queue->listen_opt;
+ struct request_sock **prev;
+ bool found = false;
+
+ spin_lock(&queue->syn_wait_lock);
+
+ for (prev = &lopt->syn_table[req->rsk_hash]; *prev != NULL;
+ prev = &(*prev)->dl_next) {
+ if (*prev == req) {
+ *prev = req->dl_next;
+ found = true;
+ break;
+ }
+ }
+
+ spin_unlock(&queue->syn_wait_lock);
+ if (del_timer(&req->rsk_timer))
+ reqsk_put(req);
+ return found;
+}
+
+void inet_csk_reqsk_queue_drop(struct sock *sk, struct request_sock *req)
+{
+ if (reqsk_queue_unlink(&inet_csk(sk)->icsk_accept_queue, req)) {
+ reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
+ reqsk_put(req);
+ }
+}
+EXPORT_SYMBOL(inet_csk_reqsk_queue_drop);
+
static void reqsk_timer_handler(unsigned long data)
{
struct request_sock *req = (struct request_sock *)data;
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index a93f260..05ff44b 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -158,6 +158,7 @@ void ping_unhash(struct sock *sk)
if (sk_hashed(sk)) {
write_lock_bh(&ping_table.lock);
hlist_nulls_del(&sk->sk_nulls_node);
+ sk_nulls_node_init(&sk->sk_nulls_node);
sock_put(sk);
isk->inet_num = 0;
isk->inet_sport = 0;
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index a78540f..bff62fc 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -962,10 +962,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
if (dst_metric_locked(dst, RTAX_MTU))
return;
- if (dst->dev->mtu < mtu)
- return;
-
- if (rt->rt_pmtu && rt->rt_pmtu < mtu)
+ if (ipv4_mtu(dst) < mtu)
return;
if (mtu < ip_rt_min_pmtu)
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 3571f2b..fc1c658 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,8 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- reqsk_put(req);
+ if (!nsk)
+ reqsk_put(req);
return nsk;
}
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index 63d6311..e5d7649 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -755,10 +755,11 @@ struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
if (!child)
goto listen_overflow;
- inet_csk_reqsk_queue_unlink(sk, req);
- inet_csk_reqsk_queue_removed(sk, req);
-
+ inet_csk_reqsk_queue_drop(sk, req);
inet_csk_reqsk_queue_add(sk, req, child);
+ /* Warning: caller must not call reqsk_put(req);
+ * child stole last reference on it.
+ */
return child;
listen_overflow:
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 8c8d7e0..a369e8a 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2812,39 +2812,65 @@ begin_fwd:
}
}
-/* Send a fin. The caller locks the socket for us. This cannot be
- * allowed to fail queueing a FIN frame under any circumstances.
+/* We allow to exceed memory limits for FIN packets to expedite
+ * connection tear down and (memory) recovery.
+ * Otherwise tcp_send_fin() could be tempted to either delay FIN
+ * or even be forced to close flow without any FIN.
+ */
+static void sk_forced_wmem_schedule(struct sock *sk, int size)
+{
+ int amt, status;
+
+ if (size <= sk->sk_forward_alloc)
+ return;
+ amt = sk_mem_pages(size);
+ sk->sk_forward_alloc += amt * SK_MEM_QUANTUM;
+ sk_memory_allocated_add(sk, amt, &status);
+}
+
+/* Send a FIN. The caller locks the socket for us.
+ * We should try to send a FIN packet really hard, but eventually give up.
*/
void tcp_send_fin(struct sock *sk)
{
+ struct sk_buff *skb, *tskb = tcp_write_queue_tail(sk);
struct tcp_sock *tp = tcp_sk(sk);
- struct sk_buff *skb = tcp_write_queue_tail(sk);
- int mss_now;
- /* Optimization, tack on the FIN if we have a queue of
- * unsent frames. But be careful about outgoing SACKS
- * and IP options.
+ /* Optimization, tack on the FIN if we have one skb in write queue and
+ * this skb was not yet sent, or we are under memory pressure.
+ * Note: in the latter case, FIN packet will be sent after a timeout,
+ * as TCP stack thinks it has already been transmitted.
*/
- mss_now = tcp_current_mss(sk);
-
- if (tcp_send_head(sk)) {
- TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN;
- TCP_SKB_CB(skb)->end_seq++;
+ if (tskb && (tcp_send_head(sk) || sk_under_memory_pressure(sk))) {
+coalesce:
+ TCP_SKB_CB(tskb)->tcp_flags |= TCPHDR_FIN;
+ TCP_SKB_CB(tskb)->end_seq++;
tp->write_seq++;
+ if (!tcp_send_head(sk)) {
+ /* This means tskb was already sent.
+ * Pretend we included the FIN on previous transmit.
+ * We need to set tp->snd_nxt to the value it would have
+ * if FIN had been sent. This is because retransmit path
+ * does not change tp->snd_nxt.
+ */
+ tp->snd_nxt++;
+ return;
+ }
} else {
- /* Socket is locked, keep trying until memory is available. */
- for (;;) {
- skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
- if (skb)
- break;
- yield();
+ skb = alloc_skb_fclone(MAX_TCP_HEADER, sk->sk_allocation);
+ if (unlikely(!skb)) {
+ if (tskb)
+ goto coalesce;
+ return;
}
+ skb_reserve(skb, MAX_TCP_HEADER);
+ sk_forced_wmem_schedule(sk, skb->truesize);
/* FIN eats a sequence byte, write_seq advanced by tcp_queue_skb(). */
tcp_init_nondata_skb(skb, tp->write_seq,
TCPHDR_ACK | TCPHDR_FIN);
tcp_queue_skb(sk, skb);
}
- __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
+ __tcp_push_pending_frames(sk, tcp_current_mss(sk), TCP_NAGLE_OFF);
}
/* We get here when a process closes a file descriptor (either due to
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index b5e6cc1..a38d3ac 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1246,7 +1246,6 @@ static void ip6gre_tunnel_setup(struct net_device *dev)
static int ip6gre_tunnel_init(struct net_device *dev)
{
struct ip6_tnl *tunnel;
- int i;
tunnel = netdev_priv(dev);
@@ -1260,16 +1259,10 @@ static int ip6gre_tunnel_init(struct net_device *dev)
if (ipv6_addr_any(&tunnel->parms.raddr))
dev->header_ops = &ip6gre_header_ops;
- dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+ dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!dev->tstats)
return -ENOMEM;
- for_each_possible_cpu(i) {
- struct pcpu_sw_netstats *ip6gre_tunnel_stats;
- ip6gre_tunnel_stats = per_cpu_ptr(dev->tstats, i);
- u64_stats_init(&ip6gre_tunnel_stats->syncp);
- }
-
return 0;
}
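The ip6_gre hunk drops the hand-rolled per-CPU statistics setup in favour of netdev_alloc_pcpu_stats(), which allocates the zeroed per-CPU counters and initialises each CPU's u64_stats sequence counter in one step. A hedged sketch of what that amounts to for this stats type (not the helper's verbatim definition):
static struct pcpu_sw_netstats __percpu *alloc_tunnel_stats_sketch(void)
{
	struct pcpu_sw_netstats __percpu *stats;
	int cpu;

	stats = alloc_percpu(struct pcpu_sw_netstats);	/* per-CPU, zeroed */
	if (!stats)
		return NULL;
	for_each_possible_cpu(cpu)
		u64_stats_init(&per_cpu_ptr(stats, cpu)->syncp);
	return stats;
}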
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index ad51df8..b6575d6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -946,7 +946,8 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
&ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
if (req) {
nsk = tcp_check_req(sk, skb, req, false);
- reqsk_put(req);
+ if (!nsk)
+ reqsk_put(req);
return nsk;
}
nsk = __inet6_lookup_established(sock_net(sk), &tcp_hashinfo,
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index db8a2ea..954810c 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -53,6 +53,11 @@ static struct mpls_route *mpls_route_input_rcu(struct net *net, unsigned index)
return rt;
}
+static inline struct mpls_dev *mpls_dev_get(const struct net_device *dev)
+{
+ return rcu_dereference_rtnl(dev->mpls_ptr);
+}
+
static bool mpls_output_possible(const struct net_device *dev)
{
return dev && (dev->flags & IFF_UP) && netif_carrier_ok(dev);
@@ -136,6 +141,7 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
struct mpls_route *rt;
struct mpls_entry_decoded dec;
struct net_device *out_dev;
+ struct mpls_dev *mdev;
unsigned int hh_len;
unsigned int new_header_size;
unsigned int mtu;
@@ -143,6 +149,10 @@ static int mpls_forward(struct sk_buff *skb, struct net_device *dev,
/* Careful this entire function runs inside of an rcu critical section */
+ mdev = mpls_dev_get(dev);
+ if (!mdev || !mdev->input_enabled)
+ goto drop;
+
if (skb->pkt_type != PACKET_HOST)
goto drop;
@@ -352,9 +362,9 @@ static int mpls_route_add(struct mpls_route_config *cfg)
if (!dev)
goto errout;
- /* For now just support ethernet devices */
+ /* Ensure this is a supported device */
err = -EINVAL;
- if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
+ if (!mpls_dev_get(dev))
goto errout;
err = -EINVAL;
@@ -428,10 +438,89 @@ errout:
return err;
}
+#define MPLS_PERDEV_SYSCTL_OFFSET(field) \
+ (&((struct mpls_dev *)0)->field)
+
+static const struct ctl_table mpls_dev_table[] = {
+ {
+ .procname = "input",
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec,
+ .data = MPLS_PERDEV_SYSCTL_OFFSET(input_enabled),
+ },
+ { }
+};
+
+static int mpls_dev_sysctl_register(struct net_device *dev,
+ struct mpls_dev *mdev)
+{
+ char path[sizeof("net/mpls/conf/") + IFNAMSIZ];
+ struct ctl_table *table;
+ int i;
+
+ table = kmemdup(&mpls_dev_table, sizeof(mpls_dev_table), GFP_KERNEL);
+ if (!table)
+ goto out;
+
+ /* Table data contains only offsets relative to the base of
+ * the mdev at this point, so make them absolute.
+ */
+ for (i = 0; i < ARRAY_SIZE(mpls_dev_table); i++)
+ table[i].data = (char *)mdev + (uintptr_t)table[i].data;
+
+ snprintf(path, sizeof(path), "net/mpls/conf/%s", dev->name);
+
+ mdev->sysctl = register_net_sysctl(dev_net(dev), path, table);
+ if (!mdev->sysctl)
+ goto free;
+
+ return 0;
+
+free:
+ kfree(table);
+out:
+ return -ENOBUFS;
+}
+
+static void mpls_dev_sysctl_unregister(struct mpls_dev *mdev)
+{
+ struct ctl_table *table;
+
+ table = mdev->sysctl->ctl_table_arg;
+ unregister_net_sysctl_table(mdev->sysctl);
+ kfree(table);
+}
+
+static struct mpls_dev *mpls_add_dev(struct net_device *dev)
+{
+ struct mpls_dev *mdev;
+ int err = -ENOMEM;
+
+ ASSERT_RTNL();
+
+ mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
+ if (!mdev)
+ return ERR_PTR(err);
+
+ err = mpls_dev_sysctl_register(dev, mdev);
+ if (err)
+ goto free;
+
+ rcu_assign_pointer(dev->mpls_ptr, mdev);
+
+ return mdev;
+
+free:
+ kfree(mdev);
+ return ERR_PTR(err);
+}
+
static void mpls_ifdown(struct net_device *dev)
{
struct mpls_route __rcu **platform_label;
struct net *net = dev_net(dev);
+ struct mpls_dev *mdev;
unsigned index;
platform_label = rtnl_dereference(net->mpls.platform_label);
@@ -443,14 +532,35 @@ static void mpls_ifdown(struct net_device *dev)
continue;
rt->rt_dev = NULL;
}
+
+ mdev = mpls_dev_get(dev);
+ if (!mdev)
+ return;
+
+ mpls_dev_sysctl_unregister(mdev);
+
+ RCU_INIT_POINTER(dev->mpls_ptr, NULL);
+
+ kfree(mdev);
}
static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct mpls_dev *mdev;
switch(event) {
+ case NETDEV_REGISTER:
+ /* For now just support ethernet devices */
+ if ((dev->type == ARPHRD_ETHER) ||
+ (dev->type == ARPHRD_LOOPBACK)) {
+ mdev = mpls_add_dev(dev);
+ if (IS_ERR(mdev))
+ return notifier_from_errno(PTR_ERR(mdev));
+ }
+ break;
+
case NETDEV_UNREGISTER:
mpls_ifdown(dev);
break;
@@ -536,6 +646,15 @@ int nla_get_labels(const struct nlattr *nla,
if ((dec.bos != bos) || dec.ttl || dec.tc)
return -EINVAL;
+ switch (dec.label) {
+ case LABEL_IMPLICIT_NULL:
+ /* RFC3032: This is a label that an LSR may
+ * assign and distribute, but which never
+ * actually appears in the encapsulation.
+ */
+ return -EINVAL;
+ }
+
label[i] = dec.label;
}
*labels = nla_labels;
@@ -912,7 +1031,7 @@ static int mpls_platform_labels(struct ctl_table *table, int write,
return ret;
}
-static struct ctl_table mpls_table[] = {
+static const struct ctl_table mpls_table[] = {
{
.procname = "platform_labels",
.data = NULL,
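The per-device sysctl registration above keeps a single const template whose .data fields hold offsets into struct mpls_dev (via MPLS_PERDEV_SYSCTL_OFFSET) and, for each device, kmemdup()s the template and rebases those offsets into real pointers; that is how net/mpls/conf/<ifname>/input can share one table definition. A standalone illustration of the offset-rebase trick using standard offsetof() and hypothetical names:
#include <stddef.h>
#include <stdio.h>

struct conf_like { int input_enabled; };	/* stand-in for struct mpls_dev */

static const size_t input_off = offsetof(struct conf_like, input_enabled);

int main(void)
{
	struct conf_like mdev = { .input_enabled = 1 };
	/* registration time: turn the template's offset into a per-instance pointer */
	int *data = (int *)((char *)&mdev + input_off);

	printf("input = %d\n", *data);
	return 0;
}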
diff --git a/net/mpls/internal.h b/net/mpls/internal.h
index fb6de920..693877d 100644
--- a/net/mpls/internal.h
+++ b/net/mpls/internal.h
@@ -22,6 +22,12 @@ struct mpls_entry_decoded {
u8 bos;
};
+struct mpls_dev {
+ int input_enabled;
+
+ struct ctl_table_header *sysctl;
+};
+
struct sk_buff;
static inline struct mpls_shim_hdr *mpls_hdr(const struct sk_buff *skb)
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 78af83b..ad9d11f 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -4340,7 +4340,6 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
case NFT_CONTINUE:
case NFT_BREAK:
case NFT_RETURN:
- desc->len = sizeof(data->verdict);
break;
case NFT_JUMP:
case NFT_GOTO:
@@ -4355,10 +4354,10 @@ static int nft_verdict_init(const struct nft_ctx *ctx, struct nft_data *data,
chain->use++;
data->verdict.chain = chain;
- desc->len = sizeof(data);
break;
}
+ desc->len = sizeof(data->verdict);
desc->type = NFT_DATA_VERDICT;
return 0;
}
diff --git a/net/netfilter/nft_reject.c b/net/netfilter/nft_reject.c
index 57d3e1a..0522fc9 100644
--- a/net/netfilter/nft_reject.c
+++ b/net/netfilter/nft_reject.c
@@ -63,6 +63,8 @@ int nft_reject_dump(struct sk_buff *skb, const struct nft_expr *expr)
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
+ default:
+ break;
}
return 0;
diff --git a/net/netfilter/nft_reject_inet.c b/net/netfilter/nft_reject_inet.c
index 62cabee..635dbba 100644
--- a/net/netfilter/nft_reject_inet.c
+++ b/net/netfilter/nft_reject_inet.c
@@ -108,6 +108,8 @@ static int nft_reject_inet_dump(struct sk_buff *skb,
if (nla_put_u8(skb, NFTA_REJECT_ICMP_CODE, priv->icmp_code))
goto nla_put_failure;
break;
+ default:
+ break;
}
return 0;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 19909d0..ec4adbd 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1629,13 +1629,11 @@ static struct sk_buff *netlink_alloc_large_skb(unsigned int size,
if (data == NULL)
return NULL;
- skb = build_skb(data, size);
+ skb = __build_skb(data, size);
if (skb == NULL)
vfree(data);
- else {
- skb->head_frag = 0;
+ else
skb->destructor = netlink_skb_destructor;
- }
return skb;
}
diff --git a/net/sched/act_connmark.c b/net/sched/act_connmark.c
index 8e47251..295d14b 100644
--- a/net/sched/act_connmark.c
+++ b/net/sched/act_connmark.c
@@ -63,7 +63,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
skb->mark = c->mark;
/* using overlimits stats to count how many packets marked */
ca->tcf_qstats.overlimits++;
- nf_ct_put(c);
goto out;
}
@@ -82,7 +81,6 @@ static int tcf_connmark(struct sk_buff *skb, const struct tc_action *a,
nf_ct_put(c);
out:
- skb->nfct = NULL;
spin_unlock(&ca->tcf_lock);
return ca->tcf_action;
}
diff --git a/net/tipc/bearer.c b/net/tipc/bearer.c
index 3613e72..70e3dac 100644
--- a/net/tipc/bearer.c
+++ b/net/tipc/bearer.c
@@ -591,14 +591,14 @@ void tipc_bearer_stop(struct net *net)
/* Caller should hold rtnl_lock to protect the bearer */
static int __tipc_nl_add_bearer(struct tipc_nl_msg *msg,
- struct tipc_bearer *bearer)
+ struct tipc_bearer *bearer, int nlflags)
{
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_BEARER_GET);
+ nlflags, TIPC_NL_BEARER_GET);
if (!hdr)
return -EMSGSIZE;
@@ -657,7 +657,7 @@ int tipc_nl_bearer_dump(struct sk_buff *skb, struct netlink_callback *cb)
if (!bearer)
continue;
- err = __tipc_nl_add_bearer(&msg, bearer);
+ err = __tipc_nl_add_bearer(&msg, bearer, NLM_F_MULTI);
if (err)
break;
}
@@ -705,7 +705,7 @@ int tipc_nl_bearer_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_bearer(&msg, bearer);
+ err = __tipc_nl_add_bearer(&msg, bearer, 0);
if (err)
goto err_out;
rtnl_unlock();
@@ -857,14 +857,14 @@ int tipc_nl_bearer_set(struct sk_buff *skb, struct genl_info *info)
}
static int __tipc_nl_add_media(struct tipc_nl_msg *msg,
- struct tipc_media *media)
+ struct tipc_media *media, int nlflags)
{
void *hdr;
struct nlattr *attrs;
struct nlattr *prop;
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_MEDIA_GET);
+ nlflags, TIPC_NL_MEDIA_GET);
if (!hdr)
return -EMSGSIZE;
@@ -916,7 +916,8 @@ int tipc_nl_media_dump(struct sk_buff *skb, struct netlink_callback *cb)
rtnl_lock();
for (; media_info_array[i] != NULL; i++) {
- err = __tipc_nl_add_media(&msg, media_info_array[i]);
+ err = __tipc_nl_add_media(&msg, media_info_array[i],
+ NLM_F_MULTI);
if (err)
break;
}
@@ -963,7 +964,7 @@ int tipc_nl_media_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_media(&msg, media);
+ err = __tipc_nl_add_media(&msg, media, 0);
if (err)
goto err_out;
rtnl_unlock();
diff --git a/net/tipc/link.c b/net/tipc/link.c
index a6b30df..43a515d 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1145,11 +1145,8 @@ void tipc_rcv(struct net *net, struct sk_buff *skb, struct tipc_bearer *b_ptr)
}
/* Synchronize with parallel link if applicable */
if (unlikely((l_ptr->flags & LINK_SYNCHING) && !msg_dup(msg))) {
- link_handle_out_of_seq_msg(l_ptr, skb);
- if (link_synch(l_ptr))
- link_retrieve_defq(l_ptr, &head);
- skb = NULL;
- goto unlock;
+ if (!link_synch(l_ptr))
+ goto unlock;
}
l_ptr->next_in_no++;
if (unlikely(!skb_queue_empty(&l_ptr->deferdq)))
@@ -2013,7 +2010,7 @@ msg_full:
/* Caller should hold appropriate locks to protect the link */
static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
- struct tipc_link *link)
+ struct tipc_link *link, int nlflags)
{
int err;
void *hdr;
@@ -2022,7 +2019,7 @@ static int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
struct tipc_net *tn = net_generic(net, tipc_net_id);
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
- NLM_F_MULTI, TIPC_NL_LINK_GET);
+ nlflags, TIPC_NL_LINK_GET);
if (!hdr)
return -EMSGSIZE;
@@ -2095,7 +2092,7 @@ static int __tipc_nl_add_node_links(struct net *net, struct tipc_nl_msg *msg,
if (!node->links[i])
continue;
- err = __tipc_nl_add_link(net, msg, node->links[i]);
+ err = __tipc_nl_add_link(net, msg, node->links[i], NLM_F_MULTI);
if (err)
return err;
}
@@ -2143,7 +2140,6 @@ int tipc_nl_link_dump(struct sk_buff *skb, struct netlink_callback *cb)
err = __tipc_nl_add_node_links(net, &msg, node,
&prev_link);
tipc_node_unlock(node);
- tipc_node_put(node);
if (err)
goto out;
@@ -2210,7 +2206,7 @@ int tipc_nl_link_get(struct sk_buff *skb, struct genl_info *info)
goto err_out;
}
- err = __tipc_nl_add_link(net, &msg, link);
+ err = __tipc_nl_add_link(net, &msg, link, 0);
if (err)
goto err_out;
diff --git a/net/tipc/server.c b/net/tipc/server.c
index ab6183c..77ff03e 100644
--- a/net/tipc/server.c
+++ b/net/tipc/server.c
@@ -102,7 +102,7 @@ static void tipc_conn_kref_release(struct kref *kref)
}
saddr->scope = -TIPC_NODE_SCOPE;
kernel_bind(sock, (struct sockaddr *)saddr, sizeof(*saddr));
- sk_release_kernel(sk);
+ sock_release(sock);
con->sock = NULL;
}
@@ -321,12 +321,9 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
struct socket *sock = NULL;
int ret;
- ret = sock_create_kern(AF_TIPC, SOCK_SEQPACKET, 0, &sock);
+ ret = __sock_create(s->net, AF_TIPC, SOCK_SEQPACKET, 0, &sock, 1);
if (ret < 0)
return NULL;
-
- sk_change_net(sock->sk, s->net);
-
ret = kernel_setsockopt(sock, SOL_TIPC, TIPC_IMPORTANCE,
(char *)&s->imp, sizeof(s->imp));
if (ret < 0)
@@ -376,7 +373,7 @@ static struct socket *tipc_create_listen_sock(struct tipc_conn *con)
create_err:
kernel_sock_shutdown(sock, SHUT_RDWR);
- sk_release_kernel(sock->sk);
+ sock_release(sock);
return NULL;
}
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index ee90d74..9074b5c 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1764,13 +1764,14 @@ static int tipc_sk_enqueue(struct sk_buff_head *inputq, struct sock *sk,
int tipc_sk_rcv(struct net *net, struct sk_buff_head *inputq)
{
u32 dnode, dport = 0;
- int err = -TIPC_ERR_NO_PORT;
+ int err;
struct sk_buff *skb;
struct tipc_sock *tsk;
struct tipc_net *tn;
struct sock *sk;
while (skb_queue_len(inputq)) {
+ err = -TIPC_ERR_NO_PORT;
skb = NULL;
dport = tipc_skb_peek_port(inputq, dport);
tsk = tipc_sk_lookup(net, dport);
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 99f7012..a73a226 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -95,39 +95,36 @@ static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);
unsigned int unix_tot_inflight;
-
struct sock *unix_get_socket(struct file *filp)
{
struct sock *u_sock = NULL;
struct inode *inode = file_inode(filp);
- /*
- * Socket ?
- */
+ /* Socket ? */
if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
struct socket *sock = SOCKET_I(inode);
struct sock *s = sock->sk;
- /*
- * PF_UNIX ?
- */
+ /* PF_UNIX ? */
if (s && sock->ops && sock->ops->family == PF_UNIX)
u_sock = s;
}
return u_sock;
}
-/*
- * Keep the number of times in flight count for the file
- * descriptor if it is for an AF_UNIX socket.
+/* Keep the number of times in flight count for the file
+ * descriptor if it is for an AF_UNIX socket.
*/
void unix_inflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+
if (s) {
struct unix_sock *u = unix_sk(s);
+
spin_lock(&unix_gc_lock);
+
if (atomic_long_inc_return(&u->inflight) == 1) {
BUG_ON(!list_empty(&u->link));
list_add_tail(&u->link, &gc_inflight_list);
@@ -142,10 +139,13 @@ void unix_inflight(struct file *fp)
void unix_notinflight(struct file *fp)
{
struct sock *s = unix_get_socket(fp);
+
if (s) {
struct unix_sock *u = unix_sk(s);
+
spin_lock(&unix_gc_lock);
BUG_ON(list_empty(&u->link));
+
if (atomic_long_dec_and_test(&u->inflight))
list_del_init(&u->link);
unix_tot_inflight--;
@@ -161,32 +161,27 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
- /*
- * Do we have file descriptors ?
- */
+ /* Do we have file descriptors ? */
if (UNIXCB(skb).fp) {
bool hit = false;
- /*
- * Process the descriptors of this socket
- */
+ /* Process the descriptors of this socket */
int nfd = UNIXCB(skb).fp->count;
struct file **fp = UNIXCB(skb).fp->fp;
+
while (nfd--) {
- /*
- * Get the socket the fd matches
- * if it indeed does so
- */
+ /* Get the socket the fd matches if it indeed does so */
struct sock *sk = unix_get_socket(*fp++);
+
if (sk) {
struct unix_sock *u = unix_sk(sk);
- /*
- * Ignore non-candidates, they could
+ /* Ignore non-candidates, they could
* have been added to the queues after
* starting the garbage collection
*/
if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
hit = true;
+
func(u);
}
}
@@ -203,24 +198,22 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
struct sk_buff_head *hitlist)
{
- if (x->sk_state != TCP_LISTEN)
+ if (x->sk_state != TCP_LISTEN) {
scan_inflight(x, func, hitlist);
- else {
+ } else {
struct sk_buff *skb;
struct sk_buff *next;
struct unix_sock *u;
LIST_HEAD(embryos);
- /*
- * For a listening socket collect the queued embryos
+ /* For a listening socket collect the queued embryos
* and perform a scan on them as well.
*/
spin_lock(&x->sk_receive_queue.lock);
skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
u = unix_sk(skb->sk);
- /*
- * An embryo cannot be in-flight, so it's safe
+ /* An embryo cannot be in-flight, so it's safe
* to use the list link.
*/
BUG_ON(!list_empty(&u->link));
@@ -249,8 +242,7 @@ static void inc_inflight(struct unix_sock *usk)
static void inc_inflight_move_tail(struct unix_sock *u)
{
atomic_long_inc(&u->inflight);
- /*
- * If this still might be part of a cycle, move it to the end
+ /* If this still might be part of a cycle, move it to the end
* of the list, so that it's checked even if it was already
* passed over
*/
@@ -263,8 +255,7 @@ static bool gc_in_progress;
void wait_for_unix_gc(void)
{
- /*
- * If number of inflight sockets is insane,
+ /* If number of inflight sockets is insane,
* force a garbage collect right now.
*/
if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
@@ -288,8 +279,7 @@ void unix_gc(void)
goto out;
gc_in_progress = true;
- /*
- * First, select candidates for garbage collection. Only
+ /* First, select candidates for garbage collection. Only
* in-flight sockets are considered, and from those only ones
* which don't have any external reference.
*
@@ -320,15 +310,13 @@ void unix_gc(void)
}
}
- /*
- * Now remove all internal in-flight reference to children of
+ /* Now remove all internal in-flight reference to children of
* the candidates.
*/
list_for_each_entry(u, &gc_candidates, link)
scan_children(&u->sk, dec_inflight, NULL);
- /*
- * Restore the references for children of all candidates,
+ /* Restore the references for children of all candidates,
* which have remaining references. Do this recursively, so
* only those remain, which form cyclic references.
*
@@ -350,8 +338,7 @@ void unix_gc(void)
}
list_del(&cursor);
- /*
- * not_cycle_list contains those sockets which do not make up a
+ /* not_cycle_list contains those sockets which do not make up a
* cycle. Restore these to the inflight list.
*/
while (!list_empty(&not_cycle_list)) {
@@ -360,8 +347,7 @@ void unix_gc(void)
list_move_tail(&u->link, &gc_inflight_list);
}
- /*
- * Now gc_candidates contains only garbage. Restore original
+ /* Now gc_candidates contains only garbage. Restore original
* inflight counters for these as well, and remove the skbuffs
* which are creating the cycle(s).
*/
diff --git a/sound/soc/codecs/mc13783.c b/sound/soc/codecs/mc13783.c
index 2ffb9a0..3d44fc5 100644
--- a/sound/soc/codecs/mc13783.c
+++ b/sound/soc/codecs/mc13783.c
@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
AUDIO_SSI_SEL, 0);
else
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
- 0, AUDIO_SSI_SEL);
+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
if (priv->dac_ssi_port == MC13783_SSI1_PORT)
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
AUDIO_SSI_SEL, 0);
else
mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
- 0, AUDIO_SSI_SEL);
+ AUDIO_SSI_SEL, AUDIO_SSI_SEL);
return 0;
}
diff --git a/sound/soc/codecs/uda1380.c b/sound/soc/codecs/uda1380.c
index dc7778b..c3c33bd 100644
--- a/sound/soc/codecs/uda1380.c
+++ b/sound/soc/codecs/uda1380.c
@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
return -EINVAL;
- uda1380_write(codec, UDA1380_IFACE, iface);
+ uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
return 0;
}
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index 3035d98..e97a761 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
{ "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
{ "Right Input Mixer", NULL, "RINPUT1", }, /* Really Boost Switch */
{ "Right Input Mixer", NULL, "RINPUT2" },
- { "Right Input Mixer", NULL, "LINPUT3" },
+ { "Right Input Mixer", NULL, "RINPUT3" },
{ "Left ADC", NULL, "Left Input Mixer" },
{ "Right ADC", NULL, "Right Input Mixer" },
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 4fbc768..a1c04da 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2754,7 +2754,7 @@ static struct {
};
static int fs_ratios[] = {
- 64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
+ 64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
};
static int bclk_divs[] = {
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index bb4b78e..23c91fa 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -1247,7 +1247,7 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
u32 reg;
int i;
- context->pm_state = pm_runtime_enabled(mcasp->dev);
+ context->pm_state = pm_runtime_active(mcasp->dev);
if (!context->pm_state)
pm_runtime_get_sync(mcasp->dev);
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index defe0f0..158204d 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3100,11 +3100,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
}
prefix = soc_dapm_prefix(dapm);
- if (prefix)
+ if (prefix) {
w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
- else
+ if (widget->sname)
+ w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
+ widget->sname);
+ } else {
w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-
+ if (widget->sname)
+ w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
+ }
if (w->name == NULL) {
kfree(w);
return NULL;
diff --git a/tools/lib/api/Makefile b/tools/lib/api/Makefile
index d8fe29f..8bd9606 100644
--- a/tools/lib/api/Makefile
+++ b/tools/lib/api/Makefile
@@ -16,7 +16,7 @@ MAKEFLAGS += --no-print-directory
LIBFILE = $(OUTPUT)libapi.a
CFLAGS := $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
-CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -D_FORTIFY_SOURCE=2 -fPIC
+CFLAGS += -ggdb3 -Wall -Wextra -std=gnu99 -Werror -O6 -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2 -fPIC
CFLAGS += -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
RM = rm -f
diff --git a/tools/lib/traceevent/event-parse.c b/tools/lib/traceevent/event-parse.c
index e0917c0..29f94f6 100644
--- a/tools/lib/traceevent/event-parse.c
+++ b/tools/lib/traceevent/event-parse.c
@@ -3865,7 +3865,7 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
} else if (el_size == 4) {
trace_seq_printf(s, "%u", *(uint32_t *)num);
} else if (el_size == 8) {
- trace_seq_printf(s, "%lu", *(uint64_t *)num);
+ trace_seq_printf(s, "%"PRIu64, *(uint64_t *)num);
} else {
trace_seq_printf(s, "BAD SIZE:%d 0x%x",
el_size, *(uint8_t *)num);
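The event-parse fix swaps "%lu" for PRIu64 because a uint64_t is only an unsigned long on LP64 targets; on 32-bit builds the mismatched length modifier is undefined and typically trips format warnings under -Werror. A standalone example of the portable form:
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t num = UINT64_MAX;
	printf("%" PRIu64 "\n", num);	/* "%lu" only happens to work on LP64 */
	return 0;
}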
diff --git a/tools/perf/bench/futex-requeue.c b/tools/perf/bench/futex-requeue.c
index bedff6b..ad0d9b5 100644
--- a/tools/perf/bench/futex-requeue.c
+++ b/tools/perf/bench/futex-requeue.c
@@ -132,6 +132,9 @@ int bench_futex_requeue(int argc, const char **argv,
if (!fshared)
futex_flag = FUTEX_PRIVATE_FLAG;
+ if (nrequeue > nthreads)
+ nrequeue = nthreads;
+
printf("Run summary [PID %d]: Requeuing %d threads (from [%s] %p to %p), "
"%d at a time.\n\n", getpid(), nthreads,
fshared ? "shared":"private", &futex1, &futex2, nrequeue);
@@ -161,20 +164,18 @@ int bench_futex_requeue(int argc, const char **argv,
/* Ok, all threads are patiently blocked, start requeueing */
gettimeofday(&start, NULL);
- for (nrequeued = 0; nrequeued < nthreads; nrequeued += nrequeue) {
+ while (nrequeued < nthreads) {
/*
* Do not wakeup any tasks blocked on futex1, allowing
* us to really measure futex_wait functionality.
*/
- futex_cmp_requeue(&futex1, 0, &futex2, 0,
- nrequeue, futex_flag);
+ nrequeued += futex_cmp_requeue(&futex1, 0, &futex2, 0,
+ nrequeue, futex_flag);
}
+
gettimeofday(&end, NULL);
timersub(&end, &start, &runtime);
- if (nrequeued > nthreads)
- nrequeued = nthreads;
-
update_stats(&requeued_stats, nrequeued);
update_stats(&requeuetime_stats, runtime.tv_usec);
@@ -184,7 +185,7 @@ int bench_futex_requeue(int argc, const char **argv,
}
/* everybody should be blocked on futex2, wake'em up */
- nrequeued = futex_wake(&futex2, nthreads, futex_flag);
+ nrequeued = futex_wake(&futex2, nrequeued, futex_flag);
if (nthreads != nrequeued)
warnx("couldn't wakeup all tasks (%d/%d)", nrequeued, nthreads);
diff --git a/tools/perf/bench/numa.c b/tools/perf/bench/numa.c
index ebfa163..ba5efa47 100644
--- a/tools/perf/bench/numa.c
+++ b/tools/perf/bench/numa.c
@@ -180,7 +180,7 @@ static const struct option options[] = {
OPT_INTEGER('H', "thp" , &p0.thp, "MADV_NOHUGEPAGE < 0 < MADV_HUGEPAGE"),
OPT_BOOLEAN('c', "show_convergence", &p0.show_convergence, "show convergence details"),
OPT_BOOLEAN('m', "measure_convergence", &p0.measure_convergence, "measure convergence latency"),
- OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "bzero the initial allocations"),
+ OPT_BOOLEAN('q', "quiet" , &p0.show_quiet, "quiet mode"),
OPT_BOOLEAN('S', "serialize-startup", &p0.serialize_startup,"serialize thread startup"),
/* Special option string parsing callbacks: */
@@ -828,6 +828,9 @@ static int count_process_nodes(int process_nr)
td = g->threads + task_nr;
node = numa_node_of_cpu(td->curr_cpu);
+ if (node < 0) /* curr_cpu was likely still -1 */
+ return 0;
+
node_present[node] = 1;
}
@@ -882,6 +885,11 @@ static void calc_convergence_compression(int *strong)
for (p = 0; p < g->p.nr_proc; p++) {
unsigned int nodes = count_process_nodes(p);
+ if (!nodes) {
+ *strong = 0;
+ return;
+ }
+
nodes_min = min(nodes, nodes_min);
nodes_max = max(nodes, nodes_max);
}
@@ -1395,7 +1403,7 @@ static void print_res(const char *name, double val,
if (!name)
name = "main,";
- if (g->p.show_quiet)
+ if (!g->p.show_quiet)
printf(" %-30s %15.3f, %-15s %s\n", name, val, txt_unit, txt_short);
else
printf(" %14.3f %s\n", val, txt_long);
diff --git a/tools/perf/builtin-kmem.c b/tools/perf/builtin-kmem.c
index 63ea013..1634186 100644
--- a/tools/perf/builtin-kmem.c
+++ b/tools/perf/builtin-kmem.c
@@ -319,7 +319,7 @@ static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
return 0;
}
-static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool create)
+static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
{
struct rb_node **node = &page_alloc_tree.rb_node;
struct rb_node *parent = NULL;
@@ -331,7 +331,7 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre
parent = *node;
data = rb_entry(*node, struct page_stat, node);
- cmp = page_stat_cmp(data, stat);
+ cmp = page_stat_cmp(data, pstat);
if (cmp < 0)
node = &parent->rb_left;
else if (cmp > 0)
@@ -345,10 +345,10 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *stat, bool cre
data = zalloc(sizeof(*data));
if (data != NULL) {
- data->page = stat->page;
- data->order = stat->order;
- data->gfp_flags = stat->gfp_flags;
- data->migrate_type = stat->migrate_type;
+ data->page = pstat->page;
+ data->order = pstat->order;
+ data->gfp_flags = pstat->gfp_flags;
+ data->migrate_type = pstat->migrate_type;
rb_link_node(&data->node, parent, node);
rb_insert_color(&data->node, &page_alloc_tree);
@@ -375,7 +375,7 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
unsigned int migrate_type = perf_evsel__intval(evsel, sample,
"migratetype");
u64 bytes = kmem_page_size << order;
- struct page_stat *stat;
+ struct page_stat *pstat;
struct page_stat this = {
.order = order,
.gfp_flags = gfp_flags,
@@ -401,21 +401,21 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
* This is to find the current page (with correct gfp flags and
* migrate type) at free event.
*/
- stat = search_page(page, true);
- if (stat == NULL)
+ pstat = search_page(page, true);
+ if (pstat == NULL)
return -ENOMEM;
- stat->order = order;
- stat->gfp_flags = gfp_flags;
- stat->migrate_type = migrate_type;
+ pstat->order = order;
+ pstat->gfp_flags = gfp_flags;
+ pstat->migrate_type = migrate_type;
this.page = page;
- stat = search_page_alloc_stat(&this, true);
- if (stat == NULL)
+ pstat = search_page_alloc_stat(&this, true);
+ if (pstat == NULL)
return -ENOMEM;
- stat->nr_alloc++;
- stat->alloc_bytes += bytes;
+ pstat->nr_alloc++;
+ pstat->alloc_bytes += bytes;
order_stats[order][migrate_type]++;
@@ -428,7 +428,7 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
u64 page;
unsigned int order = perf_evsel__intval(evsel, sample, "order");
u64 bytes = kmem_page_size << order;
- struct page_stat *stat;
+ struct page_stat *pstat;
struct page_stat this = {
.order = order,
};
@@ -441,8 +441,8 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
nr_page_frees++;
total_page_free_bytes += bytes;
- stat = search_page(page, false);
- if (stat == NULL) {
+ pstat = search_page(page, false);
+ if (pstat == NULL) {
pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
page, order);
@@ -453,18 +453,18 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
}
this.page = page;
- this.gfp_flags = stat->gfp_flags;
- this.migrate_type = stat->migrate_type;
+ this.gfp_flags = pstat->gfp_flags;
+ this.migrate_type = pstat->migrate_type;
- rb_erase(&stat->node, &page_tree);
- free(stat);
+ rb_erase(&pstat->node, &page_tree);
+ free(pstat);
- stat = search_page_alloc_stat(&this, false);
- if (stat == NULL)
+ pstat = search_page_alloc_stat(&this, false);
+ if (pstat == NULL)
return -ENOENT;
- stat->nr_free++;
- stat->free_bytes += bytes;
+ pstat->nr_free++;
+ pstat->free_bytes += bytes;
return 0;
}
@@ -640,9 +640,9 @@ static void print_page_summary(void)
nr_page_frees, total_page_free_bytes / 1024);
printf("\n");
- printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
+ printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc+freed requests",
nr_alloc_freed, (total_alloc_freed_bytes) / 1024);
- printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
+ printf("%-30s: %'16"PRIu64" [ %'16"PRIu64" KB ]\n", "Total alloc-only requests",
nr_page_allocs - nr_alloc_freed,
(total_page_alloc_bytes - total_alloc_freed_bytes) / 1024);
printf("%-30s: %'16lu [ %'16"PRIu64" KB ]\n", "Total free-only requests",
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 476cdf7..b63aeda 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -329,7 +329,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
fprintf(stdout, "\n\n");
}
- if (sort_order == default_sort_order &&
+ if (sort_order == NULL &&
parent_pattern == default_parent_pattern) {
fprintf(stdout, "#\n# (%s)\n#\n", help);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 1cb3436..6a4d5d4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -733,7 +733,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
"Check /proc/sys/kernel/kptr_restrict.\n\n"
"Kernel%s samples will not be resolved.\n",
- !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
+ al.map && !RB_EMPTY_ROOT(&al.map->dso->symbols[MAP__FUNCTION]) ?
" modules" : "");
if (use_browser <= 0)
sleep(5);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index e124741..e122970 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2241,10 +2241,11 @@ static int trace__run(struct trace *trace, int argc, const char **argv)
if (err < 0)
goto out_error_mmap;
+ if (!target__none(&trace->opts.target))
+ perf_evlist__enable(evlist);
+
if (forks)
perf_evlist__start_workload(evlist);
- else
- perf_evlist__enable(evlist);
trace->multiple_threads = evlist->threads->map[0] == -1 ||
evlist->threads->nr > 1 ||
@@ -2272,6 +2273,11 @@ next_event:
if (interrupted)
goto out_disable;
+
+ if (done && !draining) {
+ perf_evlist__disable(evlist);
+ draining = true;
+ }
}
}
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index d8bb616..d05b77c 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -1084,6 +1084,8 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
*
* TODO:Group name support
*/
+ if (!arg)
+ return -EINVAL;
ptr = strpbrk(arg, ";=@+%");
if (ptr && *ptr == '=') { /* Event name */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index b5bf9d5..2a76e14 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -578,10 +578,12 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
/* Search child die for local variables and parameters. */
if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
/* Search again in global variables */
- if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+ if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
+ 0, &vr_die)) {
pr_warning("Failed to find '%s' in this function.\n",
pf->pvar->var);
ret = -ENOENT;
+ }
}
if (ret >= 0)
ret = convert_variable(&vr_die, pf);
diff --git a/tools/testing/selftests/powerpc/pmu/Makefile b/tools/testing/selftests/powerpc/pmu/Makefile
index 5a16117..a9099d9 100644
--- a/tools/testing/selftests/powerpc/pmu/Makefile
+++ b/tools/testing/selftests/powerpc/pmu/Makefile
@@ -26,7 +26,7 @@ override define EMIT_TESTS
$(MAKE) -s -C ebb emit_tests
endef
-DEFAULT_INSTALL := $(INSTALL_RULE)
+DEFAULT_INSTALL_RULE := $(INSTALL_RULE)
override define INSTALL_RULE
$(DEFAULT_INSTALL_RULE)
$(MAKE) -C ebb install
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 1b616fa..6bff955 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -1,4 +1,4 @@
-TEST_PROGS := tm-resched-dscr tm-syscall
+TEST_PROGS := tm-resched-dscr
all: $(TEST_PROGS)